debuggers.hg

view xen/arch/ia64/vmx/vmx_utility.c @ 0:7d21f7218375

Exact replica of unstable on 051908 + README-this
author Mukesh Rathor
date Mon May 19 15:34:57 2008 -0700 (2008-05-19)
parents
children b22f9ab1716a
line source
1 /* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
2 /*
3 * vmx_utility.c:
4 * Copyright (c) 2005, Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
17 * Place - Suite 330, Boston, MA 02111-1307 USA.
18 *
19 * Shaofan Li (Susue Li) <susie.li@intel.com>
20 * Xiaoyan Feng (Fleming Feng) <fleming.feng@intel.com>
21 * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
22 */
24 #include <xen/types.h>
25 #include <asm/vmx_vcpu.h>
26 #include <asm/processor.h>
27 #include <asm/vmx_mm_def.h>
#ifdef CHECK_FAULT
/*
 * Check whether an indirect register index is architecturally reserved.
 *
 * Return:
 *  0: Not a reserved indirect register
 *  1: Is a reserved indirect register
 */
int
is_reserved_indirect_register (
    int type,
    int index )
{
    switch (type) {
    case IA64_CPUID:
        /* Only CPUID[0..4] are defined; higher indices are reserved. */
        if ( index >= 5 ) {
            return 1;
        }
        break;  /* was an implicit fallthrough into IA64_DBR; made explicit */

    case IA64_DBR:
    case IA64_IBR:
        //bugbugbug:check with pal about the max ibr/dbr!!!!
        break;

    case IA64_PMC:
        //bugbugbug:check with pal about the max ibr/dbr!!!!
        break;

    case IA64_PMD:
        //bugbugbug:check with pal about the max ibr/dbr!!!!
        break;

    case IA64_PKR:
        //bugbugbug:check with pal about the max pkr!!!!
        break;

    case IA64_RR:
        //bugbugbug:check with pal about the max rr!!!!
        break;

    default:
        panic ("Unsupported instruction!");
    }

    return 0;
}
#endif
76 /*
77 * Return:
78 * Set all ignored fields in value to 0 and return
79 */
80 u64
81 indirect_reg_igfld_MASK (
82 int type,
83 int index,
84 u64 value
85 )
86 {
87 u64 nvalue;
89 nvalue = value;
90 switch ( type ) {
91 case IA64_CPUID:
92 if ( index == 2 ) {
93 nvalue = 0;
94 }
95 break;
97 case IA64_DBR:
98 case IA64_IBR:
99 /* Refer to SDM Vol2 Table 7-1,7-2 */
100 if ( index % 2 != 0) {
101 /* Ignore field: {61:60} */
102 nvalue = value & (~MASK (60, 2));
103 }
104 break;
105 case IA64_PMC:
106 if ( index == 0 ) {
107 /* Ignore field: 3:1 */
108 nvalue = value & (~MASK (1, 3));
109 }
110 break;
111 case IA64_PMD:
112 if ( index >= 4 ) {
113 /* Ignore field: 7:7 */
114 /* bugbug: this code is correct for generic
115 * PMD. However, for implementation specific
116 * PMD, it's WRONG. need more info to judge
117 * what's implementation specific PMD.
118 */
119 nvalue = value & (~MASK (7, 1));
120 }
121 break;
122 case IA64_PKR:
123 case IA64_RR:
124 break;
125 default:
126 panic ("Unsupported instruction!");
127 }
129 return nvalue;
130 }
132 /*
133 * Return:
134 * Set all ignored fields in value to 0 and return
135 */
136 u64
137 cr_igfld_mask (int index, u64 value)
138 {
139 u64 nvalue;
141 nvalue = value;
143 switch ( index ) {
144 case IA64_REG_CR_IVA:
145 /* Ignore filed: 14:0 */
146 nvalue = value & (~MASK (0, 15));
147 break;
149 case IA64_REG_CR_IHA:
150 /* Ignore filed: 1:0 */
151 nvalue = value & (~MASK (0, 2));
152 break;
154 case IA64_REG_CR_LID:
155 /* Ignore filed: 63:32 */
156 nvalue = value & (~MASK (32, 32));
157 break;
159 case IA64_REG_CR_TPR:
160 /* Ignore filed: 63:17,3:0 */
161 nvalue = value & (~MASK (17, 47));
162 nvalue = nvalue & (~MASK (0, 4));
163 break;
165 case IA64_REG_CR_EOI:
166 /* Ignore filed: 63:0 */
167 nvalue = 0;
168 break;
170 case IA64_REG_CR_ITV:
171 case IA64_REG_CR_PMV:
172 case IA64_REG_CR_CMCV:
173 case IA64_REG_CR_LRR0:
174 case IA64_REG_CR_LRR1:
175 /* Ignore filed: 63:17,12:12 */
176 nvalue = value & (~MASK (17, 47));
177 nvalue = nvalue & (~MASK (12, 1));
178 break;
179 }
181 return nvalue;
182 }
185 /*
186 * Return:
187 * 1: PSR reserved fields are not zero
188 * 0: PSR reserved fields are all zero
189 */
190 int
191 check_psr_rsv_fields (u64 value)
192 {
193 /* PSR reserved fields: 0, 12~6, 16, 31~28, 63~46
194 * These reserved fields shall all be zero
195 * Otherwise we will panic
196 */
198 if ( value & MASK (0, 1) ||
199 value & MASK (6, 7) ||
200 value & MASK (16, 1) ||
201 value & MASK (28, 4) ||
202 value & MASK (46, 18)
203 ) {
204 return 1;
205 }
207 return 0;
208 }
#ifdef CHECK_FAULT
/*
 * Check the reserved fields of a control register value.
 *
 * Return:
 *  1: CR reserved fields are not zero
 *  0: CR reserved fields are all zero
 */
int
check_cr_rsv_fields (int index, u64 value)
{
    switch (index) {
    case IA64_REG_CR_DCR:
        /* Reserved: {7:3} and {63:15} */
        if ( (value & MASK ( 3, 5 )) ||
            (value & MASK (15, 49))) {
            return 1;
        }
        return 0;

    case IA64_REG_CR_ITM:
    case IA64_REG_CR_IVA:
    case IA64_REG_CR_IIP:
    case IA64_REG_CR_IFA:
    case IA64_REG_CR_IIPA:
    case IA64_REG_CR_IIM:
    case IA64_REG_CR_IHA:
    case IA64_REG_CR_EOI:
        /* No reserved fields checked for these CRs. */
        return 0;

    case IA64_REG_CR_PTA:
        /* Reserved: {1:1} and {14:9} */
        if ( (value & MASK ( 1, 1 )) ||
            (value & MASK (9, 6))) {
            return 1;
        }
        return 0;

    case IA64_REG_CR_IPSR:
        /* IPSR shares the PSR reserved-field layout. */
        return check_psr_rsv_fields (value);


    case IA64_REG_CR_ISR:
        /* Reserved: {31:24} and {63:44} */
        if ( (value & MASK ( 24, 8 )) ||
            (value & MASK (44, 20))) {
            return 1;
        }
        return 0;

    case IA64_REG_CR_ITIR:
        /* Reserved: {1:0} and {63:32} */
        if ( (value & MASK ( 0, 2 )) ||
            (value & MASK (32, 32))) {
            return 1;
        }
        return 0;

    case IA64_REG_CR_IFS:
        /* Reserved: {62:38} */
        if ( (value & MASK ( 38, 25 ))) {
            return 1;
        }
        return 0;

    case IA64_REG_CR_LID:
        /* Reserved: {15:0} */
        if ( (value & MASK ( 0, 16 ))) {
            return 1;
        }
        return 0;

    case IA64_REG_CR_IVR:
        /* Reserved: {63:8} */
        if ( (value & MASK ( 8, 56 ))) {
            return 1;
        }
        return 0;

    case IA64_REG_CR_TPR:
        /* Reserved: {15:8} */
        if ( (value & MASK ( 8, 8 ))) {
            return 1;
        }
        return 0;

    case IA64_REG_CR_IRR0:
        /* Reserved: {1:1} and {15:3} */
        if ( (value & MASK ( 1, 1 )) ||
            (value & MASK (3, 13))) {
            return 1;
        }
        return 0;

    case IA64_REG_CR_ITV:
    case IA64_REG_CR_PMV:
    case IA64_REG_CR_CMCV:
        /* Reserved: {11:8} and {15:13} */
        if ( (value & MASK ( 8, 4 )) ||
            (value & MASK (13, 3))) {
            return 1;
        }
        return 0;

    case IA64_REG_CR_LRR0:
    case IA64_REG_CR_LRR1:
        /* Reserved: {11:11} and {14:14} */
        if ( (value & MASK ( 11, 1 )) ||
            (value & MASK (14, 1))) {
            return 1;
        }
        return 0;
    }
    panic ("Unsupported CR");
    return 0;
}
#endif
#if 0
/*
 * Currently disabled (#if 0): unreferenced reserved-field checker
 * for indirect registers.
 *
 * Return:
 *  0: Indirect Reg reserved fields are not zero
 *  1: Indirect Reg reserved fields are all zero
 * (Note the inverted sense relative to the other check_* helpers.)
 */
int
check_indirect_reg_rsv_fields ( int type, int index, u64 value )
{

    switch ( type ) {
    case IA64_CPUID:
        /* CPUID[3]: bits {63:40} reserved; CPUID[4]: bits {63:2}. */
        if ( index == 3 ) {
            if ( value & MASK (40, 24 )) {
                return 0;
            }
        } else if ( index == 4 ) {
            if ( value & MASK (2, 62 )) {
                return 0;
            }
        }
        break;

    case IA64_DBR:
    case IA64_IBR:
    case IA64_PMC:
    case IA64_PMD:
        /* No reserved-field check performed here. */
        break;

    case IA64_PKR:
        /* PKR reserved bits: {7:4} and {63:32}. */
        if ( value & MASK (4, 4) ||
            value & MASK (32, 32 )) {
            return 0;
        }
        break;

    case IA64_RR:
        /* RR reserved bits: {1:1} and {63:32}. */
        if ( value & MASK (1, 1) ||
            value & MASK (32, 32 )) {
            return 0;
        }
        break;

    default:
        panic ("Unsupported instruction!");
    }

    return 1;
}
#endif
369 /* Return
370 * Same format as isr_t
371 * Only ei/ni bits are valid, all other bits are zero
372 */
373 u64
374 set_isr_ei_ni (VCPU *vcpu)
375 {
377 IA64_PSR vpsr,ipsr;
378 ISR visr;
379 REGS *regs;
381 regs=vcpu_regs(vcpu);
383 visr.val = 0;
385 vpsr.val = VCPU(vcpu, vpsr);
387 if (!vpsr.ic == 1 ) {
388 /* Set ISR.ni */
389 visr.ni = 1;
390 }
391 ipsr.val = regs->cr_ipsr;
393 visr.ei = ipsr.ri;
394 return visr.val;
395 }
398 /* Set up ISR.na/code{3:0}/r/w for no-access instructions
399 * Refer to SDM Vol Table 5-1
400 * Parameter:
401 * setr: if 1, indicates this function will set up ISR.r
402 * setw: if 1, indicates this function will set up ISR.w
403 * Return:
404 * Same format as ISR. All fields are zero, except na/code{3:0}/r/w
405 */
406 u64
407 set_isr_for_na_inst(VCPU *vcpu, int op)
408 {
409 ISR visr;
410 visr.val = 0;
411 switch (op) {
412 case IA64_INST_TPA:
413 visr.na = 1;
414 visr.code = 0;
415 break;
416 case IA64_INST_TAK:
417 visr.na = 1;
418 visr.code = 3;
419 break;
420 }
421 return visr.val;
422 }
426 /*
427 * Set up ISR for registe Nat consumption fault
428 * Parameters:
429 * read: if 1, indicates this is a read access;
430 * write: if 1, indicates this is a write access;
431 */
432 void
433 set_rnat_consumption_isr (VCPU *vcpu,int inst,int read,int write)
434 {
435 ISR visr;
436 u64 value;
437 /* Need set up ISR: code, ei, ni, na, r/w */
438 visr.val = 0;
440 /* ISR.code{7:4} =1,
441 * Set up ISR.code{3:0}, ISR.na
442 */
443 visr.code = (1 << 4);
444 if (inst) {
446 value = set_isr_for_na_inst (vcpu,inst);
447 visr.val = visr.val | value;
448 }
450 /* Set up ISR.r/w */
451 visr.r = read;
452 visr.w = write;
454 /* Set up ei/ni */
455 value = set_isr_ei_ni (vcpu);
456 visr.val = visr.val | value;
458 vcpu_set_isr (vcpu,visr.val);
459 }
463 /*
464 * Set up ISR for break fault
465 */
466 void set_break_isr (VCPU *vcpu)
467 {
468 ISR visr;
469 u64 value;
471 /* Need set up ISR: ei, ni */
473 visr.val = 0;
475 /* Set up ei/ni */
476 value = set_isr_ei_ni (vcpu);
477 visr.val = visr.val | value;
479 vcpu_set_isr(vcpu, visr.val);
480 }
487 /*
488 * Set up ISR for Priviledged Operation fault
489 */
490 void set_privileged_operation_isr (VCPU *vcpu,int inst)
491 {
492 ISR visr;
493 u64 value;
495 /* Need set up ISR: code, ei, ni, na */
497 visr.val = 0;
499 /* Set up na, code{3:0} for no-access instruction */
500 value = set_isr_for_na_inst (vcpu, inst);
501 visr.val = visr.val | value;
504 /* ISR.code{7:4} =1 */
505 visr.code = (1 << 4) | visr.code;
507 /* Set up ei/ni */
508 value = set_isr_ei_ni (vcpu);
509 visr.val = visr.val | value;
511 vcpu_set_isr (vcpu, visr.val);
512 }
517 /*
518 * Set up ISR for Priviledged Register fault
519 */
520 void set_privileged_reg_isr (VCPU *vcpu, int inst)
521 {
522 ISR visr;
523 u64 value;
525 /* Need set up ISR: code, ei, ni */
527 visr.val = 0;
529 /* ISR.code{7:4} =2 */
530 visr.code = 2 << 4;
532 /* Set up ei/ni */
533 value = set_isr_ei_ni (vcpu);
534 visr.val = visr.val | value;
536 vcpu_set_isr (vcpu, visr.val);
537 }
543 /*
544 * Set up ISR for Reserved Register/Field fault
545 */
546 void set_rsv_reg_field_isr (VCPU *vcpu)
547 {
548 ISR visr;
549 u64 value;
551 /* Need set up ISR: code, ei, ni */
553 visr.val = 0;
555 /* ISR.code{7:4} =4 */
556 visr.code = (3 << 4) | visr.code;
558 /* Set up ei/ni */
559 value = set_isr_ei_ni (vcpu);
560 visr.val = visr.val | value;
562 vcpu_set_isr (vcpu, visr.val);
563 }
567 /*
568 * Set up ISR for Illegal Operation fault
569 */
570 void set_illegal_op_isr (VCPU *vcpu)
571 {
572 ISR visr;
573 u64 value;
575 /* Need set up ISR: ei, ni */
577 visr.val = 0;
579 /* Set up ei/ni */
580 value = set_isr_ei_ni (vcpu);
581 visr.val = visr.val | value;
583 vcpu_set_isr (vcpu, visr.val);
584 }
587 void set_isr_reg_nat_consumption(VCPU *vcpu, u64 flag, u64 non_access)
588 {
589 ISR isr;
591 isr.val = 0;
592 isr.val = set_isr_ei_ni(vcpu);
593 isr.code = IA64_REG_NAT_CONSUMPTION_FAULT | flag;
594 isr.na = non_access;
595 isr.r = 1;
596 isr.w = 0;
597 vcpu_set_isr(vcpu, isr.val);
598 return;
599 }
601 void set_isr_for_priv_fault(VCPU *vcpu, u64 non_access)
602 {
603 ISR isr;
605 isr.val = set_isr_ei_ni(vcpu);
606 isr.code = IA64_PRIV_OP_FAULT;
607 isr.na = non_access;
608 vcpu_set_isr(vcpu, isr.val);
610 return;
611 }
614 IA64FAULT check_target_register(VCPU *vcpu, u64 reg_index)
615 {
616 u64 sof;
617 REGS *regs;
618 regs=vcpu_regs(vcpu);
619 sof = regs->cr_ifs & 0x7f;
620 if(reg_index >= sof + 32)
621 return IA64_FAULT;
622 return IA64_NO_FAULT;;
623 }
626 int is_reserved_rr_register(VCPU* vcpu, int reg_index)
627 {
628 return (reg_index >= 8);
629 }
631 #define ITIR_RSV_MASK (0x3UL | (((1UL<<32)-1) << 32))
632 int is_reserved_itir_field(VCPU* vcpu, u64 itir)
633 {
634 if ( itir & ITIR_RSV_MASK ) {
635 return 1;
636 }
637 return 0;
638 }
640 static int __is_reserved_rr_field(u64 reg_value)
641 {
642 ia64_rr rr = { .rrval = reg_value };
644 if(rr.reserved0 != 0 || rr.reserved1 != 0){
645 return 1;
646 }
647 if(rr.ps < 12 || rr.ps > 28){
648 // page too big or small.
649 return 1;
650 }
651 if(rr.ps > 15 && rr.ps % 2 != 0){
652 // unsupported page size.
653 return 1;
654 }
655 return 0;
656 }
658 int is_reserved_rr_rid(VCPU* vcpu, u64 reg_value)
659 {
660 ia64_rr rr = { .rrval = reg_value };
662 if (rr.rid >= (1UL << vcpu->domain->arch.rid_bits))
663 return 1;
665 return 0;
666 }
668 int is_reserved_rr_field(VCPU* vcpu, u64 reg_value)
669 {
670 if (__is_reserved_rr_field(reg_value))
671 return 1;
673 return is_reserved_rr_rid(vcpu, reg_value);
674 }