debuggers.hg
changeset 9807:2b6e531dab38
[IA64] Merge vpsr.i with evtchn_upcall_mask to solve one tricky bug
Per the agreement reached at the summit, xen/ia64 will move to the same
event channel model as xen/x86, in which events form the layer beneath
pirqs (external interrupts), virqs, and ipis, with the latter three
bound to event ports. In that model no external interrupt is injected
directly, and evtchn_upcall_mask is the flag that controls whether
events are deliverable.
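As a rough illustration (not part of this patch), the delivery rule the common
code applies can be sketched in C as below; evtchn_upcall_pending and
evtchn_upcall_mask are the standard vcpu_info_t fields from the Xen public
headers, and the include path shown is an assumption:

    /* Sketch only: how common code decides whether an event upcall is
     * deliverable to a vcpu. Both fields live in the per-vcpu vcpu_info_t
     * within the shared info page. */
    #include <xen/interface/xen.h>      /* vcpu_info_t (assumed path) */

    static inline int upcall_deliverable(vcpu_info_t *vi)
    {
        /* Deliverable only if an event is pending and upcalls are not
         * masked by this vcpu. */
        return vi->evtchn_upcall_pending && !vi->evtchn_upcall_mask;
    }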
Consequently, xenlinux needs to operate on evtchn_upcall_mask everywhere
it previously operated on vpsr.i. However, the two flags live in
different shared areas, so xenlinux cannot update them atomically, which
leads to severe stability problems. One such bug is that a hypercall may
be restarted indefinitely while events are pending.
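A minimal sketch of that hazard, using a hypothetical helper rather than
anything from this patch (types as in the structures touched by the diff):
re-enabling virtual interrupts takes one store into the mapped_regs area and
one into vcpu_info, and nothing makes the pair atomic:

    /* Hypothetical illustration of the non-atomic update across the two
     * shared areas. */
    void enable_virtual_interrupts(mapped_regs_t *mr, vcpu_info_t *vi)
    {
        mr->interrupt_delivery_enabled = 1;  /* vpsr.i, in mapped_regs */
        /* An event arriving in this window sees the two flags disagree,
         * which is the kind of inconsistency behind the hypercall-restart
         * bug described above. */
        vi->evtchn_upcall_mask = 0;          /* in the shared info page */
    }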
In the intended model, events are a superset of external interrupts, and
evtchn_upcall_mask is therefore a superset of vpsr.i
(interrupt_delivery_enabled). We can merge the two flags into one by
removing the latter. This guarantees correctness and, most importantly,
conforms to the common code, which always relies on evtchn_upcall_mask.
Signed-off-by: Kevin Tian <kevin.tian@intel.com>
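To spell out the resulting polarity (this mirrors the privop.h hunk in the
diff below; it is not additional behaviour): after the merge, the virtual
psr.i value is simply the inverse of evtchn_upcall_mask, reached through the
per-vcpu interrupt_mask_addr pointer. A sketch:

    #include <stdint.h>

    /* Sketch of the merged semantics: interrupt_mask_addr points at this
     * vcpu's evtchn_upcall_mask byte. */
    static inline int vpsr_i_read(const uint8_t *interrupt_mask_addr)
    {
        return !*interrupt_mask_addr;           /* psr.i == !upcall_mask */
    }

    static inline void vpsr_i_write(uint8_t *interrupt_mask_addr, int enable)
    {
        *interrupt_mask_addr = enable ? 0 : 1;  /* enabling clears the mask */
    }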
line diff
1.1 --- a/linux-2.6-xen-sparse/arch/ia64/xen/drivers/xenia64_init.c Wed Mar 29 12:41:33 2006 -0700 1.2 +++ b/linux-2.6-xen-sparse/arch/ia64/xen/drivers/xenia64_init.c Thu Mar 30 09:55:26 2006 -0700 1.3 @@ -5,6 +5,7 @@ 1.4 #include <asm/sal.h> 1.5 #include <asm/hypervisor.h> 1.6 /* #include <asm-xen/evtchn.h> */ 1.7 +#include <xen/interface/arch-ia64.h> 1.8 #include <linux/vmalloc.h> 1.9 1.10 shared_info_t *HYPERVISOR_shared_info = (shared_info_t *)0xf100000000000000;
2.1 --- a/linux-2.6-xen-sparse/arch/ia64/xen/hypercall.S Wed Mar 29 12:41:33 2006 -0700 2.2 +++ b/linux-2.6-xen-sparse/arch/ia64/xen/hypercall.S Thu Mar 30 09:55:26 2006 -0700 2.3 @@ -8,6 +8,27 @@ 2.4 #include <asm/processor.h> 2.5 #include <asm/asmmacro.h> 2.6 2.7 +/* To clear vpsr.ic, vpsr.i needs to be cleared first */ 2.8 +#define XEN_CLEAR_PSR_IC \ 2.9 + mov r14=1; \ 2.10 + movl r15=XSI_PSR_I_ADDR; \ 2.11 + movl r2=XSI_PSR_IC; \ 2.12 + ;; \ 2.13 + ld8 r15=[r15]; \ 2.14 + ld4 r3=[r2]; \ 2.15 + ;; \ 2.16 + ld1 r16=[r15]; \ 2.17 + ;; \ 2.18 + st1 [r15]=r14; \ 2.19 + st4 [r2]=r0; \ 2.20 + ;; 2.21 + 2.22 +/* First restore vpsr.ic, and then vpsr.i */ 2.23 +#define XEN_RESTORE_PSR_IC \ 2.24 + st4 [r2]=r3; \ 2.25 + st1 [r15]=r16; \ 2.26 + ;; 2.27 + 2.28 GLOBAL_ENTRY(xen_get_ivr) 2.29 movl r8=running_on_xen;; 2.30 ld4 r8=[r8];; 2.31 @@ -15,15 +36,12 @@ GLOBAL_ENTRY(xen_get_ivr) 2.32 (p7) mov r8=cr.ivr;; 2.33 (p7) br.ret.sptk.many rp 2.34 ;; 2.35 - movl r9=XSI_PSR_IC 2.36 - ;; 2.37 - ld8 r10=[r9] 2.38 - ;; 2.39 - st8 [r9]=r0 2.40 + XEN_CLEAR_PSR_IC 2.41 ;; 2.42 XEN_HYPER_GET_IVR 2.43 ;; 2.44 - st8 [r9]=r10 2.45 + XEN_RESTORE_PSR_IC 2.46 + ;; 2.47 br.ret.sptk.many rp 2.48 ;; 2.49 END(xen_get_ivr) 2.50 @@ -35,15 +53,12 @@ GLOBAL_ENTRY(xen_get_tpr) 2.51 (p7) mov r8=cr.tpr;; 2.52 (p7) br.ret.sptk.many rp 2.53 ;; 2.54 - movl r9=XSI_PSR_IC 2.55 - ;; 2.56 - ld8 r10=[r9] 2.57 - ;; 2.58 - st8 [r9]=r0 2.59 + XEN_CLEAR_PSR_IC 2.60 ;; 2.61 XEN_HYPER_GET_TPR 2.62 ;; 2.63 - st8 [r9]=r10 2.64 + XEN_RESTORE_PSR_IC 2.65 + ;; 2.66 br.ret.sptk.many rp 2.67 ;; 2.68 END(xen_get_tpr) 2.69 @@ -55,16 +70,14 @@ GLOBAL_ENTRY(xen_set_tpr) 2.70 (p7) mov cr.tpr=r32;; 2.71 (p7) br.ret.sptk.many rp 2.72 ;; 2.73 - movl r9=XSI_PSR_IC 2.74 mov r8=r32 2.75 ;; 2.76 - ld8 r10=[r9] 2.77 - ;; 2.78 - st8 [r9]=r0 2.79 + XEN_CLEAR_PSR_IC 2.80 ;; 2.81 XEN_HYPER_SET_TPR 2.82 ;; 2.83 - st8 [r9]=r10 2.84 + XEN_RESTORE_PSR_IC 2.85 + ;; 2.86 br.ret.sptk.many rp 2.87 ;; 2.88 END(xen_set_tpr) 2.89 @@ -76,16 +89,14 @@ GLOBAL_ENTRY(xen_eoi) 2.90 (p7) mov cr.eoi=r0;; 2.91 (p7) br.ret.sptk.many rp 2.92 ;; 2.93 - movl r9=XSI_PSR_IC 2.94 mov r8=r32 2.95 ;; 2.96 - ld8 r10=[r9] 2.97 - ;; 2.98 - st8 [r9]=r0 2.99 + XEN_CLEAR_PSR_IC 2.100 ;; 2.101 XEN_HYPER_EOI 2.102 ;; 2.103 - st8 [r9]=r10 2.104 + XEN_RESTORE_PSR_IC 2.105 + ;; 2.106 br.ret.sptk.many rp 2.107 ;; 2.108 END(xen_eoi) 2.109 @@ -97,16 +108,13 @@ GLOBAL_ENTRY(xen_thash) 2.110 (p7) thash r8=r32;; 2.111 (p7) br.ret.sptk.many rp 2.112 ;; 2.113 - movl r9=XSI_PSR_IC 2.114 mov r8=r32 2.115 ;; 2.116 - ld8 r10=[r9] 2.117 - ;; 2.118 - st8 [r9]=r0 2.119 + XEN_CLEAR_PSR_IC 2.120 ;; 2.121 XEN_HYPER_THASH 2.122 ;; 2.123 - st8 [r9]=r10 2.124 + XEN_RESTORE_PSR_IC 2.125 ;; 2.126 br.ret.sptk.many rp 2.127 ;; 2.128 @@ -119,16 +127,13 @@ GLOBAL_ENTRY(xen_set_itm) 2.129 (p7) mov cr.itm=r32;; 2.130 (p7) br.ret.sptk.many rp 2.131 ;; 2.132 - movl r9=XSI_PSR_IC 2.133 mov r8=r32 2.134 ;; 2.135 - ld8 r10=[r9] 2.136 - ;; 2.137 - st8 [r9]=r0 2.138 + XEN_CLEAR_PSR_IC 2.139 ;; 2.140 XEN_HYPER_SET_ITM 2.141 ;; 2.142 - st8 [r9]=r10 2.143 + XEN_RESTORE_PSR_IC 2.144 ;; 2.145 br.ret.sptk.many rp 2.146 ;; 2.147 @@ -141,17 +146,14 @@ GLOBAL_ENTRY(xen_ptcga) 2.148 (p7) ptc.ga r32,r33;; 2.149 (p7) br.ret.sptk.many rp 2.150 ;; 2.151 - movl r11=XSI_PSR_IC 2.152 mov r8=r32 2.153 mov r9=r33 2.154 ;; 2.155 - ld8 r10=[r11] 2.156 - ;; 2.157 - st8 [r11]=r0 2.158 + XEN_CLEAR_PSR_IC 2.159 ;; 2.160 XEN_HYPER_PTC_GA 2.161 ;; 2.162 - st8 [r11]=r10 2.163 + XEN_RESTORE_PSR_IC 2.164 ;; 2.165 br.ret.sptk.many rp 2.166 ;; 2.167 @@ -164,16 +166,13 @@ 
GLOBAL_ENTRY(xen_get_rr) 2.168 (p7) mov r8=rr[r32];; 2.169 (p7) br.ret.sptk.many rp 2.170 ;; 2.171 - movl r9=XSI_PSR_IC 2.172 mov r8=r32 2.173 ;; 2.174 - ld8 r10=[r9] 2.175 - ;; 2.176 - st8 [r9]=r0 2.177 + XEN_CLEAR_PSR_IC 2.178 ;; 2.179 XEN_HYPER_GET_RR 2.180 ;; 2.181 - st8 [r9]=r10 2.182 + XEN_RESTORE_PSR_IC 2.183 ;; 2.184 br.ret.sptk.many rp 2.185 ;; 2.186 @@ -186,17 +185,14 @@ GLOBAL_ENTRY(xen_set_rr) 2.187 (p7) mov rr[r32]=r33;; 2.188 (p7) br.ret.sptk.many rp 2.189 ;; 2.190 - movl r11=XSI_PSR_IC 2.191 mov r8=r32 2.192 mov r9=r33 2.193 ;; 2.194 - ld8 r10=[r11] 2.195 - ;; 2.196 - st8 [r11]=r0 2.197 + XEN_CLEAR_PSR_IC 2.198 ;; 2.199 XEN_HYPER_SET_RR 2.200 ;; 2.201 - st8 [r11]=r10 2.202 + XEN_RESTORE_PSR_IC 2.203 ;; 2.204 br.ret.sptk.many rp 2.205 ;; 2.206 @@ -241,17 +237,14 @@ GLOBAL_ENTRY(xen_set_kr) 2.207 (p7) mov ar7=r9 2.208 (p7) br.ret.sptk.many rp;; 2.209 2.210 -1: movl r11=XSI_PSR_IC 2.211 - mov r8=r32 2.212 +1: mov r8=r32 2.213 mov r9=r33 2.214 ;; 2.215 - ld8 r10=[r11] 2.216 - ;; 2.217 - st8 [r11]=r0 2.218 + XEN_CLEAR_PSR_IC 2.219 ;; 2.220 XEN_HYPER_SET_KR 2.221 ;; 2.222 - st8 [r11]=r10 2.223 + XEN_RESTORE_PSR_IC 2.224 ;; 2.225 br.ret.sptk.many rp 2.226 END(xen_set_rr) 2.227 @@ -263,16 +256,13 @@ GLOBAL_ENTRY(xen_fc) 2.228 (p7) fc r32;; 2.229 (p7) br.ret.sptk.many rp 2.230 ;; 2.231 - movl r9=XSI_PSR_IC 2.232 mov r8=r32 2.233 ;; 2.234 - ld8 r10=[r9] 2.235 - ;; 2.236 - st8 [r9]=r0 2.237 + XEN_CLEAR_PSR_IC 2.238 ;; 2.239 XEN_HYPER_FC 2.240 ;; 2.241 - st8 [r9]=r10 2.242 + XEN_RESTORE_PSR_IC 2.243 ;; 2.244 br.ret.sptk.many rp 2.245 END(xen_fc) 2.246 @@ -284,16 +274,13 @@ GLOBAL_ENTRY(xen_get_cpuid) 2.247 (p7) mov r8=cpuid[r32];; 2.248 (p7) br.ret.sptk.many rp 2.249 ;; 2.250 - movl r9=XSI_PSR_IC 2.251 mov r8=r32 2.252 ;; 2.253 - ld8 r10=[r9] 2.254 - ;; 2.255 - st8 [r9]=r0 2.256 + XEN_CLEAR_PSR_IC 2.257 ;; 2.258 XEN_HYPER_GET_CPUID 2.259 ;; 2.260 - st8 [r9]=r10 2.261 + XEN_RESTORE_PSR_IC 2.262 ;; 2.263 br.ret.sptk.many rp 2.264 END(xen_get_cpuid) 2.265 @@ -305,16 +292,13 @@ GLOBAL_ENTRY(xen_get_pmd) 2.266 (p7) mov r8=pmd[r32];; 2.267 (p7) br.ret.sptk.many rp 2.268 ;; 2.269 - movl r9=XSI_PSR_IC 2.270 mov r8=r32 2.271 ;; 2.272 - ld8 r10=[r9] 2.273 - ;; 2.274 - st8 [r9]=r0 2.275 + XEN_CLEAR_PSR_IC 2.276 ;; 2.277 XEN_HYPER_GET_PMD 2.278 ;; 2.279 - st8 [r9]=r10 2.280 + XEN_RESTORE_PSR_IC 2.281 ;; 2.282 br.ret.sptk.many rp 2.283 END(xen_get_pmd) 2.284 @@ -327,16 +311,13 @@ GLOBAL_ENTRY(xen_get_eflag) 2.285 (p7) mov r8=ar24;; 2.286 (p7) br.ret.sptk.many rp 2.287 ;; 2.288 - movl r9=XSI_PSR_IC 2.289 mov r8=r32 2.290 ;; 2.291 - ld8 r10=[r9] 2.292 - ;; 2.293 - st8 [r9]=r0 2.294 + XEN_CLEAR_PSR_IC 2.295 ;; 2.296 XEN_HYPER_GET_EFLAG 2.297 ;; 2.298 - st8 [r9]=r10 2.299 + XEN_RESTORE_PSR_IC 2.300 ;; 2.301 br.ret.sptk.many rp 2.302 END(xen_get_eflag) 2.303 @@ -349,16 +330,13 @@ GLOBAL_ENTRY(xen_set_eflag) 2.304 (p7) mov ar24=r32 2.305 (p7) br.ret.sptk.many rp 2.306 ;; 2.307 - movl r9=XSI_PSR_IC 2.308 mov r8=r32 2.309 ;; 2.310 - ld8 r10=[r9] 2.311 - ;; 2.312 - st8 [r9]=r0 2.313 + XEN_CLEAR_PSR_IC 2.314 ;; 2.315 XEN_HYPER_SET_EFLAG 2.316 ;; 2.317 - st8 [r9]=r10 2.318 + XEN_RESTORE_PSR_IC 2.319 ;; 2.320 br.ret.sptk.many rp 2.321 END(xen_set_eflag)
3.1 --- a/linux-2.6-xen-sparse/arch/ia64/xen/xenentry.S Wed Mar 29 12:41:33 2006 -0700 3.2 +++ b/linux-2.6-xen-sparse/arch/ia64/xen/xenentry.S Thu Mar 30 09:55:26 2006 -0700 3.3 @@ -312,9 +312,12 @@ ENTRY(ia64_leave_syscall) 3.4 cmp.eq p6,p0=r21,r0 // p6 <- pUStk || (preempt_count == 0) 3.5 #else /* !CONFIG_PREEMPT */ 3.6 #ifdef CONFIG_XEN 3.7 - movl r2=XSI_PSR_I 3.8 + movl r2=XSI_PSR_I_ADDR 3.9 + mov r18=1 3.10 ;; 3.11 -(pUStk) st4 [r2]=r0 3.12 + ld8 r2=[r2] 3.13 + ;; 3.14 +(pUStk) st1 [r2]=r18 3.15 #else 3.16 (pUStk) rsm psr.i 3.17 #endif 3.18 @@ -345,9 +348,14 @@ ENTRY(ia64_leave_syscall) 3.19 ;; 3.20 invala // M0|1 invalidate ALAT 3.21 #ifdef CONFIG_XEN 3.22 + movl r28=XSI_PSR_I_ADDR 3.23 movl r29=XSI_PSR_IC 3.24 ;; 3.25 - st8 [r29]=r0 // note: clears both vpsr.i and vpsr.ic! 3.26 + ld8 r28=[r28] 3.27 + mov r30=1 3.28 + ;; 3.29 + st1 [r28]=r30 3.30 + st4 [r29]=r0 // note: clears both vpsr.i and vpsr.ic! 3.31 ;; 3.32 #else 3.33 rsm psr.i | psr.ic // M2 initiate turning off of interrupt and interruption collection 3.34 @@ -441,9 +449,12 @@ GLOBAL_ENTRY(ia64_leave_kernel) 3.35 cmp.eq p6,p0=r21,r0 // p6 <- pUStk || (preempt_count == 0) 3.36 #else 3.37 #ifdef CONFIG_XEN 3.38 -(pUStk) movl r17=XSI_PSR_I 3.39 - ;; 3.40 -(pUStk) st4 [r17]=r0 3.41 +(pUStk) movl r17=XSI_PSR_I_ADDR 3.42 +(pUStk) mov r31=1 3.43 + ;; 3.44 +(pUStk) ld8 r17=[r17] 3.45 + ;; 3.46 +(pUStk) st1 [r17]=r31 3.47 ;; 3.48 #else 3.49 (pUStk) rsm psr.i 3.50 @@ -496,9 +507,14 @@ GLOBAL_ENTRY(ia64_leave_kernel) 3.51 mov ar.ssd=r31 3.52 ;; 3.53 #ifdef CONFIG_XEN 3.54 + movl r23=XSI_PSR_I_ADDR 3.55 movl r22=XSI_PSR_IC 3.56 ;; 3.57 - st8 [r22]=r0 // note: clears both vpsr.i and vpsr.ic! 3.58 + ld8 r23=[r23] 3.59 + mov r25=1 3.60 + ;; 3.61 + st1 [r23]=r25 3.62 + st4 [r22]=r0 // note: clears both vpsr.i and vpsr.ic! 3.63 ;; 3.64 #else 3.65 rsm psr.i | psr.ic // initiate turning off of interrupt and interruption collection 3.66 @@ -803,9 +819,12 @@ skip_rbs_switch: 3.67 br.call.spnt.many rp=schedule 3.68 .ret9: cmp.eq p6,p0=r0,r0 // p6 <- 1 3.69 #ifdef CONFIG_XEN 3.70 - movl r2=XSI_PSR_I 3.71 + movl r2=XSI_PSR_I_ADDR 3.72 + mov r20=1 3.73 ;; 3.74 - st4 [r2]=r0 3.75 + ld8 r2=[r2] 3.76 + ;; 3.77 + st1 [r2]=r20 3.78 #else 3.79 rsm psr.i // disable interrupts 3.80 #endif
4.1 --- a/linux-2.6-xen-sparse/arch/ia64/xen/xenivt.S Wed Mar 29 12:41:33 2006 -0700 4.2 +++ b/linux-2.6-xen-sparse/arch/ia64/xen/xenivt.S Thu Mar 30 09:55:26 2006 -0700 4.3 @@ -683,9 +683,11 @@ ENTRY(dkey_miss) 4.4 // Leaving this code inline above results in an IVT section overflow 4.5 // There is no particular reason for this code to be here... 4.6 xen_page_fault: 4.7 -(p15) movl r3=XSI_PSR_I 4.8 +(p15) movl r3=XSI_PSR_I_ADDR 4.9 ;; 4.10 -(p15) st4 [r3]=r14,XSI_PEND-XSI_PSR_I // if (p15) vpsr.i = 1 4.11 +(p15) ld8 r3=[r3] 4.12 + ;; 4.13 +(p15) st1 [r3]=r0,XSI_PEND-XSI_PSR_I_ADDR // if (p15) vpsr.i = 1 4.14 mov r14=r0 4.15 ;; 4.16 (p15) ld4 r14=[r3] // if (pending_interrupts) 4.17 @@ -1043,9 +1045,11 @@ ENTRY(break_fault) 4.18 mov r16=1 4.19 ;; 4.20 #if 1 4.21 - st4 [r3]=r16,XSI_PSR_I-XSI_PSR_IC // vpsr.ic = 1 4.22 + st4 [r3]=r16,XSI_PSR_I_ADDR-XSI_PSR_IC // vpsr.ic = 1 4.23 ;; 4.24 -(p15) st4 [r3]=r16,XSI_PEND-XSI_PSR_I // if (p15) vpsr.i = 1 4.25 +(p15) ld8 r3=[r3] 4.26 + ;; 4.27 +(p15) st1 [r3]=r0,XSI_PEND-XSI_PSR_I_ADDR // if (p15) vpsr.i = 1 4.28 mov r16=r0 4.29 ;; 4.30 (p15) ld4 r16=[r3] // if (pending_interrupts) 4.31 @@ -1055,10 +1059,12 @@ ENTRY(break_fault) 4.32 (p6) ssm psr.i // do a real ssm psr.i 4.33 ;; 4.34 #else 4.35 -// st4 [r3]=r16,XSI_PSR_I-XSI_PSR_IC // vpsr.ic = 1 4.36 - adds r3=XSI_PSR_I-XSI_PSR_IC,r3 // SKIP vpsr.ic = 1 4.37 +// st4 [r3]=r16,XSI_PSR_I_ADDR-XSI_PSR_IC // vpsr.ic = 1 4.38 + adds r3=XSI_PSR_I_ADDR-XSI_PSR_IC,r3 // SKIP vpsr.ic = 1 4.39 ;; 4.40 -(p15) st4 [r3]=r16,XSI_PEND-XSI_PSR_I // if (p15) vpsr.i = 1 4.41 +(p15) ld8 r3=[r3] 4.42 + ;; 4.43 +(p15) st1 [r3]=r0,XSI_PEND-XSI_PSR_I_ADDR // if (p15) vpsr.i = 1 4.44 mov r16=r0 4.45 ;; 4.46 (p15) ld4 r16=[r3] // if (pending_interrupts)
5.1 --- a/linux-2.6-xen-sparse/arch/ia64/xen/xenpal.S Wed Mar 29 12:41:33 2006 -0700 5.2 +++ b/linux-2.6-xen-sparse/arch/ia64/xen/xenpal.S Thu Mar 30 09:55:26 2006 -0700 5.3 @@ -43,11 +43,14 @@ 1: { 5.4 // from the idle loop so confuses privop counting 5.5 movl r31=XSI_PSR_IC 5.6 ;; 5.7 -(p6) st8 [r31]=r0 5.8 +(p6) st4 [r31]=r0 5.9 ;; 5.10 -(p7) adds r31=XSI_PSR_I-XSI_PSR_IC,r31 5.11 +(p7) adds r31=XSI_PSR_I_ADDR-XSI_PSR_IC,r31 5.12 +(p7) mov r22=1 5.13 ;; 5.14 -(p7) st4 [r31]=r0 5.15 +(p7) ld8 r31=[r31] 5.16 + ;; 5.17 +(p7) st1 [r31]=r22 5.18 ;; 5.19 mov r31 = in3 5.20 mov b7 = loc2
6.1 --- a/linux-2.6-xen-sparse/include/asm-ia64/xen/privop.h Wed Mar 29 12:41:33 2006 -0700 6.2 +++ b/linux-2.6-xen-sparse/include/asm-ia64/xen/privop.h Thu Mar 30 09:55:26 2006 -0700 6.3 @@ -87,9 +87,14 @@ extern void xen_set_eflag(unsigned long) 6.4 * Others, like "pend", are abstractions based on privileged registers. 6.5 * "Pend" is guaranteed to be set if reading cr.ivr would return a 6.6 * (non-spurious) interrupt. */ 6.7 -#define xen_get_virtual_psr_i() (*(int *)(XSI_PSR_I)) 6.8 -#define xen_set_virtual_psr_i(_val) ({ *(int *)(XSI_PSR_I) = _val ? 1:0; }) 6.9 -#define xen_set_virtual_psr_ic(_val) ({ *(int *)(XSI_PSR_IC) = _val ? 1:0; }) 6.10 +#define XSI_PSR_I \ 6.11 + (*(uint64_t *)(XSI_PSR_I_ADDR)) 6.12 +#define xen_get_virtual_psr_i() \ 6.13 + (!(*(uint8_t *)(XSI_PSR_I))) 6.14 +#define xen_set_virtual_psr_i(_val) \ 6.15 + ({ *(uint8_t *)(XSI_PSR_I) = (uint8_t)(_val) ? 0:1; }) 6.16 +#define xen_set_virtual_psr_ic(_val) \ 6.17 + ({ *(int *)(XSI_PSR_IC) = _val ? 1:0; }) 6.18 #define xen_get_virtual_pend() (*(int *)(XSI_PEND)) 6.19 6.20 /* Hyperprivops are "break" instructions with a well-defined API.
7.1 --- a/xen/arch/ia64/asm-xsi-offsets.c Wed Mar 29 12:41:33 2006 -0700 7.2 +++ b/xen/arch/ia64/asm-xsi-offsets.c Thu Mar 30 09:55:26 2006 -0700 7.3 @@ -50,8 +50,8 @@ void foo(void) 7.4 /* First is shared info page, and then arch specific vcpu context */ 7.5 DEFINE(XSI_BASE, SHAREDINFO_ADDR); 7.6 7.7 - DEFINE(XSI_PSR_I_OFS, (XSI_OFS + offsetof(mapped_regs_t, interrupt_delivery_enabled))); 7.8 - DEFINE(XSI_PSR_I, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, interrupt_delivery_enabled))); 7.9 + DEFINE(XSI_PSR_I_ADDR_OFS, (XSI_OFS + offsetof(mapped_regs_t, interrupt_mask_addr))); 7.10 + DEFINE(XSI_PSR_I_ADDR, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, interrupt_mask_addr))); 7.11 DEFINE(XSI_IPSR, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, ipsr))); 7.12 DEFINE(XSI_IPSR_OFS, (XSI_OFS + offsetof(mapped_regs_t, ipsr))); 7.13 DEFINE(XSI_IIP_OFS, (XSI_OFS + offsetof(mapped_regs_t, iip))); 7.14 @@ -104,5 +104,4 @@ void foo(void) 7.15 DEFINE(XSI_PKR0, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, pkrs[0]))); 7.16 DEFINE(XSI_TMP0_OFS, (XSI_OFS + offsetof(mapped_regs_t, tmp[0]))); 7.17 DEFINE(XSI_TMP0, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, tmp[0]))); 7.18 - 7.19 }
8.1 --- a/xen/arch/ia64/xen/domain.c Wed Mar 29 12:41:33 2006 -0700 8.2 +++ b/xen/arch/ia64/xen/domain.c Thu Mar 30 09:55:26 2006 -0700 8.3 @@ -485,6 +485,8 @@ void new_thread(struct vcpu *v, 8.4 regs->ar_rsc |= (2 << 2); /* force PL2/3 */ 8.5 VCPU(v, banknum) = 1; 8.6 VCPU(v, metaphysical_mode) = 1; 8.7 + VCPU(v, interrupt_mask_addr) = 8.8 + (uint64_t)SHAREDINFO_ADDR + INT_ENABLE_OFFSET(v); 8.9 } 8.10 } 8.11
9.1 --- a/xen/arch/ia64/xen/hyperprivop.S Wed Mar 29 12:41:33 2006 -0700 9.2 +++ b/xen/arch/ia64/xen/hyperprivop.S Thu Mar 30 09:55:26 2006 -0700 9.3 @@ -87,7 +87,7 @@ 9.4 // r16 == cr.isr 9.5 // r17 == cr.iim 9.6 // r18 == XSI_PSR_IC_OFS 9.7 -// r19 == vpsr.ic (low 32 bits) | vpsr.i (high 32 bits) 9.8 +// r19 == vpsr.ic 9.9 // r31 == pr 9.10 GLOBAL_ENTRY(fast_hyperprivop) 9.11 #ifndef FAST_HYPERPRIVOPS // see beginning of file 9.12 @@ -223,7 +223,7 @@ 1: // when we get to here r20=~=interrup 9.13 // r16 == cr.isr 9.14 // r17 == cr.iim 9.15 // r18 == XSI_PSR_IC 9.16 -// r19 == vpsr.ic (low 32 bits) | vpsr.i (high 32 bits) 9.17 +// r19 == vpsr.ic 9.18 // r31 == pr 9.19 ENTRY(hyper_ssm_i) 9.20 #ifndef FAST_SSM_I 9.21 @@ -278,11 +278,15 @@ ENTRY(hyper_ssm_i) 9.22 movl r27=~(IA64_PSR_BE|IA64_PSR_BN);; 9.23 or r30=r30,r28;; 9.24 and r30=r30,r27;; 9.25 + mov r20=1 9.26 + adds r22=XSI_PSR_I_ADDR_OFS-XSI_PSR_IC_OFS,r18 9.27 adds r21=XSI_IPSR_OFS-XSI_PSR_IC_OFS,r18 ;; 9.28 + ld8 r22=[r22] 9.29 st8 [r21]=r30 ;; 9.30 // set shared_mem interrupt_delivery_enabled to 0 9.31 // set shared_mem interrupt_collection_enabled to 0 9.32 - st8 [r18]=r0;; 9.33 + st1 [r22]=r20;; 9.34 + st4 [r18]=r0;; 9.35 // cover and set shared_mem precover_ifs to cr.ifs 9.36 // set shared_mem ifs and incomplete_regframe to 0 9.37 cover ;; 9.38 @@ -405,9 +409,10 @@ GLOBAL_ENTRY(fast_tick_reflect) 9.39 cmp.eq p6,p0=r16,r0;; 9.40 (p6) br.cond.spnt.few fast_tick_reflect_done;; 9.41 // if guest vpsr.i is off, we're done 9.42 - adds r21=XSI_PSR_I_OFS-XSI_PSR_IC_OFS,r18 ;; 9.43 - ld4 r21=[r21];; 9.44 - cmp.eq p6,p0=r21,r0 9.45 + adds r21=XSI_PSR_I_ADDR_OFS-XSI_PSR_IC_OFS,r18 ;; 9.46 + ld8 r21=[r21];; 9.47 + ld1 r21=[r21];; 9.48 + cmp.eq p0,p6=r21,r0 9.49 (p6) br.cond.spnt.few fast_tick_reflect_done;; 9.50 9.51 // OK, we have a clock tick to deliver to the active domain! 9.52 @@ -445,17 +450,22 @@ GLOBAL_ENTRY(fast_tick_reflect) 9.53 dep r21=-1,r21,IA64_PSR_CPL1_BIT,1 ;; 9.54 or r17=r17,r28;; 9.55 and r17=r17,r27;; 9.56 - ld4 r16=[r18],4;; 9.57 + ld4 r16=[r18],XSI_PSR_I_ADDR_OFS-XSI_PSR_IC_OFS;; 9.58 cmp.ne p6,p0=r16,r0;; 9.59 + ld8 r16=[r18],XSI_PSR_IC_OFS-XSI_PSR_I_ADDR_OFS 9.60 (p6) dep r17=-1,r17,IA64_PSR_IC_BIT,1 ;; 9.61 - ld4 r16=[r18],-4;; 9.62 - cmp.ne p6,p0=r16,r0;; 9.63 + ld1 r16=[r16];; 9.64 + cmp.eq p6,p0=r16,r0;; 9.65 (p6) dep r17=-1,r17,IA64_PSR_I_BIT,1 ;; 9.66 + mov r20=1 9.67 + adds r22=XSI_PSR_I_ADDR_OFS-XSI_PSR_IC_OFS,r18 9.68 adds r21=XSI_IPSR_OFS-XSI_PSR_IC_OFS,r18 ;; 9.69 + ld8 r22=[r22] 9.70 st8 [r21]=r17 ;; 9.71 // set shared_mem interrupt_delivery_enabled to 0 9.72 // set shared_mem interrupt_collection_enabled to 0 9.73 - st8 [r18]=r0;; 9.74 + st1 [r22]=r20;; 9.75 + st4 [r18]=r0;; 9.76 // cover and set shared_mem precover_ifs to cr.ifs 9.77 // set shared_mem ifs and incomplete_regframe to 0 9.78 cover ;; 9.79 @@ -530,7 +540,7 @@ END(fast_tick_reflect) 9.80 // r16 == cr.isr 9.81 // r17 == cr.iim 9.82 // r18 == XSI_PSR_IC 9.83 -// r19 == vpsr.ic (low 32 bits) | vpsr.i (high 32 bits) 9.84 +// r19 == vpsr.ic 9.85 // r31 == pr 9.86 GLOBAL_ENTRY(fast_break_reflect) 9.87 #ifndef FAST_BREAK // see beginning of file 9.88 @@ -594,12 +604,13 @@ ENTRY(fast_reflect) 9.89 #endif 9.90 // save iip in shared_info (DON'T POINT TO NEXT INSTRUCTION!) 
9.91 adds r21=XSI_IIP_OFS-XSI_PSR_IC_OFS,r18 ;; 9.92 - st8 [r21]=r29;; 9.93 + st8 [r21]=r29,XSI_ISR_OFS-XSI_IIP_OFS;; 9.94 // set shared_mem isr 9.95 - adds r21=XSI_ISR_OFS-XSI_PSR_IC_OFS,r18 ;; 9.96 st8 [r21]=r16 ;; 9.97 // set cr.ipsr 9.98 + adds r21=XSI_PSR_I_ADDR_OFS-XSI_PSR_IC_OFS,r18 9.99 mov r29=r30 ;; 9.100 + ld8 r21=[r21] 9.101 movl r28=DELIVER_PSR_SET;; 9.102 movl r27=~(DELIVER_PSR_CLR|IA64_PSR_CPL0);; 9.103 or r29=r29,r28;; 9.104 @@ -616,19 +627,22 @@ ENTRY(fast_reflect) 9.105 or r30=r30,r28;; 9.106 and r30=r30,r27;; 9.107 // also set shared_mem ipsr.i and ipsr.ic appropriately 9.108 - ld8 r24=[r18];; 9.109 - extr.u r22=r24,32,32 9.110 + ld1 r22=[r21] 9.111 + ld4 r24=[r18];; 9.112 cmp4.eq p6,p7=r24,r0;; 9.113 (p6) dep r30=0,r30,IA64_PSR_IC_BIT,1 9.114 (p7) dep r30=-1,r30,IA64_PSR_IC_BIT,1 ;; 9.115 - cmp4.eq p6,p7=r22,r0;; 9.116 + mov r24=r21 9.117 + cmp.ne p6,p7=r22,r0;; 9.118 (p6) dep r30=0,r30,IA64_PSR_I_BIT,1 9.119 (p7) dep r30=-1,r30,IA64_PSR_I_BIT,1 ;; 9.120 + mov r22=1 9.121 adds r21=XSI_IPSR_OFS-XSI_PSR_IC_OFS,r18 ;; 9.122 st8 [r21]=r30 ;; 9.123 // set shared_mem interrupt_delivery_enabled to 0 9.124 // set shared_mem interrupt_collection_enabled to 0 9.125 - st8 [r18]=r0;; 9.126 + st1 [r24]=r22 9.127 + st4 [r18]=r0;; 9.128 // cover and set shared_mem precover_ifs to cr.ifs 9.129 // set shared_mem ifs and incomplete_regframe to 0 9.130 cover ;; 9.131 @@ -639,8 +653,6 @@ ENTRY(fast_reflect) 9.132 st8 [r21]=r0 ;; 9.133 adds r21=XSI_PRECOVER_IFS_OFS-XSI_PSR_IC_OFS,r18 ;; 9.134 st8 [r21]=r24 ;; 9.135 - // vpsr.i = vpsr.ic = 0 on delivery of interruption 9.136 - st8 [r18]=r0;; 9.137 // FIXME: need to save iipa and isr to be arch-compliant 9.138 // set iip to go to domain IVA break instruction vector 9.139 movl r22=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET;; 9.140 @@ -723,7 +735,7 @@ GLOBAL_ENTRY(fast_access_reflect) 9.141 cmp.eq p7,p0=r21,r0 9.142 (p7) br.spnt.few dispatch_reflection ;; 9.143 movl r18=XSI_PSR_IC;; 9.144 - ld8 r21=[r18];; 9.145 + ld4 r21=[r18];; 9.146 cmp.eq p7,p0=r0,r21 9.147 (p7) br.spnt.few dispatch_reflection ;; 9.148 // set shared_mem ifa, FIXME: should we validate it? 9.149 @@ -1062,17 +1074,20 @@ just_do_rfi: 9.150 dep r20=0,r20,38,25;; // ensure ifs has no reserved bits set 9.151 mov cr.ifs=r20 ;; 9.152 // ipsr.cpl == (vcr.ipsr.cpl == 0) 2 : 3; 9.153 + adds r20=XSI_PSR_I_ADDR_OFS-XSI_PSR_IC_OFS,r18 9.154 dep r21=-1,r21,IA64_PSR_CPL1_BIT,1 ;; 9.155 // vpsr.i = vcr.ipsr.i; vpsr.ic = vcr.ipsr.ic 9.156 - mov r19=r0 ;; 9.157 + ld8 r20=[r20] 9.158 + mov r19=1 9.159 extr.u r23=r21,IA64_PSR_I_BIT,1 ;; 9.160 cmp.ne p7,p6=r23,r0 ;; 9.161 // not done yet 9.162 -(p7) dep r19=-1,r19,32,1 9.163 +(p7) st1 [r20]=r0 9.164 +(p6) st1 [r20]=r19;; 9.165 extr.u r23=r21,IA64_PSR_IC_BIT,1 ;; 9.166 cmp.ne p7,p6=r23,r0 ;; 9.167 -(p7) dep r19=-1,r19,0,1 ;; 9.168 - st8 [r18]=r19 ;; 9.169 +(p7) st4 [r18]=r19;; 9.170 +(p6) st4 [r18]=r0;; 9.171 // force on psr.ic, i, dt, rt, it, bn 9.172 movl r20=(IA64_PSR_I|IA64_PSR_IC|IA64_PSR_DT|IA64_PSR_RT|IA64_PSR_IT|IA64_PSR_BN) 9.173 ;; 9.174 @@ -1209,10 +1224,12 @@ GLOBAL_ENTRY(rfi_with_interrupt) 9.175 extr.u r20=r21,41,2 ;; // get v(!)psr.ri 9.176 dep r16=r20,r16,41,2 ;; // deposit cr.isr.ei 9.177 adds r22=XSI_ISR_OFS-XSI_PSR_IC_OFS,r18 ;; 9.178 - st8 [r22]=r16 ;; 9.179 + st8 [r22]=r16,XSI_PSR_I_ADDR_OFS-XSI_ISR_OFS ;; 9.180 // set cr.ipsr (make sure cpl==2!) 
9.181 mov r29=r17 ;; 9.182 movl r28=DELIVER_PSR_SET;; 9.183 + mov r20=1 9.184 + ld8 r22=[r22] 9.185 movl r27=~(DELIVER_PSR_CLR|IA64_PSR_CPL0);; 9.186 or r29=r29,r28;; 9.187 and r29=r29,r27;; 9.188 @@ -1220,7 +1237,8 @@ GLOBAL_ENTRY(rfi_with_interrupt) 9.189 // v.ipsr and v.iip are already set (and v.iip validated) as rfi target 9.190 // set shared_mem interrupt_delivery_enabled to 0 9.191 // set shared_mem interrupt_collection_enabled to 0 9.192 - st8 [r18]=r0;; 9.193 + st1 [r22]=r20 9.194 + st4 [r18]=r0;; 9.195 // cover and set shared_mem precover_ifs to cr.ifs 9.196 // set shared_mem ifs and incomplete_regframe to 0 9.197 #if 0
10.1 --- a/xen/arch/ia64/xen/ivt.S Wed Mar 29 12:41:33 2006 -0700 10.2 +++ b/xen/arch/ia64/xen/ivt.S Thu Mar 30 09:55:26 2006 -0700 10.3 @@ -930,7 +930,7 @@ ENTRY(break_fault) 10.4 #endif 10.5 movl r18=XSI_PSR_IC 10.6 ;; 10.7 - ld8 r19=[r18] 10.8 + ld4 r19=[r18] 10.9 ;; 10.10 cmp.eq p7,p0=r0,r17 // is this a psuedo-cover? 10.11 (p7) br.spnt.many dispatch_privop_fault
11.1 --- a/xen/arch/ia64/xen/process.c Wed Mar 29 12:41:33 2006 -0700 11.2 +++ b/xen/arch/ia64/xen/process.c Thu Mar 30 09:55:26 2006 -0700 11.3 @@ -206,9 +206,9 @@ void reflect_interruption(unsigned long 11.4 #ifdef CONFIG_SMP 11.5 #warning "SMP FIXME: sharedinfo doesn't handle smp yet, need page per vcpu" 11.6 #endif 11.7 - regs->r31 = (unsigned long) &(((mapped_regs_t *)SHARED_ARCHINFO_ADDR)->ipsr); 11.8 + regs->r31 = XSI_IPSR; 11.9 11.10 - PSCB(v,interrupt_delivery_enabled) = 0; 11.11 + v->vcpu_info->evtchn_upcall_mask = 1; 11.12 PSCB(v,interrupt_collection_enabled) = 0; 11.13 11.14 inc_slow_reflect_count(vector);
12.1 --- a/xen/arch/ia64/xen/vcpu.c Wed Mar 29 12:41:33 2006 -0700 12.2 +++ b/xen/arch/ia64/xen/vcpu.c Thu Mar 30 09:55:26 2006 -0700 12.3 @@ -197,7 +197,8 @@ IA64FAULT vcpu_reset_psr_sm(VCPU *vcpu, 12.4 ipsr = (struct ia64_psr *)®s->cr_ipsr; 12.5 imm = *(struct ia64_psr *)&imm24; 12.6 // interrupt flag 12.7 - if (imm.i) PSCB(vcpu,interrupt_delivery_enabled) = 0; 12.8 + if (imm.i) 12.9 + vcpu->vcpu_info->evtchn_upcall_mask = 1; 12.10 if (imm.ic) PSCB(vcpu,interrupt_collection_enabled) = 0; 12.11 // interrupt collection flag 12.12 //if (imm.ic) PSCB(vcpu,interrupt_delivery_enabled) = 0; 12.13 @@ -232,7 +233,7 @@ IA64FAULT vcpu_set_psr_dt(VCPU *vcpu) 12.14 12.15 IA64FAULT vcpu_set_psr_i(VCPU *vcpu) 12.16 { 12.17 - PSCB(vcpu,interrupt_delivery_enabled) = 1; 12.18 + vcpu->vcpu_info->evtchn_upcall_mask = 0; 12.19 PSCB(vcpu,interrupt_collection_enabled) = 1; 12.20 return IA64_NO_FAULT; 12.21 } 12.22 @@ -261,11 +262,11 @@ IA64FAULT vcpu_set_psr_sm(VCPU *vcpu, UI 12.23 } 12.24 if (imm.sp) { ipsr->sp = 1; psr.sp = 1; } 12.25 if (imm.i) { 12.26 - if (!PSCB(vcpu,interrupt_delivery_enabled)) { 12.27 + if (vcpu->vcpu_info->evtchn_upcall_mask) { 12.28 //printf("vcpu_set_psr_sm: psr.ic 0->1 "); 12.29 enabling_interrupts = 1; 12.30 } 12.31 - PSCB(vcpu,interrupt_delivery_enabled) = 1; 12.32 + vcpu->vcpu_info->evtchn_upcall_mask = 0; 12.33 } 12.34 if (imm.ic) PSCB(vcpu,interrupt_collection_enabled) = 1; 12.35 // TODO: do this faster 12.36 @@ -312,9 +313,9 @@ IA64FAULT vcpu_set_psr_l(VCPU *vcpu, UIN 12.37 if (newpsr.up) { ipsr->up = 1; psr.up = 1; } 12.38 if (newpsr.sp) { ipsr->sp = 1; psr.sp = 1; } 12.39 if (newpsr.i) { 12.40 - if (!PSCB(vcpu,interrupt_delivery_enabled)) 12.41 + if (vcpu->vcpu_info->evtchn_upcall_mask) 12.42 enabling_interrupts = 1; 12.43 - PSCB(vcpu,interrupt_delivery_enabled) = 1; 12.44 + vcpu->vcpu_info->evtchn_upcall_mask = 0; 12.45 } 12.46 if (newpsr.ic) PSCB(vcpu,interrupt_collection_enabled) = 1; 12.47 if (newpsr.mfl) { ipsr->mfl = 1; psr.mfl = 1; } 12.48 @@ -340,7 +341,7 @@ IA64FAULT vcpu_get_psr(VCPU *vcpu, UINT6 12.49 12.50 newpsr = *(struct ia64_psr *)®s->cr_ipsr; 12.51 if (newpsr.cpl == 2) newpsr.cpl = 0; 12.52 - if (PSCB(vcpu,interrupt_delivery_enabled)) newpsr.i = 1; 12.53 + if (!vcpu->vcpu_info->evtchn_upcall_mask) newpsr.i = 1; 12.54 else newpsr.i = 0; 12.55 if (PSCB(vcpu,interrupt_collection_enabled)) newpsr.ic = 1; 12.56 else newpsr.ic = 0; 12.57 @@ -360,7 +361,7 @@ BOOLEAN vcpu_get_psr_ic(VCPU *vcpu) 12.58 12.59 BOOLEAN vcpu_get_psr_i(VCPU *vcpu) 12.60 { 12.61 - return !!PSCB(vcpu,interrupt_delivery_enabled); 12.62 + return !vcpu->vcpu_info->evtchn_upcall_mask; 12.63 } 12.64 12.65 UINT64 vcpu_get_ipsr_int_state(VCPU *vcpu,UINT64 prevpsr) 12.66 @@ -373,7 +374,7 @@ UINT64 vcpu_get_ipsr_int_state(VCPU *vcp 12.67 psr.ia64_psr.be = 0; if (dcr & IA64_DCR_BE) psr.ia64_psr.be = 1; 12.68 psr.ia64_psr.pp = 0; if (dcr & IA64_DCR_PP) psr.ia64_psr.pp = 1; 12.69 psr.ia64_psr.ic = PSCB(vcpu,interrupt_collection_enabled); 12.70 - psr.ia64_psr.i = PSCB(vcpu,interrupt_delivery_enabled); 12.71 + psr.ia64_psr.i = !vcpu->vcpu_info->evtchn_upcall_mask; 12.72 psr.ia64_psr.bn = PSCB(vcpu,banknum); 12.73 psr.ia64_psr.dt = 1; psr.ia64_psr.it = 1; psr.ia64_psr.rt = 1; 12.74 if (psr.ia64_psr.cpl == 2) psr.ia64_psr.cpl = 0; // !!!! fool domain 12.75 @@ -931,7 +932,7 @@ IA64FAULT vcpu_set_eoi(VCPU *vcpu, UINT6 12.76 bits &= ~(1L << bitnum); 12.77 *p = bits; 12.78 /* clearing an eoi bit may unmask another pending interrupt... 
*/ 12.79 - if (PSCB(vcpu,interrupt_delivery_enabled)) { // but only if enabled... 12.80 + if (!vcpu->vcpu_info->evtchn_upcall_mask) { // but only if enabled... 12.81 // worry about this later... Linux only calls eoi 12.82 // with interrupts disabled 12.83 printf("Trying to EOI interrupt with interrupts enabled\n"); 12.84 @@ -1186,7 +1187,6 @@ IA64FAULT vcpu_rfi(VCPU *vcpu) 12.85 12.86 psr.i64 = PSCB(vcpu,ipsr); 12.87 if (psr.ia64_psr.cpl < 3) psr.ia64_psr.cpl = 2; 12.88 - if (psr.ia64_psr.i) PSCB(vcpu,interrupt_delivery_enabled) = 1; 12.89 int_enable = psr.ia64_psr.i; 12.90 if (psr.ia64_psr.ic) PSCB(vcpu,interrupt_collection_enabled) = 1; 12.91 if (psr.ia64_psr.dt && psr.ia64_psr.rt && psr.ia64_psr.it) vcpu_set_metaphysical_mode(vcpu,FALSE); 12.92 @@ -1218,7 +1218,7 @@ IA64FAULT vcpu_rfi(VCPU *vcpu) 12.93 } 12.94 PSCB(vcpu,interrupt_collection_enabled) = 1; 12.95 vcpu_bsw1(vcpu); 12.96 - PSCB(vcpu,interrupt_delivery_enabled) = int_enable; 12.97 + vcpu->vcpu_info->evtchn_upcall_mask = !int_enable; 12.98 return (IA64_NO_FAULT); 12.99 } 12.100
13.1 --- a/xen/arch/ia64/xen/xentime.c Wed Mar 29 12:41:33 2006 -0700 13.2 +++ b/xen/arch/ia64/xen/xentime.c Thu Mar 30 09:55:26 2006 -0700 13.3 @@ -111,7 +111,7 @@ xen_timer_interrupt (int irq, void *dev_ 13.4 if (!(++count & ((HEARTBEAT_FREQ*1024)-1))) { 13.5 printf("Heartbeat... iip=%p\n", /*",psr.i=%d,pend=%d\n", */ 13.6 regs->cr_iip /*, 13.7 - VCPU(current,interrupt_delivery_enabled), 13.8 + !current->vcpu_info->evtchn_upcall_mask, 13.9 VCPU(current,pending_interruption) */); 13.10 count = 0; 13.11 }
14.1 --- a/xen/include/asm-ia64/domain.h Wed Mar 29 12:41:33 2006 -0700 14.2 +++ b/xen/include/asm-ia64/domain.h Thu Mar 30 09:55:26 2006 -0700 14.3 @@ -39,6 +39,9 @@ struct arch_domain { 14.4 #define xen_vastart arch.xen_vastart 14.5 #define xen_vaend arch.xen_vaend 14.6 #define shared_info_va arch.shared_info_va 14.7 +#define INT_ENABLE_OFFSET(v) \ 14.8 + (sizeof(vcpu_info_t) * (v)->vcpu_id + \ 14.9 + offsetof(vcpu_info_t, evtchn_upcall_mask)) 14.10 14.11 struct arch_vcpu { 14.12 #if 1
15.1 --- a/xen/include/public/arch-ia64.h Wed Mar 29 12:41:33 2006 -0700 15.2 +++ b/xen/include/public/arch-ia64.h Thu Mar 30 09:55:26 2006 -0700 15.3 @@ -268,7 +268,11 @@ typedef struct { 15.4 unsigned long precover_ifs; 15.5 unsigned long unat; // not sure if this is needed until NaT arch is done 15.6 int interrupt_collection_enabled; // virtual psr.ic 15.7 - int interrupt_delivery_enabled; // virtual psr.i 15.8 + /* virtual interrupt deliverable flag is evtchn_upcall_mask in 15.9 + * shared info area now. interrupt_mask_addr is the address 15.10 + * of evtchn_upcall_mask for current vcpu 15.11 + */ 15.12 + unsigned long interrupt_mask_addr; 15.13 int pending_interruption; 15.14 int incomplete_regframe; // see SDM vol2 6.8 15.15 unsigned long reserved5_1[4];
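For reference, a hedged sketch of how the address stored into
interrupt_mask_addr is derived, matching INT_ENABLE_OFFSET() in the domain.h
hunk and the new_thread() change above; it assumes, as the public headers of
this era do, that shared_info begins with the vcpu_info[] array:

    #include <stddef.h>
    #include <stdint.h>
    #include <public/xen.h>   /* vcpu_info_t (assumed include path) */

    /* Sketch only: guest-visible address of a vcpu's evtchn_upcall_mask,
     * i.e. the value written into mapped_regs_t.interrupt_mask_addr.
     * sharedinfo_addr is the guest virtual address of the shared info page
     * (SHAREDINFO_ADDR in the xen/ia64 sources). */
    static uint64_t interrupt_mask_addr_for(uint64_t sharedinfo_addr,
                                            unsigned int vcpu_id)
    {
        return sharedinfo_addr
             + sizeof(vcpu_info_t) * vcpu_id                /* per-vcpu slot */
             + offsetof(vcpu_info_t, evtchn_upcall_mask);   /* field offset  */
    }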