
xen/arch/ia64/vmx/optvfault.S @ 16670:2900e4dacaa7

[IA64] xenoprof: don't modify mPSR.pp (VTi case)

In the VTi domain case, xenoprof manages mPSR.pp, so the fast paths here
must not modify it.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author Alex Williamson <alex.williamson@hp.com>
date Mon Dec 17 09:56:12 2007 -0700 (2007-12-17)
parents 9152cf7f5b82
children 4f1f9ee50133

/*
 * arch/ia64/vmx/optvfault.S
 * optimize virtualization fault handler
 *
 * Copyright (C) 2006 Intel Co
 * Xuefei Xu (Anthony Xu) <anthony.xu@intel.com>
 */

#include <linux/config.h>
#include <asm/config.h>
#include <asm/pgtable.h>
#include <asm/asmmacro.h>
#include <asm/kregs.h>
#include <asm/offsets.h>
#include <asm/percpu.h>
#include <asm/processor.h>
#include <asm/vmx_vpd.h>
#include <asm/vmx_pal_vsa.h>
#include <asm/asm-offsets.h>
#include <asm-ia64/vmx_mm_def.h>
#include <asm-ia64/vmx_phy_mode.h>

#define ACCE_MOV_FROM_AR
#define ACCE_MOV_FROM_RR
#define ACCE_MOV_TO_RR
#define ACCE_RSM
#define ACCE_SSM
#define ACCE_MOV_TO_PSR
#define ACCE_THASH

// Inputs are: r21 (= current), r24 (= cause), r25 (= insn), r31 (= saved pr)

// mov r1=ar3 (only itc is virtualized)
GLOBAL_ENTRY(vmx_asm_mov_from_ar)
#ifndef ACCE_MOV_FROM_AR
    br.many vmx_virtualization_fault_back
#endif
    add r18=VCPU_VTM_OFFSET_OFS,r21
    add r16=VCPU_VTM_LAST_ITC_OFS,r21
    extr.u r17=r25,6,7
    ;;
    ld8 r18=[r18]
    mov r19=ar.itc
    mov r24=b0
    ;;
    ld8 r16=[r16]
    add r19=r19,r18
    movl r20=asm_mov_to_reg
    ;;
    adds r30=vmx_resume_to_guest-asm_mov_to_reg,r20
    shladd r17=r17,4,r20
    cmp.gtu p6,p0=r16,r19
    ;;
    (p6) mov r19=r16
    mov b0=r17
    br.sptk.few b0
    ;;
END(vmx_asm_mov_from_ar)
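
/*
 * A minimal C sketch of the virtual ITC read done by vmx_asm_mov_from_ar
 * above.  The helper name read_host_itc and the vtime struct are
 * assumptions for illustration; the assembly reads the same
 * vtm_offset/last_itc fields through the VCPU_VTM_*_OFS offsets.
 *
 *   #include <stdint.h>
 *
 *   struct vtime {
 *       uint64_t vtm_offset;   // guest ITC = host ITC + vtm_offset
 *       uint64_t last_itc;     // last ITC value handed back to the guest
 *   };
 *
 *   static uint64_t guest_gr[128];                     // stand-in guest GRs
 *   static uint64_t read_host_itc(void) { return 0; }  // stub for ar.itc
 *
 *   static void emulate_mov_from_itc(struct vtime *vtm, uint64_t insn)
 *   {
 *       unsigned target = (insn >> 6) & 0x7f;   // r1 field, insn bits 6..12
 *       uint64_t itc = read_host_itc() + vtm->vtm_offset;
 *
 *       if (vtm->last_itc > itc)                // keep the virtual ITC monotonic
 *           itc = vtm->last_itc;
 *       guest_gr[target] = itc;                 // the asm_mov_to_reg path
 *   }
 */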

// mov r1=rr[r3]
GLOBAL_ENTRY(vmx_asm_mov_from_rr)
#ifndef ACCE_MOV_FROM_RR
    br.many vmx_virtualization_fault_back
#endif
    extr.u r16=r25,20,7
    extr.u r17=r25,6,7
    movl r20=asm_mov_from_reg
    ;;
    adds r30=vmx_asm_mov_from_rr_back_1-asm_mov_from_reg,r20
    shladd r16=r16,4,r20
    mov r24=b0
    ;;
    add r27=VCPU_VRR0_OFS,r21
    mov b0=r16
    br.many b0
    ;;
vmx_asm_mov_from_rr_back_1:
    adds r30=vmx_resume_to_guest-asm_mov_from_reg,r20
    adds r22=asm_mov_to_reg-asm_mov_from_reg,r20
    shr.u r26=r19,61
    ;;
    shladd r17=r17,4,r22
    shladd r27=r26,3,r27
    ;;
    ld8 r19=[r27]
    mov b0=r17
    br.many b0
END(vmx_asm_mov_from_rr)

// mov rr[r3]=r2
GLOBAL_ENTRY(vmx_asm_mov_to_rr)
#ifndef ACCE_MOV_TO_RR
    br.many vmx_virtualization_fault_back
#endif
    add r22=IA64_VCPU_DOMAIN_OFFSET,r21
    extr.u r16=r25,20,7 // r3
    extr.u r17=r25,13,7 // r2
    ;;
    ld8 r22=[r22] // Get domain
    movl r20=asm_mov_from_reg
    ;;
    adds r30=vmx_asm_mov_to_rr_back_1-asm_mov_from_reg,r20
    shladd r16=r16,4,r20 // get r3
    mov r18=b0 // save b0
    ;;
    add r27=VCPU_VRR0_OFS,r21
    mov b0=r16
    br.many b0
    ;;
vmx_asm_mov_to_rr_back_1:
    adds r30=vmx_asm_mov_to_rr_back_2-asm_mov_from_reg,r20
    shr.u r23=r19,61 // get RR #
    shladd r17=r17,4,r20 // get r2
    ;;
    // if rr7, go back
    cmp.eq p6,p0=7,r23
    mov b0=r18 // restore b0
    (p6) br.cond.dpnt.many vmx_virtualization_fault_back
    ;;
    mov r28=r19 // save r3
    mov b0=r17
    br.many b0
vmx_asm_mov_to_rr_back_2:
    adds r30=vmx_resume_to_guest-asm_mov_from_reg,r20
    shladd r27=r23,3,r27 // address of VRR
    add r22=IA64_DOMAIN_RID_BITS_OFFSET,r22
    ;;
    ld1 r22=[r22] // Load rid_bits from domain
    mov b0=r18 // restore b0
    adds r16=IA64_VCPU_STARTING_RID_OFFSET,r21
    ;;
    ld4 r16=[r16] // load starting_rid
    extr.u r17=r19,8,24 // Extract RID
    ;;
    shr r17=r17,r22 // Shift out used bits
    shl r16=r16,8
    ;;
    add r20=r19,r16
    cmp.ne p6,p0=0,r17 // If reserved RID bits are set, fall back to C.
    (p6) br.cond.dpnt.many vmx_virtualization_fault_back
    ;; // mangle RID bytes 1 and 3
    extr.u r16=r20,8,8
    extr.u r17=r20,24,8
    mov r24=r18 // saved b0 for resume
    ;;
    extr.u r18=r20,2,6 // page size
    dep r20=r16,r20,24,8
    mov b0=r30
    ;;
    dep r20=r17,r20,8,8
    ;; // set ve to 1
    dep r20=-1,r20,0,1
    // If ps > PAGE_SHIFT, use PAGE_SHIFT
    cmp.lt p6,p0=PAGE_SHIFT,r18
    ;;
    (p6) mov r18=PAGE_SHIFT
    ;;
    (p6) dep r20=r18,r20,2,6
    ;;
    st8 [r27]=r19 // Write to vrr.
    // Write to save_rr if rr=0 or rr=4.
    cmp.eq p6,p0=0,r23
    ;;
    cmp.eq.or p6,p0=4,r23
    ;;
    adds r16=IA64_VCPU_MMU_MODE_OFFSET,r21
    (p6) adds r17=IA64_VCPU_META_SAVED_RR0_OFFSET,r21
    ;;
    ld1 r16=[r16]
    cmp.eq p7,p0=r0,r0
    (p6) shladd r17=r23,1,r17
    ;;
    (p6) st8 [r17]=r20
    (p6) cmp.eq p7,p0=VMX_MMU_VIRTUAL,r16 // Set physical rr if in virt mode
    ;;
    (p7) mov rr[r28]=r20
    br.many b0
END(vmx_asm_mov_to_rr)
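
/*
 * A C sketch of the region-register mangling performed above, under the
 * assumption that the rr format is ve=bit 0, ps=bits 2..7, rid=bits 8..31,
 * and that PAGE_SHIFT, rid_bits and starting_rid come from the domain as
 * in the assembly.  Returning 0 stands in for the branch back to the C
 * handler (vmx_virtualization_fault_back).
 *
 *   #include <stdint.h>
 *
 *   #define PAGE_SHIFT 14   // assumed 16KB pages
 *
 *   static uint64_t mangle_rr(uint64_t grr, unsigned rid_bits,
 *                             unsigned starting_rid)
 *   {
 *       if (((grr >> 8) & 0xffffff) >> rid_bits)      // reserved RID bits set
 *           return 0;                                 // fall back to C
 *
 *       uint64_t rr = grr + ((uint64_t)starting_rid << 8);
 *       uint64_t b1 = (rr >> 8)  & 0xff;              // RID byte 1
 *       uint64_t b3 = (rr >> 24) & 0xff;              // RID byte 3
 *       rr = (rr & ~(0xffULL << 24)) | (b1 << 24);    // swap RID bytes 1 and 3
 *       rr = (rr & ~(0xffULL << 8))  | (b3 << 8);
 *       rr |= 1;                                      // ve = 1
 *
 *       unsigned ps = (rr >> 2) & 0x3f;               // page size field
 *       if (ps > PAGE_SHIFT)                          // clamp to PAGE_SHIFT
 *           rr = (rr & ~(0x3fULL << 2)) | ((uint64_t)PAGE_SHIFT << 2);
 *       return rr;
 *   }
 */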

// rsm
GLOBAL_ENTRY(vmx_asm_rsm)
#ifndef ACCE_RSM
    br.many vmx_virtualization_fault_back
#endif
    add r16=IA64_VPD_BASE_OFFSET,r21
    extr.u r26=r25,6,21 // Imm21
    extr.u r27=r25,31,2 // I2d
    ;;
    ld8 r16=[r16]
    extr.u r28=r25,36,1 // I
    dep r26=r27,r26,21,2
    ;;
    add r17=VPD_VPSR_START_OFFSET,r16
    add r22=IA64_VCPU_MMU_MODE_OFFSET,r21
    // r26 is imm24
    dep r26=r28,r26,23,1
    ;;
    ld8 r18=[r17]

    // xenoprof
    // Don't change mPSR.pp.
    // It is manipulated by xenoprof.
    movl r28=IA64_PSR_IC+IA64_PSR_I+IA64_PSR_DT+IA64_PSR_SI+IA64_PSR_PP

    ld1 r23=[r22]
    sub r27=-1,r26 // ~r26
    mov r24=b0
    ;;
    mov r20=cr.ipsr
    or r28=r27,r28 // Keep IC,I,DT,SI,PP
    and r19=r18,r27 // Update vpsr
    ;;
    st8 [r17]=r19
    and r20=r20,r28 // Update ipsr
    adds r27=IA64_VCPU_FP_PSR_OFFSET,r21
    ;;
    ld8 r27=[r27]
    ;;
    tbit.nz p8,p0=r27,IA64_PSR_DFH_BIT
    ;;
    (p8) dep r20=-1,r20,IA64_PSR_DFH_BIT,1 // Keep dfh
    ;;
    mov cr.ipsr=r20
    cmp.ne p6,p0=VMX_MMU_VIRTUAL,r23
    ;;
    tbit.z.or p6,p0=r26,IA64_PSR_DT_BIT
    (p6) br.dptk vmx_resume_to_guest // DT not cleared or already in phy mode
    ;;
    // Switch to metaphysical mode D.
    add r26=IA64_VCPU_META_RID_D_OFFSET,r21
    mov r23=VMX_MMU_PHY_D
    ;;
    ld8 r26=[r26]
    st1 [r22]=r23
    dep.z r28=4,61,3
    ;;
    mov rr[r0]=r26
    ;;
    mov rr[r28]=r26
    ;;
    srlz.d
    br.many vmx_resume_to_guest
END(vmx_asm_rsm)
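
/*
 * The rsm/ssm fast paths above reassemble the 24-bit immediate from three
 * instruction fields and then apply it to vPSR and mPSR.  A C sketch;
 * keep_mask corresponds to the IC|I|DT|SI|PP set loaded with movl, PP
 * being left alone for xenoprof.
 *
 *   #include <stdint.h>
 *
 *   static uint64_t rsm_ssm_imm24(uint64_t insn)
 *   {
 *       uint64_t imm21 = (insn >> 6)  & 0x1fffff;   // insn bits 6..26
 *       uint64_t i2d   = (insn >> 31) & 0x3;        // insn bits 31..32
 *       uint64_t i     = (insn >> 36) & 0x1;        // insn bit 36
 *       return imm21 | (i2d << 21) | (i << 23);
 *   }
 *
 *   // rsm imm24: clear the bits in vPSR, and in mPSR except for the
 *   // bits the hypervisor keeps under its own control.
 *   static void do_rsm(uint64_t *vpsr, uint64_t *ipsr,
 *                      uint64_t imm24, uint64_t keep_mask)
 *   {
 *       *vpsr &= ~imm24;
 *       *ipsr &= ~imm24 | keep_mask;
 *   }
 */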

// ssm
GLOBAL_ENTRY(vmx_asm_ssm)
#ifndef ACCE_SSM
    br.many vmx_virtualization_fault_back
#endif
    add r16=IA64_VPD_BASE_OFFSET,r21
    extr.u r26=r25,6,21
    extr.u r27=r25,31,2
    ;;
    ld8 r16=[r16]
    extr.u r28=r25,36,1
    dep r26=r27,r26,21,2
    ;; // r26 is imm24
    add r27=VPD_VPSR_START_OFFSET,r16
    dep r26=r28,r26,23,1
    ;; // r19 vpsr
    ld8 r29=[r27]
    mov r24=b0
    dep r17=0,r26,IA64_PSR_PP_BIT,1 // For xenoprof
                                    // Don't change mPSR.pp.
                                    // It is maintained by xenoprof.
    ;;
    add r22=IA64_VCPU_MMU_MODE_OFFSET,r21
    mov r20=cr.ipsr
    or r19=r29,r26
    ;;
    ld1 r23=[r22] // mmu_mode
    st8 [r27]=r19 // vpsr
    or r20=r20,r17
    ;;
    mov cr.ipsr=r20
    movl r28=IA64_PSR_DT+IA64_PSR_RT+IA64_PSR_IT
    ;;
    and r19=r28,r19
    cmp.eq p6,p0=VMX_MMU_VIRTUAL,r23
    ;;
    cmp.ne.or p6,p0=r28,r19 // (vpsr & (it+dt+rt)) != (it+dt+rt), i.e. stay in phy mode
    (p6) br.dptk vmx_asm_ssm_1
    ;;
    add r26=IA64_VCPU_META_SAVED_RR0_OFFSET,r21
    add r27=IA64_VCPU_META_SAVED_RR0_OFFSET+8,r21
    mov r23=VMX_MMU_VIRTUAL
    ;;
    ld8 r26=[r26]
    ld8 r27=[r27]
    st1 [r22]=r23
    dep.z r28=4,61,3
    ;;
    mov rr[r0]=r26
    ;;
    mov rr[r28]=r27
    ;;
    srlz.d
    ;;
vmx_asm_ssm_1:
    tbit.nz p6,p0=r29,IA64_PSR_I_BIT
    ;;
    tbit.z.or p6,p0=r19,IA64_PSR_I_BIT
    (p6) br.dptk vmx_resume_to_guest
    ;;
    add r29=VPD_VTPR_START_OFFSET,r16
    add r30=VPD_VHPI_START_OFFSET,r16
    ;;
    ld8 r29=[r29]
    ld8 r30=[r30]
    ;;
    extr.u r17=r29,4,4
    extr.u r18=r29,16,1
    ;;
    dep r17=r18,r17,4,1
    ;;
    cmp.gt p6,p0=r30,r17
    (p6) br.dpnt.few vmx_asm_dispatch_vexirq
    br.many vmx_resume_to_guest
END(vmx_asm_ssm)
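
/*
 * Before branching to vmx_asm_dispatch_vexirq, the code above (and the
 * tail of vmx_asm_mov_to_psr below) compares VHPI against the threshold
 * derived from vTPR.  A C sketch of that comparison, assuming the
 * architectural layout tpr.mic = bits 4..7 and tpr.mmi = bit 16:
 *
 *   #include <stdint.h>
 *
 *   static int vhpi_pending(uint64_t vtpr, uint64_t vhpi)
 *   {
 *       uint64_t mic = (vtpr >> 4)  & 0xf;   // interrupt class threshold
 *       uint64_t mmi = (vtpr >> 16) & 0x1;   // mask maskable interrupts
 *       return vhpi > (mic | (mmi << 4));    // deliver via vexirq if true
 *   }
 */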

// mov psr.l=r2
GLOBAL_ENTRY(vmx_asm_mov_to_psr)
#ifndef ACCE_MOV_TO_PSR
    br.many vmx_virtualization_fault_back
#endif
    add r16=IA64_VPD_BASE_OFFSET,r21
    extr.u r26=r25,13,7 // r2
    ;;
    ld8 r16=[r16]
    movl r20=asm_mov_from_reg
    ;;
    adds r30=vmx_asm_mov_to_psr_back-asm_mov_from_reg,r20
    shladd r26=r26,4,r20
    mov r24=b0
    ;;
    add r27=VPD_VPSR_START_OFFSET,r16
    mov b0=r26
    br.many b0
    ;;
vmx_asm_mov_to_psr_back:
    ld8 r17=[r27] // vpsr
    add r22=IA64_VCPU_MMU_MODE_OFFSET,r21
    dep r19=0,r19,32,32 // Clear bits 32-63
    ;;
    ld1 r23=[r22] // mmu_mode
    dep r18=0,r17,0,32
    ;;
    or r30=r18,r19
    movl r28=IA64_PSR_DT+IA64_PSR_RT+IA64_PSR_IT
    ;;
    st8 [r27]=r30 // set vpsr
    and r27=r28,r30
    and r29=r28,r17
    ;;
    cmp.eq p5,p0=r29,r27 // (old_vpsr & (dt+rt+it)) == (new_vpsr & (dt+rt+it))
    cmp.eq p6,p7=r28,r27 // (new_vpsr & (dt+rt+it)) == (dt+rt+it)
    (p5) br.many vmx_asm_mov_to_psr_1 // no change
    ;;
    // virtual to physical D
    (p7) add r26=IA64_VCPU_META_RID_D_OFFSET,r21
    (p7) add r27=IA64_VCPU_META_RID_D_OFFSET,r21
    (p7) mov r23=VMX_MMU_PHY_D
    ;;
    // physical to virtual
    (p6) add r26=IA64_VCPU_META_SAVED_RR0_OFFSET,r21
    (p6) add r27=IA64_VCPU_META_SAVED_RR0_OFFSET+8,r21
    (p6) mov r23=VMX_MMU_VIRTUAL
    ;;
    ld8 r26=[r26]
    ld8 r27=[r27]
    st1 [r22]=r23
    dep.z r28=4,61,3
    ;;
    mov rr[r0]=r26
    ;;
    mov rr[r28]=r27
    ;;
    srlz.d
    ;;
vmx_asm_mov_to_psr_1:
    mov r20=cr.ipsr
    movl r28=IA64_PSR_IC+IA64_PSR_I+IA64_PSR_DT+IA64_PSR_SI+IA64_PSR_RT
    ;;
    tbit.nz p7,p0=r20,IA64_PSR_PP_BIT // For xenoprof
    or r19=r19,r28
    dep r20=0,r20,0,32
    ;;
    add r20=r19,r20
    mov b0=r24
    ;;
    adds r27=IA64_VCPU_FP_PSR_OFFSET,r21
    (p7) dep r20=-1,r20,IA64_PSR_PP_BIT,1 // For xenoprof
                                          // Don't change mPSR.pp.
                                          // It is maintained by xenoprof.
    ;;
    ld8 r27=[r27]
    ;;
    tbit.nz p8,p0=r27,IA64_PSR_DFH_BIT
    ;;
    (p8) dep r20=-1,r20,IA64_PSR_DFH_BIT,1
    ;;
    mov cr.ipsr=r20
    cmp.ne p6,p0=r0,r0
    ;;
    tbit.nz.or p6,p0=r17,IA64_PSR_I_BIT
    tbit.z.or p6,p0=r30,IA64_PSR_I_BIT
    (p6) br.dpnt.few vmx_resume_to_guest
    ;;
    add r29=VPD_VTPR_START_OFFSET,r16
    add r30=VPD_VHPI_START_OFFSET,r16
    ;;
    ld8 r29=[r29]
    ld8 r30=[r30]
    ;;
    extr.u r17=r29,4,4
    extr.u r18=r29,16,1
    ;;
    dep r17=r18,r17,4,1
    ;;
    cmp.gt p6,p0=r30,r17
    (p6) br.dpnt.few vmx_asm_dispatch_vexirq
    br.many vmx_resume_to_guest
END(vmx_asm_mov_to_psr)
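
/*
 * A C sketch of how the mov-to-psr path above decides whether to switch
 * between virtual and metaphysical mode, based only on the it/dt/rt bits
 * of the old and new vPSR (psr.dt = bit 17, psr.rt = bit 27,
 * psr.it = bit 36):
 *
 *   #include <stdint.h>
 *
 *   enum mmu_switch { MMU_KEEP, MMU_TO_VIRTUAL, MMU_TO_PHY_D };
 *
 *   static enum mmu_switch psr_mmu_switch(uint64_t old_vpsr, uint64_t new_vpsr)
 *   {
 *       const uint64_t MASK = (1ULL << 36) | (1ULL << 27) | (1ULL << 17);
 *       uint64_t o = old_vpsr & MASK;
 *       uint64_t n = new_vpsr & MASK;
 *
 *       if (o == n)
 *           return MMU_KEEP;          // p5: no change, skip the rr reload
 *       if (n == MASK)
 *           return MMU_TO_VIRTUAL;    // p6: load saved rr0/rr4
 *       return MMU_TO_PHY_D;          // p7: load the metaphysical RID
 *   }
 */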

ENTRY(vmx_asm_dispatch_vexirq)
    // increment iip
    mov r16=cr.ipsr
    ;;
    extr.u r17=r16,IA64_PSR_RI_BIT,2
    tbit.nz p6,p7=r16,IA64_PSR_RI_BIT+1
    ;;
    (p6) mov r18=cr.iip
    (p6) mov r17=r0
    (p7) add r17=1,r17
    ;;
    (p6) add r18=0x10,r18
    dep r16=r17,r16,IA64_PSR_RI_BIT,2
    ;;
    (p6) mov cr.iip=r18
    mov cr.ipsr=r16
    br.many vmx_dispatch_vexirq
END(vmx_asm_dispatch_vexirq)
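
/*
 * A C sketch of the psr.ri/iip update used above (and again in
 * vmx_resume_to_guest) to step the guest past the faulting instruction:
 * ri is the 2-bit slot number at psr bits 41..42 and a bundle is 16
 * bytes.
 *
 *   #include <stdint.h>
 *
 *   static void advance_guest_ip(uint64_t *ipsr, uint64_t *iip)
 *   {
 *       unsigned ri = (*ipsr >> 41) & 0x3;
 *       if (ri >= 2) {                // slot 2: move to the next bundle
 *           *iip += 16;
 *           ri = 0;
 *       } else {                      // slot 0 or 1: bump the slot number
 *           ri += 1;
 *       }
 *       *ipsr = (*ipsr & ~(0x3ULL << 41)) | ((uint64_t)ri << 41);
 *   }
 */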

// thash r1=r3
// TODO: add support when pta.vf = 1
GLOBAL_ENTRY(vmx_asm_thash)
#ifndef ACCE_THASH
    br.many vmx_virtualization_fault_back
#endif
    extr.u r17=r25,20,7 // get r3 from opcode in r25
    extr.u r18=r25,6,7 // get r1 from opcode in r25
    movl r20=asm_mov_from_reg
    ;;
    adds r30=vmx_asm_thash_back1-asm_mov_from_reg,r20
    shladd r17=r17,4,r20 // get addr of MOVE_FROM_REG(r17)
    adds r16=IA64_VPD_BASE_OFFSET,r21 // get vcpu.arch.priveregs
    mov r24=b0 // save b0
    ;;
    ld8 r16=[r16] // get VPD addr
    mov b0=r17
    br.many b0 // r19 return value
    ;;
vmx_asm_thash_back1:
    shr.u r23=r19,61 // get RR number
    adds r25=VCPU_VRR0_OFS,r21 // get vcpu->arch.arch_vmx.vrr[0]'s addr
    adds r16=IA64_VPD_VPTA_OFFSET,r16 // get virtual pta
    ;;
    shladd r27=r23,3,r25 // get vcpu->arch.arch_vmx.vrr[r23]'s addr
    ld8 r17=[r16] // get virtual PTA
    mov r26=1
    ;;
    extr.u r29=r17,2,6 // get pta.size
    ld8 r25=[r27] // get vcpu->arch.arch_vmx.vrr[r23]'s value
    ;;
    // Fall back to C if VF (long format) is set
    tbit.nz p6,p0=r17,8
    mov b0=r24
    (p6) br.cond.dpnt.many vmx_virtualization_fault_back
    extr.u r25=r25,2,6 // get rr.ps
    shl r22=r26,r29 // 1UL << pta.size
    ;;
    shr.u r23=r19,r25 // vaddr >> rr.ps
    adds r26=3,r29 // pta.size + 3
    shl r27=r17,3 // pta << 3
    ;;
    shl r23=r23,3 // (vaddr >> rr.ps) << 3
    shr.u r27=r27,r26 // (pta << 3) >> (pta.size+3)
    movl r16=VRN_MASK
    ;;
    adds r22=-1,r22 // (1UL << pta.size) - 1
    shl r27=r27,r29 // ((pta<<3)>>(pta.size+3))<<pta.size
    and r19=r19,r16 // vaddr & VRN_MASK
    ;;
    and r22=r22,r23 // vhpt_offset
    or r19=r19,r27 // (vaddr&VRN_MASK) | (((pta<<3)>>(pta.size+3))<<pta.size)
    adds r26=asm_mov_to_reg-asm_mov_from_reg,r20
    ;;
    or r19=r19,r22 // calc pval
    shladd r17=r18,4,r26
    adds r30=vmx_resume_to_guest-asm_mov_from_reg,r20
    ;;
    mov b0=r17
    br.many b0
END(vmx_asm_thash)
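
/*
 * A C sketch of the short-format VHPT hash computed above.  The long
 * format (pta.vf = 1) still falls back to the C handler, as the TODO
 * notes.  VRN_MASK covers the region bits 61..63.
 *
 *   #include <stdint.h>
 *
 *   #define VRN_MASK (0x7ULL << 61)
 *
 *   static uint64_t thash_short(uint64_t vaddr, uint64_t rr, uint64_t pta)
 *   {
 *       unsigned rr_ps    = (rr  >> 2) & 0x3f;   // region page size
 *       unsigned pta_size = (pta >> 2) & 0x3f;   // log2 of the VHPT size
 *
 *       uint64_t vhpt_off  = ((vaddr >> rr_ps) << 3) &
 *                            ((1ULL << pta_size) - 1);
 *       uint64_t vhpt_base = ((pta << 3) >> (pta_size + 3)) << pta_size;
 *
 *       return (vaddr & VRN_MASK) | vhpt_base | vhpt_off;
 *   }
 */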

#define MOV_TO_REG0 \
{; \
    nop.b 0x0; \
    nop.b 0x0; \
    nop.b 0x0; \
    ;; \
};

#define MOV_TO_REG(n) \
{; \
    mov r##n##=r19; \
    mov b0=r30; \
    br.sptk.many b0; \
    ;; \
};

#define MOV_FROM_REG(n) \
{; \
    mov r19=r##n##; \
    mov b0=r30; \
    br.sptk.many b0; \
    ;; \
};

#define MOV_TO_BANK0_REG(n) \
ENTRY_MIN_ALIGN(asm_mov_to_bank0_reg##n##); \
{; \
    mov r26=r2; \
    mov r2=r19; \
    bsw.1; \
    ;; \
}; \
{; \
    mov r##n##=r2; \
    nop.b 0x0; \
    bsw.0; \
    ;; \
}; \
{; \
    mov r2=r26; \
    mov b0=r30; \
    br.sptk.many b0; \
    ;; \
}; \
END(asm_mov_to_bank0_reg##n##)

#define MOV_FROM_BANK0_REG(n) \
ENTRY_MIN_ALIGN(asm_mov_from_bank0_reg##n##); \
{; \
    mov r26=r2; \
    nop.b 0x0; \
    bsw.1; \
    ;; \
}; \
{; \
    mov r2=r##n##; \
    nop.b 0x0; \
    bsw.0; \
    ;; \
}; \
{; \
    mov r19=r2; \
    mov r2=r26; \
    mov b0=r30; \
}; \
{; \
    nop.b 0x0; \
    nop.b 0x0; \
    br.sptk.many b0; \
    ;; \
}; \
END(asm_mov_from_bank0_reg##n##)

#define JMP_TO_MOV_TO_BANK0_REG(n) \
{; \
    nop.b 0x0; \
    nop.b 0x0; \
    br.sptk.many asm_mov_to_bank0_reg##n##; \
    ;; \
}

#define JMP_TO_MOV_FROM_BANK0_REG(n) \
{; \
    nop.b 0x0; \
    nop.b 0x0; \
    br.sptk.many asm_mov_from_bank0_reg##n##; \
    ;; \
}

MOV_FROM_BANK0_REG(16)
MOV_FROM_BANK0_REG(17)
MOV_FROM_BANK0_REG(18)
MOV_FROM_BANK0_REG(19)
MOV_FROM_BANK0_REG(20)
MOV_FROM_BANK0_REG(21)
MOV_FROM_BANK0_REG(22)
MOV_FROM_BANK0_REG(23)
MOV_FROM_BANK0_REG(24)
MOV_FROM_BANK0_REG(25)
MOV_FROM_BANK0_REG(26)
MOV_FROM_BANK0_REG(27)
MOV_FROM_BANK0_REG(28)
MOV_FROM_BANK0_REG(29)
MOV_FROM_BANK0_REG(30)
MOV_FROM_BANK0_REG(31)

// mov from reg table
// r19: value, r30: return address
// r26 may be destroyed
ENTRY(asm_mov_from_reg)
    MOV_FROM_REG(0)
    MOV_FROM_REG(1)
    MOV_FROM_REG(2)
    MOV_FROM_REG(3)
    MOV_FROM_REG(4)
    MOV_FROM_REG(5)
    MOV_FROM_REG(6)
    MOV_FROM_REG(7)
    MOV_FROM_REG(8)
    MOV_FROM_REG(9)
    MOV_FROM_REG(10)
    MOV_FROM_REG(11)
    MOV_FROM_REG(12)
    MOV_FROM_REG(13)
    MOV_FROM_REG(14)
    MOV_FROM_REG(15)
    JMP_TO_MOV_FROM_BANK0_REG(16)
    JMP_TO_MOV_FROM_BANK0_REG(17)
    JMP_TO_MOV_FROM_BANK0_REG(18)
    JMP_TO_MOV_FROM_BANK0_REG(19)
    JMP_TO_MOV_FROM_BANK0_REG(20)
    JMP_TO_MOV_FROM_BANK0_REG(21)
    JMP_TO_MOV_FROM_BANK0_REG(22)
    JMP_TO_MOV_FROM_BANK0_REG(23)
    JMP_TO_MOV_FROM_BANK0_REG(24)
    JMP_TO_MOV_FROM_BANK0_REG(25)
    JMP_TO_MOV_FROM_BANK0_REG(26)
    JMP_TO_MOV_FROM_BANK0_REG(27)
    JMP_TO_MOV_FROM_BANK0_REG(28)
    JMP_TO_MOV_FROM_BANK0_REG(29)
    JMP_TO_MOV_FROM_BANK0_REG(30)
    JMP_TO_MOV_FROM_BANK0_REG(31)
    MOV_FROM_REG(32)
    MOV_FROM_REG(33)
    MOV_FROM_REG(34)
    MOV_FROM_REG(35)
    MOV_FROM_REG(36)
    MOV_FROM_REG(37)
    MOV_FROM_REG(38)
    MOV_FROM_REG(39)
    MOV_FROM_REG(40)
    MOV_FROM_REG(41)
    MOV_FROM_REG(42)
    MOV_FROM_REG(43)
    MOV_FROM_REG(44)
    MOV_FROM_REG(45)
    MOV_FROM_REG(46)
    MOV_FROM_REG(47)
    MOV_FROM_REG(48)
    MOV_FROM_REG(49)
    MOV_FROM_REG(50)
    MOV_FROM_REG(51)
    MOV_FROM_REG(52)
    MOV_FROM_REG(53)
    MOV_FROM_REG(54)
    MOV_FROM_REG(55)
    MOV_FROM_REG(56)
    MOV_FROM_REG(57)
    MOV_FROM_REG(58)
    MOV_FROM_REG(59)
    MOV_FROM_REG(60)
    MOV_FROM_REG(61)
    MOV_FROM_REG(62)
    MOV_FROM_REG(63)
    MOV_FROM_REG(64)
    MOV_FROM_REG(65)
    MOV_FROM_REG(66)
    MOV_FROM_REG(67)
    MOV_FROM_REG(68)
    MOV_FROM_REG(69)
    MOV_FROM_REG(70)
    MOV_FROM_REG(71)
    MOV_FROM_REG(72)
    MOV_FROM_REG(73)
    MOV_FROM_REG(74)
    MOV_FROM_REG(75)
    MOV_FROM_REG(76)
    MOV_FROM_REG(77)
    MOV_FROM_REG(78)
    MOV_FROM_REG(79)
    MOV_FROM_REG(80)
    MOV_FROM_REG(81)
    MOV_FROM_REG(82)
    MOV_FROM_REG(83)
    MOV_FROM_REG(84)
    MOV_FROM_REG(85)
    MOV_FROM_REG(86)
    MOV_FROM_REG(87)
    MOV_FROM_REG(88)
    MOV_FROM_REG(89)
    MOV_FROM_REG(90)
    MOV_FROM_REG(91)
    MOV_FROM_REG(92)
    MOV_FROM_REG(93)
    MOV_FROM_REG(94)
    MOV_FROM_REG(95)
    MOV_FROM_REG(96)
    MOV_FROM_REG(97)
    MOV_FROM_REG(98)
    MOV_FROM_REG(99)
    MOV_FROM_REG(100)
    MOV_FROM_REG(101)
    MOV_FROM_REG(102)
    MOV_FROM_REG(103)
    MOV_FROM_REG(104)
    MOV_FROM_REG(105)
    MOV_FROM_REG(106)
    MOV_FROM_REG(107)
    MOV_FROM_REG(108)
    MOV_FROM_REG(109)
    MOV_FROM_REG(110)
    MOV_FROM_REG(111)
    MOV_FROM_REG(112)
    MOV_FROM_REG(113)
    MOV_FROM_REG(114)
    MOV_FROM_REG(115)
    MOV_FROM_REG(116)
    MOV_FROM_REG(117)
    MOV_FROM_REG(118)
    MOV_FROM_REG(119)
    MOV_FROM_REG(120)
    MOV_FROM_REG(121)
    MOV_FROM_REG(122)
    MOV_FROM_REG(123)
    MOV_FROM_REG(124)
    MOV_FROM_REG(125)
    MOV_FROM_REG(126)
    MOV_FROM_REG(127)
END(asm_mov_from_reg)

/* must be in bank 0
 * parameter:
 * r31: pr
 * r24: b0
 */
ENTRY(vmx_resume_to_guest)
    mov r16=cr.ipsr
    movl r20=__vsa_base
    ;;
    ld8 r20=[r20]
    adds r19=IA64_VPD_BASE_OFFSET,r21
    ;;
    ld8 r25=[r19]
    extr.u r17=r16,IA64_PSR_RI_BIT,2
    tbit.nz p6,p7=r16,IA64_PSR_RI_BIT+1
    ;;
    (p6) mov r18=cr.iip
    (p6) mov r17=r0
    ;;
    (p6) add r18=0x10,r18
    (p7) add r17=1,r17
    ;;
    (p6) mov cr.iip=r18
    dep r16=r17,r16,IA64_PSR_RI_BIT,2
    ;;
    mov cr.ipsr=r16
    adds r19=VPD_VPSR_START_OFFSET,r25
    add r28=PAL_VPS_RESUME_NORMAL,r20
    add r29=PAL_VPS_RESUME_HANDLER,r20
    ;;
    ld8 r19=[r19]
    mov b0=r29
    cmp.ne p6,p7=r0,r0
    ;;
    tbit.z p6,p7=r19,IA64_PSR_IC_BIT // p7 = vpsr.ic
    ;;
    (p6) ld8 r26=[r25]
    (p7) mov b0=r28
    mov pr=r31,-2
    br.sptk.many b0 // call pal service
    ;;
END(vmx_resume_to_guest)

MOV_TO_BANK0_REG(16)
MOV_TO_BANK0_REG(17)
MOV_TO_BANK0_REG(18)
MOV_TO_BANK0_REG(19)
MOV_TO_BANK0_REG(20)
MOV_TO_BANK0_REG(21)
MOV_TO_BANK0_REG(22)
MOV_TO_BANK0_REG(23)
MOV_TO_BANK0_REG(24)
MOV_TO_BANK0_REG(25)
MOV_TO_BANK0_REG(26)
MOV_TO_BANK0_REG(27)
MOV_TO_BANK0_REG(28)
MOV_TO_BANK0_REG(29)
MOV_TO_BANK0_REG(30)
MOV_TO_BANK0_REG(31)

// mov to reg table
// r19: value, r30: return address
ENTRY(asm_mov_to_reg)
    MOV_TO_REG0
    MOV_TO_REG(1)
    MOV_TO_REG(2)
    MOV_TO_REG(3)
    MOV_TO_REG(4)
    MOV_TO_REG(5)
    MOV_TO_REG(6)
    MOV_TO_REG(7)
    MOV_TO_REG(8)
    MOV_TO_REG(9)
    MOV_TO_REG(10)
    MOV_TO_REG(11)
    MOV_TO_REG(12)
    MOV_TO_REG(13)
    MOV_TO_REG(14)
    MOV_TO_REG(15)
    JMP_TO_MOV_TO_BANK0_REG(16)
    JMP_TO_MOV_TO_BANK0_REG(17)
    JMP_TO_MOV_TO_BANK0_REG(18)
    JMP_TO_MOV_TO_BANK0_REG(19)
    JMP_TO_MOV_TO_BANK0_REG(20)
    JMP_TO_MOV_TO_BANK0_REG(21)
    JMP_TO_MOV_TO_BANK0_REG(22)
    JMP_TO_MOV_TO_BANK0_REG(23)
    JMP_TO_MOV_TO_BANK0_REG(24)
    JMP_TO_MOV_TO_BANK0_REG(25)
    JMP_TO_MOV_TO_BANK0_REG(26)
    JMP_TO_MOV_TO_BANK0_REG(27)
    JMP_TO_MOV_TO_BANK0_REG(28)
    JMP_TO_MOV_TO_BANK0_REG(29)
    JMP_TO_MOV_TO_BANK0_REG(30)
    JMP_TO_MOV_TO_BANK0_REG(31)
    MOV_TO_REG(32)
    MOV_TO_REG(33)
    MOV_TO_REG(34)
    MOV_TO_REG(35)
    MOV_TO_REG(36)
    MOV_TO_REG(37)
    MOV_TO_REG(38)
    MOV_TO_REG(39)
    MOV_TO_REG(40)
    MOV_TO_REG(41)
    MOV_TO_REG(42)
    MOV_TO_REG(43)
    MOV_TO_REG(44)
    MOV_TO_REG(45)
    MOV_TO_REG(46)
    MOV_TO_REG(47)
    MOV_TO_REG(48)
    MOV_TO_REG(49)
    MOV_TO_REG(50)
    MOV_TO_REG(51)
    MOV_TO_REG(52)
    MOV_TO_REG(53)
    MOV_TO_REG(54)
    MOV_TO_REG(55)
    MOV_TO_REG(56)
    MOV_TO_REG(57)
    MOV_TO_REG(58)
    MOV_TO_REG(59)
    MOV_TO_REG(60)
    MOV_TO_REG(61)
    MOV_TO_REG(62)
    MOV_TO_REG(63)
    MOV_TO_REG(64)
    MOV_TO_REG(65)
    MOV_TO_REG(66)
    MOV_TO_REG(67)
    MOV_TO_REG(68)
    MOV_TO_REG(69)
    MOV_TO_REG(70)
    MOV_TO_REG(71)
    MOV_TO_REG(72)
    MOV_TO_REG(73)
    MOV_TO_REG(74)
    MOV_TO_REG(75)
    MOV_TO_REG(76)
    MOV_TO_REG(77)
    MOV_TO_REG(78)
    MOV_TO_REG(79)
    MOV_TO_REG(80)
    MOV_TO_REG(81)
    MOV_TO_REG(82)
    MOV_TO_REG(83)
    MOV_TO_REG(84)
    MOV_TO_REG(85)
    MOV_TO_REG(86)
    MOV_TO_REG(87)
    MOV_TO_REG(88)
    MOV_TO_REG(89)
    MOV_TO_REG(90)
    MOV_TO_REG(91)
    MOV_TO_REG(92)
    MOV_TO_REG(93)
    MOV_TO_REG(94)
    MOV_TO_REG(95)
    MOV_TO_REG(96)
    MOV_TO_REG(97)
    MOV_TO_REG(98)
    MOV_TO_REG(99)
    MOV_TO_REG(100)
    MOV_TO_REG(101)
    MOV_TO_REG(102)
    MOV_TO_REG(103)
    MOV_TO_REG(104)
    MOV_TO_REG(105)
    MOV_TO_REG(106)
    MOV_TO_REG(107)
    MOV_TO_REG(108)
    MOV_TO_REG(109)
    MOV_TO_REG(110)
    MOV_TO_REG(111)
    MOV_TO_REG(112)
    MOV_TO_REG(113)
    MOV_TO_REG(114)
    MOV_TO_REG(115)
    MOV_TO_REG(116)
    MOV_TO_REG(117)
    MOV_TO_REG(118)
    MOV_TO_REG(119)
    MOV_TO_REG(120)
    MOV_TO_REG(121)
    MOV_TO_REG(122)
    MOV_TO_REG(123)
    MOV_TO_REG(124)
    MOV_TO_REG(125)
    MOV_TO_REG(126)
    MOV_TO_REG(127)
END(asm_mov_to_reg)