debuggers.hg
changeset 16834:54ed70d1dd11
[IA64] vti fault handler clean up: vmx_minstate.h white space
Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author | Alex Williamson <alex.williamson@hp.com> |
---|---|
date | Fri Dec 14 13:04:27 2007 -0700 (2007-12-14) |
parents | 09cd682ac68e |
children | 0f5926ba1d28 |
files | xen/arch/ia64/vmx/vmx_minstate.h |
line diff
1.1 --- a/xen/arch/ia64/vmx/vmx_minstate.h Fri Dec 14 12:53:03 2007 -0700 1.2 +++ b/xen/arch/ia64/vmx/vmx_minstate.h Fri Dec 14 13:04:27 2007 -0700 1.3 @@ -1,4 +1,3 @@ 1.4 -/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */ 1.5 /* 1.6 * vmx_minstate.h: 1.7 * Copyright (c) 2005, Intel Corporation. 1.8 @@ -35,52 +34,47 @@ 1.9 #include <asm/cache.h> 1.10 #include "entry.h" 1.11 1.12 -#define VMX_MINSTATE_START_SAVE_MIN \ 1.13 - mov ar.rsc=0; /* set enforced lazy mode, pl 0, little-endian, loadrs=0 */ \ 1.14 - ;; \ 1.15 - mov.m r28=ar.rnat; \ 1.16 - addl r22=IA64_RBS_OFFSET,r1; /* compute base of RBS */ \ 1.17 - ;; \ 1.18 - lfetch.fault.excl.nt1 [r22]; \ 1.19 - addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1; /* compute base of memory stack */ \ 1.20 - mov r23=ar.bspstore; /* save ar.bspstore */ \ 1.21 - ;; \ 1.22 - mov ar.bspstore=r22; /* switch to kernel RBS */ \ 1.23 - ;; \ 1.24 - mov r18=ar.bsp; \ 1.25 - mov ar.rsc=0x3; /* set eager mode, pl 0, little-endian, loadrs=0 */ \ 1.26 +#define VMX_MINSTATE_START_SAVE_MIN \ 1.27 + mov ar.rsc=0; /* set enforced lazy mode, pl 0, little-endian, loadrs=0 */ \ 1.28 + ;; \ 1.29 + mov.m r28=ar.rnat; \ 1.30 + addl r22=IA64_RBS_OFFSET,r1; /* compute base of RBS */ \ 1.31 + ;; \ 1.32 + lfetch.fault.excl.nt1 [r22]; \ 1.33 + addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1; /* compute base of memory stack */ \ 1.34 + mov r23=ar.bspstore; /* save ar.bspstore */ \ 1.35 + ;; \ 1.36 + mov ar.bspstore=r22; /* switch to kernel RBS */ \ 1.37 + ;; \ 1.38 + mov r18=ar.bsp; \ 1.39 + mov ar.rsc=0x3; /* set eager mode, pl 0, little-endian, loadrs=0 */ 1.40 1.41 - 1.42 - 1.43 -#define VMX_MINSTATE_END_SAVE_MIN \ 1.44 - bsw.1; /* switch back to bank 1 (must be last in insn group) */ \ 1.45 +#define VMX_MINSTATE_END_SAVE_MIN \ 1.46 + bsw.1; /* switch back to bank 1 (must be last in insn group) */ \ 1.47 ;; 1.48 1.49 - 1.50 -#define PAL_VSA_SYNC_READ \ 1.51 - /* begin to call pal vps sync_read */ \ 1.52 - add r25=IA64_VPD_BASE_OFFSET, 
r21; \ 1.53 - movl r20=__vsa_base; \ 1.54 - ;; \ 1.55 - ld8 r25=[r25]; /* read vpd base */ \ 1.56 - ld8 r20=[r20]; /* read entry point */ \ 1.57 - ;; \ 1.58 - add r20=PAL_VPS_SYNC_READ,r20; \ 1.59 - ;; \ 1.60 -{ .mii; \ 1.61 - nop 0x0; \ 1.62 - mov r24=ip; \ 1.63 - mov b0=r20; \ 1.64 - ;; \ 1.65 -}; \ 1.66 -{ .mmb; \ 1.67 - add r24 = 0x20, r24; \ 1.68 - nop 0x0; \ 1.69 - br.cond.sptk b0; /* call the service */ \ 1.70 - ;; \ 1.71 -}; \ 1.72 - 1.73 - 1.74 +#define PAL_VSA_SYNC_READ \ 1.75 + /* begin to call pal vps sync_read */ \ 1.76 + add r25=IA64_VPD_BASE_OFFSET, r21; \ 1.77 + movl r20=__vsa_base; \ 1.78 + ;; \ 1.79 + ld8 r25=[r25]; /* read vpd base */ \ 1.80 + ld8 r20=[r20]; /* read entry point */ \ 1.81 + ;; \ 1.82 + add r20=PAL_VPS_SYNC_READ,r20; \ 1.83 + ;; \ 1.84 +{ .mii; \ 1.85 + nop 0x0; \ 1.86 + mov r24=ip; \ 1.87 + mov b0=r20; \ 1.88 + ;; \ 1.89 +}; \ 1.90 +{ .mmb; \ 1.91 + add r24 = 0x20, r24; \ 1.92 + nop 0x0; \ 1.93 + br.cond.sptk b0; /* call the service */ \ 1.94 + ;; \ 1.95 +}; 1.96 1.97 #define IA64_CURRENT_REG IA64_KR(CURRENT) /* r21 is reserved for current pointer */ 1.98 //#define VMX_MINSTATE_GET_CURRENT(reg) mov reg=IA64_CURRENT_REG 1.99 @@ -112,101 +106,101 @@ 1.100 * we can pass interruption state as arguments to a handler. 
1.101 */ 1.102 1.103 -#define VMX_DO_SAVE_MIN(COVER,SAVE_IFS,EXTRA) \ 1.104 - VMX_MINSTATE_GET_CURRENT(r16); /* M (or M;;I) */ \ 1.105 - mov r27=ar.rsc; /* M */ \ 1.106 - mov r20=r1; /* A */ \ 1.107 - mov r25=ar.unat; /* M */ \ 1.108 - mov r29=cr.ipsr; /* M */ \ 1.109 - mov r26=ar.pfs; /* I */ \ 1.110 - mov r18=cr.isr; \ 1.111 - COVER; /* B;; (or nothing) */ \ 1.112 - ;; \ 1.113 - tbit.z p6,p0=r29,IA64_PSR_VM_BIT; \ 1.114 - ;; \ 1.115 - tbit.nz.or p6,p0 = r18,IA64_ISR_NI_BIT; \ 1.116 - ;; \ 1.117 -(p6) br.spnt.few vmx_panic; \ 1.118 - tbit.z p0,p15=r29,IA64_PSR_I_BIT; \ 1.119 - mov r1=r16; \ 1.120 -/* mov r21=r16; */ \ 1.121 - /* switch from user to kernel RBS: */ \ 1.122 - ;; \ 1.123 - invala; /* M */ \ 1.124 - SAVE_IFS; \ 1.125 - ;; \ 1.126 - VMX_MINSTATE_START_SAVE_MIN \ 1.127 - adds r17=2*L1_CACHE_BYTES,r1; /* really: biggest cache-line size */ \ 1.128 - adds r16=PT(CR_IPSR),r1; \ 1.129 - ;; \ 1.130 - lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES; \ 1.131 - st8 [r16]=r29; /* save cr.ipsr */ \ 1.132 - ;; \ 1.133 - lfetch.fault.excl.nt1 [r17]; \ 1.134 - tbit.nz p15,p0=r29,IA64_PSR_I_BIT; \ 1.135 - mov r29=b0 \ 1.136 - ;; \ 1.137 - adds r16=PT(R8),r1; /* initialize first base pointer */ \ 1.138 - adds r17=PT(R9),r1; /* initialize second base pointer */ \ 1.139 - ;; \ 1.140 -.mem.offset 0,0; st8.spill [r16]=r8,16; \ 1.141 -.mem.offset 8,0; st8.spill [r17]=r9,16; \ 1.142 - ;; \ 1.143 -.mem.offset 0,0; st8.spill [r16]=r10,24; \ 1.144 -.mem.offset 8,0; st8.spill [r17]=r11,24; \ 1.145 - ;; \ 1.146 - mov r9=cr.iip; /* M */ \ 1.147 - mov r10=ar.fpsr; /* M */ \ 1.148 - ;; \ 1.149 - st8 [r16]=r9,16; /* save cr.iip */ \ 1.150 - st8 [r17]=r30,16; /* save cr.ifs */ \ 1.151 - sub r18=r18,r22; /* r18=RSE.ndirty*8 */ \ 1.152 - ;; \ 1.153 - st8 [r16]=r25,16; /* save ar.unat */ \ 1.154 - st8 [r17]=r26,16; /* save ar.pfs */ \ 1.155 - shl r18=r18,16; /* compute ar.rsc to be used for "loadrs" */ \ 1.156 - ;; \ 1.157 - st8 [r16]=r27,16; /* save ar.rsc */ \ 1.158 - st8 [r17]=r28,16; /* 
save ar.rnat */ \ 1.159 - ;; /* avoid RAW on r16 & r17 */ \ 1.160 - st8 [r16]=r23,16; /* save ar.bspstore */ \ 1.161 - st8 [r17]=r31,16; /* save predicates */ \ 1.162 - ;; \ 1.163 - st8 [r16]=r29,16; /* save b0 */ \ 1.164 - st8 [r17]=r18,16; /* save ar.rsc value for "loadrs" */ \ 1.165 - cmp.eq pNonSys,pSys=r0,r0 /* initialize pSys=0, pNonSys=1 */ \ 1.166 - ;; \ 1.167 -.mem.offset 0,0; st8.spill [r16]=r20,16; /* save original r1 */ \ 1.168 -.mem.offset 8,0; st8.spill [r17]=r12,16; \ 1.169 +#define VMX_DO_SAVE_MIN(COVER,SAVE_IFS,EXTRA) \ 1.170 + VMX_MINSTATE_GET_CURRENT(r16); /* M (or M;;I) */ \ 1.171 + mov r27=ar.rsc; /* M */ \ 1.172 + mov r20=r1; /* A */ \ 1.173 + mov r25=ar.unat; /* M */ \ 1.174 + mov r29=cr.ipsr; /* M */ \ 1.175 + mov r26=ar.pfs; /* I */ \ 1.176 + mov r18=cr.isr; \ 1.177 + COVER; /* B;; (or nothing) */ \ 1.178 + ;; \ 1.179 + tbit.z p6,p0=r29,IA64_PSR_VM_BIT; \ 1.180 + ;; \ 1.181 + tbit.nz.or p6,p0 = r18,IA64_ISR_NI_BIT; \ 1.182 + ;; \ 1.183 +(p6)br.spnt.few vmx_panic; \ 1.184 + tbit.z p0,p15=r29,IA64_PSR_I_BIT; \ 1.185 + mov r1=r16; \ 1.186 + /* mov r21=r16; */ \ 1.187 + /* switch from user to kernel RBS: */ \ 1.188 + ;; \ 1.189 + invala; /* M */ \ 1.190 + SAVE_IFS; \ 1.191 + ;; \ 1.192 + VMX_MINSTATE_START_SAVE_MIN \ 1.193 + adds r17=2*L1_CACHE_BYTES,r1; /* really: biggest cache-line size */ \ 1.194 + adds r16=PT(CR_IPSR),r1; \ 1.195 + ;; \ 1.196 + lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES; \ 1.197 + st8 [r16]=r29; /* save cr.ipsr */ \ 1.198 + ;; \ 1.199 + lfetch.fault.excl.nt1 [r17]; \ 1.200 + tbit.nz p15,p0=r29,IA64_PSR_I_BIT; \ 1.201 + mov r29=b0 \ 1.202 + ;; \ 1.203 + adds r16=PT(R8),r1; /* initialize first base pointer */ \ 1.204 + adds r17=PT(R9),r1; /* initialize second base pointer */ \ 1.205 + ;; \ 1.206 +.mem.offset 0,0; st8.spill [r16]=r8,16; \ 1.207 +.mem.offset 8,0; st8.spill [r17]=r9,16; \ 1.208 + ;; \ 1.209 +.mem.offset 0,0; st8.spill [r16]=r10,24; \ 1.210 +.mem.offset 8,0; st8.spill [r17]=r11,24; \ 1.211 + ;; \ 1.212 + mov 
r9=cr.iip; /* M */ \ 1.213 + mov r10=ar.fpsr; /* M */ \ 1.214 + ;; \ 1.215 + st8 [r16]=r9,16; /* save cr.iip */ \ 1.216 + st8 [r17]=r30,16; /* save cr.ifs */ \ 1.217 + sub r18=r18,r22; /* r18=RSE.ndirty*8 */ \ 1.218 + ;; \ 1.219 + st8 [r16]=r25,16; /* save ar.unat */ \ 1.220 + st8 [r17]=r26,16; /* save ar.pfs */ \ 1.221 + shl r18=r18,16; /* compute ar.rsc to be used for "loadrs" */ \ 1.222 + ;; \ 1.223 + st8 [r16]=r27,16; /* save ar.rsc */ \ 1.224 + st8 [r17]=r28,16; /* save ar.rnat */ \ 1.225 + ;; /* avoid RAW on r16 & r17 */ \ 1.226 + st8 [r16]=r23,16; /* save ar.bspstore */ \ 1.227 + st8 [r17]=r31,16; /* save predicates */ \ 1.228 + ;; \ 1.229 + st8 [r16]=r29,16; /* save b0 */ \ 1.230 + st8 [r17]=r18,16; /* save ar.rsc value for "loadrs" */ \ 1.231 + cmp.eq pNonSys,pSys=r0,r0 /* initialize pSys=0, pNonSys=1 */ \ 1.232 + ;; \ 1.233 +.mem.offset 0,0; st8.spill [r16]=r20,16; /* save original r1 */ \ 1.234 +.mem.offset 8,0; st8.spill [r17]=r12,16; \ 1.235 adds r12=-16,r1; /* switch to kernel memory stack (with 16 bytes of scratch) */ \ 1.236 - ;; \ 1.237 -.mem.offset 0,0; st8.spill [r16]=r13,16; \ 1.238 -.mem.offset 8,0; st8.spill [r17]=r10,16; /* save ar.fpsr */ \ 1.239 - mov r13=r21; /* establish `current' */ \ 1.240 - ;; \ 1.241 -.mem.offset 0,0; st8.spill [r16]=r15,16; \ 1.242 -.mem.offset 8,0; st8.spill [r17]=r14,16; \ 1.243 - ;; \ 1.244 -.mem.offset 0,0; st8.spill [r16]=r2,16; \ 1.245 -.mem.offset 8,0; st8.spill [r17]=r3,16; \ 1.246 - adds r2=IA64_PT_REGS_R16_OFFSET,r1; \ 1.247 - ;; \ 1.248 - adds r16=IA64_VCPU_IIPA_OFFSET,r13; \ 1.249 - adds r17=IA64_VCPU_ISR_OFFSET,r13; \ 1.250 - mov r26=cr.iipa; \ 1.251 - mov r27=cr.isr; \ 1.252 - ;; \ 1.253 - st8 [r16]=r26; \ 1.254 - st8 [r17]=r27; \ 1.255 - ;; \ 1.256 - EXTRA; \ 1.257 - mov r8=ar.ccv; \ 1.258 - mov r9=ar.csd; \ 1.259 - mov r10=ar.ssd; \ 1.260 - movl r11=FPSR_DEFAULT; /* L-unit */ \ 1.261 - movl r1=__gp; /* establish kernel global pointer */ \ 1.262 - ;; \ 1.263 - PAL_VSA_SYNC_READ \ 1.264 + ;; \ 1.265 
+.mem.offset 0,0; st8.spill [r16]=r13,16; \ 1.266 +.mem.offset 8,0; st8.spill [r17]=r10,16; /* save ar.fpsr */ \ 1.267 + mov r13=r21; /* establish `current' */ \ 1.268 + ;; \ 1.269 +.mem.offset 0,0; st8.spill [r16]=r15,16; \ 1.270 +.mem.offset 8,0; st8.spill [r17]=r14,16; \ 1.271 + ;; \ 1.272 +.mem.offset 0,0; st8.spill [r16]=r2,16; \ 1.273 +.mem.offset 8,0; st8.spill [r17]=r3,16; \ 1.274 + adds r2=IA64_PT_REGS_R16_OFFSET,r1; \ 1.275 + ;; \ 1.276 + adds r16=IA64_VCPU_IIPA_OFFSET,r13; \ 1.277 + adds r17=IA64_VCPU_ISR_OFFSET,r13; \ 1.278 + mov r26=cr.iipa; \ 1.279 + mov r27=cr.isr; \ 1.280 + ;; \ 1.281 + st8 [r16]=r26; \ 1.282 + st8 [r17]=r27; \ 1.283 + ;; \ 1.284 + EXTRA; \ 1.285 + mov r8=ar.ccv; \ 1.286 + mov r9=ar.csd; \ 1.287 + mov r10=ar.ssd; \ 1.288 + movl r11=FPSR_DEFAULT; /* L-unit */ \ 1.289 + movl r1=__gp; /* establish kernel global pointer */ \ 1.290 + ;; \ 1.291 + PAL_VSA_SYNC_READ \ 1.292 VMX_MINSTATE_END_SAVE_MIN 1.293 1.294 /* 1.295 @@ -223,71 +217,80 @@ 1.296 * 1.297 * Registers r14 and r15 are guaranteed not to be touched by SAVE_REST. 
1.298 */ 1.299 -#define VMX_SAVE_REST \ 1.300 -.mem.offset 0,0; st8.spill [r2]=r16,16; \ 1.301 -.mem.offset 8,0; st8.spill [r3]=r17,16; \ 1.302 - ;; \ 1.303 -.mem.offset 0,0; st8.spill [r2]=r18,16; \ 1.304 -.mem.offset 8,0; st8.spill [r3]=r19,16; \ 1.305 - ;; \ 1.306 -.mem.offset 0,0; st8.spill [r2]=r20,16; \ 1.307 -.mem.offset 8,0; st8.spill [r3]=r21,16; \ 1.308 - mov r18=b6; \ 1.309 - ;; \ 1.310 -.mem.offset 0,0; st8.spill [r2]=r22,16; \ 1.311 -.mem.offset 8,0; st8.spill [r3]=r23,16; \ 1.312 - mov r19=b7; \ 1.313 - ;; \ 1.314 -.mem.offset 0,0; st8.spill [r2]=r24,16; \ 1.315 -.mem.offset 8,0; st8.spill [r3]=r25,16; \ 1.316 - ;; \ 1.317 -.mem.offset 0,0; st8.spill [r2]=r26,16; \ 1.318 -.mem.offset 8,0; st8.spill [r3]=r27,16; \ 1.319 - ;; \ 1.320 -.mem.offset 0,0; st8.spill [r2]=r28,16; \ 1.321 -.mem.offset 8,0; st8.spill [r3]=r29,16; \ 1.322 - ;; \ 1.323 -.mem.offset 0,0; st8.spill [r2]=r30,16; \ 1.324 -.mem.offset 8,0; st8.spill [r3]=r31,32; \ 1.325 - ;; \ 1.326 - mov ar.fpsr=r11; \ 1.327 - st8 [r2]=r8,8; \ 1.328 - adds r24=PT(B6)-PT(F7),r3; \ 1.329 - ;; \ 1.330 - stf.spill [r2]=f6,32; \ 1.331 - stf.spill [r3]=f7,32; \ 1.332 - ;; \ 1.333 - stf.spill [r2]=f8,32; \ 1.334 - stf.spill [r3]=f9,32; \ 1.335 - ;; \ 1.336 - stf.spill [r2]=f10,32; \ 1.337 - stf.spill [r3]=f11; \ 1.338 - adds r25=PT(B7)-PT(F11),r3; \ 1.339 - ;; \ 1.340 - st8 [r24]=r18,16; /* b6 */ \ 1.341 - st8 [r25]=r19,16; /* b7 */ \ 1.342 - adds r3=PT(R5)-PT(F11),r3; \ 1.343 - ;; \ 1.344 - st8 [r24]=r9; /* ar.csd */ \ 1.345 - st8 [r25]=r10; /* ar.ssd */ \ 1.346 - ;; \ 1.347 - mov r18=ar.unat; \ 1.348 - adds r19=PT(EML_UNAT)-PT(R4),r2; \ 1.349 - ;; \ 1.350 - st8 [r19]=r18; /* eml_unat */ \ 1.351 +#define VMX_SAVE_REST \ 1.352 +.mem.offset 0,0; st8.spill [r2]=r16,16; \ 1.353 +.mem.offset 8,0; st8.spill [r3]=r17,16; \ 1.354 + ;; \ 1.355 +.mem.offset 0,0; st8.spill [r2]=r18,16; \ 1.356 +.mem.offset 8,0; st8.spill [r3]=r19,16; \ 1.357 + ;; \ 1.358 +.mem.offset 0,0; st8.spill [r2]=r20,16; \ 1.359 +.mem.offset 
8,0; st8.spill [r3]=r21,16; \ 1.360 + mov r18=b6; \ 1.361 + ;; \ 1.362 +.mem.offset 0,0; st8.spill [r2]=r22,16; \ 1.363 +.mem.offset 8,0; st8.spill [r3]=r23,16; \ 1.364 + mov r19=b7; \ 1.365 + ;; \ 1.366 +.mem.offset 0,0; st8.spill [r2]=r24,16; \ 1.367 +.mem.offset 8,0; st8.spill [r3]=r25,16; \ 1.368 + ;; \ 1.369 +.mem.offset 0,0; st8.spill [r2]=r26,16; \ 1.370 +.mem.offset 8,0; st8.spill [r3]=r27,16; \ 1.371 + ;; \ 1.372 +.mem.offset 0,0; st8.spill [r2]=r28,16; \ 1.373 +.mem.offset 8,0; st8.spill [r3]=r29,16; \ 1.374 + ;; \ 1.375 +.mem.offset 0,0; st8.spill [r2]=r30,16; \ 1.376 +.mem.offset 8,0; st8.spill [r3]=r31,32; \ 1.377 + ;; \ 1.378 + mov ar.fpsr=r11; \ 1.379 + st8 [r2]=r8,8; \ 1.380 + adds r24=PT(B6)-PT(F7),r3; \ 1.381 + ;; \ 1.382 + stf.spill [r2]=f6,32; \ 1.383 + stf.spill [r3]=f7,32; \ 1.384 + ;; \ 1.385 + stf.spill [r2]=f8,32; \ 1.386 + stf.spill [r3]=f9,32; \ 1.387 + ;; \ 1.388 + stf.spill [r2]=f10,32; \ 1.389 + stf.spill [r3]=f11; \ 1.390 + adds r25=PT(B7)-PT(F11),r3; \ 1.391 + ;; \ 1.392 + st8 [r24]=r18,16; /* b6 */ \ 1.393 + st8 [r25]=r19,16; /* b7 */ \ 1.394 + adds r3=PT(R5)-PT(F11),r3; \ 1.395 + ;; \ 1.396 + st8 [r24]=r9; /* ar.csd */ \ 1.397 + st8 [r25]=r10; /* ar.ssd */ \ 1.398 + ;; \ 1.399 + mov r18=ar.unat; \ 1.400 + adds r19=PT(EML_UNAT)-PT(R4),r2; \ 1.401 + ;; \ 1.402 + st8 [r19]=r18; /* eml_unat */ 1.403 1.404 +#define VMX_SAVE_EXTRA \ 1.405 +.mem.offset 0,0; st8.spill [r2]=r4,16; \ 1.406 +.mem.offset 8,0; st8.spill [r3]=r5,16; \ 1.407 + ;; \ 1.408 +.mem.offset 0,0; st8.spill [r2]=r6,16; \ 1.409 +.mem.offset 8,0; st8.spill [r3]=r7; \ 1.410 + ;; \ 1.411 + mov r26=ar.unat; \ 1.412 + ;; \ 1.413 + st8 [r2]=r26; /* eml_unat */ 1.414 1.415 -#define VMX_SAVE_EXTRA \ 1.416 -.mem.offset 0,0; st8.spill [r2]=r4,16; \ 1.417 -.mem.offset 8,0; st8.spill [r3]=r5,16; \ 1.418 - ;; \ 1.419 -.mem.offset 0,0; st8.spill [r2]=r6,16; \ 1.420 -.mem.offset 8,0; st8.spill [r3]=r7; \ 1.421 - ;; \ 1.422 - mov r26=ar.unat; \ 1.423 - ;; \ 1.424 - st8 [r2]=r26; /* 
eml_unat */ \ 1.425 +#define VMX_SAVE_MIN_WITH_COVER VMX_DO_SAVE_MIN(cover, mov r30=cr.ifs,) 1.426 +#define VMX_SAVE_MIN_WITH_COVER_R19 VMX_DO_SAVE_MIN(cover, mov r30=cr.ifs, mov r15=r19) 1.427 +#define VMX_SAVE_MIN VMX_DO_SAVE_MIN( , mov r30=r0, ) 1.428 1.429 -#define VMX_SAVE_MIN_WITH_COVER VMX_DO_SAVE_MIN(cover, mov r30=cr.ifs,) 1.430 -#define VMX_SAVE_MIN_WITH_COVER_R19 VMX_DO_SAVE_MIN(cover, mov r30=cr.ifs, mov r15=r19) 1.431 -#define VMX_SAVE_MIN VMX_DO_SAVE_MIN( , mov r30=r0, ) 1.432 +/* 1.433 + * Local variables: 1.434 + * mode: C 1.435 + * c-set-style: "BSD" 1.436 + * c-basic-offset: 4 1.437 + * tab-width: 4 1.438 + * indent-tabs-mode: nil 1.439 + * End: 1.440 + */