/*
 * vmx.h: VMX Architecture related definitions
 * Copyright (c) 2004, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 */
#ifndef __ASM_X86_VMX_H__
#define __ASM_X86_VMX_H__

#include <xen/sched.h>
#include <asm/types.h>
#include <asm/regs.h>
#include <asm/processor.h>
#include <asm/vmx_vmcs.h>
#include <asm/i387.h>

#include <public/io/ioreq.h>

extern int hvm_enabled;

extern void vmx_asm_vmexit_handler(struct cpu_user_regs);
extern void vmx_asm_do_resume(void);
extern void vmx_asm_do_launch(void);
extern void vmx_intr_assist(void);

extern void arch_vmx_do_launch(struct vcpu *);
extern void arch_vmx_do_resume(struct vcpu *);
extern void arch_vmx_do_relaunch(struct vcpu *);

extern int vmcs_size;
extern unsigned int cpu_rev;

/*
 * Need fill bits for SENTER
 */

#define MONITOR_PIN_BASED_EXEC_CONTROLS_RESERVED_VALUE  0x00000016

#define MONITOR_PIN_BASED_EXEC_CONTROLS                 \
    (                                                   \
        MONITOR_PIN_BASED_EXEC_CONTROLS_RESERVED_VALUE |\
        PIN_BASED_EXT_INTR_MASK |                       \
        PIN_BASED_NMI_EXITING                           \
    )

#define MONITOR_CPU_BASED_EXEC_CONTROLS_RESERVED_VALUE  0x0401e172

#define _MONITOR_CPU_BASED_EXEC_CONTROLS                \
    (                                                   \
        MONITOR_CPU_BASED_EXEC_CONTROLS_RESERVED_VALUE |\
        CPU_BASED_HLT_EXITING |                         \
        CPU_BASED_INVDPG_EXITING |                      \
        CPU_BASED_MWAIT_EXITING |                       \
        CPU_BASED_MOV_DR_EXITING |                      \
        CPU_BASED_ACTIVATE_IO_BITMAP |                  \
        CPU_BASED_UNCOND_IO_EXITING                     \
    )

#define MONITOR_CPU_BASED_EXEC_CONTROLS_IA32E_MODE      \
    (                                                   \
        CPU_BASED_CR8_LOAD_EXITING |                    \
        CPU_BASED_CR8_STORE_EXITING                     \
    )

#define MONITOR_VM_EXIT_CONTROLS_RESERVED_VALUE         0x0003edff

#define MONITOR_VM_EXIT_CONTROLS_IA32E_MODE             0x00000200

#define _MONITOR_VM_EXIT_CONTROLS                       \
    (                                                   \
        MONITOR_VM_EXIT_CONTROLS_RESERVED_VALUE |       \
        VM_EXIT_ACK_INTR_ON_EXIT                        \
    )

#if defined (__x86_64__)
#define MONITOR_CPU_BASED_EXEC_CONTROLS                 \
    (                                                   \
        _MONITOR_CPU_BASED_EXEC_CONTROLS |              \
        MONITOR_CPU_BASED_EXEC_CONTROLS_IA32E_MODE      \
    )
#define MONITOR_VM_EXIT_CONTROLS                        \
    (                                                   \
        _MONITOR_VM_EXIT_CONTROLS |                     \
        MONITOR_VM_EXIT_CONTROLS_IA32E_MODE             \
    )
#else
#define MONITOR_CPU_BASED_EXEC_CONTROLS                 \
    _MONITOR_CPU_BASED_EXEC_CONTROLS

#define MONITOR_VM_EXIT_CONTROLS                        \
    _MONITOR_VM_EXIT_CONTROLS
#endif

#define VM_ENTRY_CONTROLS_RESERVED_VALUE                0x000011ff
#define VM_ENTRY_CONTROLS_IA32E_MODE                    0x00000200
#define MONITOR_VM_ENTRY_CONTROLS       VM_ENTRY_CONTROLS_RESERVED_VALUE
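
/*
 * Illustrative sketch (not part of the original header): the control
 * words above are the values a VMCS setup routine would install with
 * __vmwrite(), defined later in this file.  The field encodings
 * (PIN_BASED_VM_EXEC_CONTROL etc.) are assumed to come from
 * <asm/vmx_vmcs.h>; the helper name is hypothetical.
 *
 *     static void install_monitor_controls(void)
 *     {
 *         __vmwrite(PIN_BASED_VM_EXEC_CONTROL, MONITOR_PIN_BASED_EXEC_CONTROLS);
 *         __vmwrite(CPU_BASED_VM_EXEC_CONTROL, MONITOR_CPU_BASED_EXEC_CONTROLS);
 *         __vmwrite(VM_EXIT_CONTROLS,  MONITOR_VM_EXIT_CONTROLS);
 *         __vmwrite(VM_ENTRY_CONTROLS, MONITOR_VM_ENTRY_CONTROLS);
 *     }
 */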
/*
 * Exit Reasons
 */
#define VMX_EXIT_REASONS_FAILED_VMENTRY 0x80000000

#define EXIT_REASON_EXCEPTION_NMI       0
#define EXIT_REASON_EXTERNAL_INTERRUPT  1

#define EXIT_REASON_PENDING_INTERRUPT   7

#define EXIT_REASON_TASK_SWITCH         9
#define EXIT_REASON_CPUID               10
#define EXIT_REASON_HLT                 12
#define EXIT_REASON_INVLPG              14
#define EXIT_REASON_RDPMC               15
#define EXIT_REASON_RDTSC               16
#define EXIT_REASON_VMCALL              18

#define EXIT_REASON_CR_ACCESS           28
#define EXIT_REASON_DR_ACCESS           29
#define EXIT_REASON_IO_INSTRUCTION      30
#define EXIT_REASON_MSR_READ            31
#define EXIT_REASON_MSR_WRITE           32
#define EXIT_REASON_MWAIT_INSTRUCTION   36

/*
 * Interruption-information format
 */
#define INTR_INFO_VECTOR_MASK           0xff            /* 7:0 */
#define INTR_INFO_INTR_TYPE_MASK        0x700           /* 10:8 */
#define INTR_INFO_DELIEVER_CODE_MASK    0x800           /* 11 */
#define INTR_INFO_VALID_MASK            0x80000000      /* 31 */

#define INTR_TYPE_EXT_INTR              (0 << 8)        /* external interrupt */
#define INTR_TYPE_EXCEPTION             (3 << 8)        /* processor exception */

/*
 * Exit Qualifications for MOV for Control Register Access
 */
#define CONTROL_REG_ACCESS_NUM          0x7     /* 2:0, number of control register */
#define CONTROL_REG_ACCESS_TYPE         0x30    /* 5:4, access type */
#define TYPE_MOV_TO_CR                  (0 << 4)
#define TYPE_MOV_FROM_CR                (1 << 4)
#define TYPE_CLTS                       (2 << 4)
#define TYPE_LMSW                       (3 << 4)
#define CONTROL_REG_ACCESS_REG          0xf00   /* 10:8, general purpose register */
#define LMSW_SOURCE_DATA                (0xFFFF << 16)  /* 31:16, LMSW source data */
#define REG_EAX                         (0 << 8)
#define REG_ECX                         (1 << 8)
#define REG_EDX                         (2 << 8)
#define REG_EBX                         (3 << 8)
#define REG_ESP                         (4 << 8)
#define REG_EBP                         (5 << 8)
#define REG_ESI                         (6 << 8)
#define REG_EDI                         (7 << 8)
#define REG_R8                          (8 << 8)
#define REG_R9                          (9 << 8)
#define REG_R10                         (10 << 8)
#define REG_R11                         (11 << 8)
#define REG_R12                         (12 << 8)
#define REG_R13                         (13 << 8)
#define REG_R14                         (14 << 8)
#define REG_R15                         (15 << 8)
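
/*
 * Decoding sketch (not in the original header): after an
 * EXIT_REASON_CR_ACCESS vmexit, the exit qualification is parsed with
 * the masks above.  EXIT_QUALIFICATION is assumed to be the field
 * encoding from <asm/vmx_vmcs.h>.
 *
 *     unsigned long qual;
 *     __vmread(EXIT_QUALIFICATION, &qual);
 *     switch (qual & CONTROL_REG_ACCESS_TYPE) {
 *     case TYPE_MOV_TO_CR:
 *         // CR number in (qual & CONTROL_REG_ACCESS_NUM),
 *         // source GP register in (qual & CONTROL_REG_ACCESS_REG)
 *         break;
 *     case TYPE_LMSW:
 *         // new CR0 bits in (qual & LMSW_SOURCE_DATA) >> 16
 *         break;
 *     }
 */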

/*
 * Exit Qualifications for MOV for Debug Register Access
 */
#define DEBUG_REG_ACCESS_NUM            0x7     /* 2:0, number of debug register */
#define DEBUG_REG_ACCESS_TYPE           0x10    /* 4, direction of access */
#define TYPE_MOV_TO_DR                  (0 << 4)
#define TYPE_MOV_FROM_DR                (1 << 4)
#define DEBUG_REG_ACCESS_REG            0xf00   /* 11:8, general purpose register */

#define EXCEPTION_BITMAP_DE     (1 << 0)        /* Divide Error */
#define EXCEPTION_BITMAP_DB     (1 << 1)        /* Debug */
#define EXCEPTION_BITMAP_NMI    (1 << 2)        /* NMI */
#define EXCEPTION_BITMAP_BP     (1 << 3)        /* Breakpoint */
#define EXCEPTION_BITMAP_OF     (1 << 4)        /* Overflow */
#define EXCEPTION_BITMAP_BR     (1 << 5)        /* BOUND Range Exceeded */
#define EXCEPTION_BITMAP_UD     (1 << 6)        /* Invalid Opcode */
#define EXCEPTION_BITMAP_NM     (1 << 7)        /* Device Not Available */
#define EXCEPTION_BITMAP_DF     (1 << 8)        /* Double Fault */
/* reserved */
#define EXCEPTION_BITMAP_TS     (1 << 10)       /* Invalid TSS */
#define EXCEPTION_BITMAP_NP     (1 << 11)       /* Segment Not Present */
#define EXCEPTION_BITMAP_SS     (1 << 12)       /* Stack-Segment Fault */
#define EXCEPTION_BITMAP_GP     (1 << 13)       /* General Protection */
#define EXCEPTION_BITMAP_PG     (1 << 14)       /* Page Fault */
#define EXCEPTION_BITMAP_MF     (1 << 16)       /* x87 FPU Floating-Point Error (Math Fault) */
#define EXCEPTION_BITMAP_AC     (1 << 17)       /* Alignment Check */
#define EXCEPTION_BITMAP_MC     (1 << 18)       /* Machine Check */
#define EXCEPTION_BITMAP_XF     (1 << 19)       /* SIMD Floating-Point Exception */

/* Pending Debug exceptions */

#define PENDING_DEBUG_EXC_BP    (1 << 12)       /* breakpoint */
#define PENDING_DEBUG_EXC_BS    (1 << 14)       /* single step */

#ifdef XEN_DEBUGGER
#define MONITOR_DEFAULT_EXCEPTION_BITMAP        \
    ( EXCEPTION_BITMAP_PG |                     \
      EXCEPTION_BITMAP_DB |                     \
      EXCEPTION_BITMAP_BP |                     \
      EXCEPTION_BITMAP_GP )
#else
#define MONITOR_DEFAULT_EXCEPTION_BITMAP        \
    ( EXCEPTION_BITMAP_PG |                     \
      EXCEPTION_BITMAP_GP )
#endif

/* These bits in CR4 are owned by the host */
#ifdef __i386__
#define VMX_CR4_HOST_MASK (X86_CR4_VMXE)
#else
#define VMX_CR4_HOST_MASK (X86_CR4_VMXE | X86_CR4_PAE)
#endif

#define VMCALL_OPCODE   ".byte 0x0f,0x01,0xc1\n"
#define VMCLEAR_OPCODE  ".byte 0x66,0x0f,0xc7\n"        /* reg/opcode: /6 */
#define VMLAUNCH_OPCODE ".byte 0x0f,0x01,0xc2\n"
#define VMPTRLD_OPCODE  ".byte 0x0f,0xc7\n"             /* reg/opcode: /6 */
#define VMPTRST_OPCODE  ".byte 0x0f,0xc7\n"             /* reg/opcode: /7 */
#define VMREAD_OPCODE   ".byte 0x0f,0x78\n"
#define VMRESUME_OPCODE ".byte 0x0f,0x01,0xc3\n"
#define VMWRITE_OPCODE  ".byte 0x0f,0x79\n"
#define VMXOFF_OPCODE   ".byte 0x0f,0x01,0xc4\n"
#define VMXON_OPCODE    ".byte 0xf3,0x0f,0xc7\n"

#define MODRM_EAX_06    ".byte 0x30\n"  /* [EAX], with reg/opcode: /6 */
#define MODRM_EAX_07    ".byte 0x38\n"  /* [EAX], with reg/opcode: /7 */
#define MODRM_EAX_ECX   ".byte 0xc1\n"  /* EAX, ECX (register-direct) */
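
/*
 * Note (added): the VMX instructions are emitted as raw bytes because
 * assemblers of the day did not know them, and the ModRM byte is
 * appended separately.  E.g. VMPTRLD_OPCODE MODRM_EAX_06 assembles to
 * 0f c7 30, i.e. vmptrld (%eax), while VMREAD_OPCODE MODRM_EAX_ECX
 * assembles to 0f 78 c1, i.e. the field selector is taken from EAX
 * and the result lands in ECX (mod = 11, register-direct).
 */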

static inline int __vmptrld (u64 addr)
{
    unsigned long eflags;
    __asm__ __volatile__ ( VMPTRLD_OPCODE
                           MODRM_EAX_06
                           :
                           : "a" (&addr)
                           : "memory");

    __save_flags(eflags);
    if (eflags & X86_EFLAGS_ZF || eflags & X86_EFLAGS_CF)
        return -1;
    return 0;
}

static inline void __vmptrst (u64 addr)
{
    __asm__ __volatile__ ( VMPTRST_OPCODE
                           MODRM_EAX_07
                           :
                           : "a" (&addr)
                           : "memory");
}

static inline int __vmpclear (u64 addr)
{
    unsigned long eflags;

    __asm__ __volatile__ ( VMCLEAR_OPCODE
                           MODRM_EAX_06
                           :
                           : "a" (&addr)
                           : "memory");
    __save_flags(eflags);
    if (eflags & X86_EFLAGS_ZF || eflags & X86_EFLAGS_CF)
        return -1;
    return 0;
}

#define __vmread(x, ptr) ___vmread((x), (ptr), sizeof(*(ptr)))

static always_inline int ___vmread (const unsigned long field, void *ptr, const int size)
{
    unsigned long eflags;
    unsigned long ecx = 0;

    __asm__ __volatile__ ( VMREAD_OPCODE
                           MODRM_EAX_ECX
                           : "=c" (ecx)
                           : "a" (field)
                           : "memory");

    switch (size) {
    case 1:
        *((u8 *) (ptr)) = ecx;
        break;
    case 2:
        *((u16 *) (ptr)) = ecx;
        break;
    case 4:
        *((u32 *) (ptr)) = ecx;
        break;
    case 8:
        *((u64 *) (ptr)) = ecx;
        break;
    default:
        domain_crash_synchronous();
        break;
    }

    __save_flags(eflags);
    if (eflags & X86_EFLAGS_ZF || eflags & X86_EFLAGS_CF)
        return -1;
    return 0;
}

static inline int __vmwrite (unsigned long field, unsigned long value)
{
    unsigned long eflags;

    __asm__ __volatile__ ( VMWRITE_OPCODE
                           MODRM_EAX_ECX
                           :
                           : "a" (field) , "c" (value)
                           : "memory");
    __save_flags(eflags);
    if (eflags & X86_EFLAGS_ZF || eflags & X86_EFLAGS_CF)
        return -1;
    return 0;
}
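
/*
 * Usage sketch (not in the original header): reading and writing a
 * VMCS field with the wrappers above.  The access width of __vmread()
 * is inferred from the pointer type; GUEST_RIP is a field encoding
 * from <asm/vmx_vmcs.h>, and inst_len is a hypothetical name.
 *
 *     unsigned long rip;
 *     __vmread(GUEST_RIP, &rip);
 *     __vmwrite(GUEST_RIP, rip + inst_len);   // skip an emulated instruction
 *
 * Both return 0 on success, or -1 if the processor signalled
 * VMfailInvalid/VMfailValid through CF or ZF.
 */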

static inline int __vm_set_bit(unsigned long field, unsigned long mask)
{
    unsigned long tmp;
    int err = 0;

    err |= __vmread(field, &tmp);
    tmp |= mask;
    err |= __vmwrite(field, tmp);

    return err;
}

static inline int __vm_clear_bit(unsigned long field, unsigned long mask)
{
    unsigned long tmp;
    int err = 0;

    err |= __vmread(field, &tmp);
    tmp &= ~mask;
    err |= __vmwrite(field, tmp);

    return err;
}

static inline void __vmxoff (void)
{
    __asm__ __volatile__ ( VMXOFF_OPCODE
                           ::: "memory");
}

static inline int __vmxon (u64 addr)
{
    unsigned long eflags;

    __asm__ __volatile__ ( VMXON_OPCODE
                           MODRM_EAX_06
                           :
                           : "a" (&addr)
                           : "memory");
    __save_flags(eflags);
    if (eflags & X86_EFLAGS_ZF || eflags & X86_EFLAGS_CF)
        return -1;
    return 0;
}
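
/*
 * Bring-up sketch (not in the original header): entering VMX root
 * operation.  CR4.VMXE must be set before VMXON will succeed, which
 * is why X86_CR4_VMXE appears in VMX_CR4_HOST_MASK above.
 * vmxon_region is a hypothetical 4KB-aligned region; set_in_cr4() and
 * virt_to_phys() are assumed from Xen's x86 support code.
 *
 *     set_in_cr4(X86_CR4_VMXE);
 *     if (__vmxon(virt_to_phys(vmxon_region)))
 *         printk("VMXON failed\n");
 */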

/* Make sure that Xen intercepts any FP accesses from current */
static inline void vmx_stts(void)
{
    unsigned long cr0;

    /* Set TS in the CR0 the guest actually runs with, so the next FP
     * access raises #NM. */
    __vmread(GUEST_CR0, &cr0);
    if (!(cr0 & X86_CR0_TS))
        __vmwrite(GUEST_CR0, cr0 | X86_CR0_TS);

    /* If the guest believes TS is clear, the fault is Xen's to handle:
     * intercept #NM so the FPU state can be restored lazily. */
    __vmread(CR0_READ_SHADOW, &cr0);
    if (!(cr0 & X86_CR0_TS))
        __vm_set_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_NM);
}

/* Works only for v == current */
static inline int vmx_paging_enabled(struct vcpu *v)
{
    unsigned long cr0;

    __vmread(CR0_READ_SHADOW, &cr0);
    return (cr0 & X86_CR0_PE) && (cr0 & X86_CR0_PG);
}

#define VMX_INVALID_ERROR_CODE  -1

static inline int __vmx_inject_exception(struct vcpu *v, int trap, int type,
                                         int error_code)
{
    unsigned long intr_fields;

    /* Reflect it back into the guest */
    intr_fields = (INTR_INFO_VALID_MASK | type | trap);
    if (error_code != VMX_INVALID_ERROR_CODE) {
        __vmwrite(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
        intr_fields |= INTR_INFO_DELIEVER_CODE_MASK;
    }

    __vmwrite(VM_ENTRY_INTR_INFO_FIELD, intr_fields);
    return 0;
}

static inline int vmx_inject_exception(struct vcpu *v, int trap, int error_code)
{
    return __vmx_inject_exception(v, trap, INTR_TYPE_EXCEPTION, error_code);
}
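
/*
 * Usage sketch (not in the original header): queueing a general
 * protection fault for injection on the next VM entry.  TRAP_gp_fault
 * is assumed from <asm/processor.h>.
 *
 *     vmx_inject_exception(v, TRAP_gp_fault, 0);
 *
 * Exceptions without an error code pass VMX_INVALID_ERROR_CODE so
 * that INTR_INFO_DELIEVER_CODE_MASK stays clear.
 */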

static inline int vmx_inject_extint(struct vcpu *v, int trap, int error_code)
{
    __vmx_inject_exception(v, trap, INTR_TYPE_EXT_INTR, error_code);
    __vmwrite(GUEST_INTERRUPTIBILITY_INFO, 0);

    return 0;
}

static inline int vmx_reflect_exception(struct vcpu *v)
{
    int error_code, vector;

    __vmread(VM_EXIT_INTR_INFO, &vector);
    if (vector & INTR_INFO_DELIEVER_CODE_MASK)
        __vmread(VM_EXIT_INTR_ERROR_CODE, &error_code);
    else
        error_code = VMX_INVALID_ERROR_CODE;
    vector &= 0xff;

#ifndef NDEBUG
    {
        unsigned long eip;

        __vmread(GUEST_RIP, &eip);
        VMX_DBG_LOG(DBG_LEVEL_1,
                    "vmx_reflect_exception: eip = %lx, error_code = %x",
                    eip, error_code);
    }
#endif /* NDEBUG */

    vmx_inject_exception(v, vector, error_code);
    return 0;
}

static inline shared_iopage_t *get_sp(struct domain *d)
{
    return (shared_iopage_t *) d->arch.vmx_platform.shared_page_va;
}

static inline vcpu_iodata_t *get_vio(struct domain *d, unsigned long cpu)
{
    return &get_sp(d)->vcpu_iodata[cpu];
}

static inline int iopacket_port(struct domain *d)
{
    return get_sp(d)->sp_global.eport;
}
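
/*
 * Usage sketch (not in the original header): a vmexit handler hands an
 * I/O request to the device model through the shared page, then
 * notifies it via the event channel returned by iopacket_port().  The
 * vp_ioreq field and STATE_IOREQ_READY are assumed from
 * <public/io/ioreq.h>.
 *
 *     vcpu_iodata_t *vio = get_vio(v->domain, v->vcpu_id);
 *     ioreq_t *p = &vio->vp_ioreq;
 *     p->state = STATE_IOREQ_READY;
 *     // ...then send an event on iopacket_port(v->domain)
 */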

/* Prototypes */
void load_cpu_user_regs(struct cpu_user_regs *regs);
void store_cpu_user_regs(struct cpu_user_regs *regs);

enum { VMX_COPY_IN = 0, VMX_COPY_OUT };
int vmx_copy(void *buf, unsigned long laddr, int size, int dir);

#endif /* __ASM_X86_VMX_H__ */