xen-vtx-unstable
annotate xen/include/asm-x86/vmx.h @ 5774:71d000e59b13
Cleanup mov to CR4 handling.
Signed-off-by: Arun Sharma <arun.sharma@intel.com>
field | value
---|---
author | kaf24@firebug.cl.cam.ac.uk
date | Thu Jul 14 08:00:35 2005 +0000 (2005-07-14)
parents | 82390e707bb9
children | 9b77ba29108d
rev | line source |
---|---|
iap10@3290 | 1 /* |
iap10@3290 | 2 * vmx.h: VMX Architecture related definitions |
iap10@3290 | 3 * Copyright (c) 2004, Intel Corporation. |
iap10@3290 | 4 * |
iap10@3290 | 5 * This program is free software; you can redistribute it and/or modify it |
iap10@3290 | 6 * under the terms and conditions of the GNU General Public License, |
iap10@3290 | 7 * version 2, as published by the Free Software Foundation. |
iap10@3290 | 8 * |
iap10@3290 | 9 * This program is distributed in the hope it will be useful, but WITHOUT |
iap10@3290 | 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
iap10@3290 | 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for |
iap10@3290 | 12 * more details. |
iap10@3290 | 13 * |
iap10@3290 | 14 * You should have received a copy of the GNU General Public License along with |
iap10@3290 | 15 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple |
iap10@3290 | 16 * Place - Suite 330, Boston, MA 02111-1307 USA. |
iap10@3290 | 17 * |
iap10@3290 | 18 */ |
iap10@3290 | 19 #ifndef __ASM_X86_VMX_H__ |
iap10@3290 | 20 #define __ASM_X86_VMX_H__ |
iap10@3290 | 21 |
iap10@3290 | 22 #include <xen/sched.h> |
iap10@3290 | 23 #include <asm/types.h> |
iap10@3290 | 24 #include <asm/regs.h> |
iap10@3290 | 25 #include <asm/processor.h> |
iap10@3290 | 26 #include <asm/vmx_vmcs.h> |
cl349@4856 | 27 #include <asm/i387.h> |
iap10@3290 | 28 |
arun@5608 | 29 #include <public/io/ioreq.h> |
arun@5608 | 30 |
kaf24@4683 | 31 extern void vmx_asm_vmexit_handler(struct cpu_user_regs); |
iap10@3290 | 32 extern void vmx_asm_do_resume(void); |
iap10@3290 | 33 extern void vmx_asm_do_launch(void); |
kaf24@5289 | 34 extern void vmx_intr_assist(struct vcpu *d); |
iap10@3290 | 35 |
kaf24@5289 | 36 extern void arch_vmx_do_launch(struct vcpu *); |
kaf24@5289 | 37 extern void arch_vmx_do_resume(struct vcpu *); |
iap10@3290 | 38 |
iap10@3290 | 39 extern int vmcs_size; |
iap10@3290 | 40 extern unsigned int cpu_rev; |
iap10@3290 | 41 |
iap10@3290 | 42 /* |
iap10@3290 | 43 * Need fill bits for SENTER |
iap10@3290 | 44 */ |
iap10@3290 | 45 |
kaf24@5414 | 46 #define MONITOR_PIN_BASED_EXEC_CONTROLS_RESERVED_VALUE 0x00000016 |
kaf24@5414 | 47 |
kaf24@5414 | 48 #define MONITOR_PIN_BASED_EXEC_CONTROLS \ |
kaf24@5414 | 49 MONITOR_PIN_BASED_EXEC_CONTROLS_RESERVED_VALUE | \ |
kaf24@5414 | 50 PIN_BASED_EXT_INTR_MASK | \ |
kaf24@5414 | 51 PIN_BASED_NMI_EXITING |
kaf24@5414 | 52 |
kaf24@5414 | 53 #define MONITOR_CPU_BASED_EXEC_CONTROLS_RESERVED_VALUE 0x0401e172 |
iap10@3290 | 54 |
kaf24@5414 | 55 #define MONITOR_CPU_BASED_EXEC_CONTROLS \ |
kaf24@5414 | 56 MONITOR_CPU_BASED_EXEC_CONTROLS_RESERVED_VALUE | \ |
kaf24@5414 | 57 CPU_BASED_HLT_EXITING | \ |
kaf24@5414 | 58 CPU_BASED_INVDPG_EXITING | \ |
kaf24@5414 | 59 CPU_BASED_MWAIT_EXITING | \ |
kaf24@5414 | 60 CPU_BASED_MOV_DR_EXITING | \ |
kaf24@5414 | 61 CPU_BASED_UNCOND_IO_EXITING | \ |
kaf24@5414 | 62 CPU_BASED_CR8_LOAD_EXITING | \ |
kaf24@5414 | 63 CPU_BASED_CR8_STORE_EXITING |
kaf24@5414 | 64 |
kaf24@5414 | 65 #define MONITOR_VM_EXIT_CONTROLS_RESERVED_VALUE 0x0003edff |
kaf24@5414 | 66 |
kaf24@5414 | 67 #define VM_EXIT_CONTROLS_IA_32E_MODE 0x00000200 |
kaf24@5414 | 68 |
kaf24@5414 | 69 #define MONITOR_VM_EXIT_CONTROLS \ |
kaf24@5414 | 70 MONITOR_VM_EXIT_CONTROLS_RESERVED_VALUE |\ |
kaf24@5414 | 71 VM_EXIT_ACK_INTR_ON_EXIT |
kaf24@5414 | 72 |
kaf24@5414 | 73 #define VM_ENTRY_CONTROLS_RESERVED_VALUE 0x000011ff |
kaf24@5414 | 74 #define VM_ENTRY_CONTROLS_IA_32E_MODE 0x00000200 |
kaf24@5414 | 75 #define MONITOR_VM_ENTRY_CONTROLS VM_ENTRY_CONTROLS_RESERVED_VALUE |
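
(Editor's note) The MONITOR_* words above are the values the hypervisor loads into the corresponding VMCS execution-control fields when it builds a VMCS, with the *_RESERVED_VALUE parts supplying the must-be-set reserved bits. A minimal consumption sketch, assuming the VMCS field names PIN_BASED_VM_EXEC_CONTROL, CPU_BASED_VM_EXEC_CONTROL, VM_EXIT_CONTROLS, and VM_ENTRY_CONTROLS from the vmx_vmcs.h of this era:

    /* Editor's sketch, not part of the annotated file. */
    __vmwrite(PIN_BASED_VM_EXEC_CONTROL, MONITOR_PIN_BASED_EXEC_CONTROLS);
    __vmwrite(CPU_BASED_VM_EXEC_CONTROL, MONITOR_CPU_BASED_EXEC_CONTROLS);
    __vmwrite(VM_EXIT_CONTROLS, MONITOR_VM_EXIT_CONTROLS);
    __vmwrite(VM_ENTRY_CONTROLS, MONITOR_VM_ENTRY_CONTROLS);
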
iap10@3290 | 76 /* |
iap10@3290 | 77 * Exit Reasons |
iap10@3290 | 78 */ |
iap10@3290 | 79 #define VMX_EXIT_REASONS_FAILED_VMENTRY 0x80000000 |
iap10@3290 | 80 |
iap10@3290 | 81 #define EXIT_REASON_EXCEPTION_NMI 0 |
iap10@3290 | 82 #define EXIT_REASON_EXTERNAL_INTERRUPT 1 |
iap10@3290 | 83 |
iap10@3290 | 84 #define EXIT_REASON_PENDING_INTERRUPT 7 |
iap10@3290 | 85 |
iap10@3290 | 86 #define EXIT_REASON_TASK_SWITCH 9 |
iap10@3290 | 87 #define EXIT_REASON_CPUID 10 |
iap10@3290 | 88 #define EXIT_REASON_HLT 12 |
iap10@3290 | 89 #define EXIT_REASON_INVLPG 14 |
iap10@3290 | 90 #define EXIT_REASON_RDPMC 15 |
iap10@3290 | 91 #define EXIT_REASON_RDTSC 16 |
iap10@3290 | 92 #define EXIT_REASON_VMCALL 18 |
iap10@3290 | 93 |
iap10@3290 | 94 #define EXIT_REASON_CR_ACCESS 28 |
iap10@3290 | 95 #define EXIT_REASON_DR_ACCESS 29 |
iap10@3290 | 96 #define EXIT_REASON_IO_INSTRUCTION 30 |
iap10@3290 | 97 #define EXIT_REASON_MSR_READ 31 |
iap10@3290 | 98 #define EXIT_REASON_MSR_WRITE 32 |
iap10@3290 | 99 #define EXIT_REASON_MWAIT_INSTRUCTION 36 |
iap10@3290 | 100 |
iap10@3290 | 101 /* |
iap10@3290 | 102 * Interruption-information format |
iap10@3290 | 103 */ |
iap10@3290 | 104 #define INTR_INFO_VECTOR_MASK 0xff /* 7:0 */ |
iap10@3290 | 105 #define INTR_INFO_INTR_TYPE_MASK 0x700 /* 10:8 */ |
iap10@3290 | 106 #define INTR_INFO_DELIEVER_CODE_MASK 0x800 /* 11 */ |
iap10@3290 | 107 #define INTR_INFO_VALID_MASK 0x80000000 /* 31 */ |
iap10@3290 | 108 |
iap10@3290 | 109 #define INTR_TYPE_EXT_INTR (0 << 8) /* external interrupt */ |
iap10@3290 | 110 #define INTR_TYPE_EXCEPTION (3 << 8) /* processor exception */ |
iap10@3290 | 111 |
iap10@3290 | 112 /* |
iap10@3290 | 113 * Exit Qualifications for MOV for Control Register Access |
iap10@3290 | 114 */ |
iap10@3290 | 115 #define CONTROL_REG_ACCESS_NUM 0x7 /* 2:0, number of control register */ |
iap10@3290 | 116 #define CONTROL_REG_ACCESS_TYPE 0x30 /* 5:4, access type */ |
iap10@3290 | 117 #define TYPE_MOV_TO_CR (0 << 4) |
iap10@3290 | 118 #define TYPE_MOV_FROM_CR (1 << 4) |
iap10@3290 | 119 #define TYPE_CLTS (2 << 4) |
leendert@4652 | 120 #define TYPE_LMSW (3 << 4) |
kaf24@5414 | 121 #define CONTROL_REG_ACCESS_REG 0xf00 /* 10:8, general purpose register */ |
kaf24@5414 | 122 #define LMSW_SOURCE_DATA (0xFFFF << 16) /* 16:31 lmsw source */ |
iap10@3290 | 123 #define REG_EAX (0 << 8) |
iap10@3290 | 124 #define REG_ECX (1 << 8) |
iap10@3290 | 125 #define REG_EDX (2 << 8) |
iap10@3290 | 126 #define REG_EBX (3 << 8) |
iap10@3290 | 127 #define REG_ESP (4 << 8) |
iap10@3290 | 128 #define REG_EBP (5 << 8) |
iap10@3290 | 129 #define REG_ESI (6 << 8) |
iap10@3290 | 130 #define REG_EDI (7 << 8) |
kaf24@5414 | 131 #define REG_R8 (8 << 8) |
kaf24@5414 | 132 #define REG_R9 (9 << 8) |
kaf24@5414 | 133 #define REG_R10 (10 << 8) |
kaf24@5414 | 134 #define REG_R11 (11 << 8) |
kaf24@5414 | 135 #define REG_R12 (12 << 8) |
kaf24@5414 | 136 #define REG_R13 (13 << 8) |
kaf24@5414 | 137 #define REG_R14 (14 << 8) |
kaf24@5414 | 138 #define REG_R15 (15 << 8) |
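
(Editor's note) A decode sketch for the CR-access exit qualification — the kind of consumer this changeset's "mov to CR4" cleanup touches. EXIT_QUALIFICATION as the VMCS field name is assumed from the surrounding Xen sources:

    /* Editor's sketch, not part of the annotated file. */
    unsigned long qual, gp;
    __vmread(EXIT_QUALIFICATION, &qual);        /* field name assumed */
    gp = (qual & CONTROL_REG_ACCESS_REG) >> 8;  /* GPR index: REG_EAX .. REG_R15 */
    switch (qual & CONTROL_REG_ACCESS_TYPE) {
    case TYPE_MOV_TO_CR:
        if ((qual & CONTROL_REG_ACCESS_NUM) == 4)
            ; /* mov to CR4: the path cleaned up by this changeset */
        break;
    case TYPE_LMSW:
        /* The LMSW source operand sits in bits 31:16 (LMSW_SOURCE_DATA). */
        break;
    }
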
iap10@3290 | 139 |
iap10@3290 | 140 /* |
iap10@3290 | 141 * Exit Qualifications for MOV for Debug Register Access |
iap10@3290 | 142 */ |
iap10@3290 | 143 #define DEBUG_REG_ACCESS_NUM 0x7 /* 2:0, number of debug register */ |
iap10@3290 | 144 #define DEBUG_REG_ACCESS_TYPE 0x10 /* 4, direction of access */ |
iap10@3290 | 145 #define TYPE_MOV_TO_DR (0 << 4) |
iap10@3290 | 146 #define TYPE_MOV_FROM_DR (1 << 4) |
kaf24@5414 | 147 #define DEBUG_REG_ACCESS_REG 0xf00 /* 11:8, general purpose register */ |
iap10@3290 | 148 |
iap10@3290 | 149 #define EXCEPTION_BITMAP_DE (1 << 0) /* Divide Error */ |
iap10@3290 | 150 #define EXCEPTION_BITMAP_DB (1 << 1) /* Debug */ |
iap10@3290 | 151 #define EXCEPTION_BITMAP_NMI (1 << 2) /* NMI */ |
iap10@3290 | 152 #define EXCEPTION_BITMAP_BP (1 << 3) /* Breakpoint */ |
iap10@3290 | 153 #define EXCEPTION_BITMAP_OF (1 << 4) /* Overflow */ |
iap10@3290 | 154 #define EXCEPTION_BITMAP_BR (1 << 5) /* BOUND Range Exceeded */ |
iap10@3290 | 155 #define EXCEPTION_BITMAP_UD (1 << 6) /* Invalid Opcode */ |
iap10@3290 | 156 #define EXCEPTION_BITMAP_NM (1 << 7) /* Device Not Available */ |
iap10@3290 | 157 #define EXCEPTION_BITMAP_DF (1 << 8) /* Double Fault */ |
iap10@3290 | 158 /* reserved */ |
iap10@3290 | 159 #define EXCEPTION_BITMAP_TS (1 << 10) /* Invalid TSS */ |
iap10@3290 | 160 #define EXCEPTION_BITMAP_NP (1 << 11) /* Segment Not Present */ |
iap10@3290 | 161 #define EXCEPTION_BITMAP_SS (1 << 12) /* Stack-Segment Fault */ |
iap10@3290 | 162 #define EXCEPTION_BITMAP_GP (1 << 13) /* General Protection */ |
iap10@3290 | 163 #define EXCEPTION_BITMAP_PG (1 << 14) /* Page Fault */ |
iap10@3290 | 164 #define EXCEPTION_BITMAP_MF (1 << 16) /* x87 FPU Floating-Point Error (Math Fault) */ |
iap10@3290 | 165 #define EXCEPTION_BITMAP_AC (1 << 17) /* Alignment Check */ |
iap10@3290 | 166 #define EXCEPTION_BITMAP_MC (1 << 18) /* Machine Check */ |
iap10@3290 | 167 #define EXCEPTION_BITMAP_XF (1 << 19) /* SIMD Floating-Point Exception */ |
iap10@3290 | 168 |
arun@4999 | 169 /* Pending Debug exceptions */ |
arun@4999 | 170 |
arun@4999 | 171 #define PENDING_DEBUG_EXC_BP (1 << 12) /* break point */ |
arun@4999 | 172 #define PENDING_DEBUG_EXC_BS (1 << 14) /* Single step */ |
arun@4999 | 173 |
iap10@3290 | 174 #ifdef XEN_DEBUGGER |
iap10@3290 | 175 #define MONITOR_DEFAULT_EXCEPTION_BITMAP \ |
iap10@3290 | 176 ( EXCEPTION_BITMAP_PG | \ |
iap10@3290 | 177 EXCEPTION_BITMAP_DB | \ |
iap10@3290 | 178 EXCEPTION_BITMAP_BP | \ |
iap10@3290 | 179 EXCEPTION_BITMAP_GP ) |
iap10@3290 | 180 #else |
iap10@3290 | 181 #define MONITOR_DEFAULT_EXCEPTION_BITMAP \ |
iap10@3290 | 182 ( EXCEPTION_BITMAP_PG | \ |
iap10@3290 | 183 EXCEPTION_BITMAP_GP ) |
iap10@3290 | 184 #endif |
iap10@3290 | 185 |
kaf24@5774 | 186 /* These bits in the CR4 are owned by the host */ |
kaf24@5774 | 187 #ifdef __i386__ |
kaf24@5774 | 188 #define VMX_CR4_HOST_MASK (X86_CR4_VMXE) |
kaf24@5774 | 189 #else |
kaf24@5774 | 190 #define VMX_CR4_HOST_MASK (X86_CR4_VMXE | X86_CR4_PAE) |
kaf24@5774 | 191 #endif |
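
(Editor's note) "Owned by the host" means guest writes to these CR4 bits must trap and guest reads of them are served from a shadow value rather than the real CR4. A hedged sketch of how such a mask is typically installed, assuming the VMCS field names CR4_GUEST_HOST_MASK and CR4_READ_SHADOW (the latter by analogy with CR0_READ_SHADOW used later in this file):

    /* Editor's sketch, not part of the annotated file. */
    __vmwrite(CR4_GUEST_HOST_MASK, VMX_CR4_HOST_MASK);
    __vmwrite(CR4_READ_SHADOW, guest_cr4);  /* guest_cr4: hypothetical guest-visible value */
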
kaf24@5774 | 192 |
iap10@3290 | 193 #define VMCALL_OPCODE ".byte 0x0f,0x01,0xc1\n" |
iap10@3290 | 194 #define VMCLEAR_OPCODE ".byte 0x66,0x0f,0xc7\n" /* reg/opcode: /6 */ |
iap10@3290 | 195 #define VMLAUNCH_OPCODE ".byte 0x0f,0x01,0xc2\n" |
iap10@3290 | 196 #define VMPTRLD_OPCODE ".byte 0x0f,0xc7\n" /* reg/opcode: /6 */ |
iap10@3290 | 197 #define VMPTRST_OPCODE ".byte 0x0f,0xc7\n" /* reg/opcode: /7 */ |
iap10@3290 | 198 #define VMREAD_OPCODE ".byte 0x0f,0x78\n" |
iap10@3290 | 199 #define VMRESUME_OPCODE ".byte 0x0f,0x01,0xc3\n" |
iap10@3290 | 200 #define VMWRITE_OPCODE ".byte 0x0f,0x79\n" |
iap10@3290 | 201 #define VMXOFF_OPCODE ".byte 0x0f,0x01,0xc4\n" |
iap10@3290 | 202 #define VMXON_OPCODE ".byte 0xf3,0x0f,0xc7\n" |
iap10@3290 | 203 |
iap10@3290 | 204 #define MODRM_EAX_06 ".byte 0x30\n" /* [EAX], with reg/opcode: /6 */ |
iap10@3290 | 205 #define MODRM_EAX_07 ".byte 0x38\n" /* [EAX], with reg/opcode: /7 */ |
iap10@3290 | 206 #define MODRM_EAX_ECX ".byte 0xc1\n" /* [EAX], [ECX] */ |
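
(Editor's note) The VMX instructions are spelled out as raw .byte sequences because assemblers contemporary with this code did not yet accept the VMX mnemonics. The MODRM_EAX_* macros supply the ModRM byte: 0x30 and 0x38 select the /6 and /7 opcode extensions with a memory operand at [EAX], while 0xc1 is the register form (EAX, ECX) used by VMREAD and VMWRITE.
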
iap10@3290 | 207 |
iap10@3290 | 208 static inline int __vmptrld (u64 addr) |
iap10@3290 | 209 { |
iap10@3290 | 210 unsigned long eflags; |
iap10@3290 | 211 __asm__ __volatile__ ( VMPTRLD_OPCODE |
iap10@3290 | 212 MODRM_EAX_06 |
iap10@3290 | 213 : |
iap10@3290 | 214 : "a" (&addr) |
iap10@3290 | 215 : "memory"); |
iap10@3290 | 216 |
iap10@3290 | 217 __save_flags(eflags); |
iap10@3290 | 218 if (eflags & X86_EFLAGS_ZF || eflags & X86_EFLAGS_CF) |
iap10@3290 | 219 return -1; |
iap10@3290 | 220 return 0; |
iap10@3290 | 221 } |
iap10@3290 | 222 |
iap10@3290 | 223 static inline void __vmptrst (u64 addr) |
iap10@3290 | 224 { |
iap10@3290 | 225 __asm__ __volatile__ ( VMPTRST_OPCODE |
iap10@3290 | 226 MODRM_EAX_07 |
iap10@3290 | 227 : |
iap10@3290 | 228 : "a" (&addr) |
iap10@3290 | 229 : "memory"); |
iap10@3290 | 230 } |
iap10@3290 | 231 |
iap10@3290 | 232 static inline int __vmpclear (u64 addr) |
iap10@3290 | 233 { |
iap10@3290 | 234 unsigned long eflags; |
iap10@3290 | 235 |
iap10@3290 | 236 __asm__ __volatile__ ( VMCLEAR_OPCODE |
iap10@3290 | 237 MODRM_EAX_06 |
iap10@3290 | 238 : |
iap10@3290 | 239 : "a" (&addr) |
iap10@3290 | 240 : "memory"); |
iap10@3290 | 241 __save_flags(eflags); |
iap10@3290 | 242 if (eflags & X86_EFLAGS_ZF || eflags & X86_EFLAGS_CF) |
iap10@3290 | 243 return -1; |
iap10@3290 | 244 return 0; |
iap10@3290 | 245 } |
iap10@3290 | 246 |
arun@4586 | 247 static inline int __vmread (unsigned long field, void *value) |
iap10@3290 | 248 { |
iap10@3290 | 249 unsigned long eflags; |
iap10@3290 | 250 unsigned long ecx = 0; |
iap10@3290 | 251 |
iap10@3290 | 252 __asm__ __volatile__ ( VMREAD_OPCODE |
iap10@3290 | 253 MODRM_EAX_ECX |
iap10@3290 | 254 : "=c" (ecx) |
iap10@3290 | 255 : "a" (field) |
iap10@3290 | 256 : "memory"); |
iap10@3290 | 257 |
iap10@3290 | 258 *((long *) value) = ecx; |
iap10@3290 | 259 |
iap10@3290 | 260 __save_flags(eflags); |
iap10@3290 | 261 if (eflags & X86_EFLAGS_ZF || eflags & X86_EFLAGS_CF) |
iap10@3290 | 262 return -1; |
iap10@3290 | 263 return 0; |
iap10@3290 | 264 } |
iap10@3290 | 265 |
arun@4586 | 266 static inline int __vmwrite (unsigned long field, unsigned long value) |
iap10@3290 | 267 { |
iap10@3290 | 268 unsigned long eflags; |
iap10@3290 | 269 |
iap10@3290 | 270 __asm__ __volatile__ ( VMWRITE_OPCODE |
iap10@3290 | 271 MODRM_EAX_ECX |
iap10@3290 | 272 : |
iap10@3290 | 273 : "a" (field) , "c" (value) |
iap10@3290 | 274 : "memory"); |
iap10@3290 | 275 __save_flags(eflags); |
iap10@3290 | 276 if (eflags & X86_EFLAGS_ZF || eflags & X86_EFLAGS_CF) |
iap10@3290 | 277 return -1; |
iap10@3290 | 278 return 0; |
iap10@3290 | 279 } |
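
(Editor's note) The ZF/CF test after each instruction follows the VMX status convention: CF set means VMfailInvalid, ZF set means VMfailValid, and both clear means VMsucceed; the helpers fold the two failure cases into -1. A typical call site, with domain_crash_synchronous() assumed from the Xen of this era:

    /* Editor's sketch, not part of the annotated file. */
    unsigned long cr0;
    if (__vmread(GUEST_CR0, &cr0))
        domain_crash_synchronous();  /* VMREAD signalled VMfail */
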
iap10@3290 | 280 |
arun@4999 | 281 static inline int __vm_set_bit(unsigned long field, unsigned long mask) |
arun@4999 | 282 { |
arun@4999 | 283 unsigned long tmp; |
arun@4999 | 284 int err = 0; |
arun@4999 | 285 |
arun@4999 | 286 err |= __vmread(field, &tmp); |
arun@4999 | 287 tmp |= mask; |
arun@4999 | 288 err |= __vmwrite(field, tmp); |
arun@4999 | 289 |
arun@4999 | 290 return err; |
arun@4999 | 291 } |
arun@4999 | 292 |
arun@4999 | 293 static inline int __vm_clear_bit(unsigned long field, unsigned long mask) |
arun@4999 | 294 { |
arun@4999 | 295 unsigned long tmp; |
arun@4999 | 296 int err = 0; |
arun@4999 | 297 |
arun@4999 | 298 err |= __vmread(field, &tmp); |
arun@4999 | 299 tmp &= ~mask; |
arun@4999 | 300 err |= __vmwrite(field, tmp); |
arun@4999 | 301 |
arun@4999 | 302 return err; |
arun@4999 | 303 } |
arun@4999 | 304 |
iap10@3290 | 305 static inline void __vmxoff (void) |
iap10@3290 | 306 { |
iap10@3290 | 307 __asm__ __volatile__ ( VMXOFF_OPCODE |
iap10@3290 | 308 ::: "memory"); |
iap10@3290 | 309 } |
iap10@3290 | 310 |
iap10@3290 | 311 static inline int __vmxon (u64 addr) |
iap10@3290 | 312 { |
iap10@3290 | 313 unsigned long eflags; |
iap10@3290 | 314 |
iap10@3290 | 315 __asm__ __volatile__ ( VMXON_OPCODE |
iap10@3290 | 316 MODRM_EAX_06 |
iap10@3290 | 317 : |
iap10@3290 | 318 : "a" (&addr) |
iap10@3290 | 319 : "memory"); |
iap10@3290 | 320 __save_flags(eflags); |
iap10@3290 | 321 if (eflags & X86_EFLAGS_ZF || eflags & X86_EFLAGS_CF) |
iap10@3290 | 322 return -1; |
iap10@3290 | 323 return 0; |
iap10@3290 | 324 } |
arun@3910 | 325 |
cl349@4856 | 326 /* Make sure that xen intercepts any FP accesses from current */ |
cl349@4856 | 327 static inline void vmx_stts() |
cl349@4856 | 328 { |
cl349@4856 | 329 unsigned long cr0; |
cl349@4856 | 330 |
cl349@4856 | 331 __vmread(GUEST_CR0, &cr0); |
cl349@4856 | 332 if (!(cr0 & X86_CR0_TS)) |
cl349@4856 | 333 __vmwrite(GUEST_CR0, cr0 | X86_CR0_TS); |
cl349@4856 | 334 |
cl349@4856 | 335 __vmread(CR0_READ_SHADOW, &cr0); |
cl349@4856 | 336 if (!(cr0 & X86_CR0_TS)) |
arun@4999 | 337 __vm_set_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_NM); |
cl349@4856 | 338 } |
arun@5186 | 339 |
arun@5186 | 340 /* Works only for ed == current */ |
kaf24@5289 | 341 static inline int vmx_paging_enabled(struct vcpu *v) |
arun@5186 | 342 { |
arun@5186 | 343 unsigned long cr0; |
arun@5186 | 344 |
arun@5186 | 345 __vmread(CR0_READ_SHADOW, &cr0); |
arun@5186 | 346 return (cr0 & X86_CR0_PE) && (cr0 & X86_CR0_PG); |
arun@5186 | 347 } |
arun@5186 | 348 |
kaf24@5646 | 349 #define VMX_INVALID_ERROR_CODE -1 |
kaf24@5646 | 350 |
kaf24@5646 | 351 static inline int __vmx_inject_exception(struct vcpu *v, int trap, int type, |
kaf24@5646 | 352 int error_code) |
kaf24@5646 | 353 { |
kaf24@5646 | 354 unsigned long intr_fields; |
kaf24@5646 | 355 |
kaf24@5646 | 356 /* Reflect it back into the guest */ |
kaf24@5646 | 357 intr_fields = (INTR_INFO_VALID_MASK | type | trap); |
kaf24@5646 | 358 if (error_code != VMX_INVALID_ERROR_CODE) { |
kaf24@5646 | 359 __vmwrite(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code); |
kaf24@5646 | 360 intr_fields |= INTR_INFO_DELIEVER_CODE_MASK; |
kaf24@5646 | 361 } |
kaf24@5646 | 362 |
kaf24@5646 | 363 __vmwrite(VM_ENTRY_INTR_INFO_FIELD, intr_fields); |
kaf24@5646 | 364 return 0; |
kaf24@5646 | 365 } |
kaf24@5646 | 366 |
kaf24@5646 | 367 static inline int vmx_inject_exception(struct vcpu *v, int trap, int error_code) |
kaf24@5646 | 368 { |
kaf24@5646 | 369 return __vmx_inject_exception(v, trap, INTR_TYPE_EXCEPTION, error_code); |
kaf24@5646 | 370 } |
kaf24@5646 | 371 |
kaf24@5646 | 372 static inline int vmx_inject_extint(struct vcpu *v, int trap, int error_code) |
kaf24@5646 | 373 { |
kaf24@5646 | 374 __vmx_inject_exception(v, trap, INTR_TYPE_EXT_INTR, error_code); |
kaf24@5646 | 375 __vmwrite(GUEST_INTERRUPTIBILITY_INFO, 0); |
kaf24@5646 | 376 |
kaf24@5646 | 377 return 0; |
kaf24@5646 | 378 } |
kaf24@5646 | 379 |
kaf24@5646 | 380 static inline int vmx_reflect_exception(struct vcpu *v) |
kaf24@5646 | 381 { |
kaf24@5646 | 382 int error_code, vector; |
kaf24@5646 | 383 |
kaf24@5646 | 384 __vmread(VM_EXIT_INTR_INFO, &vector); |
kaf24@5646 | 385 if (vector & INTR_INFO_DELIEVER_CODE_MASK) |
kaf24@5646 | 386 __vmread(VM_EXIT_INTR_ERROR_CODE, &error_code); |
kaf24@5646 | 387 else |
kaf24@5646 | 388 error_code = VMX_INVALID_ERROR_CODE; |
kaf24@5646 | 389 vector &= 0xff; |
kaf24@5646 | 390 |
kaf24@5646 | 391 #ifndef NDEBUG |
kaf24@5646 | 392 { |
kaf24@5646 | 393 unsigned long eip; |
kaf24@5646 | 394 |
kaf24@5646 | 395 __vmread(GUEST_RIP, &eip); |
kaf24@5646 | 396 VMX_DBG_LOG(DBG_LEVEL_1, |
kaf24@5646 | 397 "vmx_reflect_exception: eip = %lx, error_code = %x", |
kaf24@5646 | 398 eip, error_code); |
kaf24@5646 | 399 } |
kaf24@5646 | 400 #endif /* NDEBUG */ |
kaf24@5646 | 401 |
kaf24@5646 | 402 vmx_inject_exception(v, vector, error_code); |
kaf24@5646 | 403 return 0; |
kaf24@5646 | 404 } |
kaf24@5646 | 405 |
arun@5615 | 406 static inline shared_iopage_t *get_sp(struct domain *d) |
arun@5615 | 407 { |
arun@5615 | 408 return (shared_iopage_t *) d->arch.vmx_platform.shared_page_va; |
arun@5615 | 409 } |
arun@5615 | 410 |
arun@5608 | 411 static inline vcpu_iodata_t *get_vio(struct domain *d, unsigned long cpu) |
arun@5608 | 412 { |
arun@5615 | 413 return &get_sp(d)->vcpu_iodata[cpu]; |
arun@5608 | 414 } |
arun@5608 | 415 |
arun@5608 | 416 static inline int iopacket_port(struct domain *d) |
arun@5608 | 417 { |
arun@5615 | 418 return get_sp(d)->sp_global.eport; |
arun@5608 | 419 } |
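
(Editor's note) These accessors resolve into the ioreq page shared with the device model: get_sp() returns the mapped page, get_vio() a per-vcpu slot in it, and iopacket_port() the event channel used to notify the device model. A usage sketch, with the vp_ioreq member of vcpu_iodata_t assumed from the public/io/ioreq.h of this era:

    /* Editor's sketch, not part of the annotated file. */
    vcpu_iodata_t *vio = get_vio(d, v->vcpu_id);  /* v->vcpu_id assumed */
    ioreq_t *p = &vio->vp_ioreq;                  /* member name assumed */
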
arun@5608 | 420 |
iap10@3290 | 421 #endif /* __ASM_X86_VMX_H__ */ |