/*
 * vmx.h: VMX Architecture related definitions
 * Copyright (c) 2004, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 */
#ifndef __ASM_X86_VMX_H__
#define __ASM_X86_VMX_H__

#include <xen/sched.h>
#include <asm/types.h>
#include <asm/regs.h>
#include <asm/processor.h>
#include <asm/vmx_vmcs.h>
#include <asm/i387.h>

#include <public/io/ioreq.h>

extern int hvm_enabled;

extern void vmx_asm_vmexit_handler(struct cpu_user_regs);
extern void vmx_asm_do_resume(void);
extern void vmx_asm_do_launch(void);
extern void vmx_intr_assist(void);

extern void arch_vmx_do_launch(struct vcpu *);
extern void arch_vmx_do_resume(struct vcpu *);
extern void arch_vmx_do_relaunch(struct vcpu *);

extern int vmcs_size;
extern unsigned int cpu_rev;

/*
 * Bits that need to be filled for SENTER
 */

#define MONITOR_PIN_BASED_EXEC_CONTROLS_RESERVED_VALUE 0x00000016

#define MONITOR_PIN_BASED_EXEC_CONTROLS                 \
    (                                                   \
    MONITOR_PIN_BASED_EXEC_CONTROLS_RESERVED_VALUE |    \
    PIN_BASED_EXT_INTR_MASK |                           \
    PIN_BASED_NMI_EXITING                               \
    )

#define MONITOR_CPU_BASED_EXEC_CONTROLS_RESERVED_VALUE 0x0401e172

#define _MONITOR_CPU_BASED_EXEC_CONTROLS                \
    (                                                   \
    MONITOR_CPU_BASED_EXEC_CONTROLS_RESERVED_VALUE |    \
    CPU_BASED_HLT_EXITING |                             \
    CPU_BASED_INVDPG_EXITING |                          \
    CPU_BASED_MWAIT_EXITING |                           \
    CPU_BASED_MOV_DR_EXITING |                          \
    CPU_BASED_ACTIVATE_IO_BITMAP |                      \
    CPU_BASED_UNCOND_IO_EXITING                         \
    )

#define MONITOR_CPU_BASED_EXEC_CONTROLS_IA32E_MODE      \
    (                                                   \
    CPU_BASED_CR8_LOAD_EXITING |                        \
    CPU_BASED_CR8_STORE_EXITING                         \
    )

#define MONITOR_VM_EXIT_CONTROLS_RESERVED_VALUE   0x0003edff

#define MONITOR_VM_EXIT_CONTROLS_IA32E_MODE       0x00000200

#define _MONITOR_VM_EXIT_CONTROLS                       \
    (                                                   \
    MONITOR_VM_EXIT_CONTROLS_RESERVED_VALUE |           \
    VM_EXIT_ACK_INTR_ON_EXIT                            \
    )
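/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * interface): each MONITOR_* control word is built by OR-ing the
 * architecturally reserved bits with the feature bits Xen wants
 * enabled.  This mirrors, at runtime, the selection that the
 * compile-time #if block below performs for the VM-exit controls.
 */
static inline u32 example_vm_exit_controls(int ia32e_mode)
{
    /* Start from the reserved bits plus the always-wanted features. */
    u32 controls = _MONITOR_VM_EXIT_CONTROLS;

    /* 64-bit hosts additionally set the IA-32e "host address-space
       size" control. */
    if ( ia32e_mode )
        controls |= MONITOR_VM_EXIT_CONTROLS_IA32E_MODE;

    return controls;
}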
#if defined (__x86_64__)
#define MONITOR_CPU_BASED_EXEC_CONTROLS                 \
    (                                                   \
    _MONITOR_CPU_BASED_EXEC_CONTROLS |                  \
    MONITOR_CPU_BASED_EXEC_CONTROLS_IA32E_MODE          \
    )
#define MONITOR_VM_EXIT_CONTROLS                        \
    (                                                   \
    _MONITOR_VM_EXIT_CONTROLS |                         \
    MONITOR_VM_EXIT_CONTROLS_IA32E_MODE                 \
    )
#else
#define MONITOR_CPU_BASED_EXEC_CONTROLS                 \
    _MONITOR_CPU_BASED_EXEC_CONTROLS

#define MONITOR_VM_EXIT_CONTROLS                        \
    _MONITOR_VM_EXIT_CONTROLS
#endif

#define VM_ENTRY_CONTROLS_RESERVED_VALUE 0x000011ff
#define VM_ENTRY_CONTROLS_IA32E_MODE     0x00000200
#define MONITOR_VM_ENTRY_CONTROLS        VM_ENTRY_CONTROLS_RESERVED_VALUE

/*
 * Exit Reasons
 */
#define VMX_EXIT_REASONS_FAILED_VMENTRY 0x80000000

#define EXIT_REASON_EXCEPTION_NMI       0
#define EXIT_REASON_EXTERNAL_INTERRUPT  1

#define EXIT_REASON_PENDING_INTERRUPT   7

#define EXIT_REASON_TASK_SWITCH         9
#define EXIT_REASON_CPUID               10
#define EXIT_REASON_HLT                 12
#define EXIT_REASON_INVLPG              14
#define EXIT_REASON_RDPMC               15
#define EXIT_REASON_RDTSC               16
#define EXIT_REASON_VMCALL              18

#define EXIT_REASON_CR_ACCESS           28
#define EXIT_REASON_DR_ACCESS           29
#define EXIT_REASON_IO_INSTRUCTION      30
#define EXIT_REASON_MSR_READ            31
#define EXIT_REASON_MSR_WRITE           32
#define EXIT_REASON_MWAIT_INSTRUCTION   36

/*
 * Interruption-information format
 */
#define INTR_INFO_VECTOR_MASK           0xff            /* 7:0 */
#define INTR_INFO_INTR_TYPE_MASK        0x700           /* 10:8 */
#define INTR_INFO_DELIEVER_CODE_MASK    0x800           /* 11 */
#define INTR_INFO_VALID_MASK            0x80000000      /* 31 */

#define INTR_TYPE_EXT_INTR              (0 << 8)        /* external interrupt */
#define INTR_TYPE_EXCEPTION             (3 << 8)        /* processor exception */

/*
 * Exit Qualifications for MOV for Control Register Access
 */
#define CONTROL_REG_ACCESS_NUM          0x7     /* 2:0, number of control register */
#define CONTROL_REG_ACCESS_TYPE         0x30    /* 5:4, access type */
#define TYPE_MOV_TO_CR                  (0 << 4)
#define TYPE_MOV_FROM_CR                (1 << 4)
#define TYPE_CLTS                       (2 << 4)
#define TYPE_LMSW                       (3 << 4)
#define CONTROL_REG_ACCESS_REG          0xf00   /* 11:8, general purpose register */
#define LMSW_SOURCE_DATA                (0xFFFF << 16)  /* 31:16, LMSW source data */
#define REG_EAX                         (0 << 8)
#define REG_ECX                         (1 << 8)
#define REG_EDX                         (2 << 8)
#define REG_EBX                         (3 << 8)
#define REG_ESP                         (4 << 8)
#define REG_EBP                         (5 << 8)
#define REG_ESI                         (6 << 8)
#define REG_EDI                         (7 << 8)
#define REG_R8                          (8 << 8)
#define REG_R9                          (9 << 8)
#define REG_R10                         (10 << 8)
#define REG_R11                         (11 << 8)
#define REG_R12                         (12 << 8)
#define REG_R13                         (13 << 8)
#define REG_R14                         (14 << 8)
#define REG_R15                         (15 << 8)

/*
 * Exit Qualifications for MOV for Debug Register Access
 */
#define DEBUG_REG_ACCESS_NUM            0x7     /* 2:0, number of debug register */
#define DEBUG_REG_ACCESS_TYPE           0x10    /* 4, direction of access */
#define TYPE_MOV_TO_DR                  (0 << 4)
#define TYPE_MOV_FROM_DR                (1 << 4)
#define DEBUG_REG_ACCESS_REG            0xf00   /* 11:8, general purpose register */
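/*
 * Illustrative sketch (hypothetical helper, not in the original header):
 * how the CONTROL_REG_ACCESS_* masks above decode the exit qualification
 * reported with EXIT_REASON_CR_ACCESS.  For a MOV to/from CR, the
 * qualification encodes both the control register touched and the
 * general-purpose register involved.
 */
static inline int example_decode_cr_access(unsigned long qualification,
                                           int *cr, int *gp_reg)
{
    *cr     = qualification & CONTROL_REG_ACCESS_NUM;           /* bits 2:0  */
    *gp_reg = (qualification & CONTROL_REG_ACCESS_REG) >> 8;    /* bits 11:8 */

    /* Return the access type (MOV to/from CR, CLTS, LMSW) so the
       caller can dispatch on it. */
    return qualification & CONTROL_REG_ACCESS_TYPE;             /* bits 5:4  */
}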
#define EXCEPTION_BITMAP_DE     (1 << 0)        /* Divide Error */
#define EXCEPTION_BITMAP_DB     (1 << 1)        /* Debug */
#define EXCEPTION_BITMAP_NMI    (1 << 2)        /* NMI */
#define EXCEPTION_BITMAP_BP     (1 << 3)        /* Breakpoint */
#define EXCEPTION_BITMAP_OF     (1 << 4)        /* Overflow */
#define EXCEPTION_BITMAP_BR     (1 << 5)        /* BOUND Range Exceeded */
#define EXCEPTION_BITMAP_UD     (1 << 6)        /* Invalid Opcode */
#define EXCEPTION_BITMAP_NM     (1 << 7)        /* Device Not Available */
#define EXCEPTION_BITMAP_DF     (1 << 8)        /* Double Fault */
/* reserved */
#define EXCEPTION_BITMAP_TS     (1 << 10)       /* Invalid TSS */
#define EXCEPTION_BITMAP_NP     (1 << 11)       /* Segment Not Present */
#define EXCEPTION_BITMAP_SS     (1 << 12)       /* Stack-Segment Fault */
#define EXCEPTION_BITMAP_GP     (1 << 13)       /* General Protection */
#define EXCEPTION_BITMAP_PG     (1 << 14)       /* Page Fault */
#define EXCEPTION_BITMAP_MF     (1 << 16)       /* x87 FPU Floating-Point Error (Math Fault) */
#define EXCEPTION_BITMAP_AC     (1 << 17)       /* Alignment Check */
#define EXCEPTION_BITMAP_MC     (1 << 18)       /* Machine Check */
#define EXCEPTION_BITMAP_XF     (1 << 19)       /* SIMD Floating-Point Exception */

/* Pending Debug exceptions */

#define PENDING_DEBUG_EXC_BP    (1 << 12)       /* breakpoint */
#define PENDING_DEBUG_EXC_BS    (1 << 14)       /* single step */

#ifdef XEN_DEBUGGER
#define MONITOR_DEFAULT_EXCEPTION_BITMAP        \
    ( EXCEPTION_BITMAP_PG |                     \
      EXCEPTION_BITMAP_DB |                     \
      EXCEPTION_BITMAP_BP |                     \
      EXCEPTION_BITMAP_GP )
#else
#define MONITOR_DEFAULT_EXCEPTION_BITMAP        \
    ( EXCEPTION_BITMAP_PG |                     \
      EXCEPTION_BITMAP_GP )
#endif

/* These bits in CR4 are owned by the host */
#ifdef __i386__
#define VMX_CR4_HOST_MASK (X86_CR4_VMXE)
#else
#define VMX_CR4_HOST_MASK (X86_CR4_VMXE | X86_CR4_PAE)
#endif

#define VMCALL_OPCODE   ".byte 0x0f,0x01,0xc1\n"
#define VMCLEAR_OPCODE  ".byte 0x66,0x0f,0xc7\n"        /* reg/opcode: /6 */
#define VMLAUNCH_OPCODE ".byte 0x0f,0x01,0xc2\n"
#define VMPTRLD_OPCODE  ".byte 0x0f,0xc7\n"             /* reg/opcode: /6 */
#define VMPTRST_OPCODE  ".byte 0x0f,0xc7\n"             /* reg/opcode: /7 */
#define VMREAD_OPCODE   ".byte 0x0f,0x78\n"
#define VMRESUME_OPCODE ".byte 0x0f,0x01,0xc3\n"
#define VMWRITE_OPCODE  ".byte 0x0f,0x79\n"
#define VMXOFF_OPCODE   ".byte 0x0f,0x01,0xc4\n"
#define VMXON_OPCODE    ".byte 0xf3,0x0f,0xc7\n"

#define MODRM_EAX_06    ".byte 0x30\n"  /* [EAX], with reg/opcode: /6 */
#define MODRM_EAX_07    ".byte 0x38\n"  /* [EAX], with reg/opcode: /7 */
#define MODRM_EAX_ECX   ".byte 0xc1\n"  /* EAX, ECX (register operands) */

/* Load the VMCS at physical address *addr as the current VMCS. */
static inline int __vmptrld (u64 addr)
{
    unsigned long eflags;
    __asm__ __volatile__ ( VMPTRLD_OPCODE
                           MODRM_EAX_06
                           :
                           : "a" (&addr)
                           : "memory");

    /* VMfail is reported through ZF or CF in EFLAGS. */
    __save_flags(eflags);
    if (eflags & X86_EFLAGS_ZF || eflags & X86_EFLAGS_CF)
        return -1;
    return 0;
}
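/*
 * Usage sketch (hypothetical, not part of the original header): making a
 * VMCS current on this CPU before its fields can be read or written.
 * "vmcs_phys" stands in for the machine address of a VMCS region
 * allocated and initialised elsewhere.
 */
static inline int example_activate_vmcs(u64 vmcs_phys)
{
    /* A non-zero return means the processor reported VMfail. */
    if (__vmptrld(vmcs_phys))
        return -1;
    return 0;
}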
/* Store the current-VMCS pointer to *addr. */
static inline void __vmptrst (u64 addr)
{
    __asm__ __volatile__ ( VMPTRST_OPCODE
                           MODRM_EAX_07
                           :
                           : "a" (&addr)
                           : "memory");
}

/* Flush the VMCS at physical address *addr to memory and mark it inactive. */
static inline int __vmpclear (u64 addr)
{
    unsigned long eflags;

    __asm__ __volatile__ ( VMCLEAR_OPCODE
                           MODRM_EAX_06
                           :
                           : "a" (&addr)
                           : "memory");
    __save_flags(eflags);
    if (eflags & X86_EFLAGS_ZF || eflags & X86_EFLAGS_CF)
        return -1;
    return 0;
}

#define __vmread(x, ptr) ___vmread((x), (ptr), sizeof(*(ptr)))

static always_inline int ___vmread (const unsigned long field, void *ptr, const int size)
{
    unsigned long eflags;
    unsigned long ecx = 0;

    __asm__ __volatile__ ( VMREAD_OPCODE
                           MODRM_EAX_ECX
                           : "=c" (ecx)
                           : "a" (field)
                           : "memory");

    /* Store only as many bytes as the caller's destination can hold. */
    switch (size) {
    case 1:
        *((u8 *) (ptr)) = ecx;
        break;
    case 2:
        *((u16 *) (ptr)) = ecx;
        break;
    case 4:
        *((u32 *) (ptr)) = ecx;
        break;
    case 8:
        *((u64 *) (ptr)) = ecx;
        break;
    default:
        domain_crash_synchronous();
        break;
    }

    __save_flags(eflags);
    if (eflags & X86_EFLAGS_ZF || eflags & X86_EFLAGS_CF)
        return -1;
    return 0;
}

static inline int __vmwrite (unsigned long field, unsigned long value)
{
    unsigned long eflags;

    __asm__ __volatile__ ( VMWRITE_OPCODE
                           MODRM_EAX_ECX
                           :
                           : "a" (field) , "c" (value)
                           : "memory");
    __save_flags(eflags);
    if (eflags & X86_EFLAGS_ZF || eflags & X86_EFLAGS_CF)
        return -1;
    return 0;
}

/* Read-modify-write helpers for individual bits of a VMCS field. */
static inline int __vm_set_bit(unsigned long field, unsigned long mask)
{
    unsigned long tmp;
    int err = 0;

    err |= __vmread(field, &tmp);
    tmp |= mask;
    err |= __vmwrite(field, tmp);

    return err;
}

static inline int __vm_clear_bit(unsigned long field, unsigned long mask)
{
    unsigned long tmp;
    int err = 0;

    err |= __vmread(field, &tmp);
    tmp &= ~mask;
    err |= __vmwrite(field, tmp);

    return err;
}

static inline void __vmxoff (void)
{
    __asm__ __volatile__ ( VMXOFF_OPCODE
                           ::: "memory");
}

static inline int __vmxon (u64 addr)
{
    unsigned long eflags;

    __asm__ __volatile__ ( VMXON_OPCODE
                           MODRM_EAX_06
                           :
                           : "a" (&addr)
                           : "memory");
    __save_flags(eflags);
    if (eflags & X86_EFLAGS_ZF || eflags & X86_EFLAGS_CF)
        return -1;
    return 0;
}
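/*
 * Usage sketch (hypothetical helper, not in the original header): the
 * sized __vmread() macro dispatches on sizeof(*ptr), so the destination
 * type chooses how many bytes of the VMCS field are stored, and error
 * returns from the intrinsics above are OR-ed together in the same style
 * as __vm_set_bit().  Here: start intercepting #DB and stop intercepting
 * #BP in the current VMCS.
 */
static inline int example_trap_on_debug_faults(void)
{
    int err = 0;

    /* Intercept debug exceptions by setting their bit ... */
    err |= __vm_set_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_DB);

    /* ... and let the guest handle breakpoints itself. */
    err |= __vm_clear_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_BP);

    return err;
}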
/* Make sure that Xen intercepts any FP accesses from current */
static inline void vmx_stts(void)
{
    unsigned long cr0;

    __vmread(GUEST_CR0, &cr0);
    if (!(cr0 & X86_CR0_TS))
        __vmwrite(GUEST_CR0, cr0 | X86_CR0_TS);

    __vmread(CR0_READ_SHADOW, &cr0);
    if (!(cr0 & X86_CR0_TS))
        __vm_set_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_NM);
}

/* Works only for v == current */
static inline int vmx_paging_enabled(struct vcpu *v)
{
    unsigned long cr0;

    __vmread(CR0_READ_SHADOW, &cr0);
    return (cr0 & X86_CR0_PE) && (cr0 & X86_CR0_PG);
}

#define VMX_INVALID_ERROR_CODE  -1

static inline int __vmx_inject_exception(struct vcpu *v, int trap, int type,
                                         int error_code)
{
    unsigned long intr_fields;

    /* Reflect it back into the guest */
    intr_fields = (INTR_INFO_VALID_MASK | type | trap);
    if (error_code != VMX_INVALID_ERROR_CODE) {
        __vmwrite(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
        intr_fields |= INTR_INFO_DELIEVER_CODE_MASK;
    }

    __vmwrite(VM_ENTRY_INTR_INFO_FIELD, intr_fields);
    return 0;
}

static inline int vmx_inject_exception(struct vcpu *v, int trap, int error_code)
{
    return __vmx_inject_exception(v, trap, INTR_TYPE_EXCEPTION, error_code);
}

static inline int vmx_inject_extint(struct vcpu *v, int trap, int error_code)
{
    __vmx_inject_exception(v, trap, INTR_TYPE_EXT_INTR, error_code);
    __vmwrite(GUEST_INTERRUPTIBILITY_INFO, 0);

    return 0;
}

static inline int vmx_reflect_exception(struct vcpu *v)
{
    int error_code, vector;

    __vmread(VM_EXIT_INTR_INFO, &vector);
    if (vector & INTR_INFO_DELIEVER_CODE_MASK)
        __vmread(VM_EXIT_INTR_ERROR_CODE, &error_code);
    else
        error_code = VMX_INVALID_ERROR_CODE;
    vector &= 0xff;

#ifndef NDEBUG
    {
        unsigned long eip;

        __vmread(GUEST_RIP, &eip);
        VMX_DBG_LOG(DBG_LEVEL_1,
                    "vmx_reflect_exception: eip = %lx, error_code = %x",
                    eip, error_code);
    }
#endif /* NDEBUG */

    vmx_inject_exception(v, vector, error_code);
    return 0;
}

static inline shared_iopage_t *get_sp(struct domain *d)
{
    return (shared_iopage_t *) d->arch.vmx_platform.shared_page_va;
}

static inline vcpu_iodata_t *get_vio(struct domain *d, unsigned long cpu)
{
    return &get_sp(d)->vcpu_iodata[cpu];
}

static inline int iopacket_port(struct domain *d)
{
    return get_sp(d)->sp_global.eport;
}

/* Prototypes */
void load_cpu_user_regs(struct cpu_user_regs *regs);
void store_cpu_user_regs(struct cpu_user_regs *regs);

enum { VMX_COPY_IN = 0, VMX_COPY_OUT };
int vmx_copy(void *buf, unsigned long laddr, int size, int dir);

#endif /* __ASM_X86_VMX_H__ */