
xen/include/asm-x86/hvm/vmx/vmx.h @ 16987:0d70e01c0012

vmx realmode: Emulate MSR accesses.
Signed-off-by: Nitin A Kamble <nitin.a.kamble@intel.com>
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
Author: Keir Fraser <keir.fraser@citrix.com>
Date:   Thu Jan 31 09:33:26 2008 +0000

/*
 * vmx.h: VMX Architecture related definitions
 * Copyright (c) 2004, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 */
#ifndef __ASM_X86_HVM_VMX_VMX_H__
#define __ASM_X86_HVM_VMX_VMX_H__

#include <xen/sched.h>
#include <asm/types.h>
#include <asm/regs.h>
#include <asm/processor.h>
#include <asm/hvm/vmx/vmcs.h>
#include <asm/i387.h>
#include <asm/hvm/trace.h>

void vmx_asm_vmexit_handler(struct cpu_user_regs);
void vmx_asm_do_vmentry(void);
void vmx_intr_assist(void);
void vmx_do_resume(struct vcpu *);
void set_guest_time(struct vcpu *v, u64 gtime);
void vmx_vlapic_msr_changed(struct vcpu *v);
void vmx_do_no_device_fault(void);
void vmx_cpuid_intercept(
    unsigned int *eax, unsigned int *ebx,
    unsigned int *ecx, unsigned int *edx);
int vmx_msr_read_intercept(struct cpu_user_regs *regs);
int vmx_msr_write_intercept(struct cpu_user_regs *regs);
void vmx_wbinvd_intercept(void);
void vmx_realmode(struct cpu_user_regs *regs);
int vmx_realmode_io_complete(void);

/*
 * Exit Reasons
 */
#define VMX_EXIT_REASONS_FAILED_VMENTRY 0x80000000

#define EXIT_REASON_EXCEPTION_NMI       0
#define EXIT_REASON_EXTERNAL_INTERRUPT  1
#define EXIT_REASON_TRIPLE_FAULT        2
#define EXIT_REASON_INIT                3
#define EXIT_REASON_SIPI                4
#define EXIT_REASON_IO_SMI              5
#define EXIT_REASON_OTHER_SMI           6
#define EXIT_REASON_PENDING_VIRT_INTR   7
#define EXIT_REASON_PENDING_VIRT_NMI    8
#define EXIT_REASON_TASK_SWITCH         9
#define EXIT_REASON_CPUID               10
#define EXIT_REASON_HLT                 12
#define EXIT_REASON_INVD                13
#define EXIT_REASON_INVLPG              14
#define EXIT_REASON_RDPMC               15
#define EXIT_REASON_RDTSC               16
#define EXIT_REASON_RSM                 17
#define EXIT_REASON_VMCALL              18
#define EXIT_REASON_VMCLEAR             19
#define EXIT_REASON_VMLAUNCH            20
#define EXIT_REASON_VMPTRLD             21
#define EXIT_REASON_VMPTRST             22
#define EXIT_REASON_VMREAD              23
#define EXIT_REASON_VMRESUME            24
#define EXIT_REASON_VMWRITE             25
#define EXIT_REASON_VMXOFF              26
#define EXIT_REASON_VMXON               27
#define EXIT_REASON_CR_ACCESS           28
#define EXIT_REASON_DR_ACCESS           29
#define EXIT_REASON_IO_INSTRUCTION      30
#define EXIT_REASON_MSR_READ            31
#define EXIT_REASON_MSR_WRITE           32
#define EXIT_REASON_INVALID_GUEST_STATE 33
#define EXIT_REASON_MSR_LOADING         34
#define EXIT_REASON_MWAIT_INSTRUCTION   36
#define EXIT_REASON_MONITOR_INSTRUCTION 39
#define EXIT_REASON_PAUSE_INSTRUCTION   40
#define EXIT_REASON_MACHINE_CHECK       41
#define EXIT_REASON_TPR_BELOW_THRESHOLD 43
#define EXIT_REASON_APIC_ACCESS         44
#define EXIT_REASON_WBINVD              54
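
/*
 * Illustrative sketch, not part of the original header: splitting a raw
 * VM_EXIT_REASON value read from the VMCS into its failed-VM-entry flag
 * and basic reason code (bits 15:0).  The helper name is hypothetical.
 */
static inline int vmx_basic_exit_reason_example(unsigned long exit_reason)
{
    /* Bit 31 set means the VM entry itself failed. */
    if ( exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY )
        return -1;

    /* Otherwise the low bits hold one of the EXIT_REASON_* codes above. */
    return (int)(exit_reason & 0xffff);
}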

/*
 * Interruption-information format
 */
#define INTR_INFO_VECTOR_MASK           0xff            /* 7:0 */
#define INTR_INFO_INTR_TYPE_MASK        0x700           /* 10:8 */
#define INTR_INFO_DELIVER_CODE_MASK     0x800           /* 11 */
#define INTR_INFO_NMI_UNBLOCKED_BY_IRET 0x1000          /* 12 */
#define INTR_INFO_VALID_MASK            0x80000000      /* 31 */
#define INTR_INFO_RESVD_BITS_MASK       0x7ffff000
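
/*
 * Illustrative sketch, not part of the original header: decoding a raw
 * interruption-information word with the masks above.  The helper name
 * and out-parameters are hypothetical.
 */
static inline int vmx_decode_intr_info_example(
    unsigned long intr_info, unsigned int *vector, unsigned int *type)
{
    if ( !(intr_info & INTR_INFO_VALID_MASK) )
        return 0;                                          /* nothing delivered */

    *vector = intr_info & INTR_INFO_VECTOR_MASK;           /* bits 7:0  */
    *type   = (intr_info & INTR_INFO_INTR_TYPE_MASK) >> 8; /* bits 10:8 */

    /* Bit 11 set means an error code accompanies the event. */
    return ( intr_info & INTR_INFO_DELIVER_CODE_MASK ) ? 2 : 1;
}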

/*
 * Exit Qualifications for MOV for Control Register Access
 */
#define CONTROL_REG_ACCESS_NUM          0xf     /* 3:0, number of control register */
#define CONTROL_REG_ACCESS_TYPE         0x30    /* 5:4, access type */
#define CONTROL_REG_ACCESS_REG          0xf00   /* 10:8, general purpose register */
#define LMSW_SOURCE_DATA                (0xFFFF << 16)  /* 31:16, LMSW source data */
#define REG_EAX                         (0 << 8)
#define REG_ECX                         (1 << 8)
#define REG_EDX                         (2 << 8)
#define REG_EBX                         (3 << 8)
#define REG_ESP                         (4 << 8)
#define REG_EBP                         (5 << 8)
#define REG_ESI                         (6 << 8)
#define REG_EDI                         (7 << 8)
#define REG_R8                          (8 << 8)
#define REG_R9                          (9 << 8)
#define REG_R10                         (10 << 8)
#define REG_R11                         (11 << 8)
#define REG_R12                         (12 << 8)
#define REG_R13                         (13 << 8)
#define REG_R14                         (14 << 8)
#define REG_R15                         (15 << 8)

/*
 * Exit Qualifications for MOV for Debug Register Access
 */
#define DEBUG_REG_ACCESS_NUM            0x7     /* 2:0, number of debug register */
#define DEBUG_REG_ACCESS_TYPE           0x10    /* 4, direction of access */
#define TYPE_MOV_TO_DR                  (0 << 4)
#define TYPE_MOV_FROM_DR                (1 << 4)
#define DEBUG_REG_ACCESS_REG            0xf00   /* 11:8, general purpose register */
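
/*
 * Illustrative sketch, not part of the original header: pulling the fields
 * out of a MOV-CR exit qualification with the masks above (a MOV-DR
 * qualification decodes analogously with the DEBUG_REG_ACCESS_* masks).
 * The helper name and out-parameters are hypothetical.
 */
static inline void vmx_decode_cr_access_example(
    unsigned long qual, unsigned int *cr, unsigned int *type,
    unsigned int *gp_reg)
{
    *cr     = qual & CONTROL_REG_ACCESS_NUM;           /* which CRn */
    *type   = (qual & CONTROL_REG_ACCESS_TYPE) >> 4;   /* 0 MOV-to, 1 MOV-from, 2 CLTS, 3 LMSW */
    *gp_reg = (qual & CONTROL_REG_ACCESS_REG) >> 8;    /* 0..15, cf. REG_* above */
}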

/*
 * Access Rights
 */
#define X86_SEG_AR_SEG_TYPE     0xf        /* 3:0, segment type */
#define X86_SEG_AR_DESC_TYPE    (1u << 4)  /* 4, descriptor type */
#define X86_SEG_AR_DPL          0x60       /* 6:5, descriptor privilege level */
#define X86_SEG_AR_SEG_PRESENT  (1u << 7)  /* 7, segment present */
#define X86_SEG_AR_AVL          (1u << 12) /* 12, available for system software */
#define X86_SEG_AR_CS_LM_ACTIVE (1u << 13) /* 13, long mode active (CS only) */
#define X86_SEG_AR_DEF_OP_SIZE  (1u << 14) /* 14, default operation size */
#define X86_SEG_AR_GRANULARITY  (1u << 15) /* 15, granularity */
#define X86_SEG_AR_SEG_UNUSABLE (1u << 16) /* 16, segment unusable */
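
/*
 * Illustrative sketch, not part of the original header: extracting the DPL
 * and usability of a segment from a raw access-rights word with the masks
 * above.  The helper names are hypothetical.
 */
static inline unsigned int vmx_seg_dpl_example(unsigned int ar)
{
    return (ar & X86_SEG_AR_DPL) >> 5;                 /* bits 6:5 */
}

static inline int vmx_seg_usable_example(unsigned int ar)
{
    return !(ar & X86_SEG_AR_SEG_UNUSABLE) && (ar & X86_SEG_AR_SEG_PRESENT);
}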

#define VMCALL_OPCODE   ".byte 0x0f,0x01,0xc1\n"
#define VMCLEAR_OPCODE  ".byte 0x66,0x0f,0xc7\n"        /* reg/opcode: /6 */
#define VMLAUNCH_OPCODE ".byte 0x0f,0x01,0xc2\n"
#define VMPTRLD_OPCODE  ".byte 0x0f,0xc7\n"             /* reg/opcode: /6 */
#define VMPTRST_OPCODE  ".byte 0x0f,0xc7\n"             /* reg/opcode: /7 */
#define VMREAD_OPCODE   ".byte 0x0f,0x78\n"
#define VMRESUME_OPCODE ".byte 0x0f,0x01,0xc3\n"
#define VMWRITE_OPCODE  ".byte 0x0f,0x79\n"
#define VMXOFF_OPCODE   ".byte 0x0f,0x01,0xc4\n"
#define VMXON_OPCODE    ".byte 0xf3,0x0f,0xc7\n"

#define MODRM_EAX_06    ".byte 0x30\n"  /* [EAX], with reg/opcode: /6 */
#define MODRM_EAX_07    ".byte 0x38\n"  /* [EAX], with reg/opcode: /7 */
#define MODRM_EAX_ECX   ".byte 0xc1\n"  /* EAX, ECX (register operands) */
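
/*
 * Note (ours, not in the original header): the VMX instructions are emitted
 * as raw ".byte" sequences because the assemblers this code targets predate
 * the VMX mnemonics; each MODRM_* byte supplies the ModRM operand encoding
 * that the preceding opcode expects.
 */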

static inline void __vmptrld(u64 addr)
{
    __asm__ __volatile__ ( VMPTRLD_OPCODE
                           MODRM_EAX_06
                           /* CF==1 or ZF==1 --> crash (ud2) */
                           "ja 1f ; ud2 ; 1:\n"
                           :
                           : "a" (&addr)
                           : "memory");
}

static inline void __vmptrst(u64 addr)
{
    __asm__ __volatile__ ( VMPTRST_OPCODE
                           MODRM_EAX_07
                           :
                           : "a" (&addr)
                           : "memory");
}

static inline void __vmpclear(u64 addr)
{
    __asm__ __volatile__ ( VMCLEAR_OPCODE
                           MODRM_EAX_06
                           /* CF==1 or ZF==1 --> crash (ud2) */
                           "ja 1f ; ud2 ; 1:\n"
                           :
                           : "a" (&addr)
                           : "memory");
}

static inline unsigned long __vmread(unsigned long field)
{
    unsigned long ecx;

    __asm__ __volatile__ ( VMREAD_OPCODE
                           MODRM_EAX_ECX
                           /* CF==1 or ZF==1 --> crash (ud2) */
                           "ja 1f ; ud2 ; 1:\n"
                           : "=c" (ecx)
                           : "a" (field)
                           : "memory");

    return ecx;
}

static inline void __vmwrite(unsigned long field, unsigned long value)
{
    __asm__ __volatile__ ( VMWRITE_OPCODE
                           MODRM_EAX_ECX
                           /* CF==1 or ZF==1 --> crash (ud2) */
                           "ja 1f ; ud2 ; 1:\n"
                           :
                           : "a" (field) , "c" (value)
                           : "memory");
}

static inline unsigned long __vmread_safe(unsigned long field, int *error)
{
    unsigned long ecx;

    __asm__ __volatile__ ( VMREAD_OPCODE
                           MODRM_EAX_ECX
                           /* CF==1 or ZF==1 --> rc = -1 */
                           "setna %b0 ; neg %0"
                           : "=q" (*error), "=c" (ecx)
                           : "0" (0), "a" (field)
                           : "memory");

    return ecx;
}
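
/*
 * Illustrative sketch, not part of the original header: unlike __vmread(),
 * which crashes the host on a failed VMREAD, __vmread_safe() reports the
 * failure through its error out-parameter (-1 on failure, 0 on success).
 * GUEST_RIP is a VMCS field from <asm/hvm/vmx/vmcs.h>; the helper name is
 * hypothetical.
 */
static inline unsigned long vmx_read_guest_rip_example(void)
{
    int error;
    unsigned long rip = __vmread_safe(GUEST_RIP, &error);

    return error ? 0 : rip;
}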

static inline void __vm_set_bit(unsigned long field, unsigned int bit)
{
    __vmwrite(field, __vmread(field) | (1UL << bit));
}

static inline void __vm_clear_bit(unsigned long field, unsigned int bit)
{
    __vmwrite(field, __vmread(field) & ~(1UL << bit));
}
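
/*
 * Illustrative sketch, not part of the original header: read-modify-write of
 * a single VMCS bit, e.g. intercepting guest INT3 via the exception bitmap.
 * EXCEPTION_BITMAP is the VMCS field from <asm/hvm/vmx/vmcs.h> and TRAP_int3
 * the vector number from <asm/processor.h>; the helper name is hypothetical.
 */
static inline void vmx_intercept_int3_example(void)
{
    __vm_set_bit(EXCEPTION_BITMAP, TRAP_int3);    /* vmexit on guest #BP */
}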

static inline void __vmxoff(void)
{
    asm volatile (
        VMXOFF_OPCODE
        : : : "memory" );
}

static inline int __vmxon(u64 addr)
{
    int rc;

    asm volatile (
        "1: " VMXON_OPCODE MODRM_EAX_06 "\n"
        "   setna %b0 ; neg %0\n"  /* CF==1 or ZF==1 --> rc = -1 */
        "2:\n"
        ".section .fixup,\"ax\"\n"
        "3: not %0 ; jmp 2b\n"     /* #UD --> rc = -1 */
        ".previous\n"
        ".section __ex_table,\"a\"\n"
        "   "__FIXUP_ALIGN"\n"
        "   "__FIXUP_WORD" 1b,3b\n"
        ".previous\n"
        : "=q" (rc)
        : "0" (0), "a" (&addr)
        : "memory");

    return rc;
}
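
/*
 * Illustrative sketch, not part of the original header: __vmxon() returns 0
 * on success and -1 both when VMXON fails (CF/ZF set) and when it raises
 * #UD (VMX absent or disabled), which the fixup/__ex_table entry above maps
 * to the same -1.  The wrapper and its parameter name are hypothetical.
 */
static inline int vmx_enable_example(u64 vmxon_region_maddr)
{
    if ( __vmxon(vmxon_region_maddr) != 0 )
        return -1;    /* VMX unavailable or the region was rejected */
    return 0;
}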

static inline void __vmx_inject_exception(
    struct vcpu *v, int trap, int type, int error_code)
{
    unsigned long intr_fields;

    /*
     * NB. Callers do not need to worry about clearing STI/MOV-SS blocking:
     *  "If the VM entry is injecting, there is no blocking by STI or by
     *   MOV SS following the VM entry, regardless of the contents of the
     *   interruptibility-state field [in the guest-state area before the
     *   VM entry]", PRM Vol. 3, 22.6.1 (Interruptibility State).
     */

    intr_fields = (INTR_INFO_VALID_MASK | (type<<8) | trap);
    if ( error_code != HVM_DELIVER_NO_ERROR_CODE ) {
        __vmwrite(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
        intr_fields |= INTR_INFO_DELIVER_CODE_MASK;
    }

    __vmwrite(VM_ENTRY_INTR_INFO, intr_fields);

    if ( trap == TRAP_page_fault )
        HVMTRACE_2D(PF_INJECT, v, v->arch.hvm_vcpu.guest_cr[2], error_code);
    else
        HVMTRACE_2D(INJ_EXC, v, trap, error_code);
}

static inline void vmx_inject_hw_exception(
    struct vcpu *v, int trap, int error_code)
{
    __vmx_inject_exception(v, trap, X86_EVENTTYPE_HW_EXCEPTION, error_code);
}

static inline void vmx_inject_extint(struct vcpu *v, int trap)
{
    __vmx_inject_exception(v, trap, X86_EVENTTYPE_EXT_INTR,
                           HVM_DELIVER_NO_ERROR_CODE);
}

static inline void vmx_inject_nmi(struct vcpu *v)
{
    __vmx_inject_exception(v, 2, X86_EVENTTYPE_NMI,
                           HVM_DELIVER_NO_ERROR_CODE);
}
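
/*
 * Illustrative sketch, not part of the original header: injecting #GP(0)
 * into a vcpu with the wrapper above.  TRAP_gp_fault is the vector number
 * from <asm/processor.h>; the helper name is hypothetical.
 */
static inline void vmx_inject_gp0_example(struct vcpu *v)
{
    vmx_inject_hw_exception(v, TRAP_gp_fault, 0);    /* #GP, error code 0 */
}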

#endif /* __ASM_X86_HVM_VMX_VMX_H__ */