xen-vtx-unstable

annotate xen/include/asm-x86/vmx.h @ 4999:9ad448e1dadd

bitkeeper revision 1.1459 (428c4c45ObcqPNCq2Ebb3M0y4qnxog)

[PATCH] vmx-gdbserver-sstep.patch

This patch enables single stepping a VMX domain using the gdbserver.

Signed-Off-By: Leendert van Doorn <leendert@watson.ibm.com>
Signed-off-by: Arun Sharma <arun.sharma@intel.com>
author arun.sharma@intel.com[kaf24]
date Thu May 19 08:20:21 2005 +0000 (2005-05-19)
parents d16ae85cb89e
children a825a76d6b0f
rev   line source
iap10@3290 1 /*
iap10@3290 2 * vmx.h: VMX Architecture related definitions
iap10@3290 3 * Copyright (c) 2004, Intel Corporation.
iap10@3290 4 *
iap10@3290 5 * This program is free software; you can redistribute it and/or modify it
iap10@3290 6 * under the terms and conditions of the GNU General Public License,
iap10@3290 7 * version 2, as published by the Free Software Foundation.
iap10@3290 8 *
iap10@3290 9 * This program is distributed in the hope it will be useful, but WITHOUT
iap10@3290 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
iap10@3290 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
iap10@3290 12 * more details.
iap10@3290 13 *
iap10@3290 14 * You should have received a copy of the GNU General Public License along with
iap10@3290 15 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
iap10@3290 16 * Place - Suite 330, Boston, MA 02111-1307 USA.
iap10@3290 17 *
iap10@3290 18 */
iap10@3290 19 #ifndef __ASM_X86_VMX_H__
iap10@3290 20 #define __ASM_X86_VMX_H__
iap10@3290 21
iap10@3290 22 #include <xen/sched.h>
iap10@3290 23 #include <asm/types.h>
iap10@3290 24 #include <asm/regs.h>
iap10@3290 25 #include <asm/processor.h>
iap10@3290 26 #include <asm/vmx_vmcs.h>
cl349@4856 27 #include <asm/i387.h>
iap10@3290 28
/*
 * Assembly/arch entry points for VMX world switch (defined elsewhere).
 * NOTE(review): vmx_asm_vmexit_handler takes struct cpu_user_regs by
 * VALUE, which is unusual for a regs argument -- confirm against the
 * assembly caller's stack layout.
 */
extern void vmx_asm_vmexit_handler(struct cpu_user_regs);
extern void vmx_asm_do_resume(void);
extern void vmx_asm_do_launch(void);
/* Presumably injects pending guest interrupts on VM entry -- see definition. */
extern void vmx_intr_assist(struct exec_domain *d);

/* Launch/resume hooks invoked for a VMX exec domain on context switch. */
extern void arch_vmx_do_launch(struct exec_domain *);
extern void arch_vmx_do_resume(struct exec_domain *);

extern int vmcs_size;        /* size of a VMCS region; set up at VMX init (defined elsewhere) */
extern unsigned int cpu_rev; /* CPU's VMCS revision identifier (defined elsewhere) */
iap10@3290 39
/*
 * Need fill bits for SENTER
 */

/*
 * Hard-coded default values for the VMCS pin-based/CPU-based execution
 * controls and the VM-exit/VM-entry controls programmed by the monitor.
 * NOTE(review): these bit patterns should be validated against the
 * per-CPU MSR_IA32_VMX_*_CTLS allowed-settings MSRs -- confirm.
 */
#define MONITOR_PIN_BASED_EXEC_CONTROLS 0x0000001f
#define MONITOR_CPU_BASED_EXEC_CONTROLS 0x0581e7f2
#define MONITOR_VM_EXIT_CONTROLS 0x0003edff
#define MONITOR_VM_ENTRY_CONTROLS 0x000011ff

/*
 * Exit Reasons
 */
#define VMX_EXIT_REASONS_FAILED_VMENTRY 0x80000000 /* bit 31: VM-entry failure */

/* Basic exit-reason numbers from the VM-exit reason VMCS field. */
#define EXIT_REASON_EXCEPTION_NMI 0
#define EXIT_REASON_EXTERNAL_INTERRUPT 1

#define EXIT_REASON_PENDING_INTERRUPT 7

#define EXIT_REASON_TASK_SWITCH 9
#define EXIT_REASON_CPUID 10
#define EXIT_REASON_HLT 12
#define EXIT_REASON_INVLPG 14
#define EXIT_REASON_RDPMC 15
#define EXIT_REASON_RDTSC 16
#define EXIT_REASON_VMCALL 18

#define EXIT_REASON_CR_ACCESS 28
#define EXIT_REASON_DR_ACCESS 29
#define EXIT_REASON_IO_INSTRUCTION 30
#define EXIT_REASON_MSR_READ 31
#define EXIT_REASON_MSR_WRITE 32
#define EXIT_REASON_MWAIT_INSTRUCTION 36
iap10@3290 73
/*
 * Interruption-information format (VM-entry/VM-exit interruption info fields)
 */
#define INTR_INFO_VECTOR_MASK 0xff /* 7:0 */
#define INTR_INFO_INTR_TYPE_MASK 0x700 /* 10:8 */
/* NOTE(review): "DELIEVER" is a misspelling of "deliver", kept because the
 * identifier is referenced elsewhere in the tree. */
#define INTR_INFO_DELIEVER_CODE_MASK 0x800 /* 11 */
#define INTR_INFO_VALID_MASK 0x80000000 /* 31 */

/* Interruption-type values for bits 10:8 of the info field. */
#define INTR_TYPE_EXT_INTR (0 << 8) /* external interrupt */
#define INTR_TYPE_EXCEPTION (3 << 8) /* processor exception */

/*
 * Exit Qualifications for MOV for Control Register Access
 */
#define CONTROL_REG_ACCESS_NUM 0x7 /* 2:0, number of control register */
#define CONTROL_REG_ACCESS_TYPE 0x30 /* 5:4, access type */
#define TYPE_MOV_TO_CR (0 << 4)
#define TYPE_MOV_FROM_CR (1 << 4)
#define TYPE_CLTS (2 << 4)
#define TYPE_LMSW (3 << 4)
#define CONTROL_REG_ACCESS_REG 0x700 /* 10:8, general purpose register */
#define REG_EAX (0 << 8)
#define REG_ECX (1 << 8)
#define REG_EDX (2 << 8)
#define REG_EBX (3 << 8)
#define REG_ESP (4 << 8)
#define REG_EBP (5 << 8)
#define REG_ESI (6 << 8)
#define REG_EDI (7 << 8)
#define LMSW_SOURCE_DATA (0xFFFF << 16) /* 31:16, lmsw source operand */
iap10@3290 104
/*
 * Exit Qualifications for MOV for Debug Register Access
 */
#define DEBUG_REG_ACCESS_NUM 0x7 /* 2:0, number of debug register */
#define DEBUG_REG_ACCESS_TYPE 0x10 /* 4, direction of access */
#define TYPE_MOV_TO_DR (0 << 4)
#define TYPE_MOV_FROM_DR (1 << 4)
#define DEBUG_REG_ACCESS_REG 0x700 /* 11:8, general purpose register */

/*
 * Exception-bitmap bits: bit N intercepts exception vector N and causes
 * a VM exit instead of delivery to the guest.
 */
#define EXCEPTION_BITMAP_DE (1 << 0) /* Divide Error */
#define EXCEPTION_BITMAP_DB (1 << 1) /* Debug */
#define EXCEPTION_BITMAP_NMI (1 << 2) /* NMI */
#define EXCEPTION_BITMAP_BP (1 << 3) /* Breakpoint */
#define EXCEPTION_BITMAP_OF (1 << 4) /* Overflow */
#define EXCEPTION_BITMAP_BR (1 << 5) /* BOUND Range Exceeded */
#define EXCEPTION_BITMAP_UD (1 << 6) /* Invalid Opcode */
#define EXCEPTION_BITMAP_NM (1 << 7) /* Device Not Available */
#define EXCEPTION_BITMAP_DF (1 << 8) /* Double Fault */
/* reserved: vector 9 (coprocessor segment overrun) */
#define EXCEPTION_BITMAP_TS (1 << 10) /* Invalid TSS */
#define EXCEPTION_BITMAP_NP (1 << 11) /* Segment Not Present */
#define EXCEPTION_BITMAP_SS (1 << 12) /* Stack-Segment Fault */
#define EXCEPTION_BITMAP_GP (1 << 13) /* General Protection */
#define EXCEPTION_BITMAP_PG (1 << 14) /* Page Fault */
#define EXCEPTION_BITMAP_MF (1 << 16) /* x87 FPU Floating-Point Error (Math Fault) */
#define EXCEPTION_BITMAP_AC (1 << 17) /* Alignment Check */
#define EXCEPTION_BITMAP_MC (1 << 18) /* Machine Check */
#define EXCEPTION_BITMAP_XF (1 << 19) /* SIMD Floating-Point Exception */

/* Bits in the guest pending-debug-exceptions VMCS field. */

#define PENDING_DEBUG_EXC_BP (1 << 12) /* enabled breakpoint */
#define PENDING_DEBUG_EXC_BS (1 << 14) /* Single step */
arun@4999 138
/*
 * Exceptions intercepted by default: page faults and general-protection
 * faults always; debugger builds additionally intercept #DB and #BP so
 * single-step and breakpoints trap to Xen.
 */
#ifdef XEN_DEBUGGER
#define MONITOR_DEFAULT_EXCEPTION_BITMAP        \
    ( EXCEPTION_BITMAP_PG |                     \
      EXCEPTION_BITMAP_DB |                     \
      EXCEPTION_BITMAP_BP |                     \
      EXCEPTION_BITMAP_GP )
#else
#define MONITOR_DEFAULT_EXCEPTION_BITMAP        \
    ( EXCEPTION_BITMAP_PG |                     \
      EXCEPTION_BITMAP_GP )
#endif
iap10@3290 150
/*
 * Raw byte encodings of the VMX instructions, presumably because the
 * assembler in use lacks the VMX mnemonics.  Instructions that take a
 * memory or register operand get their ModRM byte appended separately
 * (see the MODRM_* macros below).
 */
#define VMCALL_OPCODE ".byte 0x0f,0x01,0xc1\n"
#define VMCLEAR_OPCODE ".byte 0x66,0x0f,0xc7\n" /* reg/opcode: /6 */
#define VMLAUNCH_OPCODE ".byte 0x0f,0x01,0xc2\n"
#define VMPTRLD_OPCODE ".byte 0x0f,0xc7\n" /* reg/opcode: /6 */
#define VMPTRST_OPCODE ".byte 0x0f,0xc7\n" /* reg/opcode: /7 */
#define VMREAD_OPCODE ".byte 0x0f,0x78\n"
#define VMRESUME_OPCODE ".byte 0x0f,0x01,0xc3\n"
#define VMWRITE_OPCODE ".byte 0x0f,0x79\n"
#define VMXOFF_OPCODE ".byte 0x0f,0x01,0xc4\n"
#define VMXON_OPCODE ".byte 0xf3,0x0f,0xc7\n"

/* ModRM bytes: 0x30/0x38 = memory operand [EAX] with /6 or /7;
 * 0xc1 = register-direct (mod=11) with reg=EAX, rm=ECX. */
#define MODRM_EAX_06 ".byte 0x30\n" /* [EAX], with reg/opcode: /6 */
#define MODRM_EAX_07 ".byte 0x38\n" /* [EAX], with reg/opcode: /7 */
#define MODRM_EAX_ECX ".byte 0xc1\n" /* EAX, ECX (register-direct) */
iap10@3290 165
iap10@3290 166 static inline int __vmptrld (u64 addr)
iap10@3290 167 {
iap10@3290 168 unsigned long eflags;
iap10@3290 169 __asm__ __volatile__ ( VMPTRLD_OPCODE
iap10@3290 170 MODRM_EAX_06
iap10@3290 171 :
iap10@3290 172 : "a" (&addr)
iap10@3290 173 : "memory");
iap10@3290 174
iap10@3290 175 __save_flags(eflags);
iap10@3290 176 if (eflags & X86_EFLAGS_ZF || eflags & X86_EFLAGS_CF)
iap10@3290 177 return -1;
iap10@3290 178 return 0;
iap10@3290 179 }
iap10@3290 180
/*
 * Store the current-VMCS pointer (VMPTRST [EAX]).
 *
 * NOTE(review): 'addr' is passed BY VALUE, so the VMPTRST writes into the
 * callee's local copy, which is discarded on return -- the caller never
 * sees the stored pointer.  Confirm intended usage at call sites; a
 * 'u64 *' parameter would be the obvious fix but changes the interface.
 */
static inline void __vmptrst (u64 addr)
{
    __asm__ __volatile__ ( VMPTRST_OPCODE
                           MODRM_EAX_07
                           :
                           : "a" (&addr)
                           : "memory");
}
iap10@3290 189
iap10@3290 190 static inline int __vmpclear (u64 addr)
iap10@3290 191 {
iap10@3290 192 unsigned long eflags;
iap10@3290 193
iap10@3290 194 __asm__ __volatile__ ( VMCLEAR_OPCODE
iap10@3290 195 MODRM_EAX_06
iap10@3290 196 :
iap10@3290 197 : "a" (&addr)
iap10@3290 198 : "memory");
iap10@3290 199 __save_flags(eflags);
iap10@3290 200 if (eflags & X86_EFLAGS_ZF || eflags & X86_EFLAGS_CF)
iap10@3290 201 return -1;
iap10@3290 202 return 0;
iap10@3290 203 }
iap10@3290 204
arun@4586 205 static inline int __vmread (unsigned long field, void *value)
iap10@3290 206 {
iap10@3290 207 unsigned long eflags;
iap10@3290 208 unsigned long ecx = 0;
iap10@3290 209
iap10@3290 210 __asm__ __volatile__ ( VMREAD_OPCODE
iap10@3290 211 MODRM_EAX_ECX
iap10@3290 212 : "=c" (ecx)
iap10@3290 213 : "a" (field)
iap10@3290 214 : "memory");
iap10@3290 215
iap10@3290 216 *((long *) value) = ecx;
iap10@3290 217
iap10@3290 218 __save_flags(eflags);
iap10@3290 219 if (eflags & X86_EFLAGS_ZF || eflags & X86_EFLAGS_CF)
iap10@3290 220 return -1;
iap10@3290 221 return 0;
iap10@3290 222 }
iap10@3290 223
arun@4586 224 static inline int __vmwrite (unsigned long field, unsigned long value)
iap10@3290 225 {
iap10@3290 226 unsigned long eflags;
iap10@3290 227
iap10@3290 228 __asm__ __volatile__ ( VMWRITE_OPCODE
iap10@3290 229 MODRM_EAX_ECX
iap10@3290 230 :
iap10@3290 231 : "a" (field) , "c" (value)
iap10@3290 232 : "memory");
iap10@3290 233 __save_flags(eflags);
iap10@3290 234 if (eflags & X86_EFLAGS_ZF || eflags & X86_EFLAGS_CF)
iap10@3290 235 return -1;
iap10@3290 236 return 0;
iap10@3290 237 }
iap10@3290 238
/*
 * Read-modify-write helper: OR 'mask' into VMCS field 'field'.
 * Returns 0 if both the read and the write succeeded, non-zero otherwise.
 */
static inline int __vm_set_bit(unsigned long field, unsigned long mask)
{
    unsigned long val;
    int rc;

    rc = __vmread(field, &val);
    rc |= __vmwrite(field, val | mask);

    return rc;
}
arun@4999 250
/*
 * Read-modify-write helper: clear the bits of 'mask' in VMCS field 'field'.
 * Returns 0 if both the read and the write succeeded, non-zero otherwise.
 */
static inline int __vm_clear_bit(unsigned long field, unsigned long mask)
{
    unsigned long val;
    int rc;

    rc = __vmread(field, &val);
    rc |= __vmwrite(field, val & ~mask);

    return rc;
}
arun@4999 262
/* Leave VMX operation on this CPU (VMXOFF).  No result is checked. */
static inline void __vmxoff (void)
{
    __asm__ __volatile__ ( VMXOFF_OPCODE
                           ::: "memory");
}
iap10@3290 268
iap10@3290 269 static inline int __vmxon (u64 addr)
iap10@3290 270 {
iap10@3290 271 unsigned long eflags;
iap10@3290 272
iap10@3290 273 __asm__ __volatile__ ( VMXON_OPCODE
iap10@3290 274 MODRM_EAX_06
iap10@3290 275 :
iap10@3290 276 : "a" (&addr)
iap10@3290 277 : "memory");
iap10@3290 278 __save_flags(eflags);
iap10@3290 279 if (eflags & X86_EFLAGS_ZF || eflags & X86_EFLAGS_CF)
iap10@3290 280 return -1;
iap10@3290 281 return 0;
iap10@3290 282 }
arun@3910 283
cl349@4856 284 /* Make sure that xen intercepts any FP accesses from current */
cl349@4856 285 static inline void vmx_stts()
cl349@4856 286 {
cl349@4856 287 unsigned long cr0;
cl349@4856 288
cl349@4856 289 __vmread(GUEST_CR0, &cr0);
cl349@4856 290 if (!(cr0 & X86_CR0_TS))
cl349@4856 291 __vmwrite(GUEST_CR0, cr0 | X86_CR0_TS);
cl349@4856 292
cl349@4856 293 __vmread(CR0_READ_SHADOW, &cr0);
cl349@4856 294 if (!(cr0 & X86_CR0_TS))
arun@4999 295 __vm_set_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_NM);
cl349@4856 296 }
cl349@4856 297
iap10@3290 298 #endif /* __ASM_X86_VMX_H__ */