debuggers.hg: view xen/include/asm-x86/processor.h @ 22855:1d1eec7e1fb4
xl: Perform minimal validation of virtual disk file while parsing config file

This patch performs some very basic validation of the virtual disk
file passed through the config file. This validation ensures that we
don't go too far with initialization, such as spawning qemu, while
there may be fundamental problems with the configuration.

[ Patch fixed up to work with PHYSTYPE_EMPTY 22808:6ec61438713a -iwj ]

Signed-off-by: Kamala Narasimhan <kamala.narasimhan@citrix.com>
Acked-by: Ian Jackson <ian.jackson@eu.citrix.com>
Signed-off-by: Ian Jackson <ian.jackson@eu.citrix.com>
Committed-by: Ian Jackson <ian.jackson@eu.citrix.com>
author Kamala Narasimhan <kamala.narasimhan@gmail.com>
date Tue Jan 25 18:09:49 2011 +0000 (2011-01-25)
parents e8acb9753ff1
children
line source
/* Portions are: Copyright (c) 1994 Linus Torvalds */

#ifndef __ASM_X86_PROCESSOR_H
#define __ASM_X86_PROCESSOR_H

#ifndef __ASSEMBLY__
#include <xen/config.h>
#include <xen/cache.h>
#include <xen/types.h>
#include <xen/smp.h>
#include <xen/percpu.h>
#include <public/xen.h>
#include <asm/types.h>
#include <asm/cpufeature.h>
#include <asm/desc.h>
#endif

/*
 * CPU vendor IDs
 */
#define X86_VENDOR_INTEL     0
#define X86_VENDOR_CYRIX     1
#define X86_VENDOR_AMD       2
#define X86_VENDOR_UMC       3
#define X86_VENDOR_NEXGEN    4
#define X86_VENDOR_CENTAUR   5
#define X86_VENDOR_RISE      6
#define X86_VENDOR_TRANSMETA 7
#define X86_VENDOR_NSC       8
#define X86_VENDOR_NUM       9
#define X86_VENDOR_UNKNOWN   0xff

/*
 * EFLAGS bits
 */
#define X86_EFLAGS_CF   0x00000001 /* Carry Flag */
#define X86_EFLAGS_PF   0x00000004 /* Parity Flag */
#define X86_EFLAGS_AF   0x00000010 /* Auxiliary carry Flag */
#define X86_EFLAGS_ZF   0x00000040 /* Zero Flag */
#define X86_EFLAGS_SF   0x00000080 /* Sign Flag */
#define X86_EFLAGS_TF   0x00000100 /* Trap Flag */
#define X86_EFLAGS_IF   0x00000200 /* Interrupt Flag */
#define X86_EFLAGS_DF   0x00000400 /* Direction Flag */
#define X86_EFLAGS_OF   0x00000800 /* Overflow Flag */
#define X86_EFLAGS_IOPL 0x00003000 /* IOPL mask */
#define X86_EFLAGS_NT   0x00004000 /* Nested Task */
#define X86_EFLAGS_RF   0x00010000 /* Resume Flag */
#define X86_EFLAGS_VM   0x00020000 /* Virtual Mode */
#define X86_EFLAGS_AC   0x00040000 /* Alignment Check */
#define X86_EFLAGS_VIF  0x00080000 /* Virtual Interrupt Flag */
#define X86_EFLAGS_VIP  0x00100000 /* Virtual Interrupt Pending */
#define X86_EFLAGS_ID   0x00200000 /* CPUID detection flag */
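
#ifndef __ASSEMBLY__
/*
 * Illustrative sketch (not part of this header): testing a saved flags image
 * against the masks above, e.g. from a struct cpu_user_regs captured at trap
 * time. The helper name is a hypothetical example.
 */
static inline int guest_irqs_enabled_sketch(const struct cpu_user_regs *regs)
{
    /* IF set means the interrupted context had interrupts enabled. */
    return !!(regs->eflags & X86_EFLAGS_IF);
}
#endif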

/*
 * Intel CPU flags in CR0
 */
#define X86_CR0_PE 0x00000001 /* Enable Protected Mode    (RW) */
#define X86_CR0_MP 0x00000002 /* Monitor Coprocessor      (RW) */
#define X86_CR0_EM 0x00000004 /* Require FPU Emulation    (RO) */
#define X86_CR0_TS 0x00000008 /* Task Switched            (RW) */
#define X86_CR0_ET 0x00000010 /* Extension type           (RO) */
#define X86_CR0_NE 0x00000020 /* Numeric Error Reporting  (RW) */
#define X86_CR0_WP 0x00010000 /* Supervisor Write Protect (RW) */
#define X86_CR0_AM 0x00040000 /* Alignment Checking       (RW) */
#define X86_CR0_NW 0x20000000 /* Not Write-Through        (RW) */
#define X86_CR0_CD 0x40000000 /* Cache Disable            (RW) */
#define X86_CR0_PG 0x80000000 /* Paging                   (RW) */

/*
 * Intel CPU features in CR4
 */
#define X86_CR4_VME        0x0001  /* enable vm86 extensions */
#define X86_CR4_PVI        0x0002  /* virtual interrupts flag enable */
#define X86_CR4_TSD        0x0004  /* disable time stamp at ipl 3 */
#define X86_CR4_DE         0x0008  /* enable debugging extensions */
#define X86_CR4_PSE        0x0010  /* enable page size extensions */
#define X86_CR4_PAE        0x0020  /* enable physical address extensions */
#define X86_CR4_MCE        0x0040  /* Machine check enable */
#define X86_CR4_PGE        0x0080  /* enable global pages */
#define X86_CR4_PCE        0x0100  /* enable performance counters at ipl 3 */
#define X86_CR4_OSFXSR     0x0200  /* enable fast FPU save and restore */
#define X86_CR4_OSXMMEXCPT 0x0400  /* enable unmasked SSE exceptions */
#define X86_CR4_VMXE       0x2000  /* enable VMX */
#define X86_CR4_SMXE       0x4000  /* enable SMX */
#define X86_CR4_FSGSBASE   0x10000 /* enable {rd,wr}{fs,gs}base */
#define X86_CR4_OSXSAVE    0x40000 /* enable XSAVE/XRSTOR */

/*
 * Trap/fault mnemonics.
 */
#define TRAP_divide_error      0
#define TRAP_debug             1
#define TRAP_nmi               2
#define TRAP_int3              3
#define TRAP_overflow          4
#define TRAP_bounds            5
#define TRAP_invalid_op        6
#define TRAP_no_device         7
#define TRAP_double_fault      8
#define TRAP_copro_seg         9
#define TRAP_invalid_tss      10
#define TRAP_no_segment       11
#define TRAP_stack_error      12
#define TRAP_gp_fault         13
#define TRAP_page_fault       14
#define TRAP_spurious_int     15
#define TRAP_copro_error      16
#define TRAP_alignment_check  17
#define TRAP_machine_check    18
#define TRAP_simd_error       19

/* Set for entry via SYSCALL. Informs return code to use SYSRETQ not IRETQ. */
/* NB. Same as VGCF_in_syscall. No bits in common with any other TRAP_ defn. */
#define TRAP_syscall         256

/* Boolean return code: the reason for a fault has been fixed. */
#define EXCRET_fault_fixed 1

/* 'trap_bounce' flags values */
#define TBF_EXCEPTION          1
#define TBF_EXCEPTION_ERRCODE  2
#define TBF_INTERRUPT          8
#define TBF_FAILSAFE          16

/* 'arch_vcpu' flags values */
#define _TF_kernel_mode        0
#define TF_kernel_mode         (1<<_TF_kernel_mode)

/* #PF error code values. */
#define PFEC_page_present   (1U<<0)
#define PFEC_write_access   (1U<<1)
#define PFEC_user_mode      (1U<<2)
#define PFEC_reserved_bit   (1U<<3)
#define PFEC_insn_fetch     (1U<<4)
#define PFEC_page_paged     (1U<<5)
#define PFEC_page_shared    (1U<<6)
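
#ifndef __ASSEMBLY__
/*
 * Illustrative sketch (not part of this header): decoding a #PF error code
 * with the PFEC_* bits above. A clear PFEC_page_present means the translation
 * was not present; set means a permission violation. The helper name is a
 * hypothetical example.
 */
static inline int pf_is_write_protect_sketch(unsigned int error_code)
{
    /* Present translation plus write access: a write-permission fault. */
    return (error_code & (PFEC_page_present | PFEC_write_access)) ==
           (PFEC_page_present | PFEC_write_access);
}
#endif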

#ifndef __ASSEMBLY__

struct domain;
struct vcpu;

/*
 * Default implementation of macro that returns current
 * instruction pointer ("program counter").
 */
#ifdef __x86_64__
#define current_text_addr() ({                      \
    void *pc;                                       \
    asm ( "leaq 1f(%%rip),%0\n1:" : "=r" (pc) );    \
    pc;                                             \
})
#else
#define current_text_addr() ({                      \
    void *pc;                                       \
    asm ( "movl $1f,%0\n1:" : "=g" (pc) );          \
    pc;                                             \
})
#endif
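
/*
 * Illustrative sketch (not part of this header): current_text_addr() is a
 * statement expression yielding a void *, so it can tag diagnostics with the
 * code address that emitted them. The helper name is a hypothetical example.
 */
static inline void report_here_sketch(void)
{
    printk("reached %p\n", current_text_addr()); /* prints this call site */
}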

struct cpuinfo_x86 {
    __u8 x86;            /* CPU family */
    __u8 x86_vendor;     /* CPU vendor */
    __u8 x86_model;
    __u8 x86_mask;
    int  cpuid_level;    /* Maximum supported CPUID level, -1=no CPUID */
    unsigned int x86_capability[NCAPINTS];
    char x86_vendor_id[16];
    char x86_model_id[64];
    int  x86_cache_size;      /* in KB - valid for CPUs which support this call */
    int  x86_cache_alignment; /* In bytes */
    int  x86_power;
    __u32 x86_max_cores;      /* cpuid returned max cores value */
    __u32 booted_cores;       /* number of cores as seen by OS */
    __u32 x86_num_siblings;   /* cpuid logical cpus per chip value */
    __u32 apicid;
    unsigned short x86_clflush_size;
} __cacheline_aligned;

/*
 * capabilities of CPUs
 */

extern struct cpuinfo_x86 boot_cpu_data;

#ifdef CONFIG_SMP
extern struct cpuinfo_x86 cpu_data[];
#define current_cpu_data cpu_data[smp_processor_id()]
#else
#define cpu_data (&boot_cpu_data)
#define current_cpu_data boot_cpu_data
#endif

extern u64 host_pat;
extern int phys_proc_id[NR_CPUS];
extern int cpu_core_id[NR_CPUS];
extern bool_t opt_cpu_info;

/* Maximum width of physical addresses supported by the hardware */
extern unsigned int paddr_bits;

extern void identify_cpu(struct cpuinfo_x86 *);
extern void setup_clear_cpu_cap(unsigned int);
extern void print_cpu_info(unsigned int cpu);
extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
extern void dodgy_tsc(void);

extern void detect_extended_topology(struct cpuinfo_x86 *c);

#ifdef CONFIG_X86_HT
extern void detect_ht(struct cpuinfo_x86 *c);
#else
static always_inline void detect_ht(struct cpuinfo_x86 *c) {}
#endif

#define cpu_to_core(_cpu)   (cpu_core_id[_cpu])
#define cpu_to_socket(_cpu) (phys_proc_id[_cpu])

/*
 * Generic CPUID function
 * clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx
 * resulting in stale register contents being returned.
 */
#define cpuid(_op,_eax,_ebx,_ecx,_edx)      \
    asm ( "cpuid"                           \
          : "=a" (*(int *)(_eax)),          \
            "=b" (*(int *)(_ebx)),          \
            "=c" (*(int *)(_ecx)),          \
            "=d" (*(int *)(_edx))           \
          : "0" (_op), "2" (0) )

/* Some CPUID calls want 'count' to be placed in ecx */
static inline void cpuid_count(
    int op,
    int count,
    unsigned int *eax,
    unsigned int *ebx,
    unsigned int *ecx,
    unsigned int *edx)
{
    asm ( "cpuid"
          : "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx)
          : "0" (op), "c" (count) );
}

/*
 * CPUID functions returning a single datum
 */
static always_inline unsigned int cpuid_eax(unsigned int op)
{
    unsigned int eax;

    asm ( "cpuid"
          : "=a" (eax)
          : "0" (op)
          : "bx", "cx", "dx" );
    return eax;
}

static always_inline unsigned int cpuid_ebx(unsigned int op)
{
    unsigned int eax, ebx;

    asm ( "cpuid"
          : "=a" (eax), "=b" (ebx)
          : "0" (op)
          : "cx", "dx" );
    return ebx;
}

static always_inline unsigned int cpuid_ecx(unsigned int op)
{
    unsigned int eax, ecx;

    asm ( "cpuid"
          : "=a" (eax), "=c" (ecx)
          : "0" (op)
          : "bx", "dx" );
    return ecx;
}

static always_inline unsigned int cpuid_edx(unsigned int op)
{
    unsigned int eax, edx;

    asm ( "cpuid"
          : "=a" (eax), "=d" (edx)
          : "0" (op)
          : "bx", "cx" );
    return edx;
}
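
/*
 * Illustrative sketch (not part of this header): the single-datum helpers
 * make one-off feature probes compact. CPUID.1:EDX bit 25 is the SSE flag;
 * the helper name is a hypothetical example.
 */
static inline int cpu_has_sse_sketch(void)
{
    return !!(cpuid_edx(1) & (1u << 25)); /* CPUID.1:EDX.SSE[bit 25] */
}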

static inline unsigned long read_cr0(void)
{
    unsigned long cr0;
    asm volatile ( "mov %%cr0,%0\n\t" : "=r" (cr0) );
    return cr0;
}

static inline void write_cr0(unsigned long val)
{
    asm volatile ( "mov %0,%%cr0" : : "r" ((unsigned long)val) );
}

static inline unsigned long read_cr2(void)
{
    unsigned long cr2;
    asm volatile ( "mov %%cr2,%0\n\t" : "=r" (cr2) );
    return cr2;
}

DECLARE_PER_CPU(unsigned long, cr4);

static inline unsigned long read_cr4(void)
{
    return this_cpu(cr4);
}

static inline void write_cr4(unsigned long val)
{
    this_cpu(cr4) = val;
    asm volatile ( "mov %0,%%cr4" : : "r" (val) );
}

/* Clear and set 'TS' bit respectively */
static inline void clts(void)
{
    asm volatile ( "clts" );
}

static inline void stts(void)
{
    write_cr0(X86_CR0_TS|read_cr0());
}
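
/*
 * Illustrative sketch (not part of this header): clts()/stts() implement
 * lazy FPU switching. The context-switch path calls stts(), so the next FPU
 * instruction raises #NM (TRAP_no_device); the #NM handler then clears TS
 * and restores the new owner's FPU state. Names are hypothetical examples.
 */
static inline void handle_nm_lazy_fpu_sketch(void)
{
    clts(); /* permit FPU use again ... */
    /* ... then restore the current context's FPU registers here. */
}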

/*
 * Save the cr4 feature set we're using (i.e.
 * Pentium 4MB enable and PPro Global page
 * enable), so that any CPUs that boot up
 * after us can get the correct flags.
 */
extern unsigned long mmu_cr4_features;

static always_inline void set_in_cr4 (unsigned long mask)
{
    mmu_cr4_features |= mask;
    write_cr4(read_cr4() | mask);
}

static always_inline void clear_in_cr4 (unsigned long mask)
{
    mmu_cr4_features &= ~mask;
    write_cr4(read_cr4() & ~mask);
}
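
/*
 * Illustrative sketch (not part of this header): set_in_cr4() updates the
 * live CR4 and records the bit in mmu_cr4_features, so CPUs that boot later
 * inherit the same setting. Enabling SSE save/restore support, for example:
 */
static inline void enable_fxsr_sketch(void)
{
    set_in_cr4(X86_CR4_OSFXSR | X86_CR4_OSXMMEXCPT); /* FXSAVE/FXRSTOR + #XM */
}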

/*
 * NSC/Cyrix CPU configuration register indexes
 */
#define CX86_PCR0     0x20
#define CX86_GCR      0xb8
#define CX86_CCR0     0xc0
#define CX86_CCR1     0xc1
#define CX86_CCR2     0xc2
#define CX86_CCR3     0xc3
#define CX86_CCR4     0xe8
#define CX86_CCR5     0xe9
#define CX86_CCR6     0xea
#define CX86_CCR7     0xeb
#define CX86_PCR1     0xf0
#define CX86_DIR0     0xfe
#define CX86_DIR1     0xff
#define CX86_ARR_BASE 0xc4
#define CX86_RCR_BASE 0xdc

/*
 * NSC/Cyrix CPU indexed register access macros
 */

#define getCx86(reg) ({ outb((reg), 0x22); inb(0x23); })

#define setCx86(reg, data) do { \
    outb((reg), 0x22);          \
    outb((data), 0x23);         \
} while (0)
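
/*
 * Illustrative sketch (not part of this header): the Cyrix configuration
 * registers are reached indirectly through I/O port 0x22 (index) and 0x23
 * (data). Reading DIR0, the device identification register, might look like
 * this (hypothetical helper name):
 */
static inline unsigned char read_cyrix_dir0_sketch(void)
{
    return getCx86(CX86_DIR0); /* select index 0xfe, then read the datum */
}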

/* Stop speculative execution */
static inline void sync_core(void)
{
    int tmp;
    asm volatile (
        "cpuid"
        : "=a" (tmp)
        : "0" (1)
        : "ebx", "ecx", "edx", "memory" );
}

static always_inline void __monitor(const void *eax, unsigned long ecx,
                                    unsigned long edx)
{
    /* "monitor %eax,%ecx,%edx;" */
    asm volatile (
        ".byte 0x0f,0x01,0xc8;"
        : : "a" (eax), "c" (ecx), "d" (edx) );
}

static always_inline void __mwait(unsigned long eax, unsigned long ecx)
{
    /* "mwait %eax,%ecx;" */
    asm volatile (
        ".byte 0x0f,0x01,0xc9;"
        : : "a" (eax), "c" (ecx) );
}
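
/*
 * Illustrative sketch (not part of this header): the usual MONITOR/MWAIT
 * idle pattern arms a monitor on a flag's cache line, re-checks the flag to
 * close the wakeup race, then sleeps until the line is written. 'wake_flag'
 * and the helper name are hypothetical examples.
 */
static inline void mwait_idle_sketch(volatile int *wake_flag)
{
    __monitor((const void *)wake_flag, 0, 0); /* arm monitor on this line */
    if ( !*wake_flag )                        /* re-check to avoid a race */
        __mwait(0, 0);                        /* sleep; wakes on write/IRQ */
}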

#define IOBMP_BYTES          8192
#define IOBMP_INVALID_OFFSET 0x8000

struct tss_struct {
    unsigned short back_link,__blh;
#ifdef __x86_64__
    union { u64 rsp0, esp0; };
    union { u64 rsp1, esp1; };
    union { u64 rsp2, esp2; };
    u64 reserved1;
    u64 ist[7];
    u64 reserved2;
    u16 reserved3;
#else
    u32 esp0;
    u16 ss0,__ss0h;
    u32 esp1;
    u16 ss1,__ss1h;
    u32 esp2;
    u16 ss2,__ss2h;
    u32 __cr3;
    u32 eip;
    u32 eflags;
    u32 eax,ecx,edx,ebx;
    u32 esp;
    u32 ebp;
    u32 esi;
    u32 edi;
    u16 es, __esh;
    u16 cs, __csh;
    u16 ss, __ssh;
    u16 ds, __dsh;
    u16 fs, __fsh;
    u16 gs, __gsh;
    u16 ldt, __ldth;
    u16 trace;
#endif
    u16 bitmap;
    /* Pads the TSS to be cacheline-aligned (total size is 0x80). */
    u8 __cacheline_filler[24];
} __cacheline_aligned __attribute__((packed));

#ifdef __x86_64__
# define IST_DF  1UL
# define IST_NMI 2UL
# define IST_MCE 3UL
# define IST_MAX 3UL
#endif

#define IDT_ENTRIES 256
extern idt_entry_t idt_table[];
extern idt_entry_t *idt_tables[];

DECLARE_PER_CPU(struct tss_struct, init_tss);

extern void init_int80_direct_trap(struct vcpu *v);

#if defined(CONFIG_X86_32)

#define set_int80_direct_trap(_ed)                  \
    (memcpy(idt_tables[(_ed)->processor] + 0x80,    \
            &((_ed)->arch.int80_desc), 8))

#else

#define set_int80_direct_trap(_ed) ((void)0)

#endif

extern int gpf_emulate_4gb(struct cpu_user_regs *regs);

extern void write_ptbase(struct vcpu *v);

void destroy_gdt(struct vcpu *d);
long set_gdt(struct vcpu *d,
             unsigned long *frames,
             unsigned int entries);

#define write_debugreg(reg, val) do {                       \
    unsigned long __val = val;                              \
    asm volatile ( "mov %0,%%db" #reg : : "r" (__val) );    \
} while (0)
#define read_debugreg(reg) ({                               \
    unsigned long __val;                                    \
    asm volatile ( "mov %%db" #reg ",%0" : "=r" (__val) );  \
    __val;                                                  \
})
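
/*
 * Illustrative sketch (not part of this header): 'reg' is pasted into the
 * mnemonic at preprocessing time, so it must be a literal register number,
 * not a variable. Arming DR0 as an execute breakpoint (hypothetical helper):
 */
static inline void arm_dr0_sketch(unsigned long addr)
{
    write_debugreg(0, addr);                   /* expands to mov %0,%%db0 */
    write_debugreg(7, read_debugreg(7) | 0x1); /* DR7.L0: enable DR0 */
}
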
long set_debugreg(struct vcpu *p, int reg, unsigned long value);

/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static always_inline void rep_nop(void)
{
    asm volatile ( "rep;nop" : : : "memory" );
}

#define cpu_relax() rep_nop()
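
/*
 * Illustrative sketch (not part of this header): cpu_relax() in a busy-wait
 * loop tells the pipeline this is a spin, saving power and reducing pressure
 * on a hyperthreaded sibling. 'flag' is a hypothetical example variable.
 */
static inline void spin_until_set_sketch(volatile int *flag)
{
    while ( !*flag )
        cpu_relax(); /* PAUSE: be polite to the sibling thread */
}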

/* Prefetch instructions for Pentium III and AMD Athlon */
#ifdef CONFIG_MPENTIUMIII

#define ARCH_HAS_PREFETCH
extern always_inline void prefetch(const void *x)
{
    asm volatile ( "prefetchnta (%0)" : : "r" (x) );
}

#elif CONFIG_X86_USE_3DNOW

#define ARCH_HAS_PREFETCH
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH

extern always_inline void prefetch(const void *x)
{
    asm volatile ( "prefetch (%0)" : : "r" (x) );
}

extern always_inline void prefetchw(const void *x)
{
    asm volatile ( "prefetchw (%0)" : : "r" (x) );
}
#define spin_lock_prefetch(x) prefetchw(x)

#endif

void show_stack(struct cpu_user_regs *regs);
void show_stack_overflow(unsigned int cpu, unsigned long esp);
void show_registers(struct cpu_user_regs *regs);
void show_execution_state(struct cpu_user_regs *regs);
#define dump_execution_state() run_in_exception_handler(show_execution_state)
void show_page_walk(unsigned long addr);
asmlinkage void fatal_trap(int trapnr, struct cpu_user_regs *regs);

#ifdef CONFIG_COMPAT
void compat_show_guest_stack(struct vcpu *, struct cpu_user_regs *, int lines);
#else
#define compat_show_guest_stack(vcpu, regs, lines) ((void)0)
#endif

extern void mtrr_ap_init(void);
extern void mtrr_bp_init(void);

void mcheck_init(struct cpuinfo_x86 *c);

#define DECLARE_TRAP_HANDLER(_name)                     \
asmlinkage void _name(void);                            \
asmlinkage void do_ ## _name(struct cpu_user_regs *regs)
DECLARE_TRAP_HANDLER(divide_error);
DECLARE_TRAP_HANDLER(debug);
DECLARE_TRAP_HANDLER(nmi);
DECLARE_TRAP_HANDLER(int3);
DECLARE_TRAP_HANDLER(overflow);
DECLARE_TRAP_HANDLER(bounds);
DECLARE_TRAP_HANDLER(invalid_op);
DECLARE_TRAP_HANDLER(device_not_available);
DECLARE_TRAP_HANDLER(coprocessor_segment_overrun);
DECLARE_TRAP_HANDLER(invalid_TSS);
DECLARE_TRAP_HANDLER(segment_not_present);
DECLARE_TRAP_HANDLER(stack_segment);
DECLARE_TRAP_HANDLER(general_protection);
DECLARE_TRAP_HANDLER(page_fault);
DECLARE_TRAP_HANDLER(coprocessor_error);
DECLARE_TRAP_HANDLER(simd_coprocessor_error);
DECLARE_TRAP_HANDLER(machine_check);
DECLARE_TRAP_HANDLER(alignment_check);
DECLARE_TRAP_HANDLER(spurious_interrupt_bug);
#undef DECLARE_TRAP_HANDLER
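
/*
 * For reference (not part of this header): each DECLARE_TRAP_HANDLER(_name)
 * above expands to two prototypes, the assembly entry stub and the C handler
 * it calls. E.g. DECLARE_TRAP_HANDLER(page_fault) yields:
 *
 *   asmlinkage void page_fault(void);
 *   asmlinkage void do_page_fault(struct cpu_user_regs *regs);
 */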

int cpuid_hypervisor_leaves(uint32_t idx, uint32_t sub_idx,
                            uint32_t *eax, uint32_t *ebx,
                            uint32_t *ecx, uint32_t *edx);
int rdmsr_hypervisor_regs(uint32_t idx, uint64_t *val);
int wrmsr_hypervisor_regs(uint32_t idx, uint64_t val);

int microcode_update(XEN_GUEST_HANDLE(const_void), unsigned long len);
int microcode_resume_cpu(int cpu);

#endif /* !__ASSEMBLY__ */

#endif /* __ASM_X86_PROCESSOR_H */

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */