debuggers.hg

view xen/include/asm-x86/processor.h @ 3705:4294cfa9fad3

bitkeeper revision 1.1159.212.95 (4204aa0ee0re5Xx1zWrJ9ejxzgRs3w)

Various cleanups. Remove PDB pending a simpler GDB stub and/or the NetBSD debugger.
Force emacs mode to appropriate tabbing in various files.
Signed-off-by: keir.fraser@cl.cam.ac.uk
author kaf24@scramble.cl.cam.ac.uk
date Sat Feb 05 11:12:14 2005 +0000 (2005-02-05)
parents d55d523078f7
children d93748c50893
line source
1 /*
2 * include/asm-x86/processor.h
3 *
4 * Copyright (C) 1994 Linus Torvalds
5 */
7 #ifndef __ASM_X86_PROCESSOR_H
8 #define __ASM_X86_PROCESSOR_H
10 #ifndef __ASSEMBLY__
11 #include <asm/page.h>
12 #include <asm/types.h>
13 #include <asm/cpufeature.h>
14 #include <asm/desc.h>
15 #include <asm/flushtlb.h>
16 #include <xen/config.h>
17 #include <xen/spinlock.h>
18 #include <xen/cache.h>
19 #include <asm/vmx_vmcs.h>
20 #include <public/xen.h>
21 #endif
23 /*
24 * CPU vendor IDs
25 */
26 #define X86_VENDOR_INTEL 0
27 #define X86_VENDOR_CYRIX 1
28 #define X86_VENDOR_AMD 2
29 #define X86_VENDOR_UMC 3
30 #define X86_VENDOR_NEXGEN 4
31 #define X86_VENDOR_CENTAUR 5
32 #define X86_VENDOR_RISE 6
33 #define X86_VENDOR_TRANSMETA 7
34 #define X86_VENDOR_NSC 8
35 #define X86_VENDOR_SIS 9
36 #define X86_VENDOR_NUM 10
37 #define X86_VENDOR_UNKNOWN 0xff
39 /*
40 * EFLAGS bits
41 */
42 #define X86_EFLAGS_CF 0x00000001 /* Carry Flag */
43 #define X86_EFLAGS_PF 0x00000004 /* Parity Flag */
44 #define X86_EFLAGS_AF 0x00000010 /* Auxiliary Carry Flag */
45 #define X86_EFLAGS_ZF 0x00000040 /* Zero Flag */
46 #define X86_EFLAGS_SF 0x00000080 /* Sign Flag */
47 #define X86_EFLAGS_TF 0x00000100 /* Trap Flag */
48 #define X86_EFLAGS_IF 0x00000200 /* Interrupt Flag */
49 #define X86_EFLAGS_DF 0x00000400 /* Direction Flag */
50 #define X86_EFLAGS_OF 0x00000800 /* Overflow Flag */
51 #define X86_EFLAGS_IOPL 0x00003000 /* IOPL mask */
52 #define X86_EFLAGS_NT 0x00004000 /* Nested Task */
53 #define X86_EFLAGS_RF 0x00010000 /* Resume Flag */
54 #define X86_EFLAGS_VM 0x00020000 /* Virtual Mode */
55 #define X86_EFLAGS_AC 0x00040000 /* Alignment Check */
56 #define X86_EFLAGS_VIF 0x00080000 /* Virtual Interrupt Flag */
57 #define X86_EFLAGS_VIP 0x00100000 /* Virtual Interrupt Pending */
58 #define X86_EFLAGS_ID 0x00200000 /* CPUID detection flag */
60 /*
61 * Intel CPU flags in CR0
62 */
63 #define X86_CR0_PE 0x00000001 /* Enable Protected Mode (RW) */
64 #define X86_CR0_MP 0x00000002 /* Monitor Coprocessor (RW) */
65 #define X86_CR0_EM 0x00000004 /* Require FPU Emulation (RO) */
66 #define X86_CR0_TS 0x00000008 /* Task Switched (RW) */
67 #define X86_CR0_NE 0x00000020 /* Numeric Error Reporting (RW) */
68 #define X86_CR0_WP 0x00010000 /* Supervisor Write Protect (RW) */
69 #define X86_CR0_AM 0x00040000 /* Alignment Checking (RW) */
70 #define X86_CR0_NW 0x20000000 /* Not Write-Through (RW) */
71 #define X86_CR0_CD 0x40000000 /* Cache Disable (RW) */
72 #define X86_CR0_PG 0x80000000 /* Paging (RW) */
74 /*
75 * Intel CPU features in CR4
76 */
77 #define X86_CR4_VME 0x0001 /* enable vm86 extensions */
78 #define X86_CR4_PVI 0x0002 /* virtual interrupts flag enable */
79 #define X86_CR4_TSD 0x0004 /* disable time stamp at ipl 3 */
80 #define X86_CR4_DE 0x0008 /* enable debugging extensions */
81 #define X86_CR4_PSE 0x0010 /* enable page size extensions */
82 #define X86_CR4_PAE 0x0020 /* enable physical address extensions */
83 #define X86_CR4_MCE 0x0040 /* Machine check enable */
84 #define X86_CR4_PGE 0x0080 /* enable global pages */
85 #define X86_CR4_PCE 0x0100 /* enable performance counters at ipl 3 */
86 #define X86_CR4_OSFXSR 0x0200 /* enable fast FPU save and restore */
87 #define X86_CR4_OSXMMEXCPT 0x0400 /* enable unmasked SSE exceptions */
88 #define X86_CR4_VMXE 0x2000 /* enable VMX */
90 /*
91 * Trap/fault mnemonics.
92 */
93 #define TRAP_divide_error 0
94 #define TRAP_debug 1
95 #define TRAP_nmi 2
96 #define TRAP_int3 3
97 #define TRAP_overflow 4
98 #define TRAP_bounds 5
99 #define TRAP_invalid_op 6
100 #define TRAP_no_device 7
101 #define TRAP_double_fault 8
102 #define TRAP_copro_seg 9
103 #define TRAP_invalid_tss 10
104 #define TRAP_no_segment 11
105 #define TRAP_stack_error 12
106 #define TRAP_gp_fault 13
107 #define TRAP_page_fault 14
108 #define TRAP_spurious_int 15
109 #define TRAP_copro_error 16
110 #define TRAP_alignment_check 17
111 #define TRAP_machine_check 18
112 #define TRAP_simd_error 19
113 #define TRAP_deferred_nmi 31
115 /*
116 * Non-fatal fault/trap handlers return an error code to the caller. If the
117 * code is non-zero, it means that either the exception was not due to a fault
118 * (i.e., it was a trap) or that the fault has been fixed up so the instruction
119 * replay ought to succeed.
120 */
121 #define EXCRET_not_a_fault 1 /* It was a trap. No instruction replay needed. */
122 #define EXCRET_fault_fixed 1 /* It was a fault that we fixed: try a replay. */
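/*
 * Illustrative sketch, not part of the original file: a hypothetical helper
 * showing how the EXCRET_* convention above is meant to be used. A zero
 * return means the exception was a real, unfixed fault and the caller must
 * escalate; non-zero means the instruction can be replayed (or needed no
 * replay at all).
 */
static inline int example_fixup_result(int was_trap, int fixed)
{
    if ( was_trap )
        return EXCRET_not_a_fault;  /* trap: nothing to replay */
    if ( fixed )
        return EXCRET_fault_fixed;  /* fault fixed up: replay will succeed */
    return 0;                       /* genuine unhandled fault */
}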
124 /*
125 * 'trap_bounce' flags values.
126 */
127 #define TBF_EXCEPTION 1
128 #define TBF_EXCEPTION_ERRCODE 2
129 #define TBF_EXCEPTION_CR2 4
130 #define TBF_INTERRUPT 8
131 #define TBF_FAILSAFE 16
133 /*
134 * thread.flags values.
135 */
136 #define TF_failsafe_return 1
138 #ifndef __ASSEMBLY__
140 struct domain;
141 struct exec_domain;
143 /*
144 * Default implementation of macro that returns current
145 * instruction pointer ("program counter").
146 */
147 #ifdef __x86_64__
148 #define current_text_addr() ({ void *pc; asm volatile("leaq 1f(%%rip),%0\n1:":"=r"(pc)); pc; })
149 #else
150 #define current_text_addr() \
151 ({ void *pc; __asm__("movl $1f,%0\n1:":"=g" (pc)); pc; })
152 #endif
154 /*
155 * CPU type and hardware bug flags. Kept separately for each CPU.
156 * Members of this structure are referenced in head.S, so think twice
157 * before touching them. [mj]
158 */
160 struct cpuinfo_x86 {
161 __u8 x86; /* CPU family */
162 __u8 x86_vendor; /* CPU vendor */
163 __u8 x86_model;
164 __u8 x86_mask;
165 int cpuid_level; /* Maximum supported CPUID level, -1=no CPUID */
166 __u32 x86_capability[NCAPINTS];
167 char x86_vendor_id[16];
168 int x86_cache_size; /* in KB - for CPUS that support this call */
169 int x86_clflush_size;
170 int x86_tlbsize; /* number of 4K pages in DTLB/ITLB combined */
171 } __attribute__((__aligned__(SMP_CACHE_BYTES)));
173 /*
174 * capabilities of CPUs
175 */
177 extern struct cpuinfo_x86 boot_cpu_data;
178 extern struct tss_struct init_tss[NR_CPUS];
180 #ifdef CONFIG_SMP
181 extern struct cpuinfo_x86 cpu_data[];
182 #define current_cpu_data cpu_data[smp_processor_id()]
183 #else
184 #define cpu_data (&boot_cpu_data)
185 #define current_cpu_data boot_cpu_data
186 #endif
188 extern char ignore_irq13;
190 extern void identify_cpu(struct cpuinfo_x86 *);
191 extern void print_cpu_info(struct cpuinfo_x86 *);
192 extern void dodgy_tsc(void);
194 /*
195 * Generic CPUID function
196 */
197 static inline void cpuid(int op, int *eax, int *ebx, int *ecx, int *edx)
198 {
199 __asm__("cpuid"
200 : "=a" (*eax),
201 "=b" (*ebx),
202 "=c" (*ecx),
203 "=d" (*edx)
204 : "0" (op));
205 }
207 /*
208 * CPUID functions returning a single datum
209 */
210 static inline unsigned int cpuid_eax(unsigned int op)
211 {
212 unsigned int eax;
214 __asm__("cpuid"
215 : "=a" (eax)
216 : "0" (op)
217 : "bx", "cx", "dx");
218 return eax;
219 }
220 static inline unsigned int cpuid_ebx(unsigned int op)
221 {
222 unsigned int eax, ebx;
224 __asm__("cpuid"
225 : "=a" (eax), "=b" (ebx)
226 : "0" (op)
227 : "cx", "dx" );
228 return ebx;
229 }
230 static inline unsigned int cpuid_ecx(unsigned int op)
231 {
232 unsigned int eax, ecx;
234 __asm__("cpuid"
235 : "=a" (eax), "=c" (ecx)
236 : "0" (op)
237 : "bx", "dx" );
238 return ecx;
239 }
240 static inline unsigned int cpuid_edx(unsigned int op)
241 {
242 unsigned int eax, edx;
244 __asm__("cpuid"
245 : "=a" (eax), "=d" (edx)
246 : "0" (op)
247 : "bx", "cx");
248 return edx;
249 }
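/*
 * Illustrative sketch, not part of the original file: how the CPUID helpers
 * above might be used. The vendor-string register order (EBX, EDX, ECX for
 * leaf 0) and the SSE bit (bit 25 of CPUID.1:EDX) are architectural; the
 * function names are made up for this example.
 */
static inline void example_read_vendor(unsigned int vendor[3])
{
    int eax;
    /* Leaf 0: the 12-byte vendor string is returned in EBX, EDX, ECX. */
    cpuid(0, &eax, (int *)&vendor[0], (int *)&vendor[2], (int *)&vendor[1]);
}
static inline int example_has_sse(void)
{
    /* Leaf 1: standard feature flags are returned in EDX. */
    return (cpuid_edx(1) >> 25) & 1;
}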
252 #define read_cr0() ({ \
253 unsigned long __dummy; \
254 __asm__( \
255 "mov"__OS" %%cr0,%0\n\t" \
256 :"=r" (__dummy)); \
257 __dummy; \
258 })
260 #define write_cr0(x) \
261 __asm__("mov"__OS" %0,%%cr0": :"r" ((unsigned long)x));
263 #define read_cr4() ({ \
264 unsigned long __dummy; \
265 __asm__( \
266 "mov"__OS" %%cr4,%0\n\t" \
267 :"=r" (__dummy)); \
268 __dummy; \
269 })
271 #define write_cr4(x) \
272 __asm__("mov"__OS" %0,%%cr4": :"r" ((unsigned long)x));
274 /*
275 * Save the cr4 feature set we're using (i.e.
276 * Pentium 4MB enable and PPro Global page
277 * enable), so that any CPUs that boot up
278 * after us can get the correct flags.
279 */
280 extern unsigned long mmu_cr4_features;
282 static inline void set_in_cr4 (unsigned long mask)
283 {
284 mmu_cr4_features |= mask;
285 __asm__("mov"__OS" %%cr4,%%"__OP"ax\n\t"
286 "or"__OS" %0,%%"__OP"ax\n\t"
287 "mov"__OS" %%"__OP"ax,%%cr4\n"
288 : : "irg" (mask)
289 :"ax");
290 }
292 static inline void clear_in_cr4 (unsigned long mask)
293 {
294 mmu_cr4_features &= ~mask;
295 __asm__("mov"__OS" %%cr4,%%"__OP"ax\n\t"
296 "and"__OS" %0,%%"__OP"ax\n\t"
297 "mov"__OS" %%"__OP"ax,%%cr4\n"
298 : : "irg" (~mask)
299 :"ax");
300 }
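/*
 * Illustrative sketch, not part of the original file: typical use of the
 * helpers above. Updating mmu_cr4_features first means CPUs brought up
 * later copy the same CR4 settings.
 */
static inline void example_enable_global_pages(void)
{
    if ( !(read_cr4() & X86_CR4_PGE) )
        set_in_cr4(X86_CR4_PGE);    /* set PGE in CR4 and remember it */
}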
302 /*
303 * NSC/Cyrix CPU configuration register indexes
304 */
306 #define CX86_PCR0 0x20
307 #define CX86_GCR 0xb8
308 #define CX86_CCR0 0xc0
309 #define CX86_CCR1 0xc1
310 #define CX86_CCR2 0xc2
311 #define CX86_CCR3 0xc3
312 #define CX86_CCR4 0xe8
313 #define CX86_CCR5 0xe9
314 #define CX86_CCR6 0xea
315 #define CX86_CCR7 0xeb
316 #define CX86_PCR1 0xf0
317 #define CX86_DIR0 0xfe
318 #define CX86_DIR1 0xff
319 #define CX86_ARR_BASE 0xc4
320 #define CX86_RCR_BASE 0xdc
322 /*
323 * NSC/Cyrix CPU indexed register access macros
324 */
326 #define getCx86(reg) ({ outb((reg), 0x22); inb(0x23); })
328 #define setCx86(reg, data) do { \
329 outb((reg), 0x22); \
330 outb((data), 0x23); \
331 } while (0)
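/*
 * Illustrative sketch, not part of the original file: the macros above drive
 * the NSC/Cyrix indexed-register interface (index written to port 0x22, data
 * transferred through port 0x23). DIR0/DIR1 are the device identification
 * registers; combining them is one way to key CPU detection.
 */
static inline unsigned int example_read_cyrix_id(void)
{
    unsigned int dir0 = getCx86(CX86_DIR0);  /* device ID */
    unsigned int dir1 = getCx86(CX86_DIR1);  /* device ID / stepping */
    return (dir0 << 8) | dir1;
}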
333 #define IOBMP_BYTES 8192
334 #define IOBMP_BYTES_PER_SELBIT (IOBMP_BYTES / 64)
335 #define IOBMP_BITS_PER_SELBIT (IOBMP_BYTES_PER_SELBIT * 8)
336 #define IOBMP_OFFSET offsetof(struct tss_struct, io_bitmap)
337 #define IOBMP_INVALID_OFFSET 0x8000
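/*
 * Worked example, not part of the original file: the 8192-byte bitmap covers
 * all 65536 I/O ports, one bit per port. Spreading it across the 64 bits of
 * io_bitmap_sel gives 8192/64 = 128 bytes, i.e. 1024 ports, per selector
 * bit, so a port's selector bit is simply:
 */
static inline int example_iobmp_sel_bit(unsigned int port)
{
    return port / IOBMP_BITS_PER_SELBIT;    /* 0 .. 63 */
}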
339 struct i387_state {
340 u8 state[512]; /* big enough for FXSAVE */
341 } __attribute__ ((aligned (16)));
343 struct tss_struct {
344 unsigned short back_link,__blh;
345 #ifdef __x86_64__
346 u64 rsp0;
347 u64 rsp1;
348 u64 rsp2;
349 u64 reserved1;
350 u64 ist[7];
351 u64 reserved2;
352 u16 reserved3;
353 #else
354 u32 esp0;
355 u16 ss0,__ss0h;
356 u32 esp1;
357 u16 ss1,__ss1h;
358 u32 esp2;
359 u16 ss2,__ss2h;
360 u32 __cr3;
361 u32 eip;
362 u32 eflags;
363 u32 eax,ecx,edx,ebx;
364 u32 esp;
365 u32 ebp;
366 u32 esi;
367 u32 edi;
368 u16 es, __esh;
369 u16 cs, __csh;
370 u16 ss, __ssh;
371 u16 ds, __dsh;
372 u16 fs, __fsh;
373 u16 gs, __gsh;
374 u16 ldt, __ldth;
375 u16 trace;
376 #endif
377 u16 bitmap;
378 u8 io_bitmap[IOBMP_BYTES+1];
379 /* Pads the TSS to be cacheline-aligned (total size is 0x2080). */
380 u8 __cacheline_filler[23];
381 } __cacheline_aligned PACKED;
383 struct trap_bounce {
384 unsigned long error_code;
385 unsigned long cr2;
386 unsigned short flags; /* TBF_ */
387 unsigned short cs;
388 unsigned long eip;
389 };
391 struct thread_struct {
392 unsigned long guestos_sp;
393 unsigned long guestos_ss;
395 unsigned long flags; /* TF_ */
397 /* Hardware debugging registers */
398 unsigned long debugreg[8]; /* %%db0-7 debug registers */
400 /* floating point info */
401 struct i387_state i387;
403 /* general user-visible register state */
404 execution_context_t user_ctxt;
406 void (*schedule_tail) (struct exec_domain *);
408 /*
409 * Return vectors pushed to us by guest OS.
410 * The stack frame for events is exactly that of an x86 hardware interrupt.
411 * The stack frame for a failsafe callback is augmented with saved values
412 * for segment registers %ds, %es, %fs and %gs:
413 * %ds, %es, %fs, %gs, %eip, %cs, %eflags [, %oldesp, %oldss]
414 */
415 unsigned long event_selector; /* entry CS */
416 unsigned long event_address; /* entry EIP */
418 unsigned long failsafe_selector; /* entry CS */
419 unsigned long failsafe_address; /* entry EIP */
421 /* Bounce information for propagating an exception to guest OS. */
422 struct trap_bounce trap_bounce;
424 /* I/O-port access bitmap. */
425 u64 io_bitmap_sel; /* Selector to tell us which parts of the IO bitmap are
426 * "interesting" (i.e. have clear bits) */
427 u8 *io_bitmap; /* Pointer to task's IO bitmap or NULL */
429 /* Trap info. */
430 #ifdef ARCH_HAS_FAST_TRAP
431 int fast_trap_idx;
432 struct desc_struct fast_trap_desc;
433 #endif
434 trap_info_t traps[256];
435 #ifdef CONFIG_VMX
436 struct arch_vmx_struct arch_vmx; /* Virtual Machine Extensions */
437 #endif
438 } __cacheline_aligned;
440 #define IDT_ENTRIES 256
441 extern idt_entry_t idt_table[];
442 extern idt_entry_t *idt_tables[];
444 #ifdef ARCH_HAS_FAST_TRAP
446 #define SET_DEFAULT_FAST_TRAP(_p) \
447 (_p)->fast_trap_idx = 0x20; \
448 (_p)->fast_trap_desc.a = 0; \
449 (_p)->fast_trap_desc.b = 0;
451 #define CLEAR_FAST_TRAP(_p) \
452 (memset(idt_tables[smp_processor_id()] + (_p)->fast_trap_idx, \
453 0, 8))
455 #define SET_FAST_TRAP(_p) \
456 (memcpy(idt_tables[smp_processor_id()] + (_p)->fast_trap_idx, \
457 &((_p)->fast_trap_desc), 8))
459 long set_fast_trap(struct exec_domain *p, int idx);
461 #else
463 #define SET_DEFAULT_FAST_TRAP(_p) ((void)0)
464 #define CLEAR_FAST_TRAP(_p) ((void)0)
465 #define SET_FAST_TRAP(_p) ((void)0)
466 #define set_fast_trap(_p, _i) (0)
468 #endif
470 #define INIT_THREAD { 0 }
472 extern int gpf_emulate_4gb(struct xen_regs *regs);
474 struct mm_struct {
475 /*
476 * Every domain has a L1 pagetable of its own. Per-domain mappings
477 * are put in this table (eg. the current GDT is mapped here).
478 */
479 l1_pgentry_t *perdomain_ptes;
480 pagetable_t pagetable;
482 pagetable_t monitor_table;
483 l2_pgentry_t *vpagetable; /* virtual address of pagetable */
484 l2_pgentry_t *shadow_vtable; /* virtual address of shadow_table */
485 l2_pgentry_t *guest_pl2e_cache; /* guest page directory cache */
486 unsigned long min_pfn; /* min host physical */
487 unsigned long max_pfn; /* max host physical */
489 /* Virtual CR2 value. Can be read/written by guest. */
490 unsigned long guest_cr2;
492 /* shadow mode status and controls */
493 unsigned int shadow_mode; /* flags to control shadow table operation */
494 pagetable_t shadow_table;
495 spinlock_t shadow_lock;
496 unsigned int shadow_max_page_count; /* currently unused */
498 /* shadow hashtable */
499 struct shadow_status *shadow_ht;
500 struct shadow_status *shadow_ht_free;
501 struct shadow_status *shadow_ht_extras; /* extra allocation units */
502 unsigned int shadow_extras_count;
504 /* shadow dirty bitmap */
505 unsigned long *shadow_dirty_bitmap;
506 unsigned int shadow_dirty_bitmap_size; /* in pages, bit per page */
508 /* shadow mode stats */
509 unsigned int shadow_page_count;
510 unsigned int shadow_fault_count;
511 unsigned int shadow_dirty_count;
512 unsigned int shadow_dirty_net_count;
513 unsigned int shadow_dirty_block_count;
515 /* Current LDT details. */
516 unsigned long ldt_base, ldt_ents, shadow_ldt_mapcnt;
517 /* Next entry is passed to LGDT on domain switch. */
518 char gdt[10]; /* NB. 10 bytes needed for x86_64. Use 6 bytes for x86_32. */
519 };
521 #define SHM_full_32 (8) /* full virtualization for 32-bit */
523 static inline void write_ptbase(struct mm_struct *mm)
524 {
525 unsigned long pa;
527 #ifdef CONFIG_VMX
528 if ( unlikely(mm->shadow_mode) ) {
529 if (mm->shadow_mode == SHM_full_32)
530 pa = pagetable_val(mm->monitor_table);
531 else
532 pa = pagetable_val(mm->shadow_table);
533 }
534 #else
535 if ( unlikely(mm->shadow_mode) )
536 pa = pagetable_val(mm->shadow_table);
537 #endif
538 else
539 pa = pagetable_val(mm->pagetable);
541 write_cr3(pa);
542 }
544 #define IDLE0_MM \
545 { \
546 perdomain_ptes: 0, \
547 pagetable: mk_pagetable(__pa(idle_pg_table)) \
548 }
550 /* Convenient accessor for mm.gdt. */
551 #define SET_GDT_ENTRIES(_p, _e) ((*(u16 *)((_p)->mm.gdt + 0)) = (((_e)<<3)-1))
552 #define SET_GDT_ADDRESS(_p, _a) ((*(unsigned long *)((_p)->mm.gdt + 2)) = (_a))
553 #define GET_GDT_ENTRIES(_p) (((*(u16 *)((_p)->mm.gdt + 0))+1)>>3)
554 #define GET_GDT_ADDRESS(_p) (*(unsigned long *)((_p)->mm.gdt + 2))
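/*
 * Worked example, not part of the original file: mm.gdt holds the
 * pseudo-descriptor handed to LGDT, a 16-bit limit followed by the base
 * address (hence 6 bytes on x86_32, 10 on x86_64). Each GDT entry is 8 bytes
 * and the limit is inclusive, so ((_e)<<3)-1: e.g. 512 entries give a limit
 * of 0x0FFF, and GET_GDT_ENTRIES recovers (0x0FFF+1)>>3 = 512.
 */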
556 void destroy_gdt(struct exec_domain *d);
557 long set_gdt(struct exec_domain *d,
558 unsigned long *frames,
559 unsigned int entries);
561 long set_debugreg(struct exec_domain *p, int reg, unsigned long value);
563 struct microcode_header {
564 unsigned int hdrver;
565 unsigned int rev;
566 unsigned int date;
567 unsigned int sig;
568 unsigned int cksum;
569 unsigned int ldrver;
570 unsigned int pf;
571 unsigned int datasize;
572 unsigned int totalsize;
573 unsigned int reserved[3];
574 };
576 struct microcode {
577 struct microcode_header hdr;
578 unsigned int bits[0];
579 };
581 typedef struct microcode microcode_t;
582 typedef struct microcode_header microcode_header_t;
584 /* microcode format is extended from prescott processors */
585 struct extended_signature {
586 unsigned int sig;
587 unsigned int pf;
588 unsigned int cksum;
589 };
591 struct extended_sigtable {
592 unsigned int count;
593 unsigned int cksum;
594 unsigned int reserved[3];
595 struct extended_signature sigs[0];
596 };
598 /* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
599 static inline void rep_nop(void)
600 {
601 __asm__ __volatile__("rep;nop");
602 }
604 #define cpu_relax() rep_nop()
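/*
 * Illustrative sketch, not part of the original file: cpu_relax() is meant
 * for busy-wait loops such as spinning on a flag another CPU will set; the
 * PAUSE hint reduces power and pipeline pressure while spinning.
 */
static inline void example_wait_for_flag(volatile int *flag)
{
    while ( !*flag )
        cpu_relax();
}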
606 /* Prefetch instructions for Pentium III and AMD Athlon */
607 #ifdef CONFIG_MPENTIUMIII
609 #define ARCH_HAS_PREFETCH
610 extern inline void prefetch(const void *x)
611 {
612 __asm__ __volatile__ ("prefetchnta (%0)" : : "r"(x));
613 }
615 #elif CONFIG_X86_USE_3DNOW
617 #define ARCH_HAS_PREFETCH
618 #define ARCH_HAS_PREFETCHW
619 #define ARCH_HAS_SPINLOCK_PREFETCH
621 extern inline void prefetch(const void *x)
622 {
623 __asm__ __volatile__ ("prefetch (%0)" : : "r"(x));
624 }
626 extern inline void prefetchw(const void *x)
627 {
628 __asm__ __volatile__ ("prefetchw (%0)" : : "r"(x));
629 }
630 #define spin_lock_prefetch(x) prefetchw(x)
632 #endif
634 void show_guest_stack();
635 void show_trace(unsigned long *esp);
636 void show_stack(unsigned long *esp);
637 void show_registers(struct xen_regs *regs);
638 void show_page_walk(unsigned long addr);
639 asmlinkage void fatal_trap(int trapnr, struct xen_regs *regs);
641 #endif /* !__ASSEMBLY__ */
643 #endif /* __ASM_X86_PROCESSOR_H */