debuggers.hg

view xen/arch/x86/x86_32/asm-offsets.c @ 22906:700ac6445812

Now add KDB to the non-kdb tree
author Mukesh Rathor
date Thu Feb 03 15:42:41 2011 -0800 (2011-02-03)
parents b9017fdaad4d
children
line source
1 /*
2 * Generate definitions needed by assembly language modules.
3 * This code generates raw asm output which is post-processed
4 * to extract and format the required data.
5 */
6 #define COMPILE_OFFSETS
8 #include <xen/config.h>
9 #include <xen/perfc.h>
10 #include <xen/sched.h>
11 #include <asm/fixmap.h>
12 #include <asm/hardirq.h>
13 #include <xen/multiboot.h>
/*
 * DEFINE() emits a "->SYMBOL value" marker line into the compiler's raw
 * assembly output; per the file header comment, that output is
 * post-processed to extract these markers into usable definitions.
 * The "i" constraint requires _val to be an integer constant expression,
 * so everything here is resolved at compile time.
 */
15 #define DEFINE(_sym, _val) \
16 __asm__ __volatile__ ( "\n->" #_sym " %0 " #_val : : "i" (_val) )
/* BLANK() emits a bare "->" marker, i.e. a blank separator in the output. */
17 #define BLANK() \
18 __asm__ __volatile__ ( "\n->" : : )
/* OFFSET() defines _sym as the byte offset of member _mem within type _str. */
19 #define OFFSET(_sym, _str, _mem) \
20 DEFINE(_sym, offsetof(_str, _mem));
22 /* base-2 logarithm */
/*
 * LOG_2(x) evaluates to floor(log2(x)) for a 32-bit constant, via a
 * compile-time binary search: each __Ln helper decides whether the set
 * bits lie in the upper or lower half of an n-bit window.  Unlike a
 * function, this remains usable inside DEFINE()'s "i" constraint.
 */
23 #define __L2(_x) (((_x) & 0x00000002) ? 1 : 0)
24 #define __L4(_x) (((_x) & 0x0000000c) ? ( 2 + __L2( (_x)>> 2)) : __L2( _x))
25 #define __L8(_x) (((_x) & 0x000000f0) ? ( 4 + __L4( (_x)>> 4)) : __L4( _x))
26 #define __L16(_x) (((_x) & 0x0000ff00) ? ( 8 + __L8( (_x)>> 8)) : __L8( _x))
27 #define LOG_2(_x) (((_x) & 0xffff0000) ? (16 + __L16((_x)>>16)) : __L16(_x))
/*
 * __dummy__() is never executed.  It exists only so that compiling this
 * file makes the DEFINE/OFFSET/BLANK macros above emit their "->" marker
 * lines into the assembly output, from which the build extracts constants
 * for use by assembly language modules (see the file header comment).
 * The order of groups below therefore only affects the layout of the
 * generated output, not any runtime behavior.
 */
29 void __dummy__(void)
30 {
/* Byte offsets of the saved guest register frame (struct cpu_user_regs). */
31 OFFSET(UREGS_eax, struct cpu_user_regs, eax);
32 OFFSET(UREGS_ebx, struct cpu_user_regs, ebx);
33 OFFSET(UREGS_ecx, struct cpu_user_regs, ecx);
34 OFFSET(UREGS_edx, struct cpu_user_regs, edx);
35 OFFSET(UREGS_esi, struct cpu_user_regs, esi);
36 OFFSET(UREGS_edi, struct cpu_user_regs, edi);
37 OFFSET(UREGS_esp, struct cpu_user_regs, esp);
38 OFFSET(UREGS_ebp, struct cpu_user_regs, ebp);
39 OFFSET(UREGS_eip, struct cpu_user_regs, eip);
40 OFFSET(UREGS_cs, struct cpu_user_regs, cs);
41 OFFSET(UREGS_ds, struct cpu_user_regs, ds);
42 OFFSET(UREGS_es, struct cpu_user_regs, es);
43 OFFSET(UREGS_fs, struct cpu_user_regs, fs);
44 OFFSET(UREGS_gs, struct cpu_user_regs, gs);
45 OFFSET(UREGS_ss, struct cpu_user_regs, ss);
46 OFFSET(UREGS_eflags, struct cpu_user_regs, eflags);
47 OFFSET(UREGS_error_code, struct cpu_user_regs, error_code);
48 OFFSET(UREGS_entry_vector, struct cpu_user_regs, entry_vector);
49 OFFSET(UREGS_saved_upcall_mask, struct cpu_user_regs, saved_upcall_mask);
/*
 * NOTE(review): UREGS_kernel_sizeof is deliberately OFFSET(..., esp), not
 * sizeof — presumably the kernel-mode frame excludes the members from esp
 * onward; confirm against the struct cpu_user_regs layout.
 */
50 OFFSET(UREGS_kernel_sizeof, struct cpu_user_regs, esp);
51 DEFINE(UREGS_user_sizeof, sizeof(struct cpu_user_regs));
52 BLANK();
/* struct vcpu offsets: scheduling info and PV guest callback/stack state. */
54 OFFSET(VCPU_processor, struct vcpu, processor);
55 OFFSET(VCPU_vcpu_info, struct vcpu, vcpu_info);
56 OFFSET(VCPU_trap_bounce, struct vcpu, arch.trap_bounce);
57 OFFSET(VCPU_thread_flags, struct vcpu, arch.flags);
58 OFFSET(VCPU_event_sel, struct vcpu,
59 arch.guest_context.event_callback_cs);
60 OFFSET(VCPU_event_addr, struct vcpu,
61 arch.guest_context.event_callback_eip);
62 OFFSET(VCPU_failsafe_sel, struct vcpu,
63 arch.guest_context.failsafe_callback_cs);
64 OFFSET(VCPU_failsafe_addr, struct vcpu,
65 arch.guest_context.failsafe_callback_eip);
66 OFFSET(VCPU_kernel_ss, struct vcpu,
67 arch.guest_context.kernel_ss);
68 OFFSET(VCPU_kernel_sp, struct vcpu,
69 arch.guest_context.kernel_sp);
70 OFFSET(VCPU_guest_context_flags, struct vcpu, arch.guest_context.flags);
/* Async exception (NMI/MCE) delivery state. */
71 OFFSET(VCPU_nmi_pending, struct vcpu, nmi_pending);
72 OFFSET(VCPU_mce_pending, struct vcpu, mce_pending);
73 OFFSET(VCPU_nmi_old_mask, struct vcpu, nmi_state.old_mask);
74 OFFSET(VCPU_mce_old_mask, struct vcpu, mce_state.old_mask);
75 OFFSET(VCPU_async_exception_mask, struct vcpu, async_exception_mask);
/* Re-export existing C constants under the same names for assembly use. */
76 DEFINE(VCPU_TRAP_NMI, VCPU_TRAP_NMI);
77 DEFINE(VCPU_TRAP_MCE, VCPU_TRAP_MCE);
78 DEFINE(_VGCF_failsafe_disables_events, _VGCF_failsafe_disables_events);
79 BLANK();
/* Hardware task-state segment: ring-0/ring-1 stack selector/pointer pairs. */
81 OFFSET(TSS_ss0, struct tss_struct, ss0);
82 OFFSET(TSS_esp0, struct tss_struct, esp0);
83 OFFSET(TSS_ss1, struct tss_struct, ss1);
84 OFFSET(TSS_esp1, struct tss_struct, esp1);
85 DEFINE(TSS_sizeof, sizeof(struct tss_struct));
86 BLANK();
/* AMD SVM per-vcpu state (VMCB pointer/physical address/sync flag). */
88 OFFSET(VCPU_svm_vmcb_pa, struct vcpu, arch.hvm_svm.vmcb_pa);
89 OFFSET(VCPU_svm_vmcb, struct vcpu, arch.hvm_svm.vmcb);
90 OFFSET(VCPU_svm_vmcb_in_sync, struct vcpu, arch.hvm_svm.vmcb_in_sync);
91 BLANK();
/* Intel VMX per-vcpu state, plus the common HVM guest CR2 slot. */
93 OFFSET(VCPU_vmx_launched, struct vcpu, arch.hvm_vmx.launched);
94 OFFSET(VCPU_vmx_realmode, struct vcpu, arch.hvm_vmx.vmx_realmode);
95 OFFSET(VCPU_vmx_emulate, struct vcpu, arch.hvm_vmx.vmx_emulate);
96 OFFSET(VCPU_vm86_seg_mask, struct vcpu, arch.hvm_vmx.vm86_segment_mask);
97 OFFSET(VCPU_hvm_guest_cr2, struct vcpu, arch.hvm_vcpu.guest_cr[2]);
98 BLANK();
/* Guest register fields kept directly inside the SVM VMCB. */
100 OFFSET(VMCB_rax, struct vmcb_struct, rax);
101 OFFSET(VMCB_rip, struct vmcb_struct, rip);
102 OFFSET(VMCB_rsp, struct vmcb_struct, rsp);
103 OFFSET(VMCB_rflags, struct vmcb_struct, rflags);
104 BLANK();
/* Shared-memory vcpu_info event-channel upcall flags. */
106 OFFSET(VCPUINFO_upcall_pending, vcpu_info_t, evtchn_upcall_pending);
107 OFFSET(VCPUINFO_upcall_mask, vcpu_info_t, evtchn_upcall_mask);
108 BLANK();
/* Per-CPU info block found at the base of each hypervisor stack. */
110 OFFSET(CPUINFO_guest_cpu_user_regs, struct cpu_info, guest_cpu_user_regs);
111 OFFSET(CPUINFO_processor_id, struct cpu_info, processor_id);
112 OFFSET(CPUINFO_current_vcpu, struct cpu_info, current_vcpu);
113 DEFINE(CPUINFO_sizeof, sizeof(struct cpu_info));
114 BLANK();
/* Pending exception/interrupt bounce frame to be delivered to a PV guest. */
116 OFFSET(TRAPBOUNCE_error_code, struct trap_bounce, error_code);
117 OFFSET(TRAPBOUNCE_flags, struct trap_bounce, flags);
118 OFFSET(TRAPBOUNCE_cs, struct trap_bounce, cs);
119 OFFSET(TRAPBOUNCE_eip, struct trap_bounce, eip);
120 BLANK();
/*
 * Perf-counter indices, only when counters are configured.
 * NOTE(review): "#if PERF_COUNTERS" requires the macro to expand to a
 * nonzero value when enabled (undefined evaluates as 0 here); if the
 * build merely defines it empty, "#ifdef" would be needed — confirm
 * against xen/perfc.h.
 */
122 #if PERF_COUNTERS
123 DEFINE(ASM_PERFC_hypercalls, PERFC_hypercalls);
124 DEFINE(ASM_PERFC_exceptions, PERFC_exceptions);
125 BLANK();
126 #endif
/* Fixed virtual address of the local APIC mapping. */
128 DEFINE(FIXMAP_apic_base, fix_to_virt(FIX_APIC_BASE));
129 BLANK();
/* log2 of the per-CPU irq_cpustat_t size, for shift-based array indexing. */
131 DEFINE(IRQSTAT_shift, LOG_2(sizeof(irq_cpustat_t)));
132 BLANK();
/* Offset of CPUID capability word 1 (extended features) in cpuinfo_x86. */
134 OFFSET(CPUINFO86_ext_features, struct cpuinfo_x86, x86_capability[1]);
135 BLANK();
/* Multiboot information structure fields read by early boot code. */
137 OFFSET(MB_flags, multiboot_info_t, flags);
138 OFFSET(MB_cmdline, multiboot_info_t, cmdline);
139 }