Coverage Report

Created: 2017-10-25 09:10

/root/src/xen/xen/include/asm/asm-offsets.h
The only recorded execution count in this report is 0 (uncovered), on the _VGCF_failsafe_disables_events line; no other line of this generated header carries a count.

/*
 * DO NOT MODIFY.
 *
 * This file was auto-generated from arch/x86/asm-offsets.s
 *
 */

#ifndef __ASM_OFFSETS_H__
#define __ASM_OFFSETS_H__

#define UREGS_r15 0 /* offsetof(struct cpu_user_regs, r15) */
#define UREGS_r14 8 /* offsetof(struct cpu_user_regs, r14) */
#define UREGS_r13 16 /* offsetof(struct cpu_user_regs, r13) */
#define UREGS_r12 24 /* offsetof(struct cpu_user_regs, r12) */
#define UREGS_rbp 32 /* offsetof(struct cpu_user_regs, rbp) */
#define UREGS_rbx 40 /* offsetof(struct cpu_user_regs, rbx) */
#define UREGS_r11 48 /* offsetof(struct cpu_user_regs, r11) */
#define UREGS_r10 56 /* offsetof(struct cpu_user_regs, r10) */
#define UREGS_r9 64 /* offsetof(struct cpu_user_regs, r9) */
#define UREGS_r8 72 /* offsetof(struct cpu_user_regs, r8) */
#define UREGS_rax 80 /* offsetof(struct cpu_user_regs, rax) */
#define UREGS_rcx 88 /* offsetof(struct cpu_user_regs, rcx) */
#define UREGS_rdx 96 /* offsetof(struct cpu_user_regs, rdx) */
#define UREGS_rsi 104 /* offsetof(struct cpu_user_regs, rsi) */
#define UREGS_rdi 112 /* offsetof(struct cpu_user_regs, rdi) */
#define UREGS_error_code 120 /* offsetof(struct cpu_user_regs, error_code) */
#define UREGS_entry_vector 124 /* offsetof(struct cpu_user_regs, entry_vector) */
#define UREGS_saved_upcall_mask 140 /* offsetof(struct cpu_user_regs, saved_upcall_mask) */
#define UREGS_rip 128 /* offsetof(struct cpu_user_regs, rip) */
#define UREGS_cs 136 /* offsetof(struct cpu_user_regs, cs) */
#define UREGS_eflags 144 /* offsetof(struct cpu_user_regs, rflags) */
#define UREGS_rsp 152 /* offsetof(struct cpu_user_regs, rsp) */
#define UREGS_ss 160 /* offsetof(struct cpu_user_regs, ss) */
#define UREGS_ds 176 /* offsetof(struct cpu_user_regs, ds) */
#define UREGS_es 168 /* offsetof(struct cpu_user_regs, es) */
#define UREGS_fs 184 /* offsetof(struct cpu_user_regs, fs) */
#define UREGS_gs 192 /* offsetof(struct cpu_user_regs, gs) */
#define UREGS_kernel_sizeof 168 /* offsetof(struct cpu_user_regs, es) */
#define UREGS_user_sizeof 200 /* sizeof(struct cpu_user_regs) */

#define irq_caps_offset 424 /* offsetof(struct domain, irq_caps) */
#define next_in_list_offset 112 /* offsetof(struct domain, next_in_list) */
#define VCPU_processor 4 /* offsetof(struct vcpu, processor) */
#define VCPU_domain 16 /* offsetof(struct vcpu, domain) */
#define VCPU_vcpu_info 8 /* offsetof(struct vcpu, vcpu_info) */
#define VCPU_trap_bounce 1448 /* offsetof(struct vcpu, arch.pv_vcpu.trap_bounce) */
#define VCPU_int80_bounce 1464 /* offsetof(struct vcpu, arch.pv_vcpu.int80_bounce) */
#define VCPU_thread_flags 920 /* offsetof(struct vcpu, arch.flags) */
#define VCPU_event_addr 1376 /* offsetof(struct vcpu, arch.pv_vcpu.event_callback_eip) */
#define VCPU_event_sel 1392 /* offsetof(struct vcpu, arch.pv_vcpu.event_callback_cs) */
#define VCPU_failsafe_addr 1384 /* offsetof(struct vcpu, arch.pv_vcpu.failsafe_callback_eip) */
#define VCPU_failsafe_sel 1396 /* offsetof(struct vcpu, arch.pv_vcpu.failsafe_callback_cs) */
#define VCPU_syscall_addr 1392 /* offsetof(struct vcpu, arch.pv_vcpu.syscall_callback_eip) */
#define VCPU_syscall32_addr 1400 /* offsetof(struct vcpu, arch.pv_vcpu.syscall32_callback_eip) */
#define VCPU_syscall32_sel 1416 /* offsetof(struct vcpu, arch.pv_vcpu.syscall32_callback_cs) */
#define VCPU_syscall32_disables_events 1420 /* offsetof(struct vcpu, arch.pv_vcpu.syscall32_disables_events) */
#define VCPU_sysenter_addr 1408 /* offsetof(struct vcpu, arch.pv_vcpu.sysenter_callback_eip) */
#define VCPU_sysenter_sel 1418 /* offsetof(struct vcpu, arch.pv_vcpu.sysenter_callback_cs) */
#define VCPU_sysenter_disables_events 1421 /* offsetof(struct vcpu, arch.pv_vcpu.sysenter_disables_events) */
#define VCPU_trap_ctxt 1160 /* offsetof(struct vcpu, arch.pv_vcpu.trap_ctxt) */
#define VCPU_kernel_sp 1304 /* offsetof(struct vcpu, arch.pv_vcpu.kernel_sp) */
#define VCPU_kernel_ss 1296 /* offsetof(struct vcpu, arch.pv_vcpu.kernel_ss) */
#define VCPU_iopl 1492 /* offsetof(struct vcpu, arch.pv_vcpu.iopl) */
#define VCPU_guest_context_flags 648 /* offsetof(struct vcpu, arch.vgc_flags) */
#define VCPU_nmi_pending 269 /* offsetof(struct vcpu, async_exception_state[(1)-1].pending) */
#define VCPU_mce_pending 271 /* offsetof(struct vcpu, async_exception_state[(2)-1].pending) */
#define VCPU_nmi_old_mask 270 /* offsetof(struct vcpu, async_exception_state[(1)-1].old_mask) */
#define VCPU_mce_old_mask 272 /* offsetof(struct vcpu, async_exception_state[(2)-1].old_mask) */
#define VCPU_async_exception_mask 273 /* offsetof(struct vcpu, async_exception_mask) */
#define VCPU_TRAP_NMI 1 /* VCPU_TRAP_NMI */
#define VCPU_TRAP_MCE 2 /* VCPU_TRAP_MCE */
#define _VGCF_failsafe_disables_events 3 /* _VGCF_failsafe_disables_events */
#define _VGCF_syscall_disables_events 4 /* _VGCF_syscall_disables_events */

#define VCPU_svm_vmcb_pa 1480 /* offsetof(struct vcpu, arch.hvm_vcpu.u.svm.vmcb_pa) */
#define VCPU_svm_vmcb 1472 /* offsetof(struct vcpu, arch.hvm_vcpu.u.svm.vmcb) */
#define VCPU_svm_vmcb_in_sync 1500 /* offsetof(struct vcpu, arch.hvm_vcpu.u.svm.vmcb_in_sync) */

#define VCPU_vmx_launched 1516 /* offsetof(struct vcpu, arch.hvm_vcpu.u.vmx.launched) */
#define VCPU_vmx_realmode 1737 /* offsetof(struct vcpu, arch.hvm_vcpu.u.vmx.vmx_realmode) */
#define VCPU_vmx_emulate 1738 /* offsetof(struct vcpu, arch.hvm_vcpu.u.vmx.vmx_emulate) */
#define VCPU_vm86_seg_mask 1740 /* offsetof(struct vcpu, arch.hvm_vcpu.u.vmx.vm86_segment_mask) */
#define VCPU_hvm_guest_cr2 1040 /* offsetof(struct vcpu, arch.hvm_vcpu.guest_cr[2]) */

#define VCPU_nhvm_guestmode 1960 /* offsetof(struct vcpu, arch.hvm_vcpu.nvcpu.nv_guestmode) */
#define VCPU_nhvm_p2m 2192 /* offsetof(struct vcpu, arch.hvm_vcpu.nvcpu.nv_p2m) */
#define VCPU_nsvm_hap_enabled 2148 /* offsetof(struct vcpu, arch.hvm_vcpu.nvcpu.u.nsvm.ns_hap_enabled) */

#define DOMAIN_is_32bit_pv 2112 /* offsetof(struct domain, arch.is_32bit_pv) */

#define VMCB_rax 1528 /* offsetof(struct vmcb_struct, rax) */
#define VMCB_rip 1400 /* offsetof(struct vmcb_struct, rip) */
#define VMCB_rsp 1496 /* offsetof(struct vmcb_struct, rsp) */
#define VMCB_rflags 1392 /* offsetof(struct vmcb_struct, rflags) */

#define VCPUINFO_upcall_pending 0 /* offsetof(struct vcpu_info, evtchn_upcall_pending) */
#define VCPUINFO_upcall_mask 1 /* offsetof(struct vcpu_info, evtchn_upcall_mask) */

#define COMPAT_VCPUINFO_upcall_pending 0 /* offsetof(struct compat_vcpu_info, evtchn_upcall_pending) */
#define COMPAT_VCPUINFO_upcall_mask 1 /* offsetof(struct compat_vcpu_info, evtchn_upcall_mask) */

#define CPUINFO_guest_cpu_user_regs 0 /* offsetof(struct cpu_info, guest_cpu_user_regs) */
#define CPUINFO_processor_id 200 /* offsetof(struct cpu_info, processor_id) */
#define CPUINFO_current_vcpu 208 /* offsetof(struct cpu_info, current_vcpu) */
#define CPUINFO_cr4 224 /* offsetof(struct cpu_info, cr4) */
#define CPUINFO_sizeof 232 /* sizeof(struct cpu_info) */

#define TRAPINFO_eip 8 /* offsetof(struct trap_info, address) */
#define TRAPINFO_cs 2 /* offsetof(struct trap_info, cs) */
#define TRAPINFO_flags 1 /* offsetof(struct trap_info, flags) */
#define TRAPINFO_sizeof 16 /* sizeof(struct trap_info) */

#define TRAPBOUNCE_error_code 0 /* offsetof(struct trap_bounce, error_code) */
#define TRAPBOUNCE_flags 4 /* offsetof(struct trap_bounce, flags) */
#define TRAPBOUNCE_cs 6 /* offsetof(struct trap_bounce, cs) */
#define TRAPBOUNCE_eip 8 /* offsetof(struct trap_bounce, eip) */

#define IRQSTAT_shift 7 /* LOG_2(sizeof(irq_cpustat_t)) */
#define IRQSTAT_softirq_pending 0 /* offsetof(irq_cpustat_t, __softirq_pending) */

#define CPUINFO_features 12 /* offsetof(struct cpuinfo_x86, x86_capability) */

#define MB_flags 0 /* offsetof(multiboot_info_t, flags) */
#define MB_cmdline 16 /* offsetof(multiboot_info_t, cmdline) */
#define MB_mem_lower 4 /* offsetof(multiboot_info_t, mem_lower) */

#define MB2_fixed_sizeof 8 /* sizeof(multiboot2_fixed_t) */
#define MB2_fixed_total_size 0 /* offsetof(multiboot2_fixed_t, total_size) */
#define MB2_tag_type 0 /* offsetof(multiboot2_tag_t, type) */
#define MB2_tag_size 4 /* offsetof(multiboot2_tag_t, size) */
#define MB2_load_base_addr 8 /* offsetof(multiboot2_tag_load_base_addr_t, load_base_addr) */
#define MB2_mem_lower 8 /* offsetof(multiboot2_tag_basic_meminfo_t, mem_lower) */
#define MB2_efi64_st 8 /* offsetof(multiboot2_tag_efi64_t, pointer) */
#define MB2_efi64_ih 8 /* offsetof(multiboot2_tag_efi64_ih_t, pointer) */

#define l2_identmap_sizeof 16384 /* sizeof(l2_identmap) */

#define DOMAIN_vm_assist 464 /* offsetof(struct domain, vm_assist) */

#endif
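
The header's own banner says it was auto-generated from arch/x86/asm-offsets.s. As background, the following is a minimal, self-contained sketch of the usual generation technique; the struct, macro, and function names are illustrative only and this is not the actual Xen asm-offsets.c or its build rules. The idea is that a small C file forces the compiler to embed each offsetof() value as an immediate inside a marker string in the compiled assembly, and a later build step extracts those markers and rewrites them into #define lines like the ones listed above. Because only compilation to assembly is needed, no target code ever has to run, which keeps the scheme cross-compile friendly.

#include <stddef.h>

/* Illustrative structure standing in for something like struct cpu_user_regs. */
struct example_regs {
    unsigned long r15;
    unsigned long r14;
};

/*
 * Force the compiler to print the compile-time constant 'val' as an
 * immediate inside a marker string in the generated assembly, e.g.
 *   .ascii "==>#define EXAMPLE_r14 $8 ...<=="
 * A sed/grep step in the build then pulls these marker lines back out,
 * strips the immediate prefix ('$' on x86), and writes the resulting
 * #define lines into the generated header.
 */
#define DEFINE(sym, val)                                                     \
    asm volatile ( "\n.ascii \"==>#define " #sym " %0 /* " #val " */<==\""  \
                   : : "i" (val) )

#define OFFSET(sym, str, mem) DEFINE(sym, offsetof(str, mem))

/* Never executed; the file is only ever compiled to assembly (cc -S). */
void asm_offsets_dummy(void)
{
    OFFSET(EXAMPLE_r15, struct example_regs, r15); /* -> #define EXAMPLE_r15 0 */
    OFFSET(EXAMPLE_r14, struct example_regs, r14); /* -> #define EXAMPLE_r14 8 */
}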