/root/src/xen/xen/arch/x86/oprofile/backtrace.c
Line | Count | Source (jump to first uncovered line) |
1 | | /** |
2 | | * @file backtrace.c |
3 | | * |
4 | | * @remark Copyright 2002 OProfile authors |
5 | | * @remark Read the file COPYING |
6 | | * |
7 | | * @author John Levon |
8 | | * @author David Smith |
9 | | * Modified for Xen by Amitabha Roy |
10 | | * |
11 | | */ |
12 | | |
13 | | #include <xen/types.h> |
14 | | #include <asm/page.h> |
15 | | #include <xen/xenoprof.h> |
16 | | #include <xen/guest_access.h> |
17 | | |
/*
 * One stack frame as laid out in memory by frame-pointer-based code:
 * the saved caller frame pointer followed by the return address.
 * __packed so the structure matches the guest's in-memory layout
 * exactly, with no compiler-inserted padding.
 */
struct __packed frame_head {
    struct frame_head * ebp;   /* saved frame pointer of the caller's frame */
    unsigned long ret;         /* return address recorded in this frame */
};
typedef struct frame_head frame_head_t;
DEFINE_XEN_GUEST_HANDLE(frame_head_t);

/* 32-bit guest variant of struct frame_head (compat ABI layout). */
struct __packed frame_head_32bit {
    uint32_t ebp;
    uint32_t ret;
};
typedef struct frame_head_32bit frame_head32_t;
DEFINE_COMPAT_HANDLE(frame_head32_t);
31 | | |
32 | | static struct frame_head * |
33 | | dump_hypervisor_backtrace(struct vcpu *vcpu, const struct frame_head *head, |
34 | | int mode) |
35 | 0 | { |
36 | 0 | if (!xenoprof_add_trace(vcpu, head->ret, mode)) |
37 | 0 | return 0; |
38 | 0 | |
39 | 0 | /* frame pointers should strictly progress back up the stack |
40 | 0 | * (towards higher addresses) */ |
41 | 0 | if (head >= head->ebp) |
42 | 0 | return NULL; |
43 | 0 | |
44 | 0 | return head->ebp; |
45 | 0 | } |
46 | | |
/*
 * Is @vcpu currently executing 32-bit code?  An HVM vcpu is 32-bit
 * when long mode is inactive; a PV vcpu is 32-bit when the guest was
 * built as a 32-bit PV guest.
 */
static inline int is_32bit_vcpu(struct vcpu *vcpu)
{
    return is_hvm_vcpu(vcpu) ? !hvm_long_mode_active(vcpu)
                             : is_pv_32bit_vcpu(vcpu);
}
54 | | |
/*
 * Fetch one stack frame from guest memory, record its return address,
 * and return the guest's next frame pointer (or NULL/0 to stop the
 * walk).  32-bit guests use the compat frame layout; 64-bit guests the
 * native one.  Accessibility is validated for two frames so the next
 * iteration's frame is known to be addressable as well.
 */
static struct frame_head *
dump_guest_backtrace(struct vcpu *vcpu, const struct frame_head *head,
                     int mode)
{
    frame_head_t bufhead;

    if ( is_32bit_vcpu(vcpu) )
    {
        /* Wrap the raw guest pointer in a compat handle for copying. */
        __compat_handle_const_frame_head32_t guest_head =
            { .c = (unsigned long)head };
        frame_head32_t bufhead32;

        /* Also check accessibility of one struct frame_head beyond */
        if (!compat_handle_okay(guest_head, 2))
            return 0;
        if (__copy_from_compat(&bufhead32, guest_head, 1))
            return 0;
        /* Widen the 32-bit frame into the native representation. */
        bufhead.ebp = (struct frame_head *)(unsigned long)bufhead32.ebp;
        bufhead.ret = bufhead32.ret;
    }
    else
    {
        XEN_GUEST_HANDLE(const_frame_head_t) guest_head;
        XEN_GUEST_HANDLE_PARAM(const_frame_head_t) guest_head_param =
            const_guest_handle_from_ptr(head, frame_head_t);
        guest_head = guest_handle_from_param(guest_head_param,
                                             const_frame_head_t);

        /* Also check accessibility of one struct frame_head beyond */
        if (!guest_handle_okay(guest_head, 2))
            return 0;
        if (__copy_from_guest(&bufhead, guest_head, 1))
            return 0;
    }

    if (!xenoprof_add_trace(vcpu, bufhead.ret, mode))
        return 0;

    /* frame pointers should strictly progress back up the stack
     * (towards higher addresses) */
    if (head >= bufhead.ebp)
        return NULL;

    return bufhead.ebp;
}
100 | | |
101 | | /* |
102 | | * | | /\ Higher addresses |
103 | | * | | |
104 | | * --------------- stack base (address of current_thread_info) |
105 | | * | thread info | |
106 | | * . . |
107 | | * | stack | |
108 | | * --------------- saved regs->ebp value if valid (frame_head address) |
109 | | * . . |
110 | | * --------------- saved regs->rsp value if x86_64 |
111 | | * | | |
112 | | * --------------- struct pt_regs * stored on stack if 32-bit |
113 | | * | | |
114 | | * . . |
115 | | * | | |
116 | | * --------------- %esp |
117 | | * | | |
118 | | * | | \/ Lower addresses |
119 | | * |
120 | | * Thus, regs (or regs->rsp for x86_64) <-> stack base restricts the |
121 | | * valid(ish) ebp values. Note: (1) for x86_64, NMI and several other |
122 | | * exceptions use special stacks, maintained by the interrupt stack table |
123 | | * (IST). These stacks are set up in trap_init() in |
124 | | * arch/x86_64/kernel/traps.c. Thus, for x86_64, regs now does not point |
125 | | * to the kernel stack; instead, it points to some location on the NMI |
126 | | * stack. On the other hand, regs->rsp is the stack pointer saved when the |
127 | | * NMI occurred. (2) For 32-bit, regs->esp is not valid because the |
128 | | * processor does not save %esp on the kernel stack when interrupts occur |
129 | | * in the kernel mode. |
130 | | */ |
#if defined(CONFIG_FRAME_POINTER)
/*
 * A candidate frame pointer is plausible only when it lies strictly
 * between the interrupted stack pointer and the top of that stack
 * (frames grow towards higher addresses as we unwind).
 */
static int valid_hypervisor_stack(const struct frame_head *head,
                                  const struct cpu_user_regs *regs)
{
    unsigned long frame = (unsigned long)head;
    unsigned long sp = (unsigned long)regs->rsp;
    unsigned long stack_top = (sp & ~(STACK_SIZE - 1)) + STACK_SIZE;

    return frame > sp && frame < stack_top;
}
#else
/* without fp, it's just junk */
static int valid_hypervisor_stack(const struct frame_head *head,
                                  const struct cpu_user_regs *regs)
{
    return 0;
}
#endif
149 | | |
150 | | void xenoprof_backtrace(struct vcpu *vcpu, const struct cpu_user_regs *regs, |
151 | | unsigned long depth, int mode) |
152 | 0 | { |
153 | 0 | const struct frame_head *head = (void *)regs->rbp; |
154 | 0 |
|
155 | 0 | if (mode > 1) { |
156 | 0 | while (depth-- && valid_hypervisor_stack(head, regs)) |
157 | 0 | head = dump_hypervisor_backtrace(vcpu, head, mode); |
158 | 0 | return; |
159 | 0 | } |
160 | 0 |
|
161 | 0 | while (depth-- && head) |
162 | 0 | head = dump_guest_backtrace(vcpu, head, mode); |
163 | 0 | } |