debuggers.hg

xen/arch/x86/debug.c @ 20357:de04fe4e472c

gdbsx: a gdbserver stub for xen.

It should be run in dom0 on a gdbsx-enabled hypervisor. For details,
please see tools/debugger/gdbsx/README

Signed-off-by: Mukesh Rathor <mukesh.rathor@oracle.com>
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Thu Oct 15 09:36:40 2009 +0100 (2009-10-15)
/*
 * Copyright (C) 2009, Mukesh Rathor, Oracle Corp. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <xen/config.h>
#include <xen/sched.h>
#include <xen/compile.h>
#include <xen/mm.h>
#include <xen/domain_page.h>
#include <xen/guest_access.h>
#include <asm/p2m.h>
/*
 * This file is for general routines common to more than one debugger,
 * such as kdb and gdbsx.
 */
#ifdef XEN_KDB_CONFIG
extern volatile int kdbdbg;
extern void kdbp(const char *fmt, ...);
#define DBGP(...) {(kdbdbg) ? kdbp(__VA_ARGS__):0;}
#define DBGP1(...) {(kdbdbg>1) ? kdbp(__VA_ARGS__):0;}
#define DBGP2(...) {(kdbdbg>2) ? kdbp(__VA_ARGS__):0;}
#else
#define DBGP1(...) {0;}
#define DBGP2(...) {0;}
#endif
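
/*
 * A minimal worked example (not in the original file): with kdb
 * configured, a call such as
 *
 *     DBGP2("vaddr:%lx\n", vaddr);
 *
 * expands to
 *
 *     {(kdbdbg>2) ? kdbp("vaddr:%lx\n", vaddr):0;}
 *
 * so the message prints only when the kdb verbosity variable exceeds 2;
 * in a non-kdb build the macro reduces to the empty statement {0;}.
 */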
typedef unsigned long dbgva_t;
typedef unsigned char dbgbyte_t;
/* Returns: mfn for the given (hvm guest) vaddr */
static unsigned long
dbg_hvm_va2mfn(dbgva_t vaddr, struct domain *dp, int toaddr)
{
    unsigned long mfn, gfn;
    uint32_t pfec = PFEC_page_present;
    p2m_type_t gfntype;

    DBGP2("vaddr:%lx domid:%d\n", vaddr, dp->domain_id);

    gfn = paging_gva_to_gfn(dp->vcpu[0], vaddr, &pfec);
    if ( gfn == INVALID_GFN )
    {
        DBGP2("kdb:bad gfn from gva_to_gfn\n");
        return INVALID_MFN;
    }

    mfn = mfn_x(gfn_to_mfn(dp, gfn, &gfntype));
    if ( p2m_is_readonly(gfntype) && toaddr )
    {
        DBGP2("kdb:p2m_is_readonly: gfntype:%x\n", gfntype);
        return INVALID_MFN;
    }

    DBGP2("X: vaddr:%lx domid:%d mfn:%lx\n", vaddr, dp->domain_id, mfn);
    return mfn;
}
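
/*
 * Hypothetical caller sketch (illustrative only, hence #if 0): the
 * toaddr flag requests write access, so the translation above fails
 * rather than hand back an mfn for a page with a read-only p2m type
 * such as p2m_ram_ro.
 */
#if 0
    mfn = dbg_hvm_va2mfn(vaddr, dp, 1 /* toaddr: intend to write */);
    if ( mfn == INVALID_MFN )
        return;    /* unmapped gva, or the gfn maps a read-only page */
#endif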
#if defined(__x86_64__)

/*
 * pgd3val: this is the value of init_mm.pgd[3] in a PV guest. It is optional.
 *          This is to assist debugging of modules in the guest. The kernel
 *          address space seems to always be mapped, but modules are not
 *          necessarily mapped in any arbitrary guest cr3 that we pick if
 *          pgd3val is 0. Modules should always be addressable if we use the
 *          cr3 from init_mm. Since pgd3val is already a pgd value, i.e.
 *          cr3->pgd[3], we just need to do 2-level lookups.
 *
 * NOTE: 4-level paging works for 32-bit PAE guests also because the cpu runs
 *       in IA32-e mode.
 *
 * Returns: mfn for the given (pv guest) vaddr
 */
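
/*
 * A minimal sketch of the two walk paths described above (assuming, as
 * the code below does, that a non-zero pgd3val is the raw l3 entry
 * init_mm.pgd[3] taken from the guest):
 *
 *   pgd3val == 0:  cr3 -> l4 table -> l3 table -> l2 table -> l1 table
 *   pgd3val != 0:  pgd3val >> PAGE_SHIFT -> l2 table -> l1 table
 */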
static unsigned long
dbg_pv_va2mfn(dbgva_t vaddr, struct domain *dp, uint64_t pgd3val)
{
    l4_pgentry_t l4e, *l4t;
    l3_pgentry_t l3e, *l3t;
    l2_pgentry_t l2e, *l2t;
    l1_pgentry_t l1e, *l1t;
    unsigned long cr3 = (pgd3val ? pgd3val : dp->vcpu[0]->arch.cr3);
    unsigned long mfn = cr3 >> PAGE_SHIFT;

    DBGP2("vaddr:%lx domid:%d cr3:%lx pgd3:%lx\n", vaddr, dp->domain_id,
          cr3, pgd3val);

    if ( pgd3val == 0 )
    {
        l4t = mfn_to_virt(mfn);
        l4e = l4t[l4_table_offset(vaddr)];
        mfn = l4e_get_pfn(l4e);
        DBGP2("l4t:%p l4to:%lx l4e:%lx mfn:%lx\n", l4t,
              l4_table_offset(vaddr), l4e, mfn);
        if ( !(l4e_get_flags(l4e) & _PAGE_PRESENT) )
        {
            DBGP1("l4 PAGE not present. vaddr:%lx cr3:%lx\n", vaddr, cr3);
            return INVALID_MFN;
        }

        l3t = mfn_to_virt(mfn);
        l3e = l3t[l3_table_offset(vaddr)];
        mfn = l3e_get_pfn(l3e);
        DBGP2("l3t:%p l3to:%lx l3e:%lx mfn:%lx\n", l3t,
              l3_table_offset(vaddr), l3e, mfn);
        if ( !(l3e_get_flags(l3e) & _PAGE_PRESENT) )
        {
            DBGP1("l3 PAGE not present. vaddr:%lx cr3:%lx\n", vaddr, cr3);
            return INVALID_MFN;
        }
    }

    l2t = mfn_to_virt(mfn);
    l2e = l2t[l2_table_offset(vaddr)];
    mfn = l2e_get_pfn(l2e);
    DBGP2("l2t:%p l2to:%lx l2e:%lx mfn:%lx\n", l2t, l2_table_offset(vaddr),
          l2e, mfn);
    if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) ||
         (l2e_get_flags(l2e) & _PAGE_PSE) )
    {
        DBGP1("l2 PAGE not present. vaddr:%lx cr3:%lx\n", vaddr, cr3);
        return INVALID_MFN;
    }
    l1t = mfn_to_virt(mfn);
    l1e = l1t[l1_table_offset(vaddr)];
    mfn = l1e_get_pfn(l1e);
    DBGP2("l1t:%p l1to:%lx l1e:%lx mfn:%lx\n", l1t, l1_table_offset(vaddr),
          l1e, mfn);

    return mfn_valid(mfn) ? mfn : INVALID_MFN;
}
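
/*
 * Illustrative helper (not in the original file): the lN_table_offset()
 * macros used above decompose a 48-bit virtual address as 9+9+9+9+12
 * bits.  A standalone equivalent, assuming 4K pages and 512-entry
 * tables:
 */
#if 0
static unsigned int va_table_offset(unsigned long va, int level /* 1..4 */)
{
    /* each level indexes 9 bits above the 12-bit page offset */
    return (va >> (12 + 9 * (level - 1))) & 0x1ff;
}
#endif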
#else

/* Returns: mfn for the given (pv guest) vaddr */
static unsigned long
dbg_pv_va2mfn(dbgva_t vaddr, struct domain *dp, uint64_t pgd3val)
{
    l3_pgentry_t l3e, *l3t;
    l2_pgentry_t l2e, *l2t;
    l1_pgentry_t l1e, *l1t;
    unsigned long cr3 = (pgd3val ? pgd3val : dp->vcpu[0]->arch.cr3);
    unsigned long mfn = cr3 >> PAGE_SHIFT;

    DBGP2("vaddr:%lx domid:%d cr3:%lx pgd3:%lx\n", vaddr, dp->domain_id,
          cr3, pgd3val);

    if ( pgd3val == 0 )
    {
        l3t = map_domain_page(mfn);
        l3t += (cr3 & 0xFE0UL) >> 3;
        l3e = l3t[l3_table_offset(vaddr)];
        mfn = l3e_get_pfn(l3e);
        unmap_domain_page(l3t);
        if ( !(l3e_get_flags(l3e) & _PAGE_PRESENT) )
            return INVALID_MFN;
    }

    l2t = map_domain_page(mfn);
    l2e = l2t[l2_table_offset(vaddr)];
    mfn = l2e_get_pfn(l2e);
    unmap_domain_page(l2t);
    if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) ||
         (l2e_get_flags(l2e) & _PAGE_PSE) )
        return INVALID_MFN;

    l1t = map_domain_page(mfn);
    l1e = l1t[l1_table_offset(vaddr)];
    mfn = l1e_get_pfn(l1e);
    unmap_domain_page(l1t);

    return mfn_valid(mfn) ? mfn : INVALID_MFN;
}

#endif /* defined(__x86_64__) */
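
/*
 * Note on the PAE walk above (illustrative arithmetic, not in the
 * original file): under PAE the top-level table is four 8-byte l3
 * entries, only 32-byte aligned, so cr3 carries the table's offset
 * within its page in bits 5-11.  "l3t += (cr3 & 0xFE0UL) >> 3" turns
 * that byte offset into an index in 8-byte entries.  E.g. for
 * cr3 = 0x1234a60: mfn = 0x1234, byte offset 0xa60, so l3t advances
 * by 0xa60 >> 3 = 0x14c entries.
 */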
/* Returns: number of bytes remaining to be copied */
static int
dbg_rw_guest_mem(dbgva_t addr, dbgbyte_t *buf, int len, struct domain *dp,
                 int toaddr, uint64_t pgd3)
{
    while ( len > 0 )
    {
        char *va;
        unsigned long mfn, pagecnt;

        pagecnt = min_t(long, PAGE_SIZE - (addr & ~PAGE_MASK), len);

        mfn = (dp->is_hvm
               ? dbg_hvm_va2mfn(addr, dp, toaddr)
               : dbg_pv_va2mfn(addr, dp, pgd3));
        if ( mfn == INVALID_MFN )
            break;

        va = map_domain_page(mfn);
        va = va + (addr & (PAGE_SIZE-1));

        if ( toaddr )
        {
            memcpy(va, buf, pagecnt);    /* va = buf */
            paging_mark_dirty(dp, mfn);
        }
        else
        {
            memcpy(buf, va, pagecnt);    /* buf = va */
        }

        unmap_domain_page(va);

        addr += pagecnt;
        buf += pagecnt;
        len -= pagecnt;
    }

    return len;
}
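
/*
 * Illustrative helper (not in the original file): how the loop above
 * sizes each copy so it never crosses a page boundary.  With 4K pages,
 * addr = 0x1ff8 and len = 32 yield chunks of 8 bytes (to the end of the
 * first page) and then 24 bytes.
 */
#if 0
static int dbg_chunk_len(unsigned long addr, int len)
{
    int to_page_end = PAGE_SIZE - (addr & ~PAGE_MASK);

    return (len < to_page_end) ? len : to_page_end;
}
#endif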
/*
 * addr is hypervisor addr if domid == IDLE_DOMAIN_ID, else it's guest addr
 * buf is debugger buffer.
 * if toaddr, then addr = buf (write to addr), else buf = addr (read from
 * guest).
 * pgd3: value of init_mm.pgd[3] in guest. see above.
 * Returns: number of bytes remaining to be copied.
 */
int
dbg_rw_mem(dbgva_t addr, dbgbyte_t *buf, int len, domid_t domid, int toaddr,
           uint64_t pgd3)
{
    struct domain *dp = get_domain_by_id(domid);
    int hyp = (domid == IDLE_DOMAIN_ID);

    DBGP2("gmem:addr:%lx buf:%p len:$%d domid:%x toaddr:%x dp:%p\n",
          addr, buf, len, domid, toaddr, dp);
    if ( hyp )
    {
        if ( toaddr )
            len = __copy_to_user((void *)addr, buf, len);
        else
            len = __copy_from_user(buf, (void *)addr, len);
    }
    else
    {
        if ( dp && !dp->is_dying )    /* make sure guest is still there */
            len = dbg_rw_guest_mem(addr, buf, len, dp, toaddr, pgd3);
    }

    if ( dp )
        put_domain(dp);    /* drop the reference taken by get_domain_by_id() */

    DBGP2("gmem:exit:len:$%d\n", len);
    return len;
}
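
/*
 * Hypothetical caller sketch (illustrative only; vaddr and domid are
 * assumed locals): read 16 bytes of guest memory on behalf of a
 * debugger.  A non-zero return means the tail of the buffer was not
 * copied.
 */
#if 0
    dbgbyte_t buf[16];
    int remain = dbg_rw_mem(vaddr, buf, sizeof(buf), domid,
                            0 /* read from guest */, 0 /* no pgd3 hint */);
    if ( remain != 0 )
        /* only sizeof(buf) - remain leading bytes are valid */ ;
#endif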