debuggers.hg

view xen/arch/x86/debug.c @ 22906:700ac6445812

Now add KDB to the non-kdb tree
author Mukesh Rathor
date Thu Feb 03 15:42:41 2011 -0800
parents 5ac189556629

/*
 * Copyright (C) 2009, Mukesh Rathor, Oracle Corp. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <xen/config.h>
#include <xen/sched.h>
#include <xen/compile.h>
#include <xen/mm.h>
#include <xen/domain_page.h>
#include <xen/guest_access.h>
#include <asm/p2m.h>
/*
 * This file is for general routines common to more than one debugger, such
 * as kdb and gdbsx.
 */
#ifdef XEN_KDB_CONFIG
extern volatile int kdbdbg;
#define DBGP(...)  {(kdbdbg)     ? kdbp(__VA_ARGS__) : 0;}
#define DBGP1(...) {(kdbdbg > 1) ? kdbp(__VA_ARGS__) : 0;}
#define DBGP2(...) {(kdbdbg > 2) ? kdbp(__VA_ARGS__) : 0;}
#else
#define DBGP(...)  {0;}
#define DBGP1(...) {0;}
#define DBGP2(...) {0;}
#endif
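
/*
 * Illustration (not from the original file): verbosity is cumulative. With
 * kdbdbg set to 2, the first two lines below print and the third is silent:
 *
 *     DBGP("entered %s\n", __FUNCTION__);     printed when kdbdbg != 0
 *     DBGP1("vaddr:%lx\n", vaddr);            printed when kdbdbg > 1
 *     DBGP2("  mfn:%lx\n", mfn);              printed only when kdbdbg > 2
 */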
typedef unsigned long dbgva_t;
typedef unsigned char dbgbyte_t;
/* Returns: mfn for the given (hvm guest) vaddr */
static unsigned long
dbg_hvm_va2mfn(dbgva_t vaddr, struct domain *dp, int toaddr)
{
    unsigned long mfn, gfn;
    uint32_t pfec = PFEC_page_present;
    p2m_type_t gfntype;

    DBGP2("vaddr:%lx domid:%d\n", vaddr, dp->domain_id);

    gfn = paging_gva_to_gfn(dp->vcpu[0], vaddr, &pfec);
    if ( gfn == INVALID_GFN )
    {
        DBGP2("kdb:bad gfn from gva_to_gfn\n");
        return INVALID_MFN;
    }

    /* toaddr is set when the caller intends to write: refuse r/o pages */
    mfn = mfn_x(gfn_to_mfn(p2m_get_hostp2m(dp), gfn, &gfntype));
    if ( p2m_is_readonly(gfntype) && toaddr )
    {
        DBGP2("kdb:p2m_is_readonly: gfntype:%x\n", gfntype);
        return INVALID_MFN;
    }

    DBGP2("X: vaddr:%lx domid:%d mfn:%lx\n", vaddr, dp->domain_id, mfn);
    return mfn;
}
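
/*
 * Unlike the PV walkers below, the HVM path above does not read guest page
 * tables by hand: paging_gva_to_gfn() performs the gva->gfn translation
 * through the shadow/HAP paging support, and gfn_to_mfn() then resolves the
 * gfn through the p2m, which also yields the page type checked for r/o.
 */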
#if defined(__x86_64__)

/*
 * pgd3val: this is the value of init_mm.pgd[3] in a PV guest. It is
 *          optional, to assist debugging of modules in the guest. The kernel
 *          address space seems to always be mapped, but modules are not
 *          necessarily mapped in any arbitrary guest cr3 that we pick if
 *          pgd3val is 0. Modules should always be addressable if we use cr3
 *          from init_mm. Since pgd3val is already a pgd value, cr3->pgd[3],
 *          we just need to do 2 level lookups.
 *
 * NOTE: 4 level paging works for 32-bit PAE guests also because the cpu runs
 *       in IA-32e mode.
 * Returns: mfn for the given (pv guest) vaddr
 */
static unsigned long
dbg_pv_va2mfn(dbgva_t vaddr, struct domain *dp, uint64_t pgd3val)
{
    l4_pgentry_t l4e, *l4t;
    l3_pgentry_t l3e, *l3t;
    l2_pgentry_t l2e, *l2t;
    l1_pgentry_t l1e, *l1t;
    unsigned long cr3 = (pgd3val ? pgd3val : dp->vcpu[0]->arch.cr3);
    unsigned long mfn = cr3 >> PAGE_SHIFT;

    DBGP2("vaddr:%lx domid:%d cr3:%lx pgd3:%lx\n", vaddr, dp->domain_id,
          cr3, pgd3val);

    if ( pgd3val == 0 )
    {
        l4t = mfn_to_virt(mfn);
        l4e = l4t[l4_table_offset(vaddr)];
        mfn = l4e_get_pfn(l4e);
        DBGP2("l4t:%p l4to:%lx l4e:%lx mfn:%lx\n", l4t,
              l4_table_offset(vaddr), l4e_get_intpte(l4e), mfn);
        if ( !(l4e_get_flags(l4e) & _PAGE_PRESENT) )
        {
            DBGP1("l4 PAGE not present. vaddr:%lx cr3:%lx\n", vaddr, cr3);
            return INVALID_MFN;
        }

        l3t = mfn_to_virt(mfn);
        l3e = l3t[l3_table_offset(vaddr)];
        mfn = l3e_get_pfn(l3e);
        DBGP2("l3t:%p l3to:%lx l3e:%lx mfn:%lx\n", l3t,
              l3_table_offset(vaddr), l3e_get_intpte(l3e), mfn);
        if ( !(l3e_get_flags(l3e) & _PAGE_PRESENT) )
        {
            DBGP1("l3 PAGE not present. vaddr:%lx cr3:%lx\n", vaddr, cr3);
            return INVALID_MFN;
        }
    }

    l2t = mfn_to_virt(mfn);
    l2e = l2t[l2_table_offset(vaddr)];
    mfn = l2e_get_pfn(l2e);
    DBGP2("l2t:%p l2to:%lx l2e:%lx mfn:%lx\n", l2t, l2_table_offset(vaddr),
          l2e_get_intpte(l2e), mfn);
    if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) ||
         (l2e_get_flags(l2e) & _PAGE_PSE) )
    {
        DBGP1("l2 PAGE not present. vaddr:%lx cr3:%lx\n", vaddr, cr3);
        return INVALID_MFN;
    }

    l1t = mfn_to_virt(mfn);
    l1e = l1t[l1_table_offset(vaddr)];
    mfn = l1e_get_pfn(l1e);
    DBGP2("l1t:%p l1to:%lx l1e:%lx mfn:%lx\n", l1t, l1_table_offset(vaddr),
          l1e_get_intpte(l1e), mfn);

    return mfn_valid(mfn) ? mfn : INVALID_MFN;
}
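
/*
 * Illustration (hypothetical caller, not in this file): a debugger front end
 * can look up the guest's init_mm.pgd[3] from its symbol table and pass that
 * entry in as pgd3val. The walk above then skips the l4/l3 levels: pgd3val
 * is itself an l3 entry, so pgd3val >> PAGE_SHIFT already names the l2
 * table's mfn, and only the l2 and l1 lookups remain.
 */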
#else

/* Returns: mfn for the given (pv guest) vaddr */
static unsigned long
dbg_pv_va2mfn(dbgva_t vaddr, struct domain *dp, uint64_t pgd3val)
{
    l3_pgentry_t l3e, *l3t;
    l2_pgentry_t l2e, *l2t;
    l1_pgentry_t l1e, *l1t;
    unsigned long cr3 = (pgd3val ? pgd3val : dp->vcpu[0]->arch.cr3);
    unsigned long mfn = cr3 >> PAGE_SHIFT;

    DBGP2("vaddr:%lx domid:%d cr3:%lx pgd3:%lx\n", vaddr, dp->domain_id,
          cr3, pgd3val);

    if ( pgd3val == 0 )
    {
        /*
         * The PAE pdpt is only 32-byte aligned: bits 5-11 of cr3 locate it
         * within its page, and >> 3 turns that byte offset into an index of
         * 8-byte entries.
         */
        l3t  = map_domain_page(mfn);
        l3t += (cr3 & 0xFE0UL) >> 3;
        l3e = l3t[l3_table_offset(vaddr)];
        mfn = l3e_get_pfn(l3e);
        unmap_domain_page(l3t);
        if ( !(l3e_get_flags(l3e) & _PAGE_PRESENT) )
            return INVALID_MFN;
    }

    l2t = map_domain_page(mfn);
    l2e = l2t[l2_table_offset(vaddr)];
    mfn = l2e_get_pfn(l2e);
    unmap_domain_page(l2t);
    if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) ||
         (l2e_get_flags(l2e) & _PAGE_PSE) )
        return INVALID_MFN;

    l1t = map_domain_page(mfn);
    l1e = l1t[l1_table_offset(vaddr)];
    mfn = l1e_get_pfn(l1e);
    unmap_domain_page(l1t);

    return mfn_valid(mfn) ? mfn : INVALID_MFN;
}
#endif /* defined(__x86_64__) */
/* Returns: number of bytes remaining to be copied */
static int
dbg_rw_guest_mem(dbgva_t addr, dbgbyte_t *buf, int len, struct domain *dp,
                 int toaddr, uint64_t pgd3)
{
    while ( len > 0 )
    {
        char *va;
        unsigned long mfn, pagecnt;

        /* copy at most up to the end of the current page */
        pagecnt = min_t(long, PAGE_SIZE - (addr & ~PAGE_MASK), len);

        mfn = (dp->is_hvm
               ? dbg_hvm_va2mfn(addr, dp, toaddr)
               : dbg_pv_va2mfn(addr, dp, pgd3));
        if ( mfn == INVALID_MFN )
            break;

        va = map_domain_page(mfn);
        va = va + (addr & (PAGE_SIZE-1));

        if ( toaddr )
        {
            memcpy(va, buf, pagecnt);    /* va = buf */
            paging_mark_dirty(dp, mfn);
        }
        else
        {
            memcpy(buf, va, pagecnt);    /* buf = va */
        }

        unmap_domain_page(va);

        addr += pagecnt;
        buf += pagecnt;
        len -= pagecnt;
    }

    return len;
}
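
/*
 * Worked example (illustrative): a 6-byte access starting 3 bytes before a
 * page boundary takes two iterations of the loop above: pagecnt is 3 on the
 * first pass (PAGE_SIZE - (addr & ~PAGE_MASK) == 3) and 3 on the second,
 * with a fresh va2mfn translation and mapping for each page.
 */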
/*
 * addr is a hypervisor addr if domid == DOMID_IDLE, else it's a guest addr.
 * buf is the debugger buffer.
 * If toaddr is set, then addr = buf (write to addr), else buf = addr (read
 * from guest).
 * pgd3: value of init_mm.pgd[3] in the guest. See above.
 * Returns: number of bytes remaining to be copied.
 */
int
dbg_rw_mem(dbgva_t addr, dbgbyte_t *buf, int len, domid_t domid, int toaddr,
           uint64_t pgd3)
{
    struct domain *dp = get_domain_by_id(domid);
    int hyp = (domid == DOMID_IDLE);

    DBGP2("gmem:addr:%lx buf:%p len:$%d domid:%x toaddr:%x dp:%p\n",
          addr, buf, len, domid, toaddr, dp);

    if ( hyp )
    {
        if ( toaddr )
            len = __copy_to_user((void *)addr, buf, len);
        else
            len = __copy_from_user(buf, (void *)addr, len);
    }
    else if ( dp )
    {
        if ( !dp->is_dying )    /* make sure guest is still there */
            len = dbg_rw_guest_mem(addr, buf, len, dp, toaddr, pgd3);
        put_domain(dp);
    }

    DBGP2("gmem:exit:len:$%d\n", len);
    return len;
}
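
/*
 * Usage sketch (hypothetical caller, e.g. a gdbsx-style stub; not part of
 * this file): reading 16 bytes of guest virtual memory looks roughly like:
 *
 *     dbgbyte_t buf[16];
 *     int left = dbg_rw_mem(vaddr, buf, sizeof(buf), domid,
 *                           0, 0);    toaddr=0: read; pgd3=0: no hint
 *
 * On return, the first (sizeof(buf) - left) bytes of buf are valid; a
 * nonzero left means translation failed partway, e.g. on an unmapped page.
 */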