debuggers.hg

view xen/arch/x86/dom0_ops.c @ 3705:4294cfa9fad3

bitkeeper revision 1.1159.212.95 (4204aa0ee0re5Xx1zWrJ9ejxzgRs3w)

Various cleanups. Remove PDB pending simpler GDB stub and/or NetBSD debugger.
Force emacs mode to appropriate tabbing in various files.
Signed-off-by: keir.fraser@cl.cam.ac.uk
author kaf24@scramble.cl.cam.ac.uk
date Sat Feb 05 11:12:14 2005 +0000 (2005-02-05)
parents 1c55bbe02576
children d93748c50893
line source
1 /******************************************************************************
2 * Arch-specific dom0_ops.c
3 *
4 * Process command requests from domain-0 guest OS.
5 *
6 * Copyright (c) 2002, K A Fraser
7 */
9 #include <xen/config.h>
10 #include <xen/types.h>
11 #include <xen/lib.h>
12 #include <xen/mm.h>
13 #include <public/dom0_ops.h>
14 #include <xen/sched.h>
15 #include <xen/event.h>
16 #include <asm/domain_page.h>
17 #include <asm/msr.h>
18 #include <xen/trace.h>
19 #include <xen/console.h>
20 #include <asm/shadow.h>
21 #include <public/sched_ctl.h>
23 #include <asm/mtrr.h>
24 #include "mtrr/mtrr.h"
/* Trace-event base codes for dom0-op entry/exit (not referenced in this
 * view of the file -- presumably used by the tracing subsystem; verify). */
26 #define TRC_DOM0OP_ENTER_BASE 0x00020000
27 #define TRC_DOM0OP_LEAVE_BASE 0x00030000
/* Allocates the initial memory reservation for a domain (defined elsewhere). */
29 extern unsigned int alloc_new_dom_mem(struct domain *, unsigned int);
/* Parameter block shared between arch_do_dom0_op (DOM0_MSR) and the
 * write_msr_for/read_msr_for helpers, which run on every CPU selected by
 * msr_cpu_mask. NOTE(review): not protected by any lock, so concurrent
 * DOM0_MSR operations race on these -- confirm callers serialize. */
31 static int msr_cpu_mask;
32 static unsigned long msr_addr;
33 static unsigned long msr_lo;
34 static unsigned long msr_hi;
36 static void write_msr_for(void *unused)
37 {
38 if (((1 << current->processor) & msr_cpu_mask))
39 wrmsr(msr_addr, msr_lo, msr_hi);
40 }
42 static void read_msr_for(void *unused)
43 {
44 if (((1 << current->processor) & msr_cpu_mask))
45 rdmsr(msr_addr, msr_lo, msr_hi);
46 }
48 long arch_do_dom0_op(dom0_op_t *op, dom0_op_t *u_dom0_op)
49 {
50 long ret = 0;
52 if ( !IS_PRIV(current->domain) )
53 return -EPERM;
55 switch ( op->cmd )
56 {
58 case DOM0_MSR:
59 {
60 if ( op->u.msr.write )
61 {
62 msr_cpu_mask = op->u.msr.cpu_mask;
63 msr_addr = op->u.msr.msr;
64 msr_lo = op->u.msr.in1;
65 msr_hi = op->u.msr.in2;
66 smp_call_function(write_msr_for, NULL, 1, 1);
67 write_msr_for(NULL);
68 }
69 else
70 {
71 msr_cpu_mask = op->u.msr.cpu_mask;
72 msr_addr = op->u.msr.msr;
73 smp_call_function(read_msr_for, NULL, 1, 1);
74 read_msr_for(NULL);
76 op->u.msr.out1 = msr_lo;
77 op->u.msr.out2 = msr_hi;
78 copy_to_user(u_dom0_op, op, sizeof(*op));
79 }
80 ret = 0;
81 }
82 break;
84 case DOM0_SHADOW_CONTROL:
85 {
86 struct domain *d;
87 ret = -ESRCH;
88 d = find_domain_by_id(op->u.shadow_control.domain);
89 if ( d != NULL )
90 {
91 ret = shadow_mode_control(d, &op->u.shadow_control);
92 put_domain(d);
93 copy_to_user(u_dom0_op, op, sizeof(*op));
94 }
95 }
96 break;
98 case DOM0_ADD_MEMTYPE:
99 {
100 ret = mtrr_add_page(
101 op->u.add_memtype.pfn,
102 op->u.add_memtype.nr_pfns,
103 op->u.add_memtype.type,
104 1);
105 }
106 break;
108 case DOM0_DEL_MEMTYPE:
109 {
110 ret = mtrr_del_page(op->u.del_memtype.reg, 0, 0);
111 }
112 break;
114 case DOM0_READ_MEMTYPE:
115 {
116 unsigned long pfn;
117 unsigned int nr_pfns;
118 mtrr_type type;
120 ret = -EINVAL;
121 if ( op->u.read_memtype.reg < num_var_ranges )
122 {
123 mtrr_if->get(op->u.read_memtype.reg, &pfn, &nr_pfns, &type);
124 (void)__put_user(pfn, &u_dom0_op->u.read_memtype.pfn);
125 (void)__put_user(nr_pfns, &u_dom0_op->u.read_memtype.nr_pfns);
126 (void)__put_user(type, &u_dom0_op->u.read_memtype.type);
127 ret = 0;
128 }
129 }
130 break;
132 case DOM0_MICROCODE:
133 {
134 extern int microcode_update(void *buf, unsigned long len);
135 ret = microcode_update(op->u.microcode.data, op->u.microcode.length);
136 }
137 break;
139 case DOM0_IOPL:
140 {
141 extern long do_iopl(domid_t, unsigned int);
142 ret = do_iopl(op->u.iopl.domain, op->u.iopl.iopl);
143 }
144 break;
146 case DOM0_PHYSINFO:
147 {
148 dom0_physinfo_t *pi = &op->u.physinfo;
150 pi->ht_per_core = opt_noht ? 1 : ht_per_core;
151 pi->cores = smp_num_cpus / pi->ht_per_core;
152 pi->total_pages = max_page;
153 pi->free_pages = avail_domheap_pages();
154 pi->cpu_khz = cpu_khz;
156 copy_to_user(u_dom0_op, op, sizeof(*op));
157 ret = 0;
158 }
159 break;
161 case DOM0_GETPAGEFRAMEINFO:
162 {
163 struct pfn_info *page;
164 unsigned long pfn = op->u.getpageframeinfo.pfn;
165 domid_t dom = op->u.getpageframeinfo.domain;
166 struct domain *d;
168 ret = -EINVAL;
170 if ( unlikely(pfn >= max_page) ||
171 unlikely((d = find_domain_by_id(dom)) == NULL) )
172 break;
174 page = &frame_table[pfn];
176 if ( likely(get_page(page, d)) )
177 {
178 ret = 0;
180 op->u.getpageframeinfo.type = NOTAB;
182 if ( (page->u.inuse.type_info & PGT_count_mask) != 0 )
183 {
184 switch ( page->u.inuse.type_info & PGT_type_mask )
185 {
186 case PGT_l1_page_table:
187 op->u.getpageframeinfo.type = L1TAB;
188 break;
189 case PGT_l2_page_table:
190 op->u.getpageframeinfo.type = L2TAB;
191 break;
192 case PGT_l3_page_table:
193 op->u.getpageframeinfo.type = L3TAB;
194 break;
195 case PGT_l4_page_table:
196 op->u.getpageframeinfo.type = L4TAB;
197 break;
198 }
199 }
201 put_page(page);
202 }
204 put_domain(d);
206 copy_to_user(u_dom0_op, op, sizeof(*op));
207 }
208 break;
210 case DOM0_GETPAGEFRAMEINFO2:
211 {
212 #define GPF2_BATCH 128
213 int n,j;
214 int num = op->u.getpageframeinfo2.num;
215 domid_t dom = op->u.getpageframeinfo2.domain;
216 unsigned long *s_ptr = (unsigned long*) op->u.getpageframeinfo2.array;
217 struct domain *d;
218 unsigned long l_arr[GPF2_BATCH];
219 ret = -ESRCH;
221 if ( unlikely((d = find_domain_by_id(dom)) == NULL) )
222 break;
224 if ( unlikely(num > 1024) )
225 {
226 ret = -E2BIG;
227 break;
228 }
230 ret = 0;
231 for( n = 0; n < num; )
232 {
233 int k = ((num-n)>GPF2_BATCH)?GPF2_BATCH:(num-n);
235 if ( copy_from_user(l_arr, &s_ptr[n], k*sizeof(unsigned long)) )
236 {
237 ret = -EINVAL;
238 break;
239 }
241 for( j = 0; j < k; j++ )
242 {
243 struct pfn_info *page;
244 unsigned long mfn = l_arr[j];
246 if ( unlikely(mfn >= max_page) )
247 goto e2_err;
249 page = &frame_table[mfn];
251 if ( likely(get_page(page, d)) )
252 {
253 unsigned long type = 0;
255 switch( page->u.inuse.type_info & PGT_type_mask )
256 {
257 case PGT_l1_page_table:
258 type = L1TAB;
259 break;
260 case PGT_l2_page_table:
261 type = L2TAB;
262 break;
263 case PGT_l3_page_table:
264 type = L3TAB;
265 break;
266 case PGT_l4_page_table:
267 type = L4TAB;
268 break;
269 }
271 if ( page->u.inuse.type_info & PGT_pinned )
272 type |= LPINTAB;
273 l_arr[j] |= type;
274 put_page(page);
275 }
276 else
277 {
278 e2_err:
279 l_arr[j] |= XTAB;
280 }
282 }
284 if ( copy_to_user(&s_ptr[n], l_arr, k*sizeof(unsigned long)) )
285 {
286 ret = -EINVAL;
287 break;
288 }
290 n += j;
291 }
293 put_domain(d);
294 }
295 break;
297 case DOM0_GETMEMLIST:
298 {
299 int i;
300 struct domain *d = find_domain_by_id(op->u.getmemlist.domain);
301 unsigned long max_pfns = op->u.getmemlist.max_pfns;
302 unsigned long pfn;
303 unsigned long *buffer = op->u.getmemlist.buffer;
304 struct list_head *list_ent;
306 ret = -EINVAL;
307 if ( d != NULL )
308 {
309 ret = 0;
311 spin_lock(&d->page_alloc_lock);
312 list_ent = d->page_list.next;
313 for ( i = 0; (i < max_pfns) && (list_ent != &d->page_list); i++ )
314 {
315 pfn = list_entry(list_ent, struct pfn_info, list) -
316 frame_table;
317 if ( put_user(pfn, buffer) )
318 {
319 ret = -EFAULT;
320 break;
321 }
322 buffer++;
323 list_ent = frame_table[pfn].list.next;
324 }
325 spin_unlock(&d->page_alloc_lock);
327 op->u.getmemlist.num_pfns = i;
328 copy_to_user(u_dom0_op, op, sizeof(*op));
330 put_domain(d);
331 }
332 }
333 break;
335 default:
336 ret = -ENOSYS;
338 }
340 return ret;
341 }
343 void arch_getdomaininfo_ctxt(struct exec_domain *d, full_execution_context_t *c)
344 {
345 int i;
347 c->flags = 0;
348 memcpy(&c->cpu_ctxt,
349 &d->thread.user_ctxt,
350 sizeof(d->thread.user_ctxt));
351 if ( test_bit(EDF_DONEFPUINIT, &d->ed_flags) )
352 c->flags |= ECF_I387_VALID;
353 memcpy(&c->fpu_ctxt,
354 &d->thread.i387,
355 sizeof(d->thread.i387));
356 memcpy(&c->trap_ctxt,
357 d->thread.traps,
358 sizeof(d->thread.traps));
359 #ifdef ARCH_HAS_FAST_TRAP
360 if ( (d->thread.fast_trap_desc.a == 0) &&
361 (d->thread.fast_trap_desc.b == 0) )
362 c->fast_trap_idx = 0;
363 else
364 c->fast_trap_idx =
365 d->thread.fast_trap_idx;
366 #endif
367 c->ldt_base = d->mm.ldt_base;
368 c->ldt_ents = d->mm.ldt_ents;
369 c->gdt_ents = 0;
370 if ( GET_GDT_ADDRESS(d) == GDT_VIRT_START(d) )
371 {
372 for ( i = 0; i < 16; i++ )
373 c->gdt_frames[i] =
374 l1_pgentry_to_pagenr(d->mm.perdomain_ptes[i]);
375 c->gdt_ents = GET_GDT_ENTRIES(d);
376 }
377 c->guestos_ss = d->thread.guestos_ss;
378 c->guestos_esp = d->thread.guestos_sp;
379 c->pt_base =
380 pagetable_val(d->mm.pagetable);
381 memcpy(c->debugreg,
382 d->thread.debugreg,
383 sizeof(d->thread.debugreg));
384 c->event_callback_cs = d->thread.event_selector;
385 c->event_callback_eip = d->thread.event_address;
386 c->failsafe_callback_cs = d->thread.failsafe_selector;
387 c->failsafe_callback_eip = d->thread.failsafe_address;
388 }