debuggers.hg

view xen/arch/x86/dom0_ops.c @ 4629:6375127fdf23

bitkeeper revision 1.1311.1.1 (426641eeBv97w6sl983zxeR4Dc3Utg)

Cleanup page table handling. Add macros to access page table
entries, fixup plenty of places in the code to use the page
table types instead of "unsigned long".

Signed-off-by: Gerd Knorr <kraxel@bytesex.org>
Signed-off-by: michael.fetterman@cl.cam.ac.uk
author mafetter@fleming.research
date Wed Apr 20 11:50:06 2005 +0000 (2005-04-20)
parents 445b12a7221a
children 1803018b3b05
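
The page-table cleanup described in the commit message shows up later in this file, where arch_getdomaininfo_ctxt() reads GDT frames through l1e_get_pfn() rather than shifting a bare "unsigned long" by hand. The following standalone sketch illustrates the pattern only; the wrapper type and macro body here are illustrative assumptions, not Xen's actual definitions.

#include <stdio.h>

#define PAGE_SHIFT 12

/* Illustrative PTE wrapper type: a struct instead of a bare unsigned long,
 * so the compiler flags accidental mixing of PTEs with plain integers.
 * (Hypothetical layout -- not Xen's real l1_pgentry_t.) */
typedef struct { unsigned long l1; } l1_pgentry_t;

/* Accessor in the spirit of l1e_get_pfn(): callers no longer open-code
 * the shift themselves. */
#define l1e_get_pfn(e) ((e).l1 >> PAGE_SHIFT)

int main(void)
{
    l1_pgentry_t e = { 0x12345000UL };
    printf("pfn = %lx\n", l1e_get_pfn(e));   /* prints 12345 */
    return 0;
}
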
line source
/******************************************************************************
 * Arch-specific dom0_ops.c
 *
 * Process command requests from domain-0 guest OS.
 *
 * Copyright (c) 2002, K A Fraser
 */
#include <xen/config.h>
#include <xen/types.h>
#include <xen/lib.h>
#include <xen/mm.h>
#include <public/dom0_ops.h>
#include <xen/sched.h>
#include <xen/event.h>
#include <asm/domain_page.h>
#include <asm/msr.h>
#include <xen/trace.h>
#include <xen/console.h>
#include <asm/shadow.h>
#include <public/sched_ctl.h>

#include <asm/mtrr.h>
#include "mtrr/mtrr.h"

#define TRC_DOM0OP_ENTER_BASE  0x00020000
#define TRC_DOM0OP_LEAVE_BASE  0x00030000
extern unsigned int alloc_new_dom_mem(struct domain *, unsigned int);

/* Parameters for the DOM0_MSR cross-CPU read/write helpers below. */
static int msr_cpu_mask;
static unsigned long msr_addr;
static unsigned long msr_lo;
static unsigned long msr_hi;

/* Write msr_lo/msr_hi to msr_addr on each CPU selected by msr_cpu_mask. */
static void write_msr_for(void *unused)
{
    if ( ((1 << current->processor) & msr_cpu_mask) )
        (void)wrmsr_user(msr_addr, msr_lo, msr_hi);
}

/* Read msr_addr into msr_lo/msr_hi on each CPU selected by msr_cpu_mask. */
static void read_msr_for(void *unused)
{
    if ( ((1 << current->processor) & msr_cpu_mask) )
        (void)rdmsr_user(msr_addr, msr_lo, msr_hi);
}
long arch_do_dom0_op(dom0_op_t *op, dom0_op_t *u_dom0_op)
{
    long ret = 0;

    if ( !IS_PRIV(current->domain) )
        return -EPERM;

    switch ( op->cmd )
    {

    case DOM0_MSR:
    {
        if ( op->u.msr.write )
        {
            msr_cpu_mask = op->u.msr.cpu_mask;
            msr_addr = op->u.msr.msr;
            msr_lo = op->u.msr.in1;
            msr_hi = op->u.msr.in2;
            smp_call_function(write_msr_for, NULL, 1, 1);
            write_msr_for(NULL);
        }
        else
        {
            msr_cpu_mask = op->u.msr.cpu_mask;
            msr_addr = op->u.msr.msr;
            smp_call_function(read_msr_for, NULL, 1, 1);
            read_msr_for(NULL);

            op->u.msr.out1 = msr_lo;
            op->u.msr.out2 = msr_hi;
            copy_to_user(u_dom0_op, op, sizeof(*op));
        }
        ret = 0;
    }
    break;
    case DOM0_SHADOW_CONTROL:
    {
        struct domain *d;
        ret = -ESRCH;
        d = find_domain_by_id(op->u.shadow_control.domain);
        if ( d != NULL )
        {
            ret = shadow_mode_control(d, &op->u.shadow_control);
            put_domain(d);
            copy_to_user(u_dom0_op, op, sizeof(*op));
        }
    }
    break;
    case DOM0_ADD_MEMTYPE:
    {
        ret = mtrr_add_page(
            op->u.add_memtype.pfn,
            op->u.add_memtype.nr_pfns,
            op->u.add_memtype.type,
            1);
    }
    break;

    case DOM0_DEL_MEMTYPE:
    {
        ret = mtrr_del_page(op->u.del_memtype.reg, 0, 0);
    }
    break;
    case DOM0_READ_MEMTYPE:
    {
        unsigned long pfn;
        unsigned int nr_pfns;
        mtrr_type type;

        ret = -EINVAL;
        if ( op->u.read_memtype.reg < num_var_ranges )
        {
            mtrr_if->get(op->u.read_memtype.reg, &pfn, &nr_pfns, &type);
            (void)__put_user(pfn, &u_dom0_op->u.read_memtype.pfn);
            (void)__put_user(nr_pfns, &u_dom0_op->u.read_memtype.nr_pfns);
            (void)__put_user(type, &u_dom0_op->u.read_memtype.type);
            ret = 0;
        }
    }
    break;
    case DOM0_MICROCODE:
    {
        extern int microcode_update(void *buf, unsigned long len);
        ret = microcode_update(op->u.microcode.data, op->u.microcode.length);
    }
    break;
    case DOM0_IOPORT_PERMISSION:
    {
        struct domain *d;
        unsigned int fp = op->u.ioport_permission.first_port;
        unsigned int np = op->u.ioport_permission.nr_ports;
        unsigned int p;

        ret = -EINVAL;
        if ( (fp + np) >= 65536 )
            break;

        ret = -ESRCH;
        if ( unlikely((d = find_domain_by_id(
            op->u.ioport_permission.domain)) == NULL) )
            break;

        /* Lazily allocate the I/O-port bitmap mask on first use. */
        ret = -ENOMEM;
        if ( d->arch.iobmp_mask == NULL )
        {
            if ( (d->arch.iobmp_mask = xmalloc_array(
                u8, IOBMP_BYTES)) == NULL )
                break;
            memset(d->arch.iobmp_mask, 0xFF, IOBMP_BYTES);
        }

        ret = 0;
        for ( p = fp; p < (fp + np); p++ )
        {
            if ( op->u.ioport_permission.allow_access )
                clear_bit(p, d->arch.iobmp_mask);
            else
                set_bit(p, d->arch.iobmp_mask);
        }

        put_domain(d);
    }
    break;
    case DOM0_PHYSINFO:
    {
        dom0_physinfo_t *pi = &op->u.physinfo;

        pi->ht_per_core = opt_noht ? 1 : ht_per_core;
        pi->cores = smp_num_cpus / pi->ht_per_core;
        pi->total_pages = max_page;
        pi->free_pages = avail_domheap_pages();
        pi->cpu_khz = cpu_khz;

        copy_to_user(u_dom0_op, op, sizeof(*op));
        ret = 0;
    }
    break;
    case DOM0_GETPAGEFRAMEINFO:
    {
        struct pfn_info *page;
        unsigned long pfn = op->u.getpageframeinfo.pfn;
        domid_t dom = op->u.getpageframeinfo.domain;
        struct domain *d;

        ret = -EINVAL;

        if ( unlikely(pfn >= max_page) ||
             unlikely((d = find_domain_by_id(dom)) == NULL) )
            break;

        page = &frame_table[pfn];

        if ( likely(get_page(page, d)) )
        {
            ret = 0;

            op->u.getpageframeinfo.type = NOTAB;

            if ( (page->u.inuse.type_info & PGT_count_mask) != 0 )
            {
                switch ( page->u.inuse.type_info & PGT_type_mask )
                {
                case PGT_l1_page_table:
                    op->u.getpageframeinfo.type = L1TAB;
                    break;
                case PGT_l2_page_table:
                    op->u.getpageframeinfo.type = L2TAB;
                    break;
                case PGT_l3_page_table:
                    op->u.getpageframeinfo.type = L3TAB;
                    break;
                case PGT_l4_page_table:
                    op->u.getpageframeinfo.type = L4TAB;
                    break;
                }
            }

            put_page(page);
        }

        put_domain(d);

        copy_to_user(u_dom0_op, op, sizeof(*op));
    }
    break;
    case DOM0_GETPAGEFRAMEINFO2:
    {
#define GPF2_BATCH 128
        int n,j;
        int num = op->u.getpageframeinfo2.num;
        domid_t dom = op->u.getpageframeinfo2.domain;
        unsigned long *s_ptr = (unsigned long*) op->u.getpageframeinfo2.array;
        struct domain *d;
        unsigned long *l_arr;
        ret = -ESRCH;

        if ( unlikely((d = find_domain_by_id(dom)) == NULL) )
            break;

        if ( unlikely(num > 1024) )
        {
            ret = -E2BIG;
            break;
        }

        l_arr = (unsigned long *)alloc_xenheap_page();

        /* Annotate each MFN in the caller's array with its page-table type,
         * processing GPF2_BATCH entries at a time via a Xen-heap page. */
        ret = 0;
        for( n = 0; n < num; )
        {
            int k = ((num-n)>GPF2_BATCH)?GPF2_BATCH:(num-n);

            if ( copy_from_user(l_arr, &s_ptr[n], k*sizeof(unsigned long)) )
            {
                ret = -EINVAL;
                break;
            }

            for( j = 0; j < k; j++ )
            {
                struct pfn_info *page;
                unsigned long mfn = l_arr[j];

                if ( unlikely(mfn >= max_page) )
                    goto e2_err;

                page = &frame_table[mfn];

                if ( likely(get_page(page, d)) )
                {
                    unsigned long type = 0;

                    switch( page->u.inuse.type_info & PGT_type_mask )
                    {
                    case PGT_l1_page_table:
                        type = L1TAB;
                        break;
                    case PGT_l2_page_table:
                        type = L2TAB;
                        break;
                    case PGT_l3_page_table:
                        type = L3TAB;
                        break;
                    case PGT_l4_page_table:
                        type = L4TAB;
                        break;
                    }

                    if ( page->u.inuse.type_info & PGT_pinned )
                        type |= LPINTAB;
                    l_arr[j] |= type;
                    put_page(page);
                }
                else
                {
                e2_err:
                    l_arr[j] |= XTAB;
                }

            }

            if ( copy_to_user(&s_ptr[n], l_arr, k*sizeof(unsigned long)) )
            {
                ret = -EINVAL;
                break;
            }

            n += j;
        }

        free_xenheap_page((unsigned long)l_arr);

        put_domain(d);
    }
    break;
    case DOM0_GETMEMLIST:
    {
        int i;
        struct domain *d = find_domain_by_id(op->u.getmemlist.domain);
        unsigned long max_pfns = op->u.getmemlist.max_pfns;
        unsigned long pfn;
        unsigned long *buffer = op->u.getmemlist.buffer;
        struct list_head *list_ent;

        ret = -EINVAL;
        if ( d != NULL )
        {
            ret = 0;

            spin_lock(&d->page_alloc_lock);
            list_ent = d->page_list.next;
            for ( i = 0; (i < max_pfns) && (list_ent != &d->page_list); i++ )
            {
                pfn = list_entry(list_ent, struct pfn_info, list) -
                    frame_table;
                if ( put_user(pfn, buffer) )
                {
                    ret = -EFAULT;
                    break;
                }
                buffer++;
                list_ent = frame_table[pfn].list.next;
            }
            spin_unlock(&d->page_alloc_lock);

            op->u.getmemlist.num_pfns = i;
            copy_to_user(u_dom0_op, op, sizeof(*op));

            put_domain(d);
        }
    }
    break;
    default:
        ret = -ENOSYS;

    }

    return ret;
}
/* Fill in the arch-specific fields of a full_execution_context_t for @ed. */
void arch_getdomaininfo_ctxt(
    struct exec_domain *ed, full_execution_context_t *c)
{
    int i;
#ifdef __i386__  /* Remove when x86_64 VMX is implemented */
#ifdef CONFIG_VMX
    extern void save_vmx_execution_context(execution_context_t *);
#endif
#endif

    c->flags = 0;
    memcpy(&c->cpu_ctxt,
           &ed->arch.user_ctxt,
           sizeof(ed->arch.user_ctxt));
    /* IOPL privileges are virtualised -- merge back into returned eflags. */
    BUG_ON((c->cpu_ctxt.eflags & EF_IOPL) != 0);
    c->cpu_ctxt.eflags |= ed->arch.iopl << 12;

#ifdef __i386__
#ifdef CONFIG_VMX
    if ( VMX_DOMAIN(ed) )
        save_vmx_execution_context(&c->cpu_ctxt);
#endif
#endif

    if ( test_bit(EDF_DONEFPUINIT, &ed->ed_flags) )
        c->flags |= ECF_I387_VALID;
    if ( KERNEL_MODE(ed, &ed->arch.user_ctxt) )
        c->flags |= ECF_IN_KERNEL;
    memcpy(&c->fpu_ctxt,
           &ed->arch.i387,
           sizeof(ed->arch.i387));
    memcpy(&c->trap_ctxt,
           ed->arch.traps,
           sizeof(ed->arch.traps));
#ifdef ARCH_HAS_FAST_TRAP
    if ( (ed->arch.fast_trap_desc.a == 0) &&
         (ed->arch.fast_trap_desc.b == 0) )
        c->fast_trap_idx = 0;
    else
        c->fast_trap_idx =
            ed->arch.fast_trap_idx;
#endif
    c->ldt_base = ed->arch.ldt_base;
    c->ldt_ents = ed->arch.ldt_ents;
    c->gdt_ents = 0;
    if ( GET_GDT_ADDRESS(ed) == GDT_VIRT_START(ed) )
    {
        for ( i = 0; i < 16; i++ )
            c->gdt_frames[i] =
                l1e_get_pfn(ed->arch.perdomain_ptes[i]);
        c->gdt_ents = GET_GDT_ENTRIES(ed);
    }
    c->kernel_ss = ed->arch.kernel_ss;
    c->kernel_esp = ed->arch.kernel_sp;
    c->pt_base =
        pagetable_val(ed->arch.guest_table);
    memcpy(c->debugreg,
           ed->arch.debugreg,
           sizeof(ed->arch.debugreg));
#if defined(__i386__)
    c->event_callback_cs     = ed->arch.event_selector;
    c->event_callback_eip    = ed->arch.event_address;
    c->failsafe_callback_cs  = ed->arch.failsafe_selector;
    c->failsafe_callback_eip = ed->arch.failsafe_address;
#elif defined(__x86_64__)
    c->event_callback_eip    = ed->arch.event_address;
    c->failsafe_callback_eip = ed->arch.failsafe_address;
    c->syscall_callback_eip  = ed->arch.syscall_address;
#endif
    c->vm_assist = ed->domain->vm_assist;
}
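
For orientation, a control tool in domain 0 reaches arch_do_dom0_op() by issuing a dom0_op hypercall with a filled-in dom0_op_t. Below is a hedged sketch of a DOM0_GETMEMLIST request: the u.getmemlist field names are taken from the case handled above, while the interface_version field and the exact way the structure is handed to the hypervisor are assumptions about code not shown in this file.

#include <string.h>
#include <public/dom0_ops.h>   /* dom0_op_t, DOM0_GETMEMLIST */

/* Sketch: build a DOM0_GETMEMLIST request the way a domain-0 tool might.
 * After the (not shown) dom0_op hypercall returns, op->u.getmemlist.num_pfns
 * reports how many entries were written into buf[]. */
static void build_getmemlist(dom0_op_t *op, domid_t dom,
                             unsigned long *buf, unsigned long max_pfns)
{
    memset(op, 0, sizeof(*op));
    op->cmd = DOM0_GETMEMLIST;
    op->interface_version = DOM0_INTERFACE_VERSION;  /* assumed field/constant */
    op->u.getmemlist.domain   = dom;
    op->u.getmemlist.max_pfns = max_pfns;
    op->u.getmemlist.buffer   = buf;
}
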