debuggers.hg

view xen/arch/x86/dom0_ops.c @ 3765:4dfebfdc7933

bitkeeper revision 1.1159.252.1 (4208e2a42Fwe83QQfJdFQI8V302tYg)

Reorganise mm.h to split out 32-bit and 64-bit definitions. Fix x86_64
definitions to mask out the bits that we don't care about.
Signed-off-by: keir.fraser@cl.cam.ac.uk
author kaf24@scramble.cl.cam.ac.uk
date Tue Feb 08 16:02:44 2005 +0000 (2005-02-08)
parents f4eb69e2ad9e
children 89e86842952a
line source
1 /******************************************************************************
2 * Arch-specific dom0_ops.c
3 *
4 * Process command requests from domain-0 guest OS.
5 *
6 * Copyright (c) 2002, K A Fraser
7 */
9 #include <xen/config.h>
10 #include <xen/types.h>
11 #include <xen/lib.h>
12 #include <xen/mm.h>
13 #include <public/dom0_ops.h>
14 #include <xen/sched.h>
15 #include <xen/event.h>
16 #include <asm/domain_page.h>
17 #include <asm/msr.h>
18 #include <xen/trace.h>
19 #include <xen/console.h>
20 #include <asm/shadow.h>
21 #include <public/sched_ctl.h>
23 #include <asm/mtrr.h>
24 #include "mtrr/mtrr.h"
/* Trace-record class bases for dom0-op entry/exit events. */
#define TRC_DOM0OP_ENTER_BASE  0x00020000
#define TRC_DOM0OP_LEAVE_BASE  0x00030000

extern unsigned int alloc_new_dom_mem(struct domain *, unsigned int);

/*
 * Parameter block shared with the MSR IPI handlers below.
 * NOTE(review): these globals have no locking — concurrent DOM0_MSR
 * hypercalls would race on them; presumably serialized by dom0 issuing
 * one control op at a time — confirm.
 */
static int msr_cpu_mask;        /* bitmask of CPUs the MSR op targets      */
static unsigned long msr_addr;  /* MSR index to read or write              */
static unsigned long msr_lo;    /* low 32 bits (input on write, result on read)  */
static unsigned long msr_hi;    /* high 32 bits (input on write, result on read) */
36 static void write_msr_for(void *unused)
37 {
38 if (((1 << current->processor) & msr_cpu_mask))
39 wrmsr(msr_addr, msr_lo, msr_hi);
40 }
42 static void read_msr_for(void *unused)
43 {
44 if (((1 << current->processor) & msr_cpu_mask))
45 rdmsr(msr_addr, msr_lo, msr_hi);
46 }
48 long arch_do_dom0_op(dom0_op_t *op, dom0_op_t *u_dom0_op)
49 {
50 long ret = 0;
52 if ( !IS_PRIV(current->domain) )
53 return -EPERM;
55 switch ( op->cmd )
56 {
58 case DOM0_MSR:
59 {
60 if ( op->u.msr.write )
61 {
62 msr_cpu_mask = op->u.msr.cpu_mask;
63 msr_addr = op->u.msr.msr;
64 msr_lo = op->u.msr.in1;
65 msr_hi = op->u.msr.in2;
66 smp_call_function(write_msr_for, NULL, 1, 1);
67 write_msr_for(NULL);
68 }
69 else
70 {
71 msr_cpu_mask = op->u.msr.cpu_mask;
72 msr_addr = op->u.msr.msr;
73 smp_call_function(read_msr_for, NULL, 1, 1);
74 read_msr_for(NULL);
76 op->u.msr.out1 = msr_lo;
77 op->u.msr.out2 = msr_hi;
78 copy_to_user(u_dom0_op, op, sizeof(*op));
79 }
80 ret = 0;
81 }
82 break;
84 case DOM0_SHADOW_CONTROL:
85 {
86 struct domain *d;
87 ret = -ESRCH;
88 d = find_domain_by_id(op->u.shadow_control.domain);
89 if ( d != NULL )
90 {
91 ret = shadow_mode_control(d, &op->u.shadow_control);
92 put_domain(d);
93 copy_to_user(u_dom0_op, op, sizeof(*op));
94 }
95 }
96 break;
98 case DOM0_ADD_MEMTYPE:
99 {
100 ret = mtrr_add_page(
101 op->u.add_memtype.pfn,
102 op->u.add_memtype.nr_pfns,
103 op->u.add_memtype.type,
104 1);
105 }
106 break;
108 case DOM0_DEL_MEMTYPE:
109 {
110 ret = mtrr_del_page(op->u.del_memtype.reg, 0, 0);
111 }
112 break;
114 case DOM0_READ_MEMTYPE:
115 {
116 unsigned long pfn;
117 unsigned int nr_pfns;
118 mtrr_type type;
120 ret = -EINVAL;
121 if ( op->u.read_memtype.reg < num_var_ranges )
122 {
123 mtrr_if->get(op->u.read_memtype.reg, &pfn, &nr_pfns, &type);
124 (void)__put_user(pfn, &u_dom0_op->u.read_memtype.pfn);
125 (void)__put_user(nr_pfns, &u_dom0_op->u.read_memtype.nr_pfns);
126 (void)__put_user(type, &u_dom0_op->u.read_memtype.type);
127 ret = 0;
128 }
129 }
130 break;
132 case DOM0_MICROCODE:
133 {
134 extern int microcode_update(void *buf, unsigned long len);
135 ret = microcode_update(op->u.microcode.data, op->u.microcode.length);
136 }
137 break;
139 case DOM0_IOPL:
140 {
141 extern long do_iopl(domid_t, unsigned int);
142 ret = do_iopl(op->u.iopl.domain, op->u.iopl.iopl);
143 }
144 break;
146 case DOM0_PHYSINFO:
147 {
148 dom0_physinfo_t *pi = &op->u.physinfo;
150 pi->ht_per_core = opt_noht ? 1 : ht_per_core;
151 pi->cores = smp_num_cpus / pi->ht_per_core;
152 pi->total_pages = max_page;
153 pi->free_pages = avail_domheap_pages();
154 pi->cpu_khz = cpu_khz;
156 copy_to_user(u_dom0_op, op, sizeof(*op));
157 ret = 0;
158 }
159 break;
161 case DOM0_GETPAGEFRAMEINFO:
162 {
163 struct pfn_info *page;
164 unsigned long pfn = op->u.getpageframeinfo.pfn;
165 domid_t dom = op->u.getpageframeinfo.domain;
166 struct domain *d;
168 ret = -EINVAL;
170 if ( unlikely(pfn >= max_page) ||
171 unlikely((d = find_domain_by_id(dom)) == NULL) )
172 break;
174 page = &frame_table[pfn];
176 if ( likely(get_page(page, d)) )
177 {
178 ret = 0;
180 op->u.getpageframeinfo.type = NOTAB;
182 if ( (page->u.inuse.type_info & PGT_count_mask) != 0 )
183 {
184 switch ( page->u.inuse.type_info & PGT_type_mask )
185 {
186 case PGT_l1_page_table:
187 op->u.getpageframeinfo.type = L1TAB;
188 break;
189 case PGT_l2_page_table:
190 op->u.getpageframeinfo.type = L2TAB;
191 break;
192 case PGT_l3_page_table:
193 op->u.getpageframeinfo.type = L3TAB;
194 break;
195 case PGT_l4_page_table:
196 op->u.getpageframeinfo.type = L4TAB;
197 break;
198 }
199 }
201 put_page(page);
202 }
204 put_domain(d);
206 copy_to_user(u_dom0_op, op, sizeof(*op));
207 }
208 break;
210 case DOM0_GETPAGEFRAMEINFO2:
211 {
212 #define GPF2_BATCH 128
213 int n,j;
214 int num = op->u.getpageframeinfo2.num;
215 domid_t dom = op->u.getpageframeinfo2.domain;
216 unsigned long *s_ptr = (unsigned long*) op->u.getpageframeinfo2.array;
217 struct domain *d;
218 unsigned long *l_arr;
219 ret = -ESRCH;
221 if ( unlikely((d = find_domain_by_id(dom)) == NULL) )
222 break;
224 if ( unlikely(num > 1024) )
225 {
226 ret = -E2BIG;
227 break;
228 }
230 l_arr = (unsigned long *)alloc_xenheap_page();
232 ret = 0;
233 for( n = 0; n < num; )
234 {
235 int k = ((num-n)>GPF2_BATCH)?GPF2_BATCH:(num-n);
237 if ( copy_from_user(l_arr, &s_ptr[n], k*sizeof(unsigned long)) )
238 {
239 ret = -EINVAL;
240 break;
241 }
243 for( j = 0; j < k; j++ )
244 {
245 struct pfn_info *page;
246 unsigned long mfn = l_arr[j];
248 if ( unlikely(mfn >= max_page) )
249 goto e2_err;
251 page = &frame_table[mfn];
253 if ( likely(get_page(page, d)) )
254 {
255 unsigned long type = 0;
257 switch( page->u.inuse.type_info & PGT_type_mask )
258 {
259 case PGT_l1_page_table:
260 type = L1TAB;
261 break;
262 case PGT_l2_page_table:
263 type = L2TAB;
264 break;
265 case PGT_l3_page_table:
266 type = L3TAB;
267 break;
268 case PGT_l4_page_table:
269 type = L4TAB;
270 break;
271 }
273 if ( page->u.inuse.type_info & PGT_pinned )
274 type |= LPINTAB;
275 l_arr[j] |= type;
276 put_page(page);
277 }
278 else
279 {
280 e2_err:
281 l_arr[j] |= XTAB;
282 }
284 }
286 if ( copy_to_user(&s_ptr[n], l_arr, k*sizeof(unsigned long)) )
287 {
288 ret = -EINVAL;
289 break;
290 }
292 n += j;
293 }
295 free_xenheap_page((unsigned long)l_arr);
297 put_domain(d);
298 }
299 break;
301 case DOM0_GETMEMLIST:
302 {
303 int i;
304 struct domain *d = find_domain_by_id(op->u.getmemlist.domain);
305 unsigned long max_pfns = op->u.getmemlist.max_pfns;
306 unsigned long pfn;
307 unsigned long *buffer = op->u.getmemlist.buffer;
308 struct list_head *list_ent;
310 ret = -EINVAL;
311 if ( d != NULL )
312 {
313 ret = 0;
315 spin_lock(&d->page_alloc_lock);
316 list_ent = d->page_list.next;
317 for ( i = 0; (i < max_pfns) && (list_ent != &d->page_list); i++ )
318 {
319 pfn = list_entry(list_ent, struct pfn_info, list) -
320 frame_table;
321 if ( put_user(pfn, buffer) )
322 {
323 ret = -EFAULT;
324 break;
325 }
326 buffer++;
327 list_ent = frame_table[pfn].list.next;
328 }
329 spin_unlock(&d->page_alloc_lock);
331 op->u.getmemlist.num_pfns = i;
332 copy_to_user(u_dom0_op, op, sizeof(*op));
334 put_domain(d);
335 }
336 }
337 break;
339 default:
340 ret = -ENOSYS;
342 }
344 return ret;
345 }
347 void arch_getdomaininfo_ctxt(
348 struct exec_domain *ed, full_execution_context_t *c)
349 {
350 int i;
352 c->flags = 0;
353 memcpy(&c->cpu_ctxt,
354 &ed->arch.user_ctxt,
355 sizeof(ed->arch.user_ctxt));
356 if ( test_bit(EDF_DONEFPUINIT, &ed->ed_flags) )
357 c->flags |= ECF_I387_VALID;
358 memcpy(&c->fpu_ctxt,
359 &ed->arch.i387,
360 sizeof(ed->arch.i387));
361 memcpy(&c->trap_ctxt,
362 ed->arch.traps,
363 sizeof(ed->arch.traps));
364 #ifdef ARCH_HAS_FAST_TRAP
365 if ( (ed->arch.fast_trap_desc.a == 0) &&
366 (ed->arch.fast_trap_desc.b == 0) )
367 c->fast_trap_idx = 0;
368 else
369 c->fast_trap_idx =
370 ed->arch.fast_trap_idx;
371 #endif
372 c->ldt_base = ed->arch.ldt_base;
373 c->ldt_ents = ed->arch.ldt_ents;
374 c->gdt_ents = 0;
375 if ( GET_GDT_ADDRESS(ed) == GDT_VIRT_START(ed) )
376 {
377 for ( i = 0; i < 16; i++ )
378 c->gdt_frames[i] =
379 l1_pgentry_to_pfn(ed->arch.perdomain_ptes[i]);
380 c->gdt_ents = GET_GDT_ENTRIES(ed);
381 }
382 c->guestos_ss = ed->arch.guestos_ss;
383 c->guestos_esp = ed->arch.guestos_sp;
384 c->pt_base =
385 pagetable_val(ed->arch.pagetable);
386 memcpy(c->debugreg,
387 ed->arch.debugreg,
388 sizeof(ed->arch.debugreg));
389 c->event_callback_cs = ed->arch.event_selector;
390 c->event_callback_eip = ed->arch.event_address;
391 c->failsafe_callback_cs = ed->arch.failsafe_selector;
392 c->failsafe_callback_eip = ed->arch.failsafe_address;
393 }