debuggers.hg

view xen/arch/x86/dom0_ops.c @ 3683:1c55bbe02576

bitkeeper revision 1.1159.212.84 (42033fc2Q0eAAtQcKyCf8cXCt_Fgfg)

Common-code cleanups, for ia64.
Signed-off-by: keir.fraser@cl.cam.ac.uk
author kaf24@scramble.cl.cam.ac.uk
date Fri Feb 04 09:26:26 2005 +0000 (2005-02-04)
parents 610068179f96
children bbe8541361dd 4294cfa9fad3
line source
1 /******************************************************************************
2 * Arch-specific dom0_ops.c
3 *
4 * Process command requests from domain-0 guest OS.
5 *
6 * Copyright (c) 2002, K A Fraser
7 */
9 #include <xen/config.h>
10 #include <xen/types.h>
11 #include <xen/lib.h>
12 #include <xen/mm.h>
13 #include <public/dom0_ops.h>
14 #include <xen/sched.h>
15 #include <xen/event.h>
16 #include <asm/domain_page.h>
17 #include <asm/msr.h>
18 #include <asm/pdb.h>
19 #include <xen/trace.h>
20 #include <xen/console.h>
21 #include <asm/shadow.h>
22 #include <public/sched_ctl.h>
24 #include <asm/mtrr.h>
25 #include "mtrr/mtrr.h"
/* Trace-record type bases for dom0-op entry/exit events. */
27 #define TRC_DOM0OP_ENTER_BASE 0x00020000
28 #define TRC_DOM0OP_LEAVE_BASE 0x00030000
/* Defined in common allocator code; grows a domain's memory reservation. */
30 extern unsigned int alloc_new_dom_mem(struct domain *, unsigned int);
/*
 * "Mailbox" for cross-CPU MSR access (DOM0_MSR): the request parameters are
 * staged in these file-scope statics, then {write,read}_msr_for() is run on
 * every CPU via smp_call_function().  NOTE(review): there is no lock here, so
 * concurrent DOM0_MSR requests would race on this shared state — presumably
 * serialised by the single privileged caller; verify against callers.
 */
32 static int msr_cpu_mask;
33 static unsigned long msr_addr;
34 static unsigned long msr_lo;
35 static unsigned long msr_hi;
37 static void write_msr_for(void *unused)
38 {
39 if (((1 << current->processor) & msr_cpu_mask))
40 wrmsr(msr_addr, msr_lo, msr_hi);
41 }
43 static void read_msr_for(void *unused)
44 {
45 if (((1 << current->processor) & msr_cpu_mask))
46 rdmsr(msr_addr, msr_lo, msr_hi);
47 }
49 long arch_do_dom0_op(dom0_op_t *op, dom0_op_t *u_dom0_op)
50 {
51 long ret = 0;
53 if ( !IS_PRIV(current->domain) )
54 return -EPERM;
56 switch ( op->cmd )
57 {
59 case DOM0_MSR:
60 {
61 if ( op->u.msr.write )
62 {
63 msr_cpu_mask = op->u.msr.cpu_mask;
64 msr_addr = op->u.msr.msr;
65 msr_lo = op->u.msr.in1;
66 msr_hi = op->u.msr.in2;
67 smp_call_function(write_msr_for, NULL, 1, 1);
68 write_msr_for(NULL);
69 }
70 else
71 {
72 msr_cpu_mask = op->u.msr.cpu_mask;
73 msr_addr = op->u.msr.msr;
74 smp_call_function(read_msr_for, NULL, 1, 1);
75 read_msr_for(NULL);
77 op->u.msr.out1 = msr_lo;
78 op->u.msr.out2 = msr_hi;
79 copy_to_user(u_dom0_op, op, sizeof(*op));
80 }
81 ret = 0;
82 }
83 break;
85 case DOM0_SHADOW_CONTROL:
86 {
87 struct domain *d;
88 ret = -ESRCH;
89 d = find_domain_by_id(op->u.shadow_control.domain);
90 if ( d != NULL )
91 {
92 ret = shadow_mode_control(d, &op->u.shadow_control);
93 put_domain(d);
94 copy_to_user(u_dom0_op, op, sizeof(*op));
95 }
96 }
97 break;
99 case DOM0_ADD_MEMTYPE:
100 {
101 ret = mtrr_add_page(
102 op->u.add_memtype.pfn,
103 op->u.add_memtype.nr_pfns,
104 op->u.add_memtype.type,
105 1);
106 }
107 break;
109 case DOM0_DEL_MEMTYPE:
110 {
111 ret = mtrr_del_page(op->u.del_memtype.reg, 0, 0);
112 }
113 break;
115 case DOM0_READ_MEMTYPE:
116 {
117 unsigned long pfn;
118 unsigned int nr_pfns;
119 mtrr_type type;
121 ret = -EINVAL;
122 if ( op->u.read_memtype.reg < num_var_ranges )
123 {
124 mtrr_if->get(op->u.read_memtype.reg, &pfn, &nr_pfns, &type);
125 (void)__put_user(pfn, &u_dom0_op->u.read_memtype.pfn);
126 (void)__put_user(nr_pfns, &u_dom0_op->u.read_memtype.nr_pfns);
127 (void)__put_user(type, &u_dom0_op->u.read_memtype.type);
128 ret = 0;
129 }
130 }
131 break;
133 case DOM0_MICROCODE:
134 {
135 extern int microcode_update(void *buf, unsigned long len);
136 ret = microcode_update(op->u.microcode.data, op->u.microcode.length);
137 }
138 break;
140 case DOM0_IOPL:
141 {
142 extern long do_iopl(domid_t, unsigned int);
143 ret = do_iopl(op->u.iopl.domain, op->u.iopl.iopl);
144 }
145 break;
147 case DOM0_PHYSINFO:
148 {
149 dom0_physinfo_t *pi = &op->u.physinfo;
151 pi->ht_per_core = opt_noht ? 1 : ht_per_core;
152 pi->cores = smp_num_cpus / pi->ht_per_core;
153 pi->total_pages = max_page;
154 pi->free_pages = avail_domheap_pages();
155 pi->cpu_khz = cpu_khz;
157 copy_to_user(u_dom0_op, op, sizeof(*op));
158 ret = 0;
159 }
160 break;
162 case DOM0_GETPAGEFRAMEINFO:
163 {
164 struct pfn_info *page;
165 unsigned long pfn = op->u.getpageframeinfo.pfn;
166 domid_t dom = op->u.getpageframeinfo.domain;
167 struct domain *d;
169 ret = -EINVAL;
171 if ( unlikely(pfn >= max_page) ||
172 unlikely((d = find_domain_by_id(dom)) == NULL) )
173 break;
175 page = &frame_table[pfn];
177 if ( likely(get_page(page, d)) )
178 {
179 ret = 0;
181 op->u.getpageframeinfo.type = NOTAB;
183 if ( (page->u.inuse.type_info & PGT_count_mask) != 0 )
184 {
185 switch ( page->u.inuse.type_info & PGT_type_mask )
186 {
187 case PGT_l1_page_table:
188 op->u.getpageframeinfo.type = L1TAB;
189 break;
190 case PGT_l2_page_table:
191 op->u.getpageframeinfo.type = L2TAB;
192 break;
193 case PGT_l3_page_table:
194 op->u.getpageframeinfo.type = L3TAB;
195 break;
196 case PGT_l4_page_table:
197 op->u.getpageframeinfo.type = L4TAB;
198 break;
199 }
200 }
202 put_page(page);
203 }
205 put_domain(d);
207 copy_to_user(u_dom0_op, op, sizeof(*op));
208 }
209 break;
211 case DOM0_GETPAGEFRAMEINFO2:
212 {
213 #define GPF2_BATCH 128
214 int n,j;
215 int num = op->u.getpageframeinfo2.num;
216 domid_t dom = op->u.getpageframeinfo2.domain;
217 unsigned long *s_ptr = (unsigned long*) op->u.getpageframeinfo2.array;
218 struct domain *d;
219 unsigned long l_arr[GPF2_BATCH];
220 ret = -ESRCH;
222 if ( unlikely((d = find_domain_by_id(dom)) == NULL) )
223 break;
225 if ( unlikely(num > 1024) )
226 {
227 ret = -E2BIG;
228 break;
229 }
231 ret = 0;
232 for( n = 0; n < num; )
233 {
234 int k = ((num-n)>GPF2_BATCH)?GPF2_BATCH:(num-n);
236 if ( copy_from_user(l_arr, &s_ptr[n], k*sizeof(unsigned long)) )
237 {
238 ret = -EINVAL;
239 break;
240 }
242 for( j = 0; j < k; j++ )
243 {
244 struct pfn_info *page;
245 unsigned long mfn = l_arr[j];
247 if ( unlikely(mfn >= max_page) )
248 goto e2_err;
250 page = &frame_table[mfn];
252 if ( likely(get_page(page, d)) )
253 {
254 unsigned long type = 0;
256 switch( page->u.inuse.type_info & PGT_type_mask )
257 {
258 case PGT_l1_page_table:
259 type = L1TAB;
260 break;
261 case PGT_l2_page_table:
262 type = L2TAB;
263 break;
264 case PGT_l3_page_table:
265 type = L3TAB;
266 break;
267 case PGT_l4_page_table:
268 type = L4TAB;
269 break;
270 }
272 if ( page->u.inuse.type_info & PGT_pinned )
273 type |= LPINTAB;
274 l_arr[j] |= type;
275 put_page(page);
276 }
277 else
278 {
279 e2_err:
280 l_arr[j] |= XTAB;
281 }
283 }
285 if ( copy_to_user(&s_ptr[n], l_arr, k*sizeof(unsigned long)) )
286 {
287 ret = -EINVAL;
288 break;
289 }
291 n += j;
292 }
294 put_domain(d);
295 }
296 break;
298 case DOM0_GETMEMLIST:
299 {
300 int i;
301 struct domain *d = find_domain_by_id(op->u.getmemlist.domain);
302 unsigned long max_pfns = op->u.getmemlist.max_pfns;
303 unsigned long pfn;
304 unsigned long *buffer = op->u.getmemlist.buffer;
305 struct list_head *list_ent;
307 ret = -EINVAL;
308 if ( d != NULL )
309 {
310 ret = 0;
312 spin_lock(&d->page_alloc_lock);
313 list_ent = d->page_list.next;
314 for ( i = 0; (i < max_pfns) && (list_ent != &d->page_list); i++ )
315 {
316 pfn = list_entry(list_ent, struct pfn_info, list) -
317 frame_table;
318 if ( put_user(pfn, buffer) )
319 {
320 ret = -EFAULT;
321 break;
322 }
323 buffer++;
324 list_ent = frame_table[pfn].list.next;
325 }
326 spin_unlock(&d->page_alloc_lock);
328 op->u.getmemlist.num_pfns = i;
329 copy_to_user(u_dom0_op, op, sizeof(*op));
331 put_domain(d);
332 }
333 }
334 break;
336 default:
337 ret = -ENOSYS;
339 }
341 return ret;
342 }
/*
 * arch_getdomaininfo_ctxt: snapshot the architectural execution state of
 * exec_domain @d into the caller-supplied context structure @c, for
 * reporting back to the privileged domain.  Pure copy-out; @d is not
 * modified.
 */
344 void arch_getdomaininfo_ctxt(struct exec_domain *d, full_execution_context_t *c)
345 {
346 int i;
/* Start with no flags; set ECF_I387_VALID below only if FPU state exists. */
348 c->flags = 0;
349 memcpy(&c->cpu_ctxt,
350 &d->thread.user_ctxt,
351 sizeof(d->thread.user_ctxt));
/* FPU context is only meaningful once the guest has initialised the FPU. */
352 if ( test_bit(EDF_DONEFPUINIT, &d->ed_flags) )
353 c->flags |= ECF_I387_VALID;
354 memcpy(&c->fpu_ctxt,
355 &d->thread.i387,
356 sizeof(d->thread.i387));
357 memcpy(&c->trap_ctxt,
358 d->thread.traps,
359 sizeof(d->thread.traps));
360 #ifdef ARCH_HAS_FAST_TRAP
/* An all-zero fast-trap descriptor means "no fast trap installed". */
361 if ( (d->thread.fast_trap_desc.a == 0) &&
362 (d->thread.fast_trap_desc.b == 0) )
363 c->fast_trap_idx = 0;
364 else
365 c->fast_trap_idx =
366 d->thread.fast_trap_idx;
367 #endif
368 c->ldt_base = d->mm.ldt_base;
369 c->ldt_ents = d->mm.ldt_ents;
370 c->gdt_ents = 0;
/* Report GDT frames only if the domain uses a custom (virtual-mapped) GDT. */
371 if ( GET_GDT_ADDRESS(d) == GDT_VIRT_START(d) )
372 {
373 for ( i = 0; i < 16; i++ )
374 c->gdt_frames[i] =
375 l1_pgentry_to_pagenr(d->mm.perdomain_ptes[i]);
376 c->gdt_ents = GET_GDT_ENTRIES(d);
377 }
378 c->guestos_ss = d->thread.guestos_ss;
379 c->guestos_esp = d->thread.guestos_sp;
380 c->pt_base =
381 pagetable_val(d->mm.pagetable);
382 memcpy(c->debugreg,
383 d->thread.debugreg,
384 sizeof(d->thread.debugreg));
/* Guest-registered callback entry points (event + failsafe handlers). */
385 c->event_callback_cs = d->thread.event_selector;
386 c->event_callback_eip = d->thread.event_address;
387 c->failsafe_callback_cs = d->thread.failsafe_selector;
388 c->failsafe_callback_eip = d->thread.failsafe_address;
389 }