debuggers.hg

view xen/arch/x86/dom0_ops.c @ 3674:fb875591fd72

bitkeeper revision 1.1159.223.63 (42028527-fv-d9BM0_LRp8UKGP19gQ)

Fix NMI deferral.
Signed-off-by: keir.fraser@cl.cam.ac.uk
author kaf24@scramble.cl.cam.ac.uk
date Thu Feb 03 20:10:15 2005 +0000 (2005-02-03)
parents d331c6994d28
children 46c14b1a4351 2e9105d1c5a6
line source
1 /******************************************************************************
2 * Arch-specific dom0_ops.c
3 *
4 * Process command requests from domain-0 guest OS.
5 *
6 * Copyright (c) 2002, K A Fraser
7 */
9 #include <xen/config.h>
10 #include <xen/types.h>
11 #include <xen/lib.h>
12 #include <xen/mm.h>
13 #include <public/dom0_ops.h>
14 #include <xen/sched.h>
15 #include <xen/event.h>
16 #include <asm/domain_page.h>
17 #include <asm/msr.h>
18 #include <asm/pdb.h>
19 #include <xen/trace.h>
20 #include <xen/console.h>
21 #include <asm/shadow.h>
22 #include <public/sched_ctl.h>
24 #include <asm/mtrr.h>
25 #include "mtrr/mtrr.h"
/* Trace-record bases for dom0-op entry/exit events. */
#define TRC_DOM0OP_ENTER_BASE  0x00020000
#define TRC_DOM0OP_LEAVE_BASE  0x00030000

extern unsigned int alloc_new_dom_mem(struct domain *, unsigned int);

/*
 * Staging area for the DOM0_MSR operation. The requesting CPU fills these
 * in, then broadcasts read_msr_for()/write_msr_for() via IPI; each CPU
 * whose bit is set in msr_cpu_mask acts on them.
 * NOTE(review): access is unsynchronised — presumably serialised by the
 * single privileged (dom0) caller; confirm before adding other callers.
 */
static int msr_cpu_mask;
static unsigned long msr_addr;
static unsigned long msr_lo;
static unsigned long msr_hi;
37 static void write_msr_for(void *unused)
38 {
39 if (((1 << current->processor) & msr_cpu_mask))
40 wrmsr(msr_addr, msr_lo, msr_hi);
41 }
43 static void read_msr_for(void *unused)
44 {
45 if (((1 << current->processor) & msr_cpu_mask))
46 rdmsr(msr_addr, msr_lo, msr_hi);
47 }
49 long arch_do_dom0_op(dom0_op_t *op, dom0_op_t *u_dom0_op)
50 {
51 long ret = 0;
53 if ( !IS_PRIV(current) )
54 return -EPERM;
56 switch ( op->cmd )
57 {
59 case DOM0_MSR:
60 {
61 if ( op->u.msr.write )
62 {
63 msr_cpu_mask = op->u.msr.cpu_mask;
64 msr_addr = op->u.msr.msr;
65 msr_lo = op->u.msr.in1;
66 msr_hi = op->u.msr.in2;
67 smp_call_function(write_msr_for, NULL, 1, 1);
68 write_msr_for(NULL);
69 }
70 else
71 {
72 msr_cpu_mask = op->u.msr.cpu_mask;
73 msr_addr = op->u.msr.msr;
74 smp_call_function(read_msr_for, NULL, 1, 1);
75 read_msr_for(NULL);
77 op->u.msr.out1 = msr_lo;
78 op->u.msr.out2 = msr_hi;
79 copy_to_user(u_dom0_op, op, sizeof(*op));
80 }
81 ret = 0;
82 }
83 break;
85 case DOM0_SHADOW_CONTROL:
86 {
87 struct domain *d;
88 ret = -ESRCH;
89 d = find_domain_by_id(op->u.shadow_control.domain);
90 if ( d != NULL )
91 {
92 ret = shadow_mode_control(d, &op->u.shadow_control);
93 put_domain(d);
94 copy_to_user(u_dom0_op, op, sizeof(*op));
95 }
96 }
97 break;
99 case DOM0_ADD_MEMTYPE:
100 {
101 ret = mtrr_add_page(
102 op->u.add_memtype.pfn,
103 op->u.add_memtype.nr_pfns,
104 op->u.add_memtype.type,
105 1);
106 }
107 break;
109 case DOM0_DEL_MEMTYPE:
110 {
111 ret = mtrr_del_page(op->u.del_memtype.reg, 0, 0);
112 }
113 break;
115 case DOM0_READ_MEMTYPE:
116 {
117 unsigned long pfn;
118 unsigned int nr_pfns;
119 mtrr_type type;
121 ret = -EINVAL;
122 if ( op->u.read_memtype.reg < num_var_ranges )
123 {
124 mtrr_if->get(op->u.read_memtype.reg, &pfn, &nr_pfns, &type);
125 (void)__put_user(pfn, &u_dom0_op->u.read_memtype.pfn);
126 (void)__put_user(nr_pfns, &u_dom0_op->u.read_memtype.nr_pfns);
127 (void)__put_user(type, &u_dom0_op->u.read_memtype.type);
128 ret = 0;
129 }
130 }
131 break;
133 case DOM0_MICROCODE:
134 {
135 extern int microcode_update(void *buf, unsigned long len);
136 ret = microcode_update(op->u.microcode.data, op->u.microcode.length);
137 }
138 break;
140 case DOM0_IOPL:
141 {
142 extern long do_iopl(domid_t, unsigned int);
143 ret = do_iopl(op->u.iopl.domain, op->u.iopl.iopl);
144 }
145 break;
147 case DOM0_PHYSINFO:
148 {
149 dom0_physinfo_t *pi = &op->u.physinfo;
151 pi->ht_per_core = opt_noht ? 1 : ht_per_core;
152 pi->cores = smp_num_cpus / pi->ht_per_core;
153 pi->total_pages = max_page;
154 pi->free_pages = avail_domheap_pages();
155 pi->cpu_khz = cpu_khz;
157 copy_to_user(u_dom0_op, op, sizeof(*op));
158 ret = 0;
159 }
160 break;
162 case DOM0_GETPAGEFRAMEINFO:
163 {
164 struct pfn_info *page;
165 unsigned long pfn = op->u.getpageframeinfo.pfn;
166 domid_t dom = op->u.getpageframeinfo.domain;
167 struct domain *d;
169 ret = -EINVAL;
171 if ( unlikely(pfn >= max_page) ||
172 unlikely((d = find_domain_by_id(dom)) == NULL) )
173 break;
175 page = &frame_table[pfn];
177 if ( likely(get_page(page, d)) )
178 {
179 ret = 0;
181 op->u.getpageframeinfo.type = NOTAB;
183 if ( (page->u.inuse.type_info & PGT_count_mask) != 0 )
184 {
185 switch ( page->u.inuse.type_info & PGT_type_mask )
186 {
187 case PGT_l1_page_table:
188 op->u.getpageframeinfo.type = L1TAB;
189 break;
190 case PGT_l2_page_table:
191 op->u.getpageframeinfo.type = L2TAB;
192 break;
193 case PGT_l3_page_table:
194 op->u.getpageframeinfo.type = L3TAB;
195 break;
196 case PGT_l4_page_table:
197 op->u.getpageframeinfo.type = L4TAB;
198 break;
199 }
200 }
202 put_page(page);
203 }
205 put_domain(d);
207 copy_to_user(u_dom0_op, op, sizeof(*op));
208 }
209 break;
211 case DOM0_GETPAGEFRAMEINFO2:
212 {
213 #define GPF2_BATCH 128
214 int n,j;
215 int num = op->u.getpageframeinfo2.num;
216 domid_t dom = op->u.getpageframeinfo2.domain;
217 unsigned long *s_ptr = (unsigned long*) op->u.getpageframeinfo2.array;
218 struct domain *d;
219 unsigned long l_arr[GPF2_BATCH];
220 ret = -ESRCH;
222 if ( unlikely((d = find_domain_by_id(dom)) == NULL) )
223 break;
225 if ( unlikely(num > 1024) )
226 {
227 ret = -E2BIG;
228 break;
229 }
231 ret = 0;
232 for( n = 0; n < num; )
233 {
234 int k = ((num-n)>GPF2_BATCH)?GPF2_BATCH:(num-n);
236 if ( copy_from_user(l_arr, &s_ptr[n], k*sizeof(unsigned long)) )
237 {
238 ret = -EINVAL;
239 break;
240 }
242 for( j = 0; j < k; j++ )
243 {
244 struct pfn_info *page;
245 unsigned long mfn = l_arr[j];
247 if ( unlikely(mfn >= max_page) )
248 goto e2_err;
250 page = &frame_table[mfn];
252 if ( likely(get_page(page, d)) )
253 {
254 unsigned long type = 0;
256 switch( page->u.inuse.type_info & PGT_type_mask )
257 {
258 case PGT_l1_page_table:
259 type = L1TAB;
260 break;
261 case PGT_l2_page_table:
262 type = L2TAB;
263 break;
264 case PGT_l3_page_table:
265 type = L3TAB;
266 break;
267 case PGT_l4_page_table:
268 type = L4TAB;
269 break;
270 }
272 if ( page->u.inuse.type_info & PGT_pinned )
273 type |= LPINTAB;
274 l_arr[j] |= type;
275 put_page(page);
276 }
277 else
278 {
279 e2_err:
280 l_arr[j] |= XTAB;
281 }
283 }
285 if ( copy_to_user(&s_ptr[n], l_arr, k*sizeof(unsigned long)) )
286 {
287 ret = -EINVAL;
288 break;
289 }
291 n += j;
292 }
294 put_domain(d);
295 }
296 break;
298 default:
299 ret = -ENOSYS;
301 }
303 return ret;
304 }
306 void arch_getdomaininfo_ctxt(struct domain *d, full_execution_context_t *c)
307 {
308 int i;
310 c->flags = 0;
311 memcpy(&c->cpu_ctxt,
312 &d->thread.user_ctxt,
313 sizeof(d->thread.user_ctxt));
314 if ( test_bit(DF_DONEFPUINIT, &d->flags) )
315 c->flags |= ECF_I387_VALID;
316 memcpy(&c->fpu_ctxt,
317 &d->thread.i387,
318 sizeof(d->thread.i387));
319 memcpy(&c->trap_ctxt,
320 d->thread.traps,
321 sizeof(d->thread.traps));
322 #ifdef ARCH_HAS_FAST_TRAP
323 if ( (d->thread.fast_trap_desc.a == 0) &&
324 (d->thread.fast_trap_desc.b == 0) )
325 c->fast_trap_idx = 0;
326 else
327 c->fast_trap_idx =
328 d->thread.fast_trap_idx;
329 #endif
330 c->ldt_base = d->mm.ldt_base;
331 c->ldt_ents = d->mm.ldt_ents;
332 c->gdt_ents = 0;
333 if ( GET_GDT_ADDRESS(d) == GDT_VIRT_START )
334 {
335 for ( i = 0; i < 16; i++ )
336 c->gdt_frames[i] =
337 l1_pgentry_to_pagenr(d->mm.perdomain_pt[i]);
338 c->gdt_ents = GET_GDT_ENTRIES(d);
339 }
340 c->guestos_ss = d->thread.guestos_ss;
341 c->guestos_esp = d->thread.guestos_sp;
342 c->pt_base =
343 pagetable_val(d->mm.pagetable);
344 memcpy(c->debugreg,
345 d->thread.debugreg,
346 sizeof(d->thread.debugreg));
347 c->event_callback_cs = d->thread.event_selector;
348 c->event_callback_eip = d->thread.event_address;
349 c->failsafe_callback_cs = d->thread.failsafe_selector;
350 c->failsafe_callback_eip = d->thread.failsafe_address;
351 }