debuggers.hg

view xen/common/dom0_ops.c @ 3658:0ef6e8e6e85d

bitkeeper revision 1.1159.212.71 (4200f0afX_JumfbEHQex6TdFENULMQ)

Merge labyrinth.cl.cam.ac.uk:/auto/groups/xeno-xenod/BK/xen-unstable.bk
into labyrinth.cl.cam.ac.uk:/auto/groups/xeno/users/iap10/xeno-clone/xen-unstable.bk
author iap10@labyrinth.cl.cam.ac.uk
date Wed Feb 02 15:24:31 2005 +0000 (2005-02-02)
parents 610068179f96 beb0887c54bc
children 1c55bbe02576
line source
1 /******************************************************************************
2 * dom0_ops.c
3 *
4 * Process command requests from domain-0 guest OS.
5 *
6 * Copyright (c) 2002, K A Fraser
7 */
9 #include <xen/config.h>
10 #include <xen/types.h>
11 #include <xen/lib.h>
12 #include <xen/mm.h>
13 #include <public/dom0_ops.h>
14 #include <xen/sched.h>
15 #include <xen/event.h>
16 #include <asm/domain_page.h>
17 #include <asm/pdb.h>
18 #include <xen/trace.h>
19 #include <xen/console.h>
20 #include <asm/shadow.h>
21 #include <public/sched_ctl.h>
/* Trace-buffer event bases: one event on entry to and exit from do_dom0_op. */
#define TRC_DOM0OP_ENTER_BASE  0x00020000
#define TRC_DOM0OP_LEAVE_BASE  0x00030000

/* Arch/allocator hooks implemented elsewhere (no shared prototype header). */
extern unsigned int alloc_new_dom_mem(struct domain *, unsigned int);
extern long arch_do_dom0_op(dom0_op_t *op, dom0_op_t *u_dom0_op);
extern void arch_getdomaininfo_ctxt(
    struct exec_domain *, full_execution_context_t *);
31 static inline int is_free_domid(domid_t dom)
32 {
33 struct domain *d;
35 if ( dom >= DOMID_FIRST_RESERVED )
36 return 0;
38 if ( (d = find_domain_by_id(dom)) == NULL )
39 return 1;
41 put_domain(d);
42 return 0;
43 }
45 /*
46 * Allocate a free domain id. We try to reuse domain ids in a fairly low range,
47 * only expanding the range when there are no free domain ids. This is to keep
48 * domain ids in a range depending on the number that exist simultaneously,
49 * rather than incrementing domain ids in the full 32-bit range.
50 */
51 static int allocate_domid(domid_t *pdom)
52 {
53 static spinlock_t domid_lock = SPIN_LOCK_UNLOCKED;
54 static domid_t curdom = 0;
55 static domid_t topdom = 101;
56 int err = 0;
57 domid_t dom;
59 spin_lock(&domid_lock);
61 /* Try to use a domain id in the range 0..topdom, starting at curdom. */
62 for ( dom = curdom + 1; dom != curdom; dom++ )
63 {
64 if ( dom == topdom )
65 dom = 1;
66 if ( is_free_domid(dom) )
67 goto exit;
68 }
70 /* Couldn't find a free domain id in 0..topdom, try higher. */
71 for ( dom = topdom; dom < DOMID_FIRST_RESERVED; dom++ )
72 {
73 if ( is_free_domid(dom) )
74 {
75 topdom = dom + 1;
76 goto exit;
77 }
78 }
80 /* No free domain ids. */
81 err = -ENOMEM;
83 exit:
84 if ( err == 0 )
85 {
86 curdom = dom;
87 *pdom = dom;
88 }
90 spin_unlock(&domid_lock);
91 return err;
92 }
94 long do_dom0_op(dom0_op_t *u_dom0_op)
95 {
96 long ret = 0;
97 dom0_op_t curop, *op = &curop;
99 if ( !IS_PRIV(current->domain) )
100 return -EPERM;
102 if ( copy_from_user(op, u_dom0_op, sizeof(*op)) )
103 return -EFAULT;
105 if ( op->interface_version != DOM0_INTERFACE_VERSION )
106 return -EACCES;
108 TRACE_5D(TRC_DOM0OP_ENTER_BASE + op->cmd,
109 0, op->u.dummy[0], op->u.dummy[1],
110 op->u.dummy[2], op->u.dummy[3] );
112 switch ( op->cmd )
113 {
115 case DOM0_BUILDDOMAIN:
116 {
117 struct domain *d = find_domain_by_id(op->u.builddomain.domain);
118 ret = -EINVAL;
119 if ( d != NULL )
120 {
121 ret = final_setup_guestos(d, &op->u.builddomain);
122 put_domain(d);
123 }
124 }
125 break;
127 case DOM0_PAUSEDOMAIN:
128 {
129 struct domain *d = find_domain_by_id(op->u.pausedomain.domain);
130 ret = -ESRCH;
131 if ( d != NULL )
132 {
133 ret = -EINVAL;
134 if ( d != current->domain )
135 {
136 domain_pause_by_systemcontroller(d);
137 ret = 0;
138 }
139 put_domain(d);
140 }
141 }
142 break;
144 case DOM0_UNPAUSEDOMAIN:
145 {
146 struct domain *d = find_domain_by_id(op->u.unpausedomain.domain);
147 ret = -ESRCH;
148 if ( d != NULL )
149 {
150 ret = -EINVAL;
151 if ( test_bit(DF_CONSTRUCTED, &d->d_flags) )
152 {
153 domain_unpause_by_systemcontroller(d);
154 ret = 0;
155 }
156 put_domain(d);
157 }
158 }
159 break;
161 case DOM0_CREATEDOMAIN:
162 {
163 struct domain *d;
164 unsigned int pro = 0;
165 domid_t dom;
167 dom = op->u.createdomain.domain;
168 if ( (dom > 0) && (dom < DOMID_FIRST_RESERVED) )
169 {
170 ret = -EINVAL;
171 if ( !is_free_domid(dom) )
172 break;
173 }
174 else if ( (ret = allocate_domid(&dom)) != 0 )
175 break;
177 if ( op->u.createdomain.cpu == -1 )
178 {
179 /* Do an initial placement. Pick the least-populated CPU. */
180 struct domain *d;
181 struct exec_domain *ed;
182 unsigned int i, cnt[NR_CPUS] = { 0 };
184 read_lock(&domlist_lock);
185 for_each_domain ( d ) {
186 for_each_exec_domain ( d, ed )
187 cnt[ed->processor]++;
188 }
189 read_unlock(&domlist_lock);
191 for ( i = 0; i < smp_num_cpus; i++ )
192 if ( cnt[i] < cnt[pro] )
193 pro = i;
194 }
195 else
196 pro = op->u.createdomain.cpu % smp_num_cpus;
198 ret = -ENOMEM;
199 if ( (d = do_createdomain(dom, pro)) == NULL )
200 break;
202 ret = alloc_new_dom_mem(d, op->u.createdomain.memory_kb);
203 if ( ret != 0 )
204 {
205 domain_kill(d);
206 break;
207 }
209 ret = 0;
211 op->u.createdomain.domain = d->id;
212 copy_to_user(u_dom0_op, op, sizeof(*op));
213 }
214 break;
216 case DOM0_DESTROYDOMAIN:
217 {
218 struct domain *d = find_domain_by_id(op->u.destroydomain.domain);
219 ret = -ESRCH;
220 if ( d != NULL )
221 {
222 ret = -EINVAL;
223 if ( d != current->domain )
224 {
225 domain_kill(d);
226 ret = 0;
227 }
228 put_domain(d);
229 }
230 }
231 break;
233 case DOM0_PINCPUDOMAIN:
234 {
235 domid_t dom = op->u.pincpudomain.domain;
236 struct domain *d = find_domain_by_id(dom);
237 struct exec_domain *ed;
238 int cpu = op->u.pincpudomain.cpu;
240 if ( d == NULL )
241 {
242 ret = -ESRCH;
243 break;
244 }
246 ed = d->exec_domain[op->u.pincpudomain.exec_domain];
247 if ( ed == NULL )
248 {
249 ret = -ESRCH;
250 put_domain(d);
251 break;
252 }
254 if ( ed == current )
255 {
256 ret = -EINVAL;
257 put_domain(d);
258 break;
259 }
261 if ( cpu == -1 )
262 {
263 clear_bit(EDF_CPUPINNED, &ed->ed_flags);
264 }
265 else
266 {
267 exec_domain_pause(ed);
268 synchronise_pagetables(~0UL);
269 if ( ed->processor != (cpu % smp_num_cpus) )
270 set_bit(EDF_MIGRATED, &ed->ed_flags);
271 set_bit(EDF_CPUPINNED, &ed->ed_flags);
272 ed->processor = cpu % smp_num_cpus;
273 exec_domain_unpause(ed);
274 }
276 put_domain(d);
277 }
278 break;
280 case DOM0_SCHEDCTL:
281 {
282 ret = sched_ctl(&op->u.schedctl);
283 copy_to_user(u_dom0_op, op, sizeof(*op));
284 }
285 break;
287 case DOM0_ADJUSTDOM:
288 {
289 ret = sched_adjdom(&op->u.adjustdom);
290 copy_to_user(u_dom0_op, op, sizeof(*op));
291 }
292 break;
294 case DOM0_GETMEMLIST:
295 {
296 int i;
297 struct domain *d = find_domain_by_id(op->u.getmemlist.domain);
298 unsigned long max_pfns = op->u.getmemlist.max_pfns;
299 unsigned long pfn;
300 unsigned long *buffer = op->u.getmemlist.buffer;
301 struct list_head *list_ent;
303 ret = -EINVAL;
304 if ( d != NULL )
305 {
306 ret = 0;
308 spin_lock(&d->page_alloc_lock);
309 list_ent = d->page_list.next;
310 for ( i = 0; (i < max_pfns) && (list_ent != &d->page_list); i++ )
311 {
312 pfn = list_entry(list_ent, struct pfn_info, list) -
313 frame_table;
314 if ( put_user(pfn, buffer) )
315 {
316 ret = -EFAULT;
317 break;
318 }
319 buffer++;
320 list_ent = frame_table[pfn].list.next;
321 }
322 spin_unlock(&d->page_alloc_lock);
324 op->u.getmemlist.num_pfns = i;
325 copy_to_user(u_dom0_op, op, sizeof(*op));
327 put_domain(d);
328 }
329 }
330 break;
332 case DOM0_GETDOMAININFO:
333 {
334 full_execution_context_t *c;
335 struct domain *d;
336 struct exec_domain *ed;
338 read_lock(&domlist_lock);
340 for_each_domain ( d )
341 {
342 if ( d->id >= op->u.getdomaininfo.domain )
343 break;
344 }
346 if ( (d == NULL) || !get_domain(d) )
347 {
348 read_unlock(&domlist_lock);
349 ret = -ESRCH;
350 break;
351 }
353 read_unlock(&domlist_lock);
355 op->u.getdomaininfo.domain = d->id;
357 if ( (op->u.getdomaininfo.exec_domain >= MAX_VIRT_CPUS) ||
358 !d->exec_domain[op->u.getdomaininfo.exec_domain] )
359 {
360 ret = -EINVAL;
361 break;
362 }
364 ed = d->exec_domain[op->u.getdomaininfo.exec_domain];
366 op->u.getdomaininfo.flags =
367 (test_bit( DF_DYING, &d->d_flags) ? DOMFLAGS_DYING : 0) |
368 (test_bit( DF_CRASHED, &d->d_flags) ? DOMFLAGS_CRASHED : 0) |
369 (test_bit( DF_SHUTDOWN, &d->d_flags) ? DOMFLAGS_SHUTDOWN : 0) |
370 (test_bit(EDF_CTRLPAUSE, &ed->ed_flags) ? DOMFLAGS_PAUSED : 0) |
371 (test_bit(EDF_BLOCKED, &ed->ed_flags) ? DOMFLAGS_BLOCKED : 0) |
372 (test_bit(EDF_RUNNING, &ed->ed_flags) ? DOMFLAGS_RUNNING : 0);
374 op->u.getdomaininfo.flags |= ed->processor << DOMFLAGS_CPUSHIFT;
375 op->u.getdomaininfo.flags |=
376 d->shutdown_code << DOMFLAGS_SHUTDOWNSHIFT;
378 op->u.getdomaininfo.tot_pages = d->tot_pages;
379 op->u.getdomaininfo.max_pages = d->max_pages;
380 op->u.getdomaininfo.cpu_time = ed->cpu_time;
381 op->u.getdomaininfo.shared_info_frame =
382 __pa(d->shared_info) >> PAGE_SHIFT;
384 if ( op->u.getdomaininfo.ctxt != NULL )
385 {
386 if ( (c = xmalloc(full_execution_context_t)) == NULL )
387 {
388 ret = -ENOMEM;
389 put_domain(d);
390 break;
391 }
393 if ( ed != current )
394 exec_domain_pause(ed);
396 arch_getdomaininfo_ctxt(ed,c);
398 if ( ed != current )
399 exec_domain_unpause(ed);
401 if ( copy_to_user(op->u.getdomaininfo.ctxt, c, sizeof(*c)) )
402 ret = -EINVAL;
404 if ( c != NULL )
405 xfree(c);
406 }
408 if ( copy_to_user(u_dom0_op, op, sizeof(*op)) )
409 ret = -EINVAL;
411 put_domain(d);
412 }
413 break;
415 #ifdef XEN_DEBUGGER
416 case DOM0_DEBUG:
417 {
418 pdb_do_debug(op);
419 copy_to_user(u_dom0_op, op, sizeof(*op));
420 ret = 0;
421 }
422 break;
423 #endif
425 case DOM0_SETTIME:
426 {
427 do_settime(op->u.settime.secs,
428 op->u.settime.usecs,
429 op->u.settime.system_time);
430 ret = 0;
431 }
432 break;
434 #ifdef TRACE_BUFFER
435 case DOM0_GETTBUFS:
436 {
437 ret = get_tb_info(&op->u.gettbufs);
438 copy_to_user(u_dom0_op, op, sizeof(*op));
439 }
440 break;
441 #endif
443 case DOM0_READCONSOLE:
444 {
445 ret = read_console_ring(op->u.readconsole.str,
446 op->u.readconsole.count,
447 op->u.readconsole.cmd);
448 }
449 break;
451 case DOM0_PCIDEV_ACCESS:
452 {
453 extern int physdev_pci_access_modify(domid_t, int, int, int, int);
454 ret = physdev_pci_access_modify(op->u.pcidev_access.domain,
455 op->u.pcidev_access.bus,
456 op->u.pcidev_access.dev,
457 op->u.pcidev_access.func,
458 op->u.pcidev_access.enable);
459 }
460 break;
462 case DOM0_SCHED_ID:
463 {
464 op->u.sched_id.sched_id = sched_id();
465 copy_to_user(u_dom0_op, op, sizeof(*op));
466 ret = 0;
467 }
468 break;
470 case DOM0_SETDOMAININITIALMEM:
471 {
472 struct domain *d;
473 ret = -ESRCH;
474 d = find_domain_by_id(op->u.setdomaininitialmem.domain);
475 if ( d != NULL )
476 {
477 /* should only be used *before* domain is built. */
478 if ( !test_bit(DF_CONSTRUCTED, &d->d_flags) )
479 ret = alloc_new_dom_mem(
480 d, op->u.setdomaininitialmem.initial_memkb );
481 else
482 ret = -EINVAL;
483 put_domain(d);
484 }
485 }
486 break;
488 case DOM0_SETDOMAINMAXMEM:
489 {
490 struct domain *d;
491 ret = -ESRCH;
492 d = find_domain_by_id( op->u.setdomainmaxmem.domain );
493 if ( d != NULL )
494 {
495 d->max_pages =
496 (op->u.setdomainmaxmem.max_memkb+PAGE_SIZE-1)>> PAGE_SHIFT;
497 put_domain(d);
498 ret = 0;
499 }
500 }
501 break;
503 case DOM0_SETDOMAINVMASSIST:
504 {
505 struct domain *d;
506 ret = -ESRCH;
507 d = find_domain_by_id( op->u.setdomainvmassist.domain );
508 if ( d != NULL )
509 {
510 vm_assist(d, op->u.setdomainvmassist.cmd,
511 op->u.setdomainvmassist.type);
512 put_domain(d);
513 ret = 0;
514 }
515 }
516 break;
518 #ifdef PERF_COUNTERS
519 case DOM0_PERFCCONTROL:
520 {
521 extern int perfc_control(dom0_perfccontrol_t *);
522 ret = perfc_control(&op->u.perfccontrol);
523 copy_to_user(u_dom0_op, op, sizeof(*op));
524 }
525 break;
526 #endif
528 default:
529 ret = arch_do_dom0_op(op,u_dom0_op);
531 }
533 TRACE_5D(TRC_DOM0OP_LEAVE_BASE + op->cmd, ret,
534 op->u.dummy[0], op->u.dummy[1], op->u.dummy[2], op->u.dummy[3]);
537 return ret;
538 }