debuggers.hg: view of xen/common/domctl.c @ 22855:1d1eec7e1fb4

xl: Perform minimal validation of virtual disk file while parsing config file

This patch performs some very basic validation on the virtual disk
file passed through the config file. The validation ensures that we
do not go too far with initialization (such as spawning qemu) while
the configuration may still have fundamental problems (see the sketch
after the changeset header).

[ Patch fixed up to work with PHYSTYPE_EMPTY 22808:6ec61438713a -iwj ]

Signed-off-by: Kamala Narasimhan <kamala.narasimhan@citrix.com>
Acked-by: Ian Jackson <ian.jackson@eu.citrix.com>
Signed-off-by: Ian Jackson <ian.jackson@eu.citrix.com>
Committed-by: Ian Jackson <ian.jackson@eu.citrix.com>
author Kamala Narasimhan <kamala.narasimhan@gmail.com>
date Tue Jan 25 18:09:49 2011 +0000 (2011-01-25)
parents 2208a036f8d9
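As a hedged illustration of the kind of check the description above refers to: the real patch touches xl's config parsing in the tools, not the hypervisor file listed below, and the helper name and use of stat() here are assumptions for illustration only, not xl's actual API. A minimal sketch:

/* Hypothetical sketch only: a minimal "does the backing file look sane?"
 * check of the kind the description talks about, run while parsing the
 * disk entry and before any heavier initialization such as spawning qemu.
 * An empty path (cf. PHYSTYPE_EMPTY, e.g. an empty CD-ROM drive) would be
 * skipped by the caller before ever reaching this check. */
#include <sys/stat.h>
#include <stdio.h>

static int validate_virtual_disk_file(const char *path)
{
    struct stat st;

    if ( stat(path, &st) != 0 )
    {
        fprintf(stderr, "disk image %s is not accessible\n", path);
        return -1;
    }
    if ( !S_ISREG(st.st_mode) && !S_ISBLK(st.st_mode) )
    {
        fprintf(stderr, "disk image %s is neither a regular file "
                "nor a block device\n", path);
        return -1;
    }
    return 0;
}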
/******************************************************************************
 * domctl.c
 *
 * Domain management operations. For use by node control stack.
 *
 * Copyright (c) 2002-2006, K A Fraser
 */

#include <xen/config.h>
#include <xen/types.h>
#include <xen/lib.h>
#include <xen/mm.h>
#include <xen/sched.h>
#include <xen/sched-if.h>
#include <xen/domain.h>
#include <xen/event.h>
#include <xen/domain_page.h>
#include <xen/trace.h>
#include <xen/console.h>
#include <xen/iocap.h>
#include <xen/rcupdate.h>
#include <xen/guest_access.h>
#include <xen/bitmap.h>
#include <xen/paging.h>
#include <asm/current.h>
#include <public/domctl.h>
#include <xsm/xsm.h>
static DEFINE_SPINLOCK(domctl_lock);

extern long arch_do_domctl(
    struct xen_domctl *op, XEN_GUEST_HANDLE(xen_domctl_t) u_domctl);
int cpumask_to_xenctl_cpumap(
    struct xenctl_cpumap *xenctl_cpumap, cpumask_t *cpumask)
{
    unsigned int guest_bytes, copy_bytes, i;
    uint8_t zero = 0;
    uint8_t bytemap[(NR_CPUS + 7) / 8];

    guest_bytes = (xenctl_cpumap->nr_cpus + 7) / 8;
    copy_bytes = min_t(unsigned int, guest_bytes, sizeof(bytemap));

    bitmap_long_to_byte(bytemap, cpus_addr(*cpumask), NR_CPUS);

    if ( copy_bytes != 0 )
        if ( copy_to_guest(xenctl_cpumap->bitmap, bytemap, copy_bytes) )
            return -EFAULT;

    for ( i = copy_bytes; i < guest_bytes; i++ )
        if ( copy_to_guest_offset(xenctl_cpumap->bitmap, i, &zero, 1) )
            return -EFAULT;

    return 0;
}
int xenctl_cpumap_to_cpumask(
    cpumask_t *cpumask, struct xenctl_cpumap *xenctl_cpumap)
{
    unsigned int guest_bytes, copy_bytes;
    uint8_t bytemap[(NR_CPUS + 7) / 8];

    guest_bytes = (xenctl_cpumap->nr_cpus + 7) / 8;
    copy_bytes = min_t(unsigned int, guest_bytes, sizeof(bytemap));

    memset(bytemap, 0, sizeof(bytemap));

    if ( copy_bytes != 0 )
    {
        if ( copy_from_guest(bytemap, xenctl_cpumap->bitmap, copy_bytes) )
            return -EFAULT;
        if ( (xenctl_cpumap->nr_cpus & 7) && (guest_bytes <= sizeof(bytemap)) )
            bytemap[guest_bytes-1] &= ~(0xff << (xenctl_cpumap->nr_cpus & 7));
    }

    bitmap_byte_to_long(cpus_addr(*cpumask), bytemap, NR_CPUS);

    return 0;
}
static inline int is_free_domid(domid_t dom)
{
    struct domain *d;

    if ( dom >= DOMID_FIRST_RESERVED )
        return 0;

    if ( (d = rcu_lock_domain_by_id(dom)) == NULL )
        return 1;

    rcu_unlock_domain(d);
    return 0;
}
void getdomaininfo(struct domain *d, struct xen_domctl_getdomaininfo *info)
{
    struct vcpu *v;
    u64 cpu_time = 0;
    int flags = XEN_DOMINF_blocked;
    struct vcpu_runstate_info runstate;

    info->domain = d->domain_id;
    info->nr_online_vcpus = 0;
    info->ssidref = 0;

    /*
     * - domain is marked as blocked only if all its vcpus are blocked
     * - domain is marked as running if any of its vcpus is running
     */
    for_each_vcpu ( d, v )
    {
        vcpu_runstate_get(v, &runstate);
        cpu_time += runstate.time[RUNSTATE_running];
        info->max_vcpu_id = v->vcpu_id;
        if ( !test_bit(_VPF_down, &v->pause_flags) )
        {
            if ( !(v->pause_flags & VPF_blocked) )
                flags &= ~XEN_DOMINF_blocked;
            if ( v->is_running )
                flags |= XEN_DOMINF_running;
            info->nr_online_vcpus++;
        }
    }

    info->cpu_time = cpu_time;

    info->flags = (info->nr_online_vcpus ? flags : 0) |
        ((d->is_dying == DOMDYING_dead) ? XEN_DOMINF_dying : 0) |
        (d->is_shut_down ? XEN_DOMINF_shutdown : 0) |
        (d->is_paused_by_controller ? XEN_DOMINF_paused : 0) |
        (d->debugger_attached ? XEN_DOMINF_debugged : 0) |
        d->shutdown_code << XEN_DOMINF_shutdownshift;

    if ( is_hvm_domain(d) )
        info->flags |= XEN_DOMINF_hvm_guest;

    xsm_security_domaininfo(d, info);

    info->tot_pages = d->tot_pages;
    info->max_pages = d->max_pages;
    info->shr_pages = atomic_read(&d->shr_pages);
    info->shared_info_frame = mfn_to_gmfn(d, __pa(d->shared_info)>>PAGE_SHIFT);
    BUG_ON(SHARED_M2P(info->shared_info_frame));

    info->cpupool = d->cpupool ? d->cpupool->cpupool_id : CPUPOOLID_NONE;

    memcpy(info->handle, d->handle, sizeof(xen_domain_handle_t));
}
static unsigned int default_vcpu0_location(cpumask_t *online)
{
    struct domain *d;
    struct vcpu *v;
    unsigned int i, cpu, nr_cpus, *cnt;
    cpumask_t cpu_exclude_map;

    /* Do an initial CPU placement. Pick the least-populated CPU. */
    nr_cpus = last_cpu(cpu_online_map) + 1;
    cnt = xmalloc_array(unsigned int, nr_cpus);
    if ( cnt )
    {
        memset(cnt, 0, nr_cpus * sizeof(*cnt));

        rcu_read_lock(&domlist_read_lock);
        for_each_domain ( d )
            for_each_vcpu ( d, v )
                if ( !test_bit(_VPF_down, &v->pause_flags)
                     && ((cpu = v->processor) < nr_cpus) )
                    cnt[cpu]++;
        rcu_read_unlock(&domlist_read_lock);
    }

    /*
     * If we're on a HT system, we only auto-allocate to a non-primary HT. We
     * favour high numbered CPUs in the event of a tie.
     */
    cpu = first_cpu(per_cpu(cpu_sibling_map, 0));
    if ( cpus_weight(per_cpu(cpu_sibling_map, 0)) > 1 )
        cpu = next_cpu(cpu, per_cpu(cpu_sibling_map, 0));
    cpu_exclude_map = per_cpu(cpu_sibling_map, 0);
    for_each_cpu_mask(i, *online)
    {
        if ( cpu_isset(i, cpu_exclude_map) )
            continue;
        if ( (i == first_cpu(per_cpu(cpu_sibling_map, i))) &&
             (cpus_weight(per_cpu(cpu_sibling_map, i)) > 1) )
            continue;
        cpus_or(cpu_exclude_map, cpu_exclude_map, per_cpu(cpu_sibling_map, i));
        if ( !cnt || cnt[i] <= cnt[cpu] )
            cpu = i;
    }

    xfree(cnt);

    return cpu;
}
bool_t domctl_lock_acquire(void)
{
    /*
     * Caller may try to pause its own VCPUs. We must prevent deadlock
     * against other non-domctl routines which try to do the same.
     */
    if ( !spin_trylock(&current->domain->hypercall_deadlock_mutex) )
        return 0;

    /*
     * Trylock here is paranoia if we have multiple privileged domains. Then
     * we could have one domain trying to pause another which is spinning
     * on domctl_lock -- results in deadlock.
     */
    if ( spin_trylock(&domctl_lock) )
        return 1;

    spin_unlock(&current->domain->hypercall_deadlock_mutex);
    return 0;
}

void domctl_lock_release(void)
{
    spin_unlock(&domctl_lock);
    spin_unlock(&current->domain->hypercall_deadlock_mutex);
}
long do_domctl(XEN_GUEST_HANDLE(xen_domctl_t) u_domctl)
{
    long ret = 0;
    struct xen_domctl curop, *op = &curop;

    if ( copy_from_guest(op, u_domctl, 1) )
        return -EFAULT;

    if ( op->interface_version != XEN_DOMCTL_INTERFACE_VERSION )
        return -EACCES;

    switch ( op->cmd )
    {
    case XEN_DOMCTL_ioport_mapping:
    case XEN_DOMCTL_memory_mapping:
    case XEN_DOMCTL_bind_pt_irq:
    case XEN_DOMCTL_unbind_pt_irq: {
        struct domain *d;
        bool_t is_priv = IS_PRIV(current->domain);
        if ( !is_priv && ((d = rcu_lock_domain_by_id(op->domain)) != NULL) )
        {
            is_priv = IS_PRIV_FOR(current->domain, d);
            rcu_unlock_domain(d);
        }
        if ( !is_priv )
            return -EPERM;
        break;
    }
    default:
        if ( !IS_PRIV(current->domain) )
            return -EPERM;
        break;
    }

    if ( !domctl_lock_acquire() )
        return hypercall_create_continuation(
            __HYPERVISOR_domctl, "h", u_domctl);

    switch ( op->cmd )
    {
    case XEN_DOMCTL_setvcpucontext:
    {
        struct domain *d = rcu_lock_domain_by_id(op->domain);
        vcpu_guest_context_u c = { .nat = NULL };
        unsigned int vcpu = op->u.vcpucontext.vcpu;
        struct vcpu *v;

        ret = -ESRCH;
        if ( d == NULL )
            break;

        ret = xsm_setvcpucontext(d);
        if ( ret )
            goto svc_out;

        ret = -EINVAL;
        if ( (d == current->domain) || /* no domain_pause() */
             (vcpu >= d->max_vcpus) || ((v = d->vcpu[vcpu]) == NULL) )
            goto svc_out;

        if ( guest_handle_is_null(op->u.vcpucontext.ctxt) )
        {
            vcpu_reset(v);
            ret = 0;
            goto svc_out;
        }

#ifdef CONFIG_COMPAT
        BUILD_BUG_ON(sizeof(struct vcpu_guest_context)
                     < sizeof(struct compat_vcpu_guest_context));
#endif
        ret = -ENOMEM;
        if ( (c.nat = xmalloc(struct vcpu_guest_context)) == NULL )
            goto svc_out;

#ifdef CONFIG_COMPAT
        if ( !is_pv_32on64_vcpu(v) )
            ret = copy_from_guest(c.nat, op->u.vcpucontext.ctxt, 1);
        else
            ret = copy_from_guest(c.cmp,
                                  guest_handle_cast(op->u.vcpucontext.ctxt,
                                                    void), 1);
#else
        ret = copy_from_guest(c.nat, op->u.vcpucontext.ctxt, 1);
#endif
        ret = ret ? -EFAULT : 0;

        if ( ret == 0 )
        {
            domain_pause(d);
            ret = arch_set_info_guest(v, c);
            domain_unpause(d);
        }

    svc_out:
        xfree(c.nat);
        rcu_unlock_domain(d);
    }
    break;
    case XEN_DOMCTL_pausedomain:
    {
        struct domain *d = rcu_lock_domain_by_id(op->domain);
        ret = -ESRCH;
        if ( d != NULL )
        {
            ret = xsm_pausedomain(d);
            if ( ret )
                goto pausedomain_out;

            ret = -EINVAL;
            if ( d != current->domain )
            {
                domain_pause_by_systemcontroller(d);
                ret = 0;
            }
        pausedomain_out:
            rcu_unlock_domain(d);
        }
    }
    break;

    case XEN_DOMCTL_unpausedomain:
    {
        struct domain *d = rcu_lock_domain_by_id(op->domain);

        ret = -ESRCH;
        if ( d == NULL )
            break;

        ret = xsm_unpausedomain(d);
        if ( ret )
        {
            rcu_unlock_domain(d);
            break;
        }

        domain_unpause_by_systemcontroller(d);
        rcu_unlock_domain(d);
        ret = 0;
    }
    break;
    case XEN_DOMCTL_resumedomain:
    {
        struct domain *d = rcu_lock_domain_by_id(op->domain);

        ret = -ESRCH;
        if ( d == NULL )
            break;

        ret = xsm_resumedomain(d);
        if ( ret )
        {
            rcu_unlock_domain(d);
            break;
        }

        domain_resume(d);
        rcu_unlock_domain(d);
        ret = 0;
    }
    break;
    case XEN_DOMCTL_createdomain:
    {
        struct domain *d;
        domid_t dom;
        static domid_t rover = 0;
        unsigned int domcr_flags;

        ret = -EINVAL;
        if ( supervisor_mode_kernel ||
             (op->u.createdomain.flags &
              ~(XEN_DOMCTL_CDF_hvm_guest | XEN_DOMCTL_CDF_hap |
                XEN_DOMCTL_CDF_s3_integrity | XEN_DOMCTL_CDF_oos_off)) )
            break;

        dom = op->domain;
        if ( (dom > 0) && (dom < DOMID_FIRST_RESERVED) )
        {
            ret = -EINVAL;
            if ( !is_free_domid(dom) )
                break;
        }
        else
        {
            for ( dom = rover + 1; dom != rover; dom++ )
            {
                if ( dom == DOMID_FIRST_RESERVED )
                    dom = 0;
                if ( is_free_domid(dom) )
                    break;
            }

            ret = -ENOMEM;
            if ( dom == rover )
                break;

            rover = dom;
        }

        domcr_flags = 0;
        if ( op->u.createdomain.flags & XEN_DOMCTL_CDF_hvm_guest )
            domcr_flags |= DOMCRF_hvm;
        if ( op->u.createdomain.flags & XEN_DOMCTL_CDF_hap )
            domcr_flags |= DOMCRF_hap;
        if ( op->u.createdomain.flags & XEN_DOMCTL_CDF_s3_integrity )
            domcr_flags |= DOMCRF_s3_integrity;
        if ( op->u.createdomain.flags & XEN_DOMCTL_CDF_oos_off )
            domcr_flags |= DOMCRF_oos_off;

        ret = -ENOMEM;
        d = domain_create(dom, domcr_flags, op->u.createdomain.ssidref);
        if ( d == NULL )
            break;

        ret = 0;

        memcpy(d->handle, op->u.createdomain.handle,
               sizeof(xen_domain_handle_t));

        op->domain = d->domain_id;
        if ( copy_to_guest(u_domctl, op, 1) )
            ret = -EFAULT;
    }
    break;
    case XEN_DOMCTL_max_vcpus:
    {
        struct domain *d;
        unsigned int i, max = op->u.max_vcpus.max, cpu;
        cpumask_t *online;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(op->domain)) == NULL )
            break;

        ret = -EINVAL;
        if ( (d == current->domain) || /* no domain_pause() */
             (max > MAX_VIRT_CPUS) ||
             (is_hvm_domain(d) && (max > MAX_HVM_VCPUS)) )
        {
            rcu_unlock_domain(d);
            break;
        }

        ret = xsm_max_vcpus(d);
        if ( ret )
        {
            rcu_unlock_domain(d);
            break;
        }

        /* Until Xenoprof can dynamically grow its vcpu-s array... */
        if ( d->xenoprof )
        {
            rcu_unlock_domain(d);
            ret = -EAGAIN;
            break;
        }

        /* Needed, for example, to ensure writable p.t. state is synced. */
        domain_pause(d);

        /* We cannot reduce maximum VCPUs. */
        ret = -EINVAL;
        if ( (max < d->max_vcpus) && (d->vcpu[max] != NULL) )
            goto maxvcpu_out;

        /*
         * For now don't allow increasing the vcpu count from a non-zero
         * value: This code and all readers of d->vcpu would otherwise need
         * to be converted to use RCU, but at present there's no tools side
         * code path that would issue such a request.
         */
        ret = -EBUSY;
        if ( (d->max_vcpus > 0) && (max > d->max_vcpus) )
            goto maxvcpu_out;

        ret = -ENOMEM;
        online = (d->cpupool == NULL) ? &cpu_online_map : &d->cpupool->cpu_valid;
        if ( max > d->max_vcpus )
        {
            struct vcpu **vcpus;

            BUG_ON(d->vcpu != NULL);
            BUG_ON(d->max_vcpus != 0);

            if ( (vcpus = xmalloc_array(struct vcpu *, max)) == NULL )
                goto maxvcpu_out;
            memset(vcpus, 0, max * sizeof(*vcpus));

            /* Install vcpu array /then/ update max_vcpus. */
            d->vcpu = vcpus;
            wmb();
            d->max_vcpus = max;
        }

        for ( i = 0; i < max; i++ )
        {
            if ( d->vcpu[i] != NULL )
                continue;

            cpu = (i == 0) ?
                default_vcpu0_location(online) :
                cycle_cpu(d->vcpu[i-1]->processor, *online);

            if ( alloc_vcpu(d, i, cpu) == NULL )
                goto maxvcpu_out;
        }

        ret = 0;

    maxvcpu_out:
        domain_unpause(d);
        rcu_unlock_domain(d);
    }
    break;
    case XEN_DOMCTL_destroydomain:
    {
        struct domain *d = rcu_lock_domain_by_id(op->domain);
        ret = -ESRCH;
        if ( d != NULL )
        {
            ret = xsm_destroydomain(d) ? : domain_kill(d);
            rcu_unlock_domain(d);
        }
    }
    break;
    case XEN_DOMCTL_setvcpuaffinity:
    case XEN_DOMCTL_getvcpuaffinity:
    {
        domid_t dom = op->domain;
        struct domain *d = rcu_lock_domain_by_id(dom);
        struct vcpu *v;
        cpumask_t new_affinity;

        ret = -ESRCH;
        if ( d == NULL )
            break;

        ret = xsm_vcpuaffinity(op->cmd, d);
        if ( ret )
            goto vcpuaffinity_out;

        ret = -EINVAL;
        if ( op->u.vcpuaffinity.vcpu >= d->max_vcpus )
            goto vcpuaffinity_out;

        ret = -ESRCH;
        if ( (v = d->vcpu[op->u.vcpuaffinity.vcpu]) == NULL )
            goto vcpuaffinity_out;

        if ( op->cmd == XEN_DOMCTL_setvcpuaffinity )
        {
            ret = xenctl_cpumap_to_cpumask(
                &new_affinity, &op->u.vcpuaffinity.cpumap);
            if ( !ret )
                ret = vcpu_set_affinity(v, &new_affinity);
        }
        else
        {
            ret = cpumask_to_xenctl_cpumap(
                &op->u.vcpuaffinity.cpumap, &v->cpu_affinity);
        }

    vcpuaffinity_out:
        rcu_unlock_domain(d);
    }
    break;
    case XEN_DOMCTL_scheduler_op:
    {
        struct domain *d;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(op->domain)) == NULL )
            break;

        ret = xsm_scheduler(d);
        if ( ret )
            goto scheduler_op_out;

        ret = sched_adjust(d, &op->u.scheduler_op);
        if ( copy_to_guest(u_domctl, op, 1) )
            ret = -EFAULT;

    scheduler_op_out:
        rcu_unlock_domain(d);
    }
    break;
    case XEN_DOMCTL_getdomaininfo:
    {
        struct domain *d;
        domid_t dom = op->domain;

        rcu_read_lock(&domlist_read_lock);

        for_each_domain ( d )
            if ( d->domain_id >= dom )
                break;

        if ( d == NULL )
        {
            rcu_read_unlock(&domlist_read_lock);
            ret = -ESRCH;
            break;
        }

        ret = xsm_getdomaininfo(d);
        if ( ret )
            goto getdomaininfo_out;

        getdomaininfo(d, &op->u.getdomaininfo);

        op->domain = op->u.getdomaininfo.domain;
        if ( copy_to_guest(u_domctl, op, 1) )
            ret = -EFAULT;

    getdomaininfo_out:
        rcu_read_unlock(&domlist_read_lock);
    }
    break;
    case XEN_DOMCTL_getvcpucontext:
    {
        vcpu_guest_context_u c = { .nat = NULL };
        struct domain *d;
        struct vcpu *v;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(op->domain)) == NULL )
            break;

        ret = xsm_getvcpucontext(d);
        if ( ret )
            goto getvcpucontext_out;

        ret = -EINVAL;
        if ( op->u.vcpucontext.vcpu >= d->max_vcpus )
            goto getvcpucontext_out;

        ret = -ESRCH;
        if ( (v = d->vcpu[op->u.vcpucontext.vcpu]) == NULL )
            goto getvcpucontext_out;

        ret = -ENODATA;
        if ( !v->is_initialised )
            goto getvcpucontext_out;

#ifdef CONFIG_COMPAT
        BUILD_BUG_ON(sizeof(struct vcpu_guest_context)
                     < sizeof(struct compat_vcpu_guest_context));
#endif
        ret = -ENOMEM;
        if ( (c.nat = xmalloc(struct vcpu_guest_context)) == NULL )
            goto getvcpucontext_out;

        if ( v != current )
            vcpu_pause(v);

        arch_get_info_guest(v, c);
        ret = 0;

        if ( v != current )
            vcpu_unpause(v);

#ifdef CONFIG_COMPAT
        if ( !is_pv_32on64_vcpu(v) )
            ret = copy_to_guest(op->u.vcpucontext.ctxt, c.nat, 1);
        else
            ret = copy_to_guest(guest_handle_cast(op->u.vcpucontext.ctxt,
                                                  void), c.cmp, 1);
#else
        ret = copy_to_guest(op->u.vcpucontext.ctxt, c.nat, 1);
#endif

        if ( copy_to_guest(u_domctl, op, 1) || ret )
            ret = -EFAULT;

    getvcpucontext_out:
        xfree(c.nat);
        rcu_unlock_domain(d);
    }
    break;
    case XEN_DOMCTL_getvcpuinfo:
    {
        struct domain *d;
        struct vcpu *v;
        struct vcpu_runstate_info runstate;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(op->domain)) == NULL )
            break;

        ret = xsm_getvcpuinfo(d);
        if ( ret )
            goto getvcpuinfo_out;

        ret = -EINVAL;
        if ( op->u.getvcpuinfo.vcpu >= d->max_vcpus )
            goto getvcpuinfo_out;

        ret = -ESRCH;
        if ( (v = d->vcpu[op->u.getvcpuinfo.vcpu]) == NULL )
            goto getvcpuinfo_out;

        vcpu_runstate_get(v, &runstate);

        op->u.getvcpuinfo.online = !test_bit(_VPF_down, &v->pause_flags);
        op->u.getvcpuinfo.blocked = test_bit(_VPF_blocked, &v->pause_flags);
        op->u.getvcpuinfo.running = v->is_running;
        op->u.getvcpuinfo.cpu_time = runstate.time[RUNSTATE_running];
        op->u.getvcpuinfo.cpu = v->processor;
        ret = 0;

        if ( copy_to_guest(u_domctl, op, 1) )
            ret = -EFAULT;

    getvcpuinfo_out:
        rcu_unlock_domain(d);
    }
    break;
    case XEN_DOMCTL_max_mem:
    {
        struct domain *d;
        unsigned long new_max;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if ( d == NULL )
            break;

        ret = xsm_setdomainmaxmem(d);
        if ( ret )
            goto max_mem_out;

        ret = -EINVAL;
        new_max = op->u.max_mem.max_memkb >> (PAGE_SHIFT-10);

        spin_lock(&d->page_alloc_lock);
        /*
         * NB. We removed a check that new_max >= current tot_pages; this means
         * that the domain will now be allowed to "ratchet" down to new_max. In
         * the meantime, while tot > max, all new allocations are disallowed.
         */
        d->max_pages = new_max;
        ret = 0;
        spin_unlock(&d->page_alloc_lock);

    max_mem_out:
        rcu_unlock_domain(d);
    }
    break;
    case XEN_DOMCTL_setdomainhandle:
    {
        struct domain *d;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if ( d == NULL )
            break;

        ret = xsm_setdomainhandle(d);
        if ( ret )
        {
            rcu_unlock_domain(d);
            break;
        }

        memcpy(d->handle, op->u.setdomainhandle.handle,
               sizeof(xen_domain_handle_t));
        rcu_unlock_domain(d);
        ret = 0;
    }
    break;
    case XEN_DOMCTL_setdebugging:
    {
        struct domain *d;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if ( d == NULL )
            break;

        ret = -EINVAL;
        if ( d == current->domain ) /* no domain_pause() */
        {
            rcu_unlock_domain(d);
            break;
        }

        ret = xsm_setdebugging(d);
        if ( ret )
        {
            rcu_unlock_domain(d);
            break;
        }

        domain_pause(d);
        d->debugger_attached = !!op->u.setdebugging.enable;
        domain_unpause(d); /* causes guest to latch new status */
        rcu_unlock_domain(d);
        ret = 0;
    }
    break;
    case XEN_DOMCTL_irq_permission:
    {
        struct domain *d;
        unsigned int pirq = op->u.irq_permission.pirq;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if ( d == NULL )
            break;

        if ( pirq >= d->nr_pirqs )
            ret = -EINVAL;
        else if ( op->u.irq_permission.allow_access )
            ret = irq_permit_access(d, pirq);
        else
            ret = irq_deny_access(d, pirq);

        rcu_unlock_domain(d);
    }
    break;
    case XEN_DOMCTL_iomem_permission:
    {
        struct domain *d;
        unsigned long mfn = op->u.iomem_permission.first_mfn;
        unsigned long nr_mfns = op->u.iomem_permission.nr_mfns;

        ret = -EINVAL;
        if ( (mfn + nr_mfns - 1) < mfn ) /* wrap? */
            break;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if ( d == NULL )
            break;

        if ( op->u.iomem_permission.allow_access )
            ret = iomem_permit_access(d, mfn, mfn + nr_mfns - 1);
        else
            ret = iomem_deny_access(d, mfn, mfn + nr_mfns - 1);

        rcu_unlock_domain(d);
    }
    break;
    case XEN_DOMCTL_settimeoffset:
    {
        struct domain *d;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if ( d == NULL )
            break;

        ret = xsm_domain_settime(d);
        if ( ret )
        {
            rcu_unlock_domain(d);
            break;
        }

        domain_set_time_offset(d, op->u.settimeoffset.time_offset_seconds);
        rcu_unlock_domain(d);
        ret = 0;
    }
    break;
    case XEN_DOMCTL_set_target:
    {
        struct domain *d, *e;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if ( d == NULL )
            break;

        ret = -ESRCH;
        e = get_domain_by_id(op->u.set_target.target);
        if ( e == NULL )
            goto set_target_out;

        ret = -EINVAL;
        if ( (d == e) || (d->target != NULL) )
        {
            put_domain(e);
            goto set_target_out;
        }

        ret = xsm_set_target(d, e);
        if ( ret )
        {
            put_domain(e);
            goto set_target_out;
        }

        /* Hold reference on @e until we destroy @d. */
        d->target = e;

        ret = 0;

    set_target_out:
        rcu_unlock_domain(d);
    }
    break;
    case XEN_DOMCTL_subscribe:
    {
        struct domain *d;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if ( d != NULL )
        {
            d->suspend_evtchn = op->u.subscribe.port;
            rcu_unlock_domain(d);
            ret = 0;
        }
    }
    break;

    case XEN_DOMCTL_disable_migrate:
    {
        struct domain *d;
        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(op->domain)) != NULL )
        {
            d->disable_migrate = op->u.disable_migrate.disable;
            rcu_unlock_domain(d);
            ret = 0;
        }
    }
    break;
    default:
        ret = arch_do_domctl(op, u_domctl);
        break;
    }

    domctl_lock_release();

    return ret;
}
/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */