
view xen/common/domctl.c @ 19826:2f9e1348aa98

x86_64: allow more vCPU-s per guest

Since the shared info layout is fixed, guests are required to use
VCPUOP_register_vcpu_info prior to booting any vCPU beyond the
traditional limit of 32.
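
For illustration only, a minimal guest-side sketch of that registration
(assuming a Linux-style HYPERVISOR_vcpu_op() wrapper and the public
struct vcpu_register_vcpu_info; error handling omitted):

    /* Sketch: register a dedicated vcpu_info area for vcpu_id >= 32,
     * before VCPUOP_initialise/VCPUOP_up are issued for that vCPU. */
    static int register_vcpu_info(unsigned int vcpu_id,
                                  unsigned long mfn, unsigned int offset)
    {
        struct vcpu_register_vcpu_info info = {
            .mfn    = mfn,     /* frame holding the vcpu_info structure */
            .offset = offset,  /* byte offset of vcpu_info within that frame */
        };

        return HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, vcpu_id, &info);
    }

vCPUs 0-31 can keep using the vcpu_info slots embedded in the shared info
page; vCPUs beyond that have no slot there, hence the mandatory registration.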

MAX_VIRT_CPUS, being an implementation detail of the hypervisor, is no
longer exposed in the public headers.

The tools changes are clearly incomplete (and done only so things would
build again), and the current state of the tools (using scalar variables
all over the place to represent vCPU bitmaps) very likely doesn't permit
booting DomU-s with more than the traditional number of vCPU-s. Testing
of the extended functionality was done with Dom0 (96 vCPU-s, as well as
128 vCPU-s out of which the kernel elected - by way of a simple kernel
side patch - to use only some, resulting in a sparse bitmap).

The ia64 changes are only to make things build, and are build-tested only
(the tools part only as far as the build would go without encountering
unrelated problems in the blktap code).

Signed-off-by: Jan Beulich <jbeulich@novell.com>
author Keir Fraser <keir.fraser@citrix.com>
date Thu Jun 18 10:14:16 2009 +0100 (2009-06-18)
parents 6705898f768d
children 468561f3c8ee
line source
/******************************************************************************
 * domctl.c
 *
 * Domain management operations. For use by node control stack.
 *
 * Copyright (c) 2002-2006, K A Fraser
 */

#include <xen/config.h>
#include <xen/types.h>
#include <xen/lib.h>
#include <xen/mm.h>
#include <xen/sched.h>
#include <xen/domain.h>
#include <xen/event.h>
#include <xen/domain_page.h>
#include <xen/trace.h>
#include <xen/console.h>
#include <xen/iocap.h>
#include <xen/rcupdate.h>
#include <xen/guest_access.h>
#include <xen/bitmap.h>
#include <xen/paging.h>
#include <asm/current.h>
#include <public/domctl.h>
#include <xsm/xsm.h>

static DEFINE_SPINLOCK(domctl_lock);

extern long arch_do_domctl(
    struct xen_domctl *op, XEN_GUEST_HANDLE(xen_domctl_t) u_domctl);

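/*
 * Helpers translating between Xen's internal cpumask_t and the
 * variable-width byte bitmap (struct xenctl_cpumap) exchanged with the
 * toolstack: on the way out, guest bytes beyond what the hypervisor
 * bitmap covers are zero-filled; on the way in, stray bits beyond
 * nr_cpus are masked off.
 */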
void cpumask_to_xenctl_cpumap(
    struct xenctl_cpumap *xenctl_cpumap, cpumask_t *cpumask)
{
    unsigned int guest_bytes, copy_bytes, i;
    uint8_t zero = 0;
    uint8_t bytemap[(NR_CPUS + 7) / 8];

    if ( guest_handle_is_null(xenctl_cpumap->bitmap) )
        return;

    guest_bytes = (xenctl_cpumap->nr_cpus + 7) / 8;
    copy_bytes = min_t(unsigned int, guest_bytes, sizeof(bytemap));

    bitmap_long_to_byte(bytemap, cpus_addr(*cpumask), NR_CPUS);

    if ( copy_bytes != 0 )
        copy_to_guest(xenctl_cpumap->bitmap, bytemap, copy_bytes);

    for ( i = copy_bytes; i < guest_bytes; i++ )
        copy_to_guest_offset(xenctl_cpumap->bitmap, i, &zero, 1);
}

void xenctl_cpumap_to_cpumask(
    cpumask_t *cpumask, struct xenctl_cpumap *xenctl_cpumap)
{
    unsigned int guest_bytes, copy_bytes;
    uint8_t bytemap[(NR_CPUS + 7) / 8];

    if ( guest_handle_is_null(xenctl_cpumap->bitmap) )
        return;

    guest_bytes = (xenctl_cpumap->nr_cpus + 7) / 8;
    copy_bytes = min_t(unsigned int, guest_bytes, sizeof(bytemap));

    memset(bytemap, 0, sizeof(bytemap));

    if ( copy_bytes != 0 )
    {
        copy_from_guest(bytemap, xenctl_cpumap->bitmap, copy_bytes);
        if ( (xenctl_cpumap->nr_cpus & 7) && (guest_bytes <= sizeof(bytemap)) )
            bytemap[guest_bytes-1] &= ~(0xff << (xenctl_cpumap->nr_cpus & 7));
    }

    bitmap_byte_to_long(cpus_addr(*cpumask), bytemap, NR_CPUS);
}

static inline int is_free_domid(domid_t dom)
{
    struct domain *d;

    if ( dom >= DOMID_FIRST_RESERVED )
        return 0;

    if ( (d = rcu_lock_domain_by_id(dom)) == NULL )
        return 1;

    rcu_unlock_domain(d);
    return 0;
}

void getdomaininfo(struct domain *d, struct xen_domctl_getdomaininfo *info)
{
    struct vcpu *v;
    u64 cpu_time = 0;
    int flags = XEN_DOMINF_blocked;
    struct vcpu_runstate_info runstate;

    info->domain = d->domain_id;
    info->nr_online_vcpus = 0;
    info->ssidref = 0;

    /*
     * - domain is marked as blocked only if all its vcpus are blocked
     * - domain is marked as running if any of its vcpus is running
     */
    for_each_vcpu ( d, v )
    {
        vcpu_runstate_get(v, &runstate);
        cpu_time += runstate.time[RUNSTATE_running];
        info->max_vcpu_id = v->vcpu_id;
        if ( !test_bit(_VPF_down, &v->pause_flags) )
        {
            if ( !(v->pause_flags & VPF_blocked) )
                flags &= ~XEN_DOMINF_blocked;
            if ( v->is_running )
                flags |= XEN_DOMINF_running;
            info->nr_online_vcpus++;
        }
    }

    info->cpu_time = cpu_time;

    info->flags = (info->nr_online_vcpus ? flags : 0) |
        ((d->is_dying == DOMDYING_dead) ? XEN_DOMINF_dying : 0) |
        (d->is_shut_down ? XEN_DOMINF_shutdown : 0) |
        (d->is_paused_by_controller ? XEN_DOMINF_paused : 0) |
        (d->debugger_attached ? XEN_DOMINF_debugged : 0) |
        d->shutdown_code << XEN_DOMINF_shutdownshift;

    if ( is_hvm_domain(d) )
        info->flags |= XEN_DOMINF_hvm_guest;

    xsm_security_domaininfo(d, info);

    info->tot_pages = d->tot_pages;
    info->max_pages = d->max_pages;
    info->shared_info_frame = mfn_to_gmfn(d, __pa(d->shared_info)>>PAGE_SHIFT);

    memcpy(info->handle, d->handle, sizeof(xen_domain_handle_t));
}
static unsigned int default_vcpu0_location(void)
{
    struct domain *d;
    struct vcpu *v;
    unsigned int i, cpu, nr_cpus, *cnt;
    cpumask_t cpu_exclude_map;

    /* Do an initial CPU placement. Pick the least-populated CPU. */
    nr_cpus = last_cpu(cpu_possible_map) + 1;
    cnt = xmalloc_array(unsigned int, nr_cpus);
    if ( cnt )
    {
        memset(cnt, 0, nr_cpus * sizeof(*cnt));

        rcu_read_lock(&domlist_read_lock);
        for_each_domain ( d )
            for_each_vcpu ( d, v )
                if ( !test_bit(_VPF_down, &v->pause_flags) )
                    cnt[v->processor]++;
        rcu_read_unlock(&domlist_read_lock);
    }

    /*
     * If we're on a HT system, we only auto-allocate to a non-primary HT. We
     * favour high numbered CPUs in the event of a tie.
     */
    cpu = first_cpu(cpu_sibling_map[0]);
    if ( cpus_weight(cpu_sibling_map[0]) > 1 )
        cpu = next_cpu(cpu, cpu_sibling_map[0]);
    cpu_exclude_map = cpu_sibling_map[0];
    for_each_online_cpu ( i )
    {
        if ( cpu_isset(i, cpu_exclude_map) )
            continue;
        if ( (i == first_cpu(cpu_sibling_map[i])) &&
             (cpus_weight(cpu_sibling_map[i]) > 1) )
            continue;
        cpus_or(cpu_exclude_map, cpu_exclude_map, cpu_sibling_map[i]);
        if ( !cnt || cnt[i] <= cnt[cpu] )
            cpu = i;
    }

    xfree(cnt);

    return cpu;
}

bool_t domctl_lock_acquire(void)
{
    /*
     * Caller may try to pause its own VCPUs. We must prevent deadlock
     * against other non-domctl routines which try to do the same.
     */
    if ( !spin_trylock(&current->domain->hypercall_deadlock_mutex) )
        return 0;

    /*
     * Trylock here is paranoia if we have multiple privileged domains. Then
     * we could have one domain trying to pause another which is spinning
     * on domctl_lock -- results in deadlock.
     */
    if ( spin_trylock(&domctl_lock) )
        return 1;

    spin_unlock(&current->domain->hypercall_deadlock_mutex);
    return 0;
}

void domctl_lock_release(void)
{
    spin_unlock(&domctl_lock);
    spin_unlock(&current->domain->hypercall_deadlock_mutex);
}
long do_domctl(XEN_GUEST_HANDLE(xen_domctl_t) u_domctl)
{
    long ret = 0;
    struct xen_domctl curop, *op = &curop;

    if ( !IS_PRIV(current->domain) )
        return -EPERM;

    if ( copy_from_guest(op, u_domctl, 1) )
        return -EFAULT;

    if ( op->interface_version != XEN_DOMCTL_INTERFACE_VERSION )
        return -EACCES;

    if ( !domctl_lock_acquire() )
        return hypercall_create_continuation(
            __HYPERVISOR_domctl, "h", u_domctl);

    switch ( op->cmd )
    {

    case XEN_DOMCTL_setvcpucontext:
    {
        struct domain *d = rcu_lock_domain_by_id(op->domain);
        vcpu_guest_context_u c = { .nat = NULL };
        unsigned int vcpu = op->u.vcpucontext.vcpu;
        struct vcpu *v;

        ret = -ESRCH;
        if ( d == NULL )
            break;

        ret = xsm_setvcpucontext(d);
        if ( ret )
            goto svc_out;

        ret = -EINVAL;
        if ( (d == current->domain) || /* no domain_pause() */
             (vcpu >= d->max_vcpus) || ((v = d->vcpu[vcpu]) == NULL) )
            goto svc_out;

        if ( guest_handle_is_null(op->u.vcpucontext.ctxt) )
        {
            vcpu_reset(v);
            ret = 0;
            goto svc_out;
        }

#ifdef CONFIG_COMPAT
        BUILD_BUG_ON(sizeof(struct vcpu_guest_context)
                     < sizeof(struct compat_vcpu_guest_context));
#endif
        ret = -ENOMEM;
        if ( (c.nat = xmalloc(struct vcpu_guest_context)) == NULL )
            goto svc_out;

#ifdef CONFIG_COMPAT
        if ( !is_pv_32on64_vcpu(v) )
            ret = copy_from_guest(c.nat, op->u.vcpucontext.ctxt, 1);
        else
            ret = copy_from_guest(c.cmp,
                                  guest_handle_cast(op->u.vcpucontext.ctxt,
                                                    void), 1);
#else
        ret = copy_from_guest(c.nat, op->u.vcpucontext.ctxt, 1);
#endif
        ret = ret ? -EFAULT : 0;

        if ( ret == 0 )
        {
            domain_pause(d);
            ret = arch_set_info_guest(v, c);
            domain_unpause(d);
        }

    svc_out:
        xfree(c.nat);
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_pausedomain:
    {
        struct domain *d = rcu_lock_domain_by_id(op->domain);
        ret = -ESRCH;
        if ( d != NULL )
        {
            ret = xsm_pausedomain(d);
            if ( ret )
                goto pausedomain_out;

            ret = -EINVAL;
            if ( d != current->domain )
            {
                domain_pause_by_systemcontroller(d);
                ret = 0;
            }
        pausedomain_out:
            rcu_unlock_domain(d);
        }
    }
    break;
    case XEN_DOMCTL_unpausedomain:
    {
        struct domain *d = rcu_lock_domain_by_id(op->domain);

        ret = -ESRCH;
        if ( d == NULL )
            break;

        ret = xsm_unpausedomain(d);
        if ( ret )
        {
            rcu_unlock_domain(d);
            break;
        }

        domain_unpause_by_systemcontroller(d);
        rcu_unlock_domain(d);
        ret = 0;
    }
    break;

    case XEN_DOMCTL_resumedomain:
    {
        struct domain *d = rcu_lock_domain_by_id(op->domain);

        ret = -ESRCH;
        if ( d == NULL )
            break;

        ret = xsm_resumedomain(d);
        if ( ret )
        {
            rcu_unlock_domain(d);
            break;
        }

        domain_resume(d);
        rcu_unlock_domain(d);
        ret = 0;
    }
    break;

    case XEN_DOMCTL_createdomain:
    {
        struct domain *d;
        domid_t dom;
        static domid_t rover = 0;
        unsigned int domcr_flags;

        ret = -EINVAL;
        if ( supervisor_mode_kernel ||
             (op->u.createdomain.flags &
             ~(XEN_DOMCTL_CDF_hvm_guest | XEN_DOMCTL_CDF_hap |
               XEN_DOMCTL_CDF_s3_integrity)) )
            break;

        dom = op->domain;
        if ( (dom > 0) && (dom < DOMID_FIRST_RESERVED) )
        {
            ret = -EINVAL;
            if ( !is_free_domid(dom) )
                break;
        }
        else
        {
            for ( dom = rover + 1; dom != rover; dom++ )
            {
                if ( dom == DOMID_FIRST_RESERVED )
                    dom = 0;
                if ( is_free_domid(dom) )
                    break;
            }

            ret = -ENOMEM;
            if ( dom == rover )
                break;

            rover = dom;
        }

        domcr_flags = 0;
        if ( op->u.createdomain.flags & XEN_DOMCTL_CDF_hvm_guest )
            domcr_flags |= DOMCRF_hvm;
        if ( op->u.createdomain.flags & XEN_DOMCTL_CDF_hap )
            domcr_flags |= DOMCRF_hap;
        if ( op->u.createdomain.flags & XEN_DOMCTL_CDF_s3_integrity )
            domcr_flags |= DOMCRF_s3_integrity;

        ret = -ENOMEM;
        d = domain_create(dom, domcr_flags, op->u.createdomain.ssidref);
        if ( d == NULL )
            break;

        ret = 0;

        memcpy(d->handle, op->u.createdomain.handle,
               sizeof(xen_domain_handle_t));

        op->domain = d->domain_id;
        if ( copy_to_guest(u_domctl, op, 1) )
            ret = -EFAULT;
    }
    break;
    case XEN_DOMCTL_max_vcpus:
    {
        struct domain *d;
        unsigned int i, max = op->u.max_vcpus.max, cpu;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(op->domain)) == NULL )
            break;

        ret = -EINVAL;
        if ( (d == current->domain) || /* no domain_pause() */
             (max > MAX_VIRT_CPUS) ||
             (is_hvm_domain(d) && max > XEN_LEGACY_MAX_VCPUS) )
        {
            rcu_unlock_domain(d);
            break;
        }

        ret = xsm_max_vcpus(d);
        if ( ret )
        {
            rcu_unlock_domain(d);
            break;
        }

        /* Until Xenoprof can dynamically grow its vcpu-s array... */
        if ( d->xenoprof )
        {
            rcu_unlock_domain(d);
            ret = -EAGAIN;
            break;
        }

        /* Needed, for example, to ensure writable p.t. state is synced. */
        domain_pause(d);

        /* We cannot reduce maximum VCPUs. */
        ret = -EINVAL;
        if ( (max < d->max_vcpus) && (d->vcpu[max] != NULL) )
            goto maxvcpu_out;

        ret = -ENOMEM;
        if ( max > d->max_vcpus )
        {
            struct vcpu **vcpus = xmalloc_array(struct vcpu *, max);
            void *ptr;

            if ( !vcpus )
                goto maxvcpu_out;
            memcpy(vcpus, d->vcpu, d->max_vcpus * sizeof(*vcpus));
            memset(vcpus + d->max_vcpus, 0,
                   (max - d->max_vcpus) * sizeof(*vcpus));

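            /*
             * Install the new, larger array before updating d->max_vcpus:
             * the write barrier below keeps lockless readers from indexing
             * the old array with the new, larger bound.
             */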
            ptr = d->vcpu;
            d->vcpu = vcpus;
            wmb();
            d->max_vcpus = max;
            xfree(ptr);
        }
        for ( i = 0; i < max; i++ )
        {
            if ( d->vcpu[i] != NULL )
                continue;

            cpu = (i == 0) ?
                default_vcpu0_location() :
                cycle_cpu(d->vcpu[i-1]->processor, cpu_online_map);

            if ( alloc_vcpu(d, i, cpu) == NULL )
                goto maxvcpu_out;
        }

        ret = 0;

    maxvcpu_out:
        domain_unpause(d);
        rcu_unlock_domain(d);
    }
    break;
    case XEN_DOMCTL_destroydomain:
    {
        struct domain *d = rcu_lock_domain_by_id(op->domain);
        ret = -ESRCH;
        if ( d != NULL )
        {
            ret = xsm_destroydomain(d) ? : domain_kill(d);
            rcu_unlock_domain(d);
        }
    }
    break;

    case XEN_DOMCTL_setvcpuaffinity:
    case XEN_DOMCTL_getvcpuaffinity:
    {
        domid_t dom = op->domain;
        struct domain *d = rcu_lock_domain_by_id(dom);
        struct vcpu *v;
        cpumask_t new_affinity;

        ret = -ESRCH;
        if ( d == NULL )
            break;

        ret = xsm_vcpuaffinity(op->cmd, d);
        if ( ret )
            goto vcpuaffinity_out;

        ret = -EINVAL;
        if ( op->u.vcpuaffinity.vcpu >= d->max_vcpus )
            goto vcpuaffinity_out;

        ret = -ESRCH;
        if ( (v = d->vcpu[op->u.vcpuaffinity.vcpu]) == NULL )
            goto vcpuaffinity_out;

        if ( op->cmd == XEN_DOMCTL_setvcpuaffinity )
        {
            xenctl_cpumap_to_cpumask(
                &new_affinity, &op->u.vcpuaffinity.cpumap);
            ret = vcpu_set_affinity(v, &new_affinity);
        }
        else
        {
            cpumask_to_xenctl_cpumap(
                &op->u.vcpuaffinity.cpumap, &v->cpu_affinity);
            ret = 0;
        }

    vcpuaffinity_out:
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_scheduler_op:
    {
        struct domain *d;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(op->domain)) == NULL )
            break;

        ret = xsm_scheduler(d);
        if ( ret )
            goto scheduler_op_out;

        ret = sched_adjust(d, &op->u.scheduler_op);
        if ( copy_to_guest(u_domctl, op, 1) )
            ret = -EFAULT;

    scheduler_op_out:
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_getdomaininfo:
    {
        struct domain *d;
        domid_t dom = op->domain;

        rcu_read_lock(&domlist_read_lock);

        for_each_domain ( d )
            if ( d->domain_id >= dom )
                break;

        if ( d == NULL )
        {
            rcu_read_unlock(&domlist_read_lock);
            ret = -ESRCH;
            break;
        }

        ret = xsm_getdomaininfo(d);
        if ( ret )
            goto getdomaininfo_out;

        getdomaininfo(d, &op->u.getdomaininfo);

        op->domain = op->u.getdomaininfo.domain;
        if ( copy_to_guest(u_domctl, op, 1) )
            ret = -EFAULT;

    getdomaininfo_out:
        rcu_read_unlock(&domlist_read_lock);
    }
    break;

    case XEN_DOMCTL_getvcpucontext:
    {
        vcpu_guest_context_u c = { .nat = NULL };
        struct domain *d;
        struct vcpu *v;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(op->domain)) == NULL )
            break;

        ret = xsm_getvcpucontext(d);
        if ( ret )
            goto getvcpucontext_out;

        ret = -EINVAL;
        if ( op->u.vcpucontext.vcpu >= d->max_vcpus )
            goto getvcpucontext_out;

        ret = -ESRCH;
        if ( (v = d->vcpu[op->u.vcpucontext.vcpu]) == NULL )
            goto getvcpucontext_out;

        ret = -ENODATA;
        if ( !v->is_initialised )
            goto getvcpucontext_out;

#ifdef CONFIG_COMPAT
        BUILD_BUG_ON(sizeof(struct vcpu_guest_context)
                     < sizeof(struct compat_vcpu_guest_context));
#endif
        ret = -ENOMEM;
        if ( (c.nat = xmalloc(struct vcpu_guest_context)) == NULL )
            goto getvcpucontext_out;

        if ( v != current )
            vcpu_pause(v);

        arch_get_info_guest(v, c);
        ret = 0;

        if ( v != current )
            vcpu_unpause(v);

#ifdef CONFIG_COMPAT
        if ( !is_pv_32on64_vcpu(v) )
            ret = copy_to_guest(op->u.vcpucontext.ctxt, c.nat, 1);
        else
            ret = copy_to_guest(guest_handle_cast(op->u.vcpucontext.ctxt,
                                                  void), c.cmp, 1);
#else
        ret = copy_to_guest(op->u.vcpucontext.ctxt, c.nat, 1);
#endif

        if ( copy_to_guest(u_domctl, op, 1) || ret )
            ret = -EFAULT;

    getvcpucontext_out:
        xfree(c.nat);
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_getvcpuinfo:
    {
        struct domain *d;
        struct vcpu *v;
        struct vcpu_runstate_info runstate;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(op->domain)) == NULL )
            break;

        ret = xsm_getvcpuinfo(d);
        if ( ret )
            goto getvcpuinfo_out;

        ret = -EINVAL;
        if ( op->u.getvcpuinfo.vcpu >= d->max_vcpus )
            goto getvcpuinfo_out;

        ret = -ESRCH;
        if ( (v = d->vcpu[op->u.getvcpuinfo.vcpu]) == NULL )
            goto getvcpuinfo_out;

        vcpu_runstate_get(v, &runstate);

        op->u.getvcpuinfo.online = !test_bit(_VPF_down, &v->pause_flags);
        op->u.getvcpuinfo.blocked = test_bit(_VPF_blocked, &v->pause_flags);
        op->u.getvcpuinfo.running = v->is_running;
        op->u.getvcpuinfo.cpu_time = runstate.time[RUNSTATE_running];
        op->u.getvcpuinfo.cpu = v->processor;
        ret = 0;

        if ( copy_to_guest(u_domctl, op, 1) )
            ret = -EFAULT;

    getvcpuinfo_out:
        rcu_unlock_domain(d);
    }
    break;
    case XEN_DOMCTL_max_mem:
    {
        struct domain *d;
        unsigned long new_max;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if ( d == NULL )
            break;

        ret = xsm_setdomainmaxmem(d);
        if ( ret )
            goto max_mem_out;

        ret = -EINVAL;
        new_max = op->u.max_mem.max_memkb >> (PAGE_SHIFT-10);

        spin_lock(&d->page_alloc_lock);
        if ( new_max >= d->tot_pages )
        {
            d->max_pages = new_max;
            ret = 0;
        }
        spin_unlock(&d->page_alloc_lock);

    max_mem_out:
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_setdomainhandle:
    {
        struct domain *d;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if ( d == NULL )
            break;

        ret = xsm_setdomainhandle(d);
        if ( ret )
        {
            rcu_unlock_domain(d);
            break;
        }

        memcpy(d->handle, op->u.setdomainhandle.handle,
               sizeof(xen_domain_handle_t));
        rcu_unlock_domain(d);
        ret = 0;
    }
    break;

    case XEN_DOMCTL_setdebugging:
    {
        struct domain *d;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if ( d == NULL )
            break;

        ret = -EINVAL;
        if ( d == current->domain ) /* no domain_pause() */
        {
            rcu_unlock_domain(d);
            break;
        }

        ret = xsm_setdebugging(d);
        if ( ret )
        {
            rcu_unlock_domain(d);
            break;
        }

        domain_pause(d);
        d->debugger_attached = !!op->u.setdebugging.enable;
        domain_unpause(d); /* causes guest to latch new status */
        rcu_unlock_domain(d);
        ret = 0;
    }
    break;

    case XEN_DOMCTL_irq_permission:
    {
        struct domain *d;
        unsigned int pirq = op->u.irq_permission.pirq;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if ( d == NULL )
            break;

        if ( pirq >= d->nr_pirqs )
            ret = -EINVAL;
        else if ( op->u.irq_permission.allow_access )
            ret = irq_permit_access(d, pirq);
        else
            ret = irq_deny_access(d, pirq);

        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_iomem_permission:
    {
        struct domain *d;
        unsigned long mfn = op->u.iomem_permission.first_mfn;
        unsigned long nr_mfns = op->u.iomem_permission.nr_mfns;

        ret = -EINVAL;
        if ( (mfn + nr_mfns - 1) < mfn ) /* wrap? */
            break;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if ( d == NULL )
            break;

        if ( op->u.iomem_permission.allow_access )
            ret = iomem_permit_access(d, mfn, mfn + nr_mfns - 1);
        else
            ret = iomem_deny_access(d, mfn, mfn + nr_mfns - 1);

        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_settimeoffset:
    {
        struct domain *d;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if ( d == NULL )
            break;

        ret = xsm_domain_settime(d);
        if ( ret )
        {
            rcu_unlock_domain(d);
            break;
        }

        domain_set_time_offset(d, op->u.settimeoffset.time_offset_seconds);
        rcu_unlock_domain(d);
        ret = 0;
    }
    break;

    case XEN_DOMCTL_set_target:
    {
        struct domain *d, *e;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if ( d == NULL )
            break;

        ret = -ESRCH;
        e = get_domain_by_id(op->u.set_target.target);
        if ( e == NULL )
            goto set_target_out;

        ret = -EINVAL;
        if ( (d == e) || (d->target != NULL) )
        {
            put_domain(e);
            goto set_target_out;
        }

        ret = xsm_set_target(d, e);
        if ( ret ) {
            put_domain(e);
            goto set_target_out;
        }

        /* Hold reference on @e until we destroy @d. */
        d->target = e;

        ret = 0;

    set_target_out:
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_subscribe:
    {
        struct domain *d;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if ( d != NULL )
        {
            d->suspend_evtchn = op->u.subscribe.port;
            rcu_unlock_domain(d);
            ret = 0;
        }
    }
    break;

    default:
        ret = arch_do_domctl(op, u_domctl);
        break;
    }

    domctl_lock_release();

    return ret;
}
/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */