debuggers.hg

xen/common/domctl.c @ 0:7d21f7218375

summary:  Exact replica of unstable on 051908 + README-this
author:   Mukesh Rathor
date:     Mon May 19 15:34:57 2008 -0700
children: d668634e4f22
/******************************************************************************
 * domctl.c
 *
 * Domain management operations. For use by node control stack.
 *
 * Copyright (c) 2002-2006, K A Fraser
 */

#include <xen/config.h>
#include <xen/types.h>
#include <xen/lib.h>
#include <xen/mm.h>
#include <xen/sched.h>
#include <xen/domain.h>
#include <xen/event.h>
#include <xen/domain_page.h>
#include <xen/trace.h>
#include <xen/console.h>
#include <xen/iocap.h>
#include <xen/rcupdate.h>
#include <xen/guest_access.h>
#include <xen/bitmap.h>
#include <xen/paging.h>
#include <asm/current.h>
#include <public/domctl.h>
#include <xsm/xsm.h>

DEFINE_SPINLOCK(domctl_lock);

extern long arch_do_domctl(
    struct xen_domctl *op, XEN_GUEST_HANDLE(xen_domctl_t) u_domctl);
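
/*
 * Copy a hypervisor-side cpumask_t out to the byte-granularity bitmap the
 * guest supplies in a struct xenctl_cpumap.  Guest bytes beyond what the
 * local bytemap covers are explicitly zeroed rather than left unwritten.
 */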
void cpumask_to_xenctl_cpumap(
    struct xenctl_cpumap *xenctl_cpumap, cpumask_t *cpumask)
{
    unsigned int guest_bytes, copy_bytes, i;
    uint8_t zero = 0;
    uint8_t bytemap[(NR_CPUS + 7) / 8];

    if ( guest_handle_is_null(xenctl_cpumap->bitmap) )
        return;

    guest_bytes = (xenctl_cpumap->nr_cpus + 7) / 8;
    copy_bytes = min_t(unsigned int, guest_bytes, sizeof(bytemap));

    bitmap_long_to_byte(bytemap, cpus_addr(*cpumask), NR_CPUS);

    if ( copy_bytes != 0 )
        copy_to_guest(xenctl_cpumap->bitmap, bytemap, copy_bytes);

    for ( i = copy_bytes; i < guest_bytes; i++ )
        copy_to_guest_offset(xenctl_cpumap->bitmap, i, &zero, 1);
}
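
/*
 * Inverse of cpumask_to_xenctl_cpumap(): pull a guest-provided cpumap into
 * a hypervisor cpumask_t, masking off any bits beyond nr_cpus in the final
 * partial byte.
 */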
void xenctl_cpumap_to_cpumask(
    cpumask_t *cpumask, struct xenctl_cpumap *xenctl_cpumap)
{
    unsigned int guest_bytes, copy_bytes;
    uint8_t bytemap[(NR_CPUS + 7) / 8];

    if ( guest_handle_is_null(xenctl_cpumap->bitmap) )
        return;

    guest_bytes = (xenctl_cpumap->nr_cpus + 7) / 8;
    copy_bytes = min_t(unsigned int, guest_bytes, sizeof(bytemap));

    memset(bytemap, 0, sizeof(bytemap));

    if ( copy_bytes != 0 )
    {
        copy_from_guest(bytemap, xenctl_cpumap->bitmap, copy_bytes);
        if ( (xenctl_cpumap->nr_cpus & 7) && (guest_bytes <= sizeof(bytemap)) )
            bytemap[guest_bytes-1] &= ~(0xff << (xenctl_cpumap->nr_cpus & 7));
    }

    bitmap_byte_to_long(cpus_addr(*cpumask), bytemap, NR_CPUS);
}
static inline int is_free_domid(domid_t dom)
{
    struct domain *d;

    if ( dom >= DOMID_FIRST_RESERVED )
        return 0;

    if ( (d = rcu_lock_domain_by_id(dom)) == NULL )
        return 1;

    rcu_unlock_domain(d);
    return 0;
}
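
/*
 * Fill in a xen_domctl_getdomaininfo from live domain state.  The caller
 * must keep the domain alive across the call, e.g. by holding the domlist
 * RCU read lock or a domain reference.
 */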
void getdomaininfo(struct domain *d, struct xen_domctl_getdomaininfo *info)
{
    struct vcpu *v;
    u64 cpu_time = 0;
    int flags = XEN_DOMINF_blocked;
    struct vcpu_runstate_info runstate;

    info->domain = d->domain_id;
    info->nr_online_vcpus = 0;

    /*
     * - domain is marked as blocked only if all its vcpus are blocked
     * - domain is marked as running if any of its vcpus is running
     */
    for_each_vcpu ( d, v )
    {
        vcpu_runstate_get(v, &runstate);
        cpu_time += runstate.time[RUNSTATE_running];
        info->max_vcpu_id = v->vcpu_id;
        if ( !test_bit(_VPF_down, &v->pause_flags) )
        {
            if ( !(v->pause_flags & VPF_blocked) )
                flags &= ~XEN_DOMINF_blocked;
            if ( v->is_running )
                flags |= XEN_DOMINF_running;
            info->nr_online_vcpus++;
        }
    }

    info->cpu_time = cpu_time;

    info->flags = (info->nr_online_vcpus ? flags : 0) |
        ((d->is_dying == DOMDYING_dead) ? XEN_DOMINF_dying : 0) |
        (d->is_shut_down ? XEN_DOMINF_shutdown : 0) |
        (d->is_paused_by_controller ? XEN_DOMINF_paused : 0) |
        (d->debugger_attached ? XEN_DOMINF_debugged : 0) |
        d->shutdown_code << XEN_DOMINF_shutdownshift;

    if ( is_hvm_domain(d) )
        info->flags |= XEN_DOMINF_hvm_guest;

    xsm_security_domaininfo(d, info);

    info->tot_pages = d->tot_pages;
    info->max_pages = d->max_pages;
    info->shared_info_frame = mfn_to_gmfn(d, __pa(d->shared_info)>>PAGE_SHIFT);

    memcpy(info->handle, d->handle, sizeof(xen_domain_handle_t));
}
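
/*
 * Pick an initial physical CPU for a new domain's VCPU0: the least
 * populated online CPU, skipping primary hyperthreads so that placement
 * favours idle sibling threads.
 */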
static unsigned int default_vcpu0_location(void)
{
    struct domain *d;
    struct vcpu *v;
    unsigned int i, cpu, cnt[NR_CPUS] = { 0 };
    cpumask_t cpu_exclude_map;

    /* Do an initial CPU placement. Pick the least-populated CPU. */
    rcu_read_lock(&domlist_read_lock);
    for_each_domain ( d )
        for_each_vcpu ( d, v )
            if ( !test_bit(_VPF_down, &v->pause_flags) )
                cnt[v->processor]++;
    rcu_read_unlock(&domlist_read_lock);

    /*
     * If we're on an HT system, we only auto-allocate to a non-primary HT.
     * We favour high numbered CPUs in the event of a tie.
     */
    cpu = first_cpu(cpu_sibling_map[0]);
    if ( cpus_weight(cpu_sibling_map[0]) > 1 )
        cpu = next_cpu(cpu, cpu_sibling_map[0]);
    cpu_exclude_map = cpu_sibling_map[0];
    for_each_online_cpu ( i )
    {
        if ( cpu_isset(i, cpu_exclude_map) )
            continue;
        if ( (i == first_cpu(cpu_sibling_map[i])) &&
             (cpus_weight(cpu_sibling_map[i]) > 1) )
            continue;
        cpus_or(cpu_exclude_map, cpu_exclude_map, cpu_sibling_map[i]);
        if ( cnt[i] <= cnt[cpu] )
            cpu = i;
    }

    return cpu;
}
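
/*
 * Top-level domctl dispatcher: only a privileged (control) domain may call
 * this, the interface version must match exactly, and all operations are
 * serialized by the global domctl_lock.
 */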
long do_domctl(XEN_GUEST_HANDLE(xen_domctl_t) u_domctl)
{
    long ret = 0;
    struct xen_domctl curop, *op = &curop;

    if ( !IS_PRIV(current->domain) )
        return -EPERM;

    if ( copy_from_guest(op, u_domctl, 1) )
        return -EFAULT;

    if ( op->interface_version != XEN_DOMCTL_INTERFACE_VERSION )
        return -EACCES;

    spin_lock(&domctl_lock);

    switch ( op->cmd )
    {

    case XEN_DOMCTL_setvcpucontext:
    {
        struct domain *d = rcu_lock_domain_by_id(op->domain);
        vcpu_guest_context_u c = { .nat = NULL };
        unsigned int vcpu = op->u.vcpucontext.vcpu;
        struct vcpu *v;

        ret = -ESRCH;
        if ( d == NULL )
            break;

        ret = xsm_setvcpucontext(d);
        if ( ret )
            goto svc_out;

        ret = -EINVAL;
        if ( (vcpu >= MAX_VIRT_CPUS) || ((v = d->vcpu[vcpu]) == NULL) )
            goto svc_out;

        if ( guest_handle_is_null(op->u.vcpucontext.ctxt) )
        {
            ret = vcpu_reset(v);
            goto svc_out;
        }

#ifdef CONFIG_COMPAT
        BUILD_BUG_ON(sizeof(struct vcpu_guest_context)
                     < sizeof(struct compat_vcpu_guest_context));
#endif
        ret = -ENOMEM;
        if ( (c.nat = xmalloc(struct vcpu_guest_context)) == NULL )
            goto svc_out;

        if ( !IS_COMPAT(v->domain) )
            ret = copy_from_guest(c.nat, op->u.vcpucontext.ctxt, 1);
#ifdef CONFIG_COMPAT
        else
            ret = copy_from_guest(c.cmp,
                                  guest_handle_cast(op->u.vcpucontext.ctxt,
                                                    void), 1);
#endif
        ret = ret ? -EFAULT : 0;

        if ( ret == 0 )
        {
            domain_pause(d);
            ret = arch_set_info_guest(v, c);
            domain_unpause(d);
        }

    svc_out:
        xfree(c.nat);
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_pausedomain:
    {
        struct domain *d = rcu_lock_domain_by_id(op->domain);
        ret = -ESRCH;
        if ( d != NULL )
        {
            ret = xsm_pausedomain(d);
            if ( ret )
                goto pausedomain_out;

            ret = -EINVAL;
            if ( d != current->domain )
            {
                domain_pause_by_systemcontroller(d);
                ret = 0;
            }
        pausedomain_out:
            rcu_unlock_domain(d);
        }
    }
    break;

    case XEN_DOMCTL_unpausedomain:
    {
        struct domain *d = rcu_lock_domain_by_id(op->domain);

        ret = -ESRCH;
        if ( d == NULL )
            break;

        ret = xsm_unpausedomain(d);
        if ( ret )
        {
            rcu_unlock_domain(d);
            break;
        }

        domain_unpause_by_systemcontroller(d);
        rcu_unlock_domain(d);
        ret = 0;
    }
    break;

    case XEN_DOMCTL_resumedomain:
    {
        struct domain *d = rcu_lock_domain_by_id(op->domain);

        ret = -ESRCH;
        if ( d == NULL )
            break;

        ret = xsm_resumedomain(d);
        if ( ret )
        {
            rcu_unlock_domain(d);
            break;
        }

        domain_resume(d);
        rcu_unlock_domain(d);
        ret = 0;
    }
    break;

    case XEN_DOMCTL_createdomain:
    {
        struct domain *d;
        domid_t dom;
        static domid_t rover = 0;
        unsigned int domcr_flags;

        ret = -EINVAL;
        if ( supervisor_mode_kernel ||
             (op->u.createdomain.flags &
              ~(XEN_DOMCTL_CDF_hvm_guest | XEN_DOMCTL_CDF_hap)) )
            break;

        dom = op->domain;
        if ( (dom > 0) && (dom < DOMID_FIRST_RESERVED) )
        {
            ret = -EINVAL;
            if ( !is_free_domid(dom) )
                break;
        }
        else
        {
            for ( dom = rover + 1; dom != rover; dom++ )
            {
                if ( dom == DOMID_FIRST_RESERVED )
                    dom = 0;
                if ( is_free_domid(dom) )
                    break;
            }

            ret = -ENOMEM;
            if ( dom == rover )
                break;

            rover = dom;
        }

        domcr_flags = 0;
        if ( op->u.createdomain.flags & XEN_DOMCTL_CDF_hvm_guest )
            domcr_flags |= DOMCRF_hvm;
        if ( op->u.createdomain.flags & XEN_DOMCTL_CDF_hap )
            domcr_flags |= DOMCRF_hap;

        ret = -ENOMEM;
        d = domain_create(dom, domcr_flags, op->u.createdomain.ssidref);
        if ( d == NULL )
            break;

        ret = 0;

        memcpy(d->handle, op->u.createdomain.handle,
               sizeof(xen_domain_handle_t));

        op->domain = d->domain_id;
        if ( copy_to_guest(u_domctl, op, 1) )
            ret = -EFAULT;
    }
    break;

    case XEN_DOMCTL_max_vcpus:
    {
        struct domain *d;
        unsigned int i, max = op->u.max_vcpus.max, cpu;

        ret = -EINVAL;
        if ( max > MAX_VIRT_CPUS )
            break;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(op->domain)) == NULL )
            break;

        ret = xsm_max_vcpus(d);
        if ( ret )
        {
            rcu_unlock_domain(d);
            break;
        }

        /* Needed, for example, to ensure writable p.t. state is synced. */
        domain_pause(d);

        /* We cannot reduce maximum VCPUs. */
        ret = -EINVAL;
        if ( (max != MAX_VIRT_CPUS) && (d->vcpu[max] != NULL) )
            goto maxvcpu_out;

        ret = -ENOMEM;
        for ( i = 0; i < max; i++ )
        {
            if ( d->vcpu[i] != NULL )
                continue;

            cpu = (i == 0) ?
                default_vcpu0_location() :
                (d->vcpu[i-1]->processor + 1) % num_online_cpus();

            if ( alloc_vcpu(d, i, cpu) == NULL )
                goto maxvcpu_out;
        }

        ret = 0;

    maxvcpu_out:
        domain_unpause(d);
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_destroydomain:
    {
        struct domain *d = rcu_lock_domain_by_id(op->domain);
        ret = -ESRCH;
        if ( d != NULL )
        {
            ret = xsm_destroydomain(d) ? : domain_kill(d);
            rcu_unlock_domain(d);
        }
    }
    break;

    case XEN_DOMCTL_setvcpuaffinity:
    case XEN_DOMCTL_getvcpuaffinity:
    {
        domid_t dom = op->domain;
        struct domain *d = rcu_lock_domain_by_id(dom);
        struct vcpu *v;
        cpumask_t new_affinity;

        ret = -ESRCH;
        if ( d == NULL )
            break;

        ret = xsm_vcpuaffinity(op->cmd, d);
        if ( ret )
            goto vcpuaffinity_out;

        ret = -EINVAL;
        if ( op->u.vcpuaffinity.vcpu >= MAX_VIRT_CPUS )
            goto vcpuaffinity_out;

        ret = -ESRCH;
        if ( (v = d->vcpu[op->u.vcpuaffinity.vcpu]) == NULL )
            goto vcpuaffinity_out;

        if ( op->cmd == XEN_DOMCTL_setvcpuaffinity )
        {
            xenctl_cpumap_to_cpumask(
                &new_affinity, &op->u.vcpuaffinity.cpumap);
            ret = vcpu_set_affinity(v, &new_affinity);
        }
        else
        {
            cpumask_to_xenctl_cpumap(
                &op->u.vcpuaffinity.cpumap, &v->cpu_affinity);
            ret = 0;
        }

    vcpuaffinity_out:
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_scheduler_op:
    {
        struct domain *d;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(op->domain)) == NULL )
            break;

        ret = xsm_scheduler(d);
        if ( ret )
            goto scheduler_op_out;

        ret = sched_adjust(d, &op->u.scheduler_op);
        if ( copy_to_guest(u_domctl, op, 1) )
            ret = -EFAULT;

    scheduler_op_out:
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_getdomaininfo:
    {
        struct domain *d;
        domid_t dom = op->domain;

        rcu_read_lock(&domlist_read_lock);

        for_each_domain ( d )
            if ( d->domain_id >= dom )
                break;

        if ( d == NULL )
        {
            rcu_read_unlock(&domlist_read_lock);
            ret = -ESRCH;
            break;
        }

        ret = xsm_getdomaininfo(d);
        if ( ret )
            goto getdomaininfo_out;

        getdomaininfo(d, &op->u.getdomaininfo);

        op->domain = op->u.getdomaininfo.domain;
        if ( copy_to_guest(u_domctl, op, 1) )
            ret = -EFAULT;

    getdomaininfo_out:
        rcu_read_unlock(&domlist_read_lock);
    }
    break;

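    /*
     * For compat (32-on-64) guests the context is transferred in the compat
     * layout; the BUILD_BUG_ON below guarantees the native structure is
     * large enough to hold either format, so a single allocation suffices.
     */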
    case XEN_DOMCTL_getvcpucontext:
    {
        vcpu_guest_context_u c = { .nat = NULL };
        struct domain *d;
        struct vcpu *v;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(op->domain)) == NULL )
            break;

        ret = xsm_getvcpucontext(d);
        if ( ret )
            goto getvcpucontext_out;

        ret = -EINVAL;
        if ( op->u.vcpucontext.vcpu >= MAX_VIRT_CPUS )
            goto getvcpucontext_out;

        ret = -ESRCH;
        if ( (v = d->vcpu[op->u.vcpucontext.vcpu]) == NULL )
            goto getvcpucontext_out;

        ret = -ENODATA;
        if ( !v->is_initialised )
            goto getvcpucontext_out;

#ifdef CONFIG_COMPAT
        BUILD_BUG_ON(sizeof(struct vcpu_guest_context)
                     < sizeof(struct compat_vcpu_guest_context));
#endif
        ret = -ENOMEM;
        if ( (c.nat = xmalloc(struct vcpu_guest_context)) == NULL )
            goto getvcpucontext_out;

        if ( v != current )
            vcpu_pause(v);

        arch_get_info_guest(v, c);
        ret = 0;

        if ( v != current )
            vcpu_unpause(v);

        if ( !IS_COMPAT(v->domain) )
            ret = copy_to_guest(op->u.vcpucontext.ctxt, c.nat, 1);
#ifdef CONFIG_COMPAT
        else
            ret = copy_to_guest(guest_handle_cast(op->u.vcpucontext.ctxt,
                                                  void), c.cmp, 1);
#endif

        if ( copy_to_guest(u_domctl, op, 1) || ret )
            ret = -EFAULT;

    getvcpucontext_out:
        xfree(c.nat);
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_getvcpuinfo:
    {
        struct domain *d;
        struct vcpu *v;
        struct vcpu_runstate_info runstate;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(op->domain)) == NULL )
            break;

        ret = xsm_getvcpuinfo(d);
        if ( ret )
            goto getvcpuinfo_out;

        ret = -EINVAL;
        if ( op->u.getvcpuinfo.vcpu >= MAX_VIRT_CPUS )
            goto getvcpuinfo_out;

        ret = -ESRCH;
        if ( (v = d->vcpu[op->u.getvcpuinfo.vcpu]) == NULL )
            goto getvcpuinfo_out;

        vcpu_runstate_get(v, &runstate);

        op->u.getvcpuinfo.online = !test_bit(_VPF_down, &v->pause_flags);
        op->u.getvcpuinfo.blocked = test_bit(_VPF_blocked, &v->pause_flags);
        op->u.getvcpuinfo.running = v->is_running;
        op->u.getvcpuinfo.cpu_time = runstate.time[RUNSTATE_running];
        op->u.getvcpuinfo.cpu = v->processor;
        ret = 0;

        if ( copy_to_guest(u_domctl, op, 1) )
            ret = -EFAULT;

    getvcpuinfo_out:
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_max_mem:
    {
        struct domain *d;
        unsigned long new_max;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if ( d == NULL )
            break;

        ret = xsm_setdomainmaxmem(d);
        if ( ret )
            goto max_mem_out;

        ret = -EINVAL;
        new_max = op->u.max_mem.max_memkb >> (PAGE_SHIFT-10);

        spin_lock(&d->page_alloc_lock);
        if ( new_max >= d->tot_pages )
        {
            ret = guest_physmap_max_mem_pages(d, new_max);
            if ( ret != 0 )
            {
                /*
                 * Bail out via the common exit path: a bare 'break' here
                 * would leave page_alloc_lock held and skip the RCU unlock.
                 */
                spin_unlock(&d->page_alloc_lock);
                goto max_mem_out;
            }
            d->max_pages = new_max;
            ret = 0;
        }
        spin_unlock(&d->page_alloc_lock);

    max_mem_out:
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_setdomainhandle:
    {
        struct domain *d;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if ( d == NULL )
            break;

        ret = xsm_setdomainhandle(d);
        if ( ret )
        {
            rcu_unlock_domain(d);
            break;
        }

        memcpy(d->handle, op->u.setdomainhandle.handle,
               sizeof(xen_domain_handle_t));
        rcu_unlock_domain(d);
        ret = 0;
    }
    break;

    case XEN_DOMCTL_setdebugging:
    {
        struct domain *d;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if ( d == NULL )
            break;

        ret = xsm_setdebugging(d);
        if ( ret )
        {
            rcu_unlock_domain(d);
            break;
        }

        domain_pause(d);
        d->debugger_attached = !!op->u.setdebugging.enable;
        domain_unpause(d); /* causes guest to latch new status */
        rcu_unlock_domain(d);
        ret = 0;
    }
    break;

    case XEN_DOMCTL_irq_permission:
    {
        struct domain *d;
        unsigned int pirq = op->u.irq_permission.pirq;

        ret = -EINVAL;
        if ( pirq >= NR_IRQS )
            break;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if ( d == NULL )
            break;

        ret = xsm_irq_permission(d, pirq, op->u.irq_permission.allow_access);
        if ( ret )
            goto irq_permission_out;

        if ( op->u.irq_permission.allow_access )
            ret = irq_permit_access(d, pirq);
        else
            ret = irq_deny_access(d, pirq);

    irq_permission_out:
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_iomem_permission:
    {
        struct domain *d;
        unsigned long mfn = op->u.iomem_permission.first_mfn;
        unsigned long nr_mfns = op->u.iomem_permission.nr_mfns;

        ret = -EINVAL;
        if ( (mfn + nr_mfns - 1) < mfn ) /* wrap? */
            break;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if ( d == NULL )
            break;

        ret = xsm_iomem_permission(d, mfn, op->u.iomem_permission.allow_access);
        if ( ret )
            goto iomem_permission_out;

        if ( op->u.iomem_permission.allow_access )
            ret = iomem_permit_access(d, mfn, mfn + nr_mfns - 1);
        else
            ret = iomem_deny_access(d, mfn, mfn + nr_mfns - 1);

    iomem_permission_out:
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_settimeoffset:
    {
        struct domain *d;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if ( d == NULL )
            break;

        ret = xsm_domain_settime(d);
        if ( ret )
        {
            rcu_unlock_domain(d);
            break;
        }

        d->time_offset_seconds = op->u.settimeoffset.time_offset_seconds;
        rcu_unlock_domain(d);
        ret = 0;
    }
    break;

    case XEN_DOMCTL_set_target:
    {
        struct domain *d, *e;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if ( d == NULL )
            break;

        ret = -ESRCH;
        e = get_domain_by_id(op->u.set_target.target);
        if ( e == NULL )
            goto set_target_out;

        ret = -EINVAL;
        if ( (d == e) || (d->target != NULL) )
        {
            put_domain(e);
            goto set_target_out;
        }

        /* Hold reference on @e until we destroy @d. */
        d->target = e;

        ret = 0;

    set_target_out:
        rcu_unlock_domain(d);
    }
    break;

    default:
        ret = arch_do_domctl(op, u_domctl);
        break;
    }

    spin_unlock(&domctl_lock);

    return ret;
}
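
/*
 * Illustrative sketch: how a privileged control stack might drive this
 * hypercall, here to pause a domain.  The do_domctl_hypercall() callback is
 * hypothetical -- a real toolstack would go through its hypercall library
 * (e.g. libxc's do_domctl()) rather than issuing the trap by hand.
 */
#if 0
static int example_pause_domain(int (*do_domctl_hypercall)(struct xen_domctl *),
                                domid_t domid)
{
    struct xen_domctl domctl;

    memset(&domctl, 0, sizeof(domctl));
    domctl.cmd = XEN_DOMCTL_pausedomain;
    domctl.domain = domid;
    /* Mismatched versions are rejected above with -EACCES. */
    domctl.interface_version = XEN_DOMCTL_INTERFACE_VERSION;

    return do_domctl_hypercall(&domctl); /* hypothetical wrapper */
}
#endif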

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */