debuggers.hg

view xen/common/domctl.c @ 17986:f2148e532c81

x86 hvm: Fix RTC handling.
1. Clean up initialisation/destruction.
2. Better handle per-domain time-offset changes.
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>

author      Keir Fraser <keir.fraser@citrix.com>
date        Wed Jul 02 17:25:05 2008 +0100
parents     fd5b2ed9574a
children    14fd83fe71c3

/******************************************************************************
 * domctl.c
 *
 * Domain management operations. For use by node control stack.
 *
 * Copyright (c) 2002-2006, K A Fraser
 */

#include <xen/config.h>
#include <xen/types.h>
#include <xen/lib.h>
#include <xen/mm.h>
#include <xen/sched.h>
#include <xen/domain.h>
#include <xen/event.h>
#include <xen/domain_page.h>
#include <xen/trace.h>
#include <xen/console.h>
#include <xen/iocap.h>
#include <xen/rcupdate.h>
#include <xen/guest_access.h>
#include <xen/bitmap.h>
#include <xen/paging.h>
#include <asm/current.h>
#include <public/domctl.h>
#include <xsm/xsm.h>

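/* Serialises all domctl operations; held across the whole dispatch in do_domctl(). */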
DEFINE_SPINLOCK(domctl_lock);

extern long arch_do_domctl(
    struct xen_domctl *op, XEN_GUEST_HANDLE(xen_domctl_t) u_domctl);

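/*
 * Copy a hypervisor cpumask_t out to a guest-supplied xenctl_cpumap byte
 * bitmap. Only as many bytes as the guest advertises (nr_cpus, rounded up
 * to whole bytes) are written, and any guest bytes beyond what the
 * hypervisor tracks are explicitly zero-filled.
 */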
void cpumask_to_xenctl_cpumap(
    struct xenctl_cpumap *xenctl_cpumap, cpumask_t *cpumask)
{
    unsigned int guest_bytes, copy_bytes, i;
    uint8_t zero = 0;
    uint8_t bytemap[(NR_CPUS + 7) / 8];

    if ( guest_handle_is_null(xenctl_cpumap->bitmap) )
        return;

    guest_bytes = (xenctl_cpumap->nr_cpus + 7) / 8;
    copy_bytes = min_t(unsigned int, guest_bytes, sizeof(bytemap));

    bitmap_long_to_byte(bytemap, cpus_addr(*cpumask), NR_CPUS);

    if ( copy_bytes != 0 )
        copy_to_guest(xenctl_cpumap->bitmap, bytemap, copy_bytes);

    for ( i = copy_bytes; i < guest_bytes; i++ )
        copy_to_guest_offset(xenctl_cpumap->bitmap, i, &zero, 1);
}

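/*
 * Inverse of cpumask_to_xenctl_cpumap(): read a guest-supplied byte bitmap
 * into a hypervisor cpumask_t. Stray bits in the final byte beyond nr_cpus
 * are masked off so a guest cannot set CPUs it did not declare.
 */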
void xenctl_cpumap_to_cpumask(
    cpumask_t *cpumask, struct xenctl_cpumap *xenctl_cpumap)
{
    unsigned int guest_bytes, copy_bytes;
    uint8_t bytemap[(NR_CPUS + 7) / 8];

    if ( guest_handle_is_null(xenctl_cpumap->bitmap) )
        return;

    guest_bytes = (xenctl_cpumap->nr_cpus + 7) / 8;
    copy_bytes = min_t(unsigned int, guest_bytes, sizeof(bytemap));

    memset(bytemap, 0, sizeof(bytemap));

    if ( copy_bytes != 0 )
    {
        copy_from_guest(bytemap, xenctl_cpumap->bitmap, copy_bytes);
        if ( (xenctl_cpumap->nr_cpus & 7) && (guest_bytes <= sizeof(bytemap)) )
            bytemap[guest_bytes-1] &= ~(0xff << (xenctl_cpumap->nr_cpus & 7));
    }

    bitmap_byte_to_long(cpus_addr(*cpumask), bytemap, NR_CPUS);
}

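/*
 * A domid is free if it lies below DOMID_FIRST_RESERVED and no current
 * domain already owns it.
 */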
static inline int is_free_domid(domid_t dom)
{
    struct domain *d;

    if ( dom >= DOMID_FIRST_RESERVED )
        return 0;

    if ( (d = rcu_lock_domain_by_id(dom)) == NULL )
        return 1;

    rcu_unlock_domain(d);
    return 0;
}

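/*
 * Fill in a xen_domctl_getdomaininfo snapshot for domain @d: online VCPU
 * count, accumulated CPU time, state flags and memory footprint. The
 * caller is expected to hold a reference or RCU lock on @d.
 */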
void getdomaininfo(struct domain *d, struct xen_domctl_getdomaininfo *info)
{
    struct vcpu *v;
    u64 cpu_time = 0;
    int flags = XEN_DOMINF_blocked;
    struct vcpu_runstate_info runstate;

    info->domain = d->domain_id;
    info->nr_online_vcpus = 0;

    /*
     * - domain is marked as blocked only if all its vcpus are blocked
     * - domain is marked as running if any of its vcpus is running
     */
    for_each_vcpu ( d, v )
    {
        vcpu_runstate_get(v, &runstate);
        cpu_time += runstate.time[RUNSTATE_running];
        info->max_vcpu_id = v->vcpu_id;
        if ( !test_bit(_VPF_down, &v->pause_flags) )
        {
            if ( !(v->pause_flags & VPF_blocked) )
                flags &= ~XEN_DOMINF_blocked;
            if ( v->is_running )
                flags |= XEN_DOMINF_running;
            info->nr_online_vcpus++;
        }
    }

    info->cpu_time = cpu_time;

    info->flags = (info->nr_online_vcpus ? flags : 0) |
        ((d->is_dying == DOMDYING_dead) ? XEN_DOMINF_dying : 0) |
        (d->is_shut_down ? XEN_DOMINF_shutdown : 0) |
        (d->is_paused_by_controller ? XEN_DOMINF_paused : 0) |
        (d->debugger_attached ? XEN_DOMINF_debugged : 0) |
        d->shutdown_code << XEN_DOMINF_shutdownshift;

    if ( is_hvm_domain(d) )
        info->flags |= XEN_DOMINF_hvm_guest;

    xsm_security_domaininfo(d, info);

    info->tot_pages = d->tot_pages;
    info->max_pages = d->max_pages;
    info->shared_info_frame = mfn_to_gmfn(d, __pa(d->shared_info)>>PAGE_SHIFT);

    memcpy(info->handle, d->handle, sizeof(xen_domain_handle_t));
}

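/*
 * Pick an initial physical CPU for a new domain's VCPU0: the least
 * populated online CPU, preferring non-primary hyperthreads and, on a
 * tie, the higher-numbered CPU.
 */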
static unsigned int default_vcpu0_location(void)
{
    struct domain *d;
    struct vcpu *v;
    unsigned int i, cpu, cnt[NR_CPUS] = { 0 };
    cpumask_t cpu_exclude_map;

    /* Do an initial CPU placement. Pick the least-populated CPU. */
    rcu_read_lock(&domlist_read_lock);
    for_each_domain ( d )
        for_each_vcpu ( d, v )
            if ( !test_bit(_VPF_down, &v->pause_flags) )
                cnt[v->processor]++;
    rcu_read_unlock(&domlist_read_lock);

    /*
     * If we're on an HT system, we only auto-allocate to a non-primary HT.
     * We favour high-numbered CPUs in the event of a tie.
     */
    cpu = first_cpu(cpu_sibling_map[0]);
    if ( cpus_weight(cpu_sibling_map[0]) > 1 )
        cpu = next_cpu(cpu, cpu_sibling_map[0]);
    cpu_exclude_map = cpu_sibling_map[0];
    for_each_online_cpu ( i )
    {
        if ( cpu_isset(i, cpu_exclude_map) )
            continue;
        if ( (i == first_cpu(cpu_sibling_map[i])) &&
             (cpus_weight(cpu_sibling_map[i]) > 1) )
            continue;
        cpus_or(cpu_exclude_map, cpu_exclude_map, cpu_sibling_map[i]);
        if ( cnt[i] <= cnt[cpu] )
            cpu = i;
    }

    return cpu;
}

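/*
 * Top-level handler for the domctl hypercall. Only a privileged (control)
 * domain may invoke it. The operation structure is copied in from the
 * guest, checked against the interface version, and dispatched under the
 * global domctl_lock; commands not handled here fall through to the
 * per-architecture arch_do_domctl().
 */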
long do_domctl(XEN_GUEST_HANDLE(xen_domctl_t) u_domctl)
{
    long ret = 0;
    struct xen_domctl curop, *op = &curop;

    if ( !IS_PRIV(current->domain) )
        return -EPERM;

    if ( copy_from_guest(op, u_domctl, 1) )
        return -EFAULT;

    if ( op->interface_version != XEN_DOMCTL_INTERFACE_VERSION )
        return -EACCES;

    spin_lock(&domctl_lock);

    switch ( op->cmd )
    {
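    /*
     * Load a new register/state context into a VCPU. A NULL context handle
     * means "reset the VCPU". Native and compat (32-on-64) guests use
     * differently sized context structures, hence the CONFIG_COMPAT split
     * below.
     */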
    case XEN_DOMCTL_setvcpucontext:
    {
        struct domain *d = rcu_lock_domain_by_id(op->domain);
        vcpu_guest_context_u c = { .nat = NULL };
        unsigned int vcpu = op->u.vcpucontext.vcpu;
        struct vcpu *v;

        ret = -ESRCH;
        if ( d == NULL )
            break;

        ret = xsm_setvcpucontext(d);
        if ( ret )
            goto svc_out;

        ret = -EINVAL;
        if ( (vcpu >= MAX_VIRT_CPUS) || ((v = d->vcpu[vcpu]) == NULL) )
            goto svc_out;

        if ( guest_handle_is_null(op->u.vcpucontext.ctxt) )
        {
            vcpu_reset(v);
            ret = 0;
            goto svc_out;
        }

#ifdef CONFIG_COMPAT
        BUILD_BUG_ON(sizeof(struct vcpu_guest_context)
                     < sizeof(struct compat_vcpu_guest_context));
#endif
        ret = -ENOMEM;
        if ( (c.nat = xmalloc(struct vcpu_guest_context)) == NULL )
            goto svc_out;

        if ( !IS_COMPAT(v->domain) )
            ret = copy_from_guest(c.nat, op->u.vcpucontext.ctxt, 1);
#ifdef CONFIG_COMPAT
        else
            ret = copy_from_guest(c.cmp,
                                  guest_handle_cast(op->u.vcpucontext.ctxt,
                                                    void), 1);
#endif
        ret = ret ? -EFAULT : 0;

        if ( ret == 0 )
        {
            domain_pause(d);
            ret = arch_set_info_guest(v, c);
            domain_unpause(d);
        }

    svc_out:
        xfree(c.nat);
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_pausedomain:
    {
        struct domain *d = rcu_lock_domain_by_id(op->domain);
        ret = -ESRCH;
        if ( d != NULL )
        {
            ret = xsm_pausedomain(d);
            if ( ret )
                goto pausedomain_out;

            ret = -EINVAL;
            if ( d != current->domain )
            {
                domain_pause_by_systemcontroller(d);
                ret = 0;
            }
        pausedomain_out:
            rcu_unlock_domain(d);
        }
    }
    break;

    case XEN_DOMCTL_unpausedomain:
    {
        struct domain *d = rcu_lock_domain_by_id(op->domain);

        ret = -ESRCH;
        if ( d == NULL )
            break;

        ret = xsm_unpausedomain(d);
        if ( ret )
        {
            rcu_unlock_domain(d);
            break;
        }

        domain_unpause_by_systemcontroller(d);
        rcu_unlock_domain(d);
        ret = 0;
    }
    break;

    case XEN_DOMCTL_resumedomain:
    {
        struct domain *d = rcu_lock_domain_by_id(op->domain);

        ret = -ESRCH;
        if ( d == NULL )
            break;

        ret = xsm_resumedomain(d);
        if ( ret )
        {
            rcu_unlock_domain(d);
            break;
        }

        domain_resume(d);
        rcu_unlock_domain(d);
        ret = 0;
    }
    break;

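    /*
     * Create a new domain. A zero (or reserved) domid in op->domain requests
     * automatic allocation: a static rover walks the unreserved domid space
     * for the next free ID, and the operation fails only once the search
     * wraps all the way around.
     */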
    case XEN_DOMCTL_createdomain:
    {
        struct domain *d;
        domid_t dom;
        static domid_t rover = 0;
        unsigned int domcr_flags;

        ret = -EINVAL;
        if ( supervisor_mode_kernel ||
             (op->u.createdomain.flags &
              ~(XEN_DOMCTL_CDF_hvm_guest | XEN_DOMCTL_CDF_hap)) )
            break;

        dom = op->domain;
        if ( (dom > 0) && (dom < DOMID_FIRST_RESERVED) )
        {
            ret = -EINVAL;
            if ( !is_free_domid(dom) )
                break;
        }
        else
        {
            for ( dom = rover + 1; dom != rover; dom++ )
            {
                if ( dom == DOMID_FIRST_RESERVED )
                    dom = 0;
                if ( is_free_domid(dom) )
                    break;
            }

            ret = -ENOMEM;
            if ( dom == rover )
                break;

            rover = dom;
        }

        domcr_flags = 0;
        if ( op->u.createdomain.flags & XEN_DOMCTL_CDF_hvm_guest )
            domcr_flags |= DOMCRF_hvm;
        if ( op->u.createdomain.flags & XEN_DOMCTL_CDF_hap )
            domcr_flags |= DOMCRF_hap;

        ret = -ENOMEM;
        d = domain_create(dom, domcr_flags, op->u.createdomain.ssidref);
        if ( d == NULL )
            break;

        ret = 0;

        memcpy(d->handle, op->u.createdomain.handle,
               sizeof(xen_domain_handle_t));

        op->domain = d->domain_id;
        if ( copy_to_guest(u_domctl, op, 1) )
            ret = -EFAULT;
    }
    break;

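    /*
     * Raise a domain's VCPU count. Shrinking the maximum is rejected; each
     * newly allocated VCPU is placed on the CPU after its predecessor's,
     * with VCPU0 placed by default_vcpu0_location().
     */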
    case XEN_DOMCTL_max_vcpus:
    {
        struct domain *d;
        unsigned int i, max = op->u.max_vcpus.max, cpu;

        ret = -EINVAL;
        if ( max > MAX_VIRT_CPUS )
            break;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(op->domain)) == NULL )
            break;

        ret = xsm_max_vcpus(d);
        if ( ret )
        {
            rcu_unlock_domain(d);
            break;
        }

        /* Needed, for example, to ensure writable p.t. state is synced. */
        domain_pause(d);

        /* We cannot reduce maximum VCPUs. */
        ret = -EINVAL;
        if ( (max != MAX_VIRT_CPUS) && (d->vcpu[max] != NULL) )
            goto maxvcpu_out;

        ret = -ENOMEM;
        for ( i = 0; i < max; i++ )
        {
            if ( d->vcpu[i] != NULL )
                continue;

            cpu = (i == 0) ?
                default_vcpu0_location() :
                (d->vcpu[i-1]->processor + 1) % num_online_cpus();

            if ( alloc_vcpu(d, i, cpu) == NULL )
                goto maxvcpu_out;
        }

        ret = 0;

    maxvcpu_out:
        domain_unpause(d);
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_destroydomain:
    {
        struct domain *d = rcu_lock_domain_by_id(op->domain);
        ret = -ESRCH;
        if ( d != NULL )
        {
            ret = xsm_destroydomain(d) ? : domain_kill(d);
            rcu_unlock_domain(d);
        }
    }
    break;

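    /*
     * Set or query a VCPU's physical-CPU affinity. Both directions share
     * one handler; the cpumap is translated with the conversion helpers
     * defined at the top of this file.
     */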
    case XEN_DOMCTL_setvcpuaffinity:
    case XEN_DOMCTL_getvcpuaffinity:
    {
        domid_t dom = op->domain;
        struct domain *d = rcu_lock_domain_by_id(dom);
        struct vcpu *v;
        cpumask_t new_affinity;

        ret = -ESRCH;
        if ( d == NULL )
            break;

        ret = xsm_vcpuaffinity(op->cmd, d);
        if ( ret )
            goto vcpuaffinity_out;

        ret = -EINVAL;
        if ( op->u.vcpuaffinity.vcpu >= MAX_VIRT_CPUS )
            goto vcpuaffinity_out;

        ret = -ESRCH;
        if ( (v = d->vcpu[op->u.vcpuaffinity.vcpu]) == NULL )
            goto vcpuaffinity_out;

        if ( op->cmd == XEN_DOMCTL_setvcpuaffinity )
        {
            xenctl_cpumap_to_cpumask(
                &new_affinity, &op->u.vcpuaffinity.cpumap);
            ret = vcpu_set_affinity(v, &new_affinity);
        }
        else
        {
            cpumask_to_xenctl_cpumap(
                &op->u.vcpuaffinity.cpumap, &v->cpu_affinity);
            ret = 0;
        }

    vcpuaffinity_out:
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_scheduler_op:
    {
        struct domain *d;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(op->domain)) == NULL )
            break;

        ret = xsm_scheduler(d);
        if ( ret )
            goto scheduler_op_out;

        ret = sched_adjust(d, &op->u.scheduler_op);
        if ( copy_to_guest(u_domctl, op, 1) )
            ret = -EFAULT;

    scheduler_op_out:
        rcu_unlock_domain(d);
    }
    break;

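    /*
     * Note the iterator semantics: this returns information for the first
     * domain whose ID is >= op->domain and writes the matched ID back, so
     * a toolstack can enumerate all domains with repeated calls.
     */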
    case XEN_DOMCTL_getdomaininfo:
    {
        struct domain *d;
        domid_t dom = op->domain;

        rcu_read_lock(&domlist_read_lock);

        for_each_domain ( d )
            if ( d->domain_id >= dom )
                break;

        if ( d == NULL )
        {
            rcu_read_unlock(&domlist_read_lock);
            ret = -ESRCH;
            break;
        }

        ret = xsm_getdomaininfo(d);
        if ( ret )
            goto getdomaininfo_out;

        getdomaininfo(d, &op->u.getdomaininfo);

        op->domain = op->u.getdomaininfo.domain;
        if ( copy_to_guest(u_domctl, op, 1) )
            ret = -EFAULT;

    getdomaininfo_out:
        rcu_read_unlock(&domlist_read_lock);
    }
    break;

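    /*
     * Read back a VCPU's register/state context. The VCPU is paused around
     * arch_get_info_guest() unless the caller asks about itself.
     */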
    case XEN_DOMCTL_getvcpucontext:
    {
        vcpu_guest_context_u c = { .nat = NULL };
        struct domain *d;
        struct vcpu *v;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(op->domain)) == NULL )
            break;

        ret = xsm_getvcpucontext(d);
        if ( ret )
            goto getvcpucontext_out;

        ret = -EINVAL;
        if ( op->u.vcpucontext.vcpu >= MAX_VIRT_CPUS )
            goto getvcpucontext_out;

        ret = -ESRCH;
        if ( (v = d->vcpu[op->u.vcpucontext.vcpu]) == NULL )
            goto getvcpucontext_out;

        ret = -ENODATA;
        if ( !v->is_initialised )
            goto getvcpucontext_out;

#ifdef CONFIG_COMPAT
        BUILD_BUG_ON(sizeof(struct vcpu_guest_context)
                     < sizeof(struct compat_vcpu_guest_context));
#endif
        ret = -ENOMEM;
        if ( (c.nat = xmalloc(struct vcpu_guest_context)) == NULL )
            goto getvcpucontext_out;

        if ( v != current )
            vcpu_pause(v);

        arch_get_info_guest(v, c);
        ret = 0;

        if ( v != current )
            vcpu_unpause(v);

        if ( !IS_COMPAT(v->domain) )
            ret = copy_to_guest(op->u.vcpucontext.ctxt, c.nat, 1);
#ifdef CONFIG_COMPAT
        else
            ret = copy_to_guest(guest_handle_cast(op->u.vcpucontext.ctxt,
                                                  void), c.cmp, 1);
#endif

        if ( copy_to_guest(u_domctl, op, 1) || ret )
            ret = -EFAULT;

    getvcpucontext_out:
        xfree(c.nat);
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_getvcpuinfo:
    {
        struct domain *d;
        struct vcpu *v;
        struct vcpu_runstate_info runstate;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(op->domain)) == NULL )
            break;

        ret = xsm_getvcpuinfo(d);
        if ( ret )
            goto getvcpuinfo_out;

        ret = -EINVAL;
        if ( op->u.getvcpuinfo.vcpu >= MAX_VIRT_CPUS )
            goto getvcpuinfo_out;

        ret = -ESRCH;
        if ( (v = d->vcpu[op->u.getvcpuinfo.vcpu]) == NULL )
            goto getvcpuinfo_out;

        vcpu_runstate_get(v, &runstate);

        op->u.getvcpuinfo.online   = !test_bit(_VPF_down, &v->pause_flags);
        op->u.getvcpuinfo.blocked  = test_bit(_VPF_blocked, &v->pause_flags);
        op->u.getvcpuinfo.running  = v->is_running;
        op->u.getvcpuinfo.cpu_time = runstate.time[RUNSTATE_running];
        op->u.getvcpuinfo.cpu      = v->processor;
        ret = 0;

        if ( copy_to_guest(u_domctl, op, 1) )
            ret = -EFAULT;

    getvcpuinfo_out:
        rcu_unlock_domain(d);
    }
    break;

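    /*
     * Set a new memory-allocation ceiling. The new maximum may not fall
     * below the domain's current footprint (tot_pages); the check and
     * update are done under the page_alloc_lock.
     */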
    case XEN_DOMCTL_max_mem:
    {
        struct domain *d;
        unsigned long new_max;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if ( d == NULL )
            break;

        ret = xsm_setdomainmaxmem(d);
        if ( ret )
            goto max_mem_out;

        ret = -EINVAL;
        new_max = op->u.max_mem.max_memkb >> (PAGE_SHIFT-10);

        /*
         * Must not exit this critical section early (e.g. via 'break'):
         * both the page_alloc_lock and the domain RCU lock are held here.
         */
        spin_lock(&d->page_alloc_lock);
        if ( new_max >= d->tot_pages )
        {
            ret = guest_physmap_max_mem_pages(d, new_max);
            if ( ret == 0 )
                d->max_pages = new_max;
        }
        spin_unlock(&d->page_alloc_lock);

    max_mem_out:
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_setdomainhandle:
    {
        struct domain *d;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if ( d == NULL )
            break;

        ret = xsm_setdomainhandle(d);
        if ( ret )
        {
            rcu_unlock_domain(d);
            break;
        }

        memcpy(d->handle, op->u.setdomainhandle.handle,
               sizeof(xen_domain_handle_t));
        rcu_unlock_domain(d);
        ret = 0;
    }
    break;

    case XEN_DOMCTL_setdebugging:
    {
        struct domain *d;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if ( d == NULL )
            break;

        ret = xsm_setdebugging(d);
        if ( ret )
        {
            rcu_unlock_domain(d);
            break;
        }

        domain_pause(d);
        d->debugger_attached = !!op->u.setdebugging.enable;
        domain_unpause(d); /* causes guest to latch new status */
        rcu_unlock_domain(d);
        ret = 0;
    }
    break;

    case XEN_DOMCTL_irq_permission:
    {
        struct domain *d;
        unsigned int pirq = op->u.irq_permission.pirq;

        ret = -EINVAL;
        if ( pirq >= NR_IRQS )
            break;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if ( d == NULL )
            break;

        ret = xsm_irq_permission(d, pirq, op->u.irq_permission.allow_access);
        if ( ret )
            goto irq_permission_out;

        if ( op->u.irq_permission.allow_access )
            ret = irq_permit_access(d, pirq);
        else
            ret = irq_deny_access(d, pirq);

    irq_permission_out:
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_iomem_permission:
    {
        struct domain *d;
        unsigned long mfn = op->u.iomem_permission.first_mfn;
        unsigned long nr_mfns = op->u.iomem_permission.nr_mfns;

        ret = -EINVAL;
        if ( (mfn + nr_mfns - 1) < mfn ) /* wrap? */
            break;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if ( d == NULL )
            break;

        ret = xsm_iomem_permission(d, mfn, op->u.iomem_permission.allow_access);
        if ( ret )
            goto iomem_permission_out;

        if ( op->u.iomem_permission.allow_access )
            ret = iomem_permit_access(d, mfn, mfn + nr_mfns - 1);
        else
            ret = iomem_deny_access(d, mfn, mfn + nr_mfns - 1);

    iomem_permission_out:
        rcu_unlock_domain(d);
    }
    break;

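    /*
     * Set the per-domain wallclock offset, in seconds. Per the changeset
     * description above, offset changes are routed through
     * domain_set_time_offset() so that per-domain time (e.g. the HVM RTC)
     * is updated consistently.
     */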
    case XEN_DOMCTL_settimeoffset:
    {
        struct domain *d;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if ( d == NULL )
            break;

        ret = xsm_domain_settime(d);
        if ( ret )
        {
            rcu_unlock_domain(d);
            break;
        }

        domain_set_time_offset(d, op->u.settimeoffset.time_offset_seconds);
        rcu_unlock_domain(d);
        ret = 0;
    }
    break;

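    /*
     * Associate a target domain with op->domain. A domain may be given at
     * most one target, set only once; the reference taken on the target
     * here is held until the requesting domain is destroyed.
     */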
    case XEN_DOMCTL_set_target:
    {
        struct domain *d, *e;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if ( d == NULL )
            break;

        ret = -ESRCH;
        e = get_domain_by_id(op->u.set_target.target);
        if ( e == NULL )
            goto set_target_out;

        ret = -EINVAL;
        if ( (d == e) || (d->target != NULL) )
        {
            put_domain(e);
            goto set_target_out;
        }

        /* Hold reference on @e until we destroy @d. */
        d->target = e;

        ret = 0;

    set_target_out:
        rcu_unlock_domain(d);
    }
    break;

    default:
        ret = arch_do_domctl(op, u_domctl);
        break;
    }

    spin_unlock(&domctl_lock);

    return ret;
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */