debuggers.hg: xen/common/domctl.c @ 16558:a583f3a7eafc

changeset: Revert 16498:d2bef6551c1263e457aef75ce403ba53652a803f.
author:    Keir Fraser <keir.fraser@citrix.com>
date:      Tue Dec 04 22:54:58 2007 +0000
parents:   62451388f630
children:  cff4c8a1aa28
/******************************************************************************
 * domctl.c
 *
 * Domain management operations. For use by node control stack.
 *
 * Copyright (c) 2002-2006, K A Fraser
 */

#include <xen/config.h>
#include <xen/types.h>
#include <xen/lib.h>
#include <xen/mm.h>
#include <xen/sched.h>
#include <xen/domain.h>
#include <xen/event.h>
#include <xen/domain_page.h>
#include <xen/trace.h>
#include <xen/console.h>
#include <xen/iocap.h>
#include <xen/rcupdate.h>
#include <xen/guest_access.h>
#include <xen/bitmap.h>
#include <xen/paging.h>
#include <asm/current.h>
#include <public/domctl.h>
#include <xsm/xsm.h>

extern long arch_do_domctl(
    struct xen_domctl *op, XEN_GUEST_HANDLE(xen_domctl_t) u_domctl);
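
/*
 * Copy a hypervisor cpumask into a guest-supplied byte map. Bytes beyond
 * the hypervisor's NR_CPUS-bit map are zero-filled on the guest side.
 */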
void cpumask_to_xenctl_cpumap(
    struct xenctl_cpumap *xenctl_cpumap, cpumask_t *cpumask)
{
    unsigned int guest_bytes, copy_bytes, i;
    uint8_t zero = 0;
    uint8_t bytemap[(NR_CPUS + 7) / 8];

    if ( guest_handle_is_null(xenctl_cpumap->bitmap) )
        return;

    guest_bytes = (xenctl_cpumap->nr_cpus + 7) / 8;
    copy_bytes  = min_t(unsigned int, guest_bytes, sizeof(bytemap));

    bitmap_long_to_byte(bytemap, cpus_addr(*cpumask), NR_CPUS);

    if ( copy_bytes != 0 )
        copy_to_guest(xenctl_cpumap->bitmap, bytemap, copy_bytes);

    for ( i = copy_bytes; i < guest_bytes; i++ )
        copy_to_guest_offset(xenctl_cpumap->bitmap, i, &zero, 1);
}
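
/*
 * Inverse of the above: read a guest-supplied byte map into a hypervisor
 * cpumask, masking off any stray bits beyond nr_cpus in the final byte.
 */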
void xenctl_cpumap_to_cpumask(
    cpumask_t *cpumask, struct xenctl_cpumap *xenctl_cpumap)
{
    unsigned int guest_bytes, copy_bytes;
    uint8_t bytemap[(NR_CPUS + 7) / 8];

    if ( guest_handle_is_null(xenctl_cpumap->bitmap) )
        return;

    guest_bytes = (xenctl_cpumap->nr_cpus + 7) / 8;
    copy_bytes  = min_t(unsigned int, guest_bytes, sizeof(bytemap));

    memset(bytemap, 0, sizeof(bytemap));

    if ( copy_bytes != 0 )
    {
        copy_from_guest(bytemap, xenctl_cpumap->bitmap, copy_bytes);
        if ( (xenctl_cpumap->nr_cpus & 7) && (guest_bytes <= sizeof(bytemap)) )
            bytemap[guest_bytes-1] &= ~(0xff << (xenctl_cpumap->nr_cpus & 7));
    }

    bitmap_byte_to_long(cpus_addr(*cpumask), bytemap, NR_CPUS);
}
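
/* A domain ID is free if it is below DOMID_FIRST_RESERVED and not in use. */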
static inline int is_free_domid(domid_t dom)
{
    struct domain *d;

    if ( dom >= DOMID_FIRST_RESERVED )
        return 0;

    if ( (d = rcu_lock_domain_by_id(dom)) == NULL )
        return 1;

    rcu_unlock_domain(d);
    return 0;
}
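
/* Fill in a xen_domctl_getdomaininfo structure for the given domain. */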
void getdomaininfo(struct domain *d, struct xen_domctl_getdomaininfo *info)
{
    struct vcpu *v;
    u64 cpu_time = 0;
    int flags = XEN_DOMINF_blocked;
    struct vcpu_runstate_info runstate;

    info->domain = d->domain_id;
    info->nr_online_vcpus = 0;

    /*
     * - domain is marked as blocked only if all its vcpus are blocked
     * - domain is marked as running if any of its vcpus is running
     */
    for_each_vcpu ( d, v )
    {
        vcpu_runstate_get(v, &runstate);
        cpu_time += runstate.time[RUNSTATE_running];
        info->max_vcpu_id = v->vcpu_id;
        if ( !test_bit(_VPF_down, &v->pause_flags) )
        {
            if ( !(v->pause_flags & VPF_blocked) )
                flags &= ~XEN_DOMINF_blocked;
            if ( v->is_running )
                flags |= XEN_DOMINF_running;
            info->nr_online_vcpus++;
        }
    }

    info->cpu_time = cpu_time;

    info->flags = flags |
        ((d->is_dying == DOMDYING_dead) ? XEN_DOMINF_dying    : 0) |
        (d->is_shut_down                ? XEN_DOMINF_shutdown : 0) |
        (d->is_paused_by_controller     ? XEN_DOMINF_paused   : 0) |
        (d->debugger_attached           ? XEN_DOMINF_debugged : 0) |
        d->shutdown_code << XEN_DOMINF_shutdownshift;

    if ( is_hvm_domain(d) )
        info->flags |= XEN_DOMINF_hvm_guest;

    xsm_security_domaininfo(d, info);

    info->tot_pages         = d->tot_pages;
    info->max_pages         = d->max_pages;
    info->shared_info_frame = mfn_to_gmfn(d, __pa(d->shared_info)>>PAGE_SHIFT);

    memcpy(info->handle, d->handle, sizeof(xen_domain_handle_t));
}
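
/*
 * Choose a physical CPU for a new domain's VCPU0: prefer the
 * least-populated CPU, avoiding primary hyperthreads where possible.
 */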
static unsigned int default_vcpu0_location(void)
{
    struct domain *d;
    struct vcpu   *v;
    unsigned int   i, cpu, cnt[NR_CPUS] = { 0 };
    cpumask_t      cpu_exclude_map;

    /* Do an initial CPU placement. Pick the least-populated CPU. */
    rcu_read_lock(&domlist_read_lock);
    for_each_domain ( d )
        for_each_vcpu ( d, v )
            if ( !test_bit(_VPF_down, &v->pause_flags) )
                cnt[v->processor]++;
    rcu_read_unlock(&domlist_read_lock);

    /*
     * If we're on a HT system, we only auto-allocate to a non-primary HT. We
     * favour high numbered CPUs in the event of a tie.
     */
    cpu = first_cpu(cpu_sibling_map[0]);
    if ( cpus_weight(cpu_sibling_map[0]) > 1 )
        cpu = next_cpu(cpu, cpu_sibling_map[0]);
    cpu_exclude_map = cpu_sibling_map[0];
    for_each_online_cpu ( i )
    {
        if ( cpu_isset(i, cpu_exclude_map) )
            continue;
        if ( (i == first_cpu(cpu_sibling_map[i])) &&
             (cpus_weight(cpu_sibling_map[i]) > 1) )
            continue;
        cpus_or(cpu_exclude_map, cpu_exclude_map, cpu_sibling_map[i]);
        if ( cnt[i] <= cnt[cpu] )
            cpu = i;
    }

    return cpu;
}
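
/*
 * Top-level XEN_DOMCTL hypercall handler. Only a privileged (control)
 * domain may call it; all domctl operations are serialised by a single
 * global spinlock.
 */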
long do_domctl(XEN_GUEST_HANDLE(xen_domctl_t) u_domctl)
{
    long ret = 0;
    struct xen_domctl curop, *op = &curop;
    static DEFINE_SPINLOCK(domctl_lock);

    if ( !IS_PRIV(current->domain) )
        return -EPERM;

    if ( copy_from_guest(op, u_domctl, 1) )
        return -EFAULT;

    if ( op->interface_version != XEN_DOMCTL_INTERFACE_VERSION )
        return -EACCES;

    spin_lock(&domctl_lock);

    switch ( op->cmd )
    {
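
    /*
     * XEN_DOMCTL_setvcpucontext: load a new register/state image into the
     * given VCPU. A NULL ctxt handle resets the VCPU instead.
     */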
    case XEN_DOMCTL_setvcpucontext:
    {
        struct domain *d = rcu_lock_domain_by_id(op->domain);
        vcpu_guest_context_u c = { .nat = NULL };
        unsigned int vcpu = op->u.vcpucontext.vcpu;
        struct vcpu *v;

        ret = -ESRCH;
        if ( d == NULL )
            break;

        ret = xsm_setvcpucontext(d);
        if ( ret )
            goto svc_out;

        ret = -EINVAL;
        if ( (vcpu >= MAX_VIRT_CPUS) || ((v = d->vcpu[vcpu]) == NULL) )
            goto svc_out;

        if ( guest_handle_is_null(op->u.vcpucontext.ctxt) )
        {
            ret = vcpu_reset(v);
            goto svc_out;
        }

#ifdef CONFIG_COMPAT
        BUILD_BUG_ON(sizeof(struct vcpu_guest_context)
                     < sizeof(struct compat_vcpu_guest_context));
#endif
        ret = -ENOMEM;
        if ( (c.nat = xmalloc(struct vcpu_guest_context)) == NULL )
            goto svc_out;

        if ( !IS_COMPAT(v->domain) )
            ret = copy_from_guest(c.nat, op->u.vcpucontext.ctxt, 1);
#ifdef CONFIG_COMPAT
        else
            ret = copy_from_guest(c.cmp,
                                  guest_handle_cast(op->u.vcpucontext.ctxt,
                                                    void), 1);
#endif
        ret = ret ? -EFAULT : 0;

        if ( ret == 0 )
        {
            domain_pause(d);
            ret = arch_set_info_guest(v, c);
            domain_unpause(d);
        }

    svc_out:
        xfree(c.nat);
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_pausedomain:
    {
        struct domain *d = rcu_lock_domain_by_id(op->domain);
        ret = -ESRCH;
        if ( d != NULL )
        {
            ret = xsm_pausedomain(d);
            if ( ret )
                goto pausedomain_out;

            ret = -EINVAL;
            if ( d != current->domain )
            {
                domain_pause_by_systemcontroller(d);
                ret = 0;
            }
        pausedomain_out:
            rcu_unlock_domain(d);
        }
    }
    break;

    case XEN_DOMCTL_unpausedomain:
    {
        struct domain *d = rcu_lock_domain_by_id(op->domain);

        ret = -ESRCH;
        if ( d == NULL )
            break;

        ret = xsm_unpausedomain(d);
        if ( ret )
        {
            rcu_unlock_domain(d);
            break;
        }

        domain_unpause_by_systemcontroller(d);
        rcu_unlock_domain(d);
        ret = 0;
    }
    break;

    case XEN_DOMCTL_resumedomain:
    {
        struct domain *d = rcu_lock_domain_by_id(op->domain);

        ret = -ESRCH;
        if ( d == NULL )
            break;

        ret = xsm_resumedomain(d);
        if ( ret )
        {
            rcu_unlock_domain(d);
            break;
        }

        domain_resume(d);
        rcu_unlock_domain(d);
        ret = 0;
    }
    break;

    case XEN_DOMCTL_createdomain:
    {
        struct domain *d;
        domid_t        dom;
        static domid_t rover = 0;
        unsigned int   domcr_flags;

        ret = -EINVAL;
        if ( supervisor_mode_kernel ||
             (op->u.createdomain.flags & ~XEN_DOMCTL_CDF_hvm_guest) )
            break;

        dom = op->domain;
        if ( (dom > 0) && (dom < DOMID_FIRST_RESERVED) )
        {
            ret = -EINVAL;
            if ( !is_free_domid(dom) )
                break;
        }
        else
        {
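            /*
             * No specific ID requested: search for a free one, starting
             * just after the last ID handed out and wrapping at
             * DOMID_FIRST_RESERVED. A full cycle back to 'rover' means
             * the ID space is exhausted.
             */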
            for ( dom = rover + 1; dom != rover; dom++ )
            {
                if ( dom == DOMID_FIRST_RESERVED )
                    dom = 0;
                if ( is_free_domid(dom) )
                    break;
            }

            ret = -ENOMEM;
            if ( dom == rover )
                break;

            rover = dom;
        }

        domcr_flags = 0;
        if ( op->u.createdomain.flags & XEN_DOMCTL_CDF_hvm_guest )
            domcr_flags |= DOMCRF_hvm;

        ret = -ENOMEM;
        d = domain_create(dom, domcr_flags, op->u.createdomain.ssidref);
        if ( d == NULL )
            break;

        ret = 0;

        memcpy(d->handle, op->u.createdomain.handle,
               sizeof(xen_domain_handle_t));

        op->domain = d->domain_id;
        if ( copy_to_guest(u_domctl, op, 1) )
            ret = -EFAULT;
    }
    break;

    case XEN_DOMCTL_max_vcpus:
    {
        struct domain *d;
        unsigned int i, max = op->u.max_vcpus.max, cpu;

        ret = -EINVAL;
        if ( max > MAX_VIRT_CPUS )
            break;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(op->domain)) == NULL )
            break;

        ret = xsm_max_vcpus(d);
        if ( ret )
        {
            rcu_unlock_domain(d);
            break;
        }

        /* Needed, for example, to ensure writable p.t. state is synced. */
        domain_pause(d);

        /* We cannot reduce maximum VCPUs. */
        ret = -EINVAL;
        if ( (max != MAX_VIRT_CPUS) && (d->vcpu[max] != NULL) )
            goto maxvcpu_out;

        ret = -ENOMEM;
        for ( i = 0; i < max; i++ )
        {
            if ( d->vcpu[i] != NULL )
                continue;
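            /*
             * VCPU0 is placed by the load-balancing heuristic above;
             * subsequent VCPUs are spread round-robin over online CPUs.
             */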
            cpu = (i == 0) ?
                default_vcpu0_location() :
                (d->vcpu[i-1]->processor + 1) % num_online_cpus();

            if ( alloc_vcpu(d, i, cpu) == NULL )
                goto maxvcpu_out;
        }

        ret = 0;

    maxvcpu_out:
        domain_unpause(d);
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_destroydomain:
    {
        struct domain *d = rcu_lock_domain_by_id(op->domain);
        ret = -ESRCH;
        if ( d != NULL )
        {
            ret = xsm_destroydomain(d) ? : domain_kill(d);
            rcu_unlock_domain(d);
        }
    }
    break;

    case XEN_DOMCTL_setvcpuaffinity:
    case XEN_DOMCTL_getvcpuaffinity:
    {
        domid_t dom = op->domain;
        struct domain *d = rcu_lock_domain_by_id(dom);
        struct vcpu *v;
        cpumask_t new_affinity;

        ret = -ESRCH;
        if ( d == NULL )
            break;

        ret = xsm_vcpuaffinity(op->cmd, d);
        if ( ret )
            goto vcpuaffinity_out;

        ret = -EINVAL;
        if ( op->u.vcpuaffinity.vcpu >= MAX_VIRT_CPUS )
            goto vcpuaffinity_out;

        ret = -ESRCH;
        if ( (v = d->vcpu[op->u.vcpuaffinity.vcpu]) == NULL )
            goto vcpuaffinity_out;

        if ( op->cmd == XEN_DOMCTL_setvcpuaffinity )
        {
            xenctl_cpumap_to_cpumask(
                &new_affinity, &op->u.vcpuaffinity.cpumap);
            ret = vcpu_set_affinity(v, &new_affinity);
        }
        else
        {
            cpumask_to_xenctl_cpumap(
                &op->u.vcpuaffinity.cpumap, &v->cpu_affinity);
            ret = 0;
        }

    vcpuaffinity_out:
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_scheduler_op:
    {
        struct domain *d;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(op->domain)) == NULL )
            break;

        ret = xsm_scheduler(d);
        if ( ret )
            goto scheduler_op_out;

        ret = sched_adjust(d, &op->u.scheduler_op);
        if ( copy_to_guest(u_domctl, op, 1) )
            ret = -EFAULT;

    scheduler_op_out:
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_getdomaininfo:
    {
        struct domain *d;
        domid_t dom = op->domain;

        rcu_read_lock(&domlist_read_lock);

        for_each_domain ( d )
            if ( d->domain_id >= dom )
                break;

        if ( d == NULL )
        {
            rcu_read_unlock(&domlist_read_lock);
            ret = -ESRCH;
            break;
        }

        ret = xsm_getdomaininfo(d);
        if ( ret )
            goto getdomaininfo_out;

        getdomaininfo(d, &op->u.getdomaininfo);

        op->domain = op->u.getdomaininfo.domain;
        if ( copy_to_guest(u_domctl, op, 1) )
            ret = -EFAULT;

    getdomaininfo_out:
        rcu_read_unlock(&domlist_read_lock);
    }
    break;

    case XEN_DOMCTL_getvcpucontext:
    {
        vcpu_guest_context_u c = { .nat = NULL };
        struct domain *d;
        struct vcpu *v;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(op->domain)) == NULL )
            break;

        ret = xsm_getvcpucontext(d);
        if ( ret )
            goto getvcpucontext_out;

        ret = -EINVAL;
        if ( op->u.vcpucontext.vcpu >= MAX_VIRT_CPUS )
            goto getvcpucontext_out;

        ret = -ESRCH;
        if ( (v = d->vcpu[op->u.vcpucontext.vcpu]) == NULL )
            goto getvcpucontext_out;

        ret = -ENODATA;
        if ( !v->is_initialised )
            goto getvcpucontext_out;

#ifdef CONFIG_COMPAT
        BUILD_BUG_ON(sizeof(struct vcpu_guest_context)
                     < sizeof(struct compat_vcpu_guest_context));
#endif
        ret = -ENOMEM;
        if ( (c.nat = xmalloc(struct vcpu_guest_context)) == NULL )
            goto getvcpucontext_out;

        if ( v != current )
            vcpu_pause(v);

        arch_get_info_guest(v, c);
        ret = 0;

        if ( v != current )
            vcpu_unpause(v);

        if ( !IS_COMPAT(v->domain) )
            ret = copy_to_guest(op->u.vcpucontext.ctxt, c.nat, 1);
#ifdef CONFIG_COMPAT
        else
            ret = copy_to_guest(guest_handle_cast(op->u.vcpucontext.ctxt,
                                                  void), c.cmp, 1);
#endif

        if ( copy_to_guest(u_domctl, op, 1) || ret )
            ret = -EFAULT;

    getvcpucontext_out:
        xfree(c.nat);
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_getvcpuinfo:
    {
        struct domain *d;
        struct vcpu *v;
        struct vcpu_runstate_info runstate;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(op->domain)) == NULL )
            break;

        ret = xsm_getvcpuinfo(d);
        if ( ret )
            goto getvcpuinfo_out;

        ret = -EINVAL;
        if ( op->u.getvcpuinfo.vcpu >= MAX_VIRT_CPUS )
            goto getvcpuinfo_out;

        ret = -ESRCH;
        if ( (v = d->vcpu[op->u.getvcpuinfo.vcpu]) == NULL )
            goto getvcpuinfo_out;

        vcpu_runstate_get(v, &runstate);

        op->u.getvcpuinfo.online   = !test_bit(_VPF_down, &v->pause_flags);
        op->u.getvcpuinfo.blocked  = test_bit(_VPF_blocked, &v->pause_flags);
        op->u.getvcpuinfo.running  = v->is_running;
        op->u.getvcpuinfo.cpu_time = runstate.time[RUNSTATE_running];
        op->u.getvcpuinfo.cpu      = v->processor;
        ret = 0;

        if ( copy_to_guest(u_domctl, op, 1) )
            ret = -EFAULT;

    getvcpuinfo_out:
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_max_mem:
    {
        struct domain *d;
        unsigned long new_max;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if ( d == NULL )
            break;

        ret = xsm_setdomainmaxmem(d);
        if ( ret )
            goto max_mem_out;

        ret = -EINVAL;
        new_max = op->u.max_mem.max_memkb >> (PAGE_SHIFT-10);

        spin_lock(&d->page_alloc_lock);
        if ( new_max >= d->tot_pages )
        {
            ret = guest_physmap_max_mem_pages(d, new_max);
            if ( ret != 0 )
            {
                /* Must drop the allocation lock before taking the
                 * common exit path (which also unlocks the domain). */
                spin_unlock(&d->page_alloc_lock);
                goto max_mem_out;
            }
            d->max_pages = new_max;
            ret = 0;
        }
        spin_unlock(&d->page_alloc_lock);

    max_mem_out:
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_setdomainhandle:
    {
        struct domain *d;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if ( d == NULL )
            break;

        ret = xsm_setdomainhandle(d);
        if ( ret )
        {
            rcu_unlock_domain(d);
            break;
        }

        memcpy(d->handle, op->u.setdomainhandle.handle,
               sizeof(xen_domain_handle_t));
        rcu_unlock_domain(d);
        ret = 0;
    }
    break;

    case XEN_DOMCTL_setdebugging:
    {
        struct domain *d;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if ( d == NULL )
            break;

        ret = xsm_setdebugging(d);
        if ( ret )
        {
            rcu_unlock_domain(d);
            break;
        }

        domain_pause(d);
        d->debugger_attached = !!op->u.setdebugging.enable;
        domain_unpause(d); /* causes guest to latch new status */
        rcu_unlock_domain(d);
        ret = 0;
    }
    break;

    case XEN_DOMCTL_irq_permission:
    {
        struct domain *d;
        unsigned int pirq = op->u.irq_permission.pirq;

        ret = -EINVAL;
        if ( pirq >= NR_IRQS )
            break;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if ( d == NULL )
            break;

        ret = xsm_irq_permission(d, pirq, op->u.irq_permission.allow_access);
        if ( ret )
            goto irq_permission_out;

        if ( op->u.irq_permission.allow_access )
            ret = irq_permit_access(d, pirq);
        else
            ret = irq_deny_access(d, pirq);

    irq_permission_out:
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_iomem_permission:
    {
        struct domain *d;
        unsigned long mfn = op->u.iomem_permission.first_mfn;
        unsigned long nr_mfns = op->u.iomem_permission.nr_mfns;

        ret = -EINVAL;
        if ( (mfn + nr_mfns - 1) < mfn ) /* wrap? */
            break;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if ( d == NULL )
            break;

        ret = xsm_iomem_permission(d, mfn, op->u.iomem_permission.allow_access);
        if ( ret )
            goto iomem_permission_out;

        if ( op->u.iomem_permission.allow_access )
            ret = iomem_permit_access(d, mfn, mfn + nr_mfns - 1);
        else
            ret = iomem_deny_access(d, mfn, mfn + nr_mfns - 1);

    iomem_permission_out:
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_settimeoffset:
    {
        struct domain *d;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if ( d != NULL )
        {
            ret = xsm_domain_settime(d);
            if ( ret )
            {
                rcu_unlock_domain(d);
                break;
            }

            d->time_offset_seconds = op->u.settimeoffset.time_offset_seconds;
            rcu_unlock_domain(d);
            ret = 0;
        }
    }
    break;

    default:
        ret = arch_do_domctl(op, u_domctl);
        break;
    }

    spin_unlock(&domctl_lock);

    return ret;
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */
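
For context, the control stack drives these operations from dom0 userspace:
libxc wraps each XEN_DOMCTL_* command in a small helper that fills in a
struct xen_domctl and issues the hypercall through privcmd. The sketch below
is a from-memory approximation of a 2007-era libxc wrapper, not part of this
changeset; the do_domctl() called here is libxc's userspace helper, not the
hypervisor function above.

    /* Sketch of a libxc-style caller (assumed, not taken from this file). */
    int xc_domain_pause(int xc_handle, uint32_t domid)
    {
        DECLARE_DOMCTL;
        domctl.cmd = XEN_DOMCTL_pausedomain;
        domctl.domain = (domid_t)domid;
        return do_domctl(xc_handle, &domctl);
    }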