xen/common/domain.c @ 19713:ae5bd69227d1

Free pirq_mask/pirq_to_evtchn in complete_domain_destroy().

Also rejig code slightly in domain_create().

Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri May 29 09:26:49 2009 +0100 (2009-05-29)
parents 527b628b8e83
children 50134a902c66

/******************************************************************************
 * domain.c
 *
 * Generic domain-handling functions.
 */

#include <xen/config.h>
#include <xen/compat.h>
#include <xen/init.h>
#include <xen/lib.h>
#include <xen/ctype.h>
#include <xen/errno.h>
#include <xen/sched.h>
#include <xen/domain.h>
#include <xen/mm.h>
#include <xen/event.h>
#include <xen/time.h>
#include <xen/console.h>
#include <xen/softirq.h>
#include <xen/domain_page.h>
#include <xen/rangeset.h>
#include <xen/guest_access.h>
#include <xen/hypercall.h>
#include <xen/delay.h>
#include <xen/shutdown.h>
#include <xen/percpu.h>
#include <xen/multicall.h>
#include <xen/rcupdate.h>
#include <acpi/cpufreq/cpufreq.h>
#include <asm/debugger.h>
#include <public/sched.h>
#include <public/vcpu.h>
#include <xsm/xsm.h>
#include <xen/trace.h>
#include <xen/tmem.h>

/* Linux config option: propagated to domain0. */
/* xen_processor_pmbits: xen control Cx, Px, ... */
unsigned int xen_processor_pmbits = XEN_PROCESSOR_PM_PX;

/* opt_dom0_vcpus_pin: If true, dom0 VCPUs are pinned. */
static unsigned int opt_dom0_vcpus_pin;
boolean_param("dom0_vcpus_pin", opt_dom0_vcpus_pin);

/* Set xen as the default cpufreq controller. */
enum cpufreq_controller cpufreq_controller = FREQCTL_xen;
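
/*
 * Parse the "cpufreq" boot parameter: "dom0-kernel", "none", or "xen",
 * the latter optionally followed by ',' or ':' and cpufreq options.
 */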
static void __init setup_cpufreq_option(char *str)
{
    char *arg;

    if ( !strcmp(str, "dom0-kernel") )
    {
        xen_processor_pmbits &= ~XEN_PROCESSOR_PM_PX;
        cpufreq_controller = FREQCTL_dom0_kernel;
        opt_dom0_vcpus_pin = 1;
        return;
    }

    if ( !strcmp(str, "none") )
    {
        xen_processor_pmbits &= ~XEN_PROCESSOR_PM_PX;
        cpufreq_controller = FREQCTL_none;
        return;
    }

    if ( (arg = strpbrk(str, ",:")) != NULL )
        *arg++ = '\0';

    if ( !strcmp(str, "xen") )
        if ( arg && *arg )
            cpufreq_cmdline_parse(arg);
}
custom_param("cpufreq", setup_cpufreq_option);

/* Protect updates/reads (resp.) of domain_list and domain_hash. */
DEFINE_SPINLOCK(domlist_update_lock);
DEFINE_RCU_READ_LOCK(domlist_read_lock);

#define DOMAIN_HASH_SIZE 256
#define DOMAIN_HASH(_id) ((int)(_id)&(DOMAIN_HASH_SIZE-1))
static struct domain *domain_hash[DOMAIN_HASH_SIZE];
struct domain *domain_list;

struct domain *dom0;

struct vcpu *idle_vcpu[NR_CPUS] __read_mostly;

int current_domain_id(void)
{
    return current->domain->domain_id;
}
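
/*
 * Mark @d fully shut down once every VCPU has paused for shutdown; then
 * notify the suspend event channel (for SHUTDOWN_suspend) or dom0 via
 * VIRQ_DOM_EXC. The caller must hold d->shutdown_lock.
 */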
static void __domain_finalise_shutdown(struct domain *d)
{
    struct vcpu *v;

    BUG_ON(!spin_is_locked(&d->shutdown_lock));

    if ( d->is_shut_down )
        return;

    for_each_vcpu ( d, v )
        if ( !v->paused_for_shutdown )
            return;

    d->is_shut_down = 1;
    if ( (d->shutdown_code == SHUTDOWN_suspend) && d->suspend_evtchn )
        evtchn_send(d, d->suspend_evtchn);
    else
        send_guest_global_virq(dom0, VIRQ_DOM_EXC);
}
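
/* If @v's domain is shutting down, pause @v and fold it into the shutdown. */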
static void vcpu_check_shutdown(struct vcpu *v)
{
    struct domain *d = v->domain;

    spin_lock(&d->shutdown_lock);

    if ( d->is_shutting_down )
    {
        if ( !v->paused_for_shutdown )
            vcpu_pause_nosync(v);
        v->paused_for_shutdown = 1;
        v->defer_shutdown = 0;
        __domain_finalise_shutdown(d);
    }

    spin_unlock(&d->shutdown_lock);
}
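
/*
 * Allocate and initialise VCPU @vcpu_id of domain @d, to run initially on
 * physical CPU @cpu_id. Returns NULL on failure.
 */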
struct vcpu *alloc_vcpu(
    struct domain *d, unsigned int vcpu_id, unsigned int cpu_id)
{
    struct vcpu *v;

    BUG_ON(d->vcpu[vcpu_id] != NULL);

    if ( (v = alloc_vcpu_struct()) == NULL )
        return NULL;

    v->domain = d;
    v->vcpu_id = vcpu_id;

    spin_lock_init(&v->virq_lock);

    if ( is_idle_domain(d) )
    {
        v->runstate.state = RUNSTATE_running;
    }
    else
    {
        v->runstate.state = RUNSTATE_offline;
        v->runstate.state_entry_time = NOW();
        set_bit(_VPF_down, &v->pause_flags);
        v->vcpu_info = (void *)&shared_info(d, vcpu_info[vcpu_id]);
    }

    if ( sched_init_vcpu(v, cpu_id) != 0 )
    {
        free_vcpu_struct(v);
        return NULL;
    }

    if ( vcpu_initialise(v) != 0 )
    {
        sched_destroy_vcpu(v);
        free_vcpu_struct(v);
        return NULL;
    }

    d->vcpu[vcpu_id] = v;
    if ( vcpu_id != 0 )
        d->vcpu[v->vcpu_id-1]->next_in_list = v;

    /* Must be called after making new vcpu visible to for_each_vcpu(). */
    vcpu_check_shutdown(v);

    return v;
}
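
/* Return the idle VCPU for physical CPU @cpu_id, creating it on first use. */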
struct vcpu *alloc_idle_vcpu(unsigned int cpu_id)
{
    struct domain *d;
    struct vcpu *v;
    unsigned int vcpu_id = cpu_id % MAX_VIRT_CPUS;

    if ( (v = idle_vcpu[cpu_id]) != NULL )
        return v;

    d = (vcpu_id == 0) ?
        domain_create(IDLE_DOMAIN_ID, 0, 0) :
        idle_vcpu[cpu_id - vcpu_id]->domain;
    BUG_ON(d == NULL);

    v = alloc_vcpu(d, vcpu_id, cpu_id);
    idle_vcpu[cpu_id] = v;

    return v;
}
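
/* Parse the "extra_guest_irqs" boot parameter: "<domU-irqs>[,<dom0-irqs>]". */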
static unsigned int extra_dom0_irqs, extra_domU_irqs = 8;
static void __init parse_extra_guest_irqs(const char *s)
{
    if ( isdigit(*s) )
        extra_domU_irqs = simple_strtoul(s, &s, 0);
    if ( *s == ',' && isdigit(*++s) )
        extra_dom0_irqs = simple_strtoul(s, &s, 0);
}
custom_param("extra_guest_irqs", parse_extra_guest_irqs);
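
/*
 * Create and initialise a domain with the given @domid. On failure, the
 * partially constructed domain is torn down and NULL is returned.
 */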
struct domain *domain_create(
    domid_t domid, unsigned int domcr_flags, ssidref_t ssidref)
{
    struct domain *d, **pd;
    enum { INIT_xsm = 1u<<0, INIT_rangeset = 1u<<1, INIT_evtchn = 1u<<2,
           INIT_gnttab = 1u<<3, INIT_arch = 1u<<4 };
    int init_status = 0;

    if ( (d = alloc_domain_struct()) == NULL )
        return NULL;

    memset(d, 0, sizeof(*d));
    d->domain_id = domid;

    if ( xsm_alloc_security_domain(d) != 0 )
        goto fail;
    init_status |= INIT_xsm;

    atomic_set(&d->refcnt, 1);
    spin_lock_init(&d->domain_lock);
    spin_lock_init(&d->page_alloc_lock);
    spin_lock_init(&d->shutdown_lock);
    spin_lock_init(&d->hypercall_deadlock_mutex);
    INIT_PAGE_LIST_HEAD(&d->page_list);
    INIT_PAGE_LIST_HEAD(&d->xenpage_list);

    if ( domcr_flags & DOMCRF_hvm )
        d->is_hvm = 1;

    if ( (domid == 0) && opt_dom0_vcpus_pin )
        d->is_pinned = 1;

    if ( domcr_flags & DOMCRF_dummy )
        return d;

    rangeset_domain_initialise(d);
    init_status |= INIT_rangeset;

    if ( !is_idle_domain(d) )
    {
        if ( xsm_domain_create(d, ssidref) != 0 )
            goto fail;

        d->is_paused_by_controller = 1;
        atomic_inc(&d->pause_count);

        d->nr_pirqs = (nr_irqs +
                       (domid ? extra_domU_irqs :
                        extra_dom0_irqs ?: nr_irqs));
        d->pirq_to_evtchn = xmalloc_array(u16, d->nr_pirqs);
        d->pirq_mask = xmalloc_array(
            unsigned long, BITS_TO_LONGS(d->nr_pirqs));
        if ( (d->pirq_to_evtchn == NULL) || (d->pirq_mask == NULL) )
            goto fail;
        memset(d->pirq_to_evtchn, 0, d->nr_pirqs * sizeof(*d->pirq_to_evtchn));
        bitmap_zero(d->pirq_mask, d->nr_pirqs);

        if ( evtchn_init(d) != 0 )
            goto fail;
        init_status |= INIT_evtchn;

        if ( grant_table_create(d) != 0 )
            goto fail;
        init_status |= INIT_gnttab;
    }

    if ( arch_domain_create(d, domcr_flags) != 0 )
        goto fail;
    init_status |= INIT_arch;

    d->iomem_caps = rangeset_new(d, "I/O Memory", RANGESETF_prettyprint_hex);
    d->irq_caps = rangeset_new(d, "Interrupts", 0);
    if ( (d->iomem_caps == NULL) || (d->irq_caps == NULL) )
        goto fail;

    if ( sched_init_domain(d) != 0 )
        goto fail;

    if ( !is_idle_domain(d) )
    {
        spin_lock(&domlist_update_lock);
        pd = &domain_list; /* NB. domain_list maintained in order of domid. */
        for ( pd = &domain_list; *pd != NULL; pd = &(*pd)->next_in_list )
            if ( (*pd)->domain_id > d->domain_id )
                break;
        d->next_in_list = *pd;
        d->next_in_hashbucket = domain_hash[DOMAIN_HASH(domid)];
        rcu_assign_pointer(*pd, d);
        rcu_assign_pointer(domain_hash[DOMAIN_HASH(domid)], d);
        spin_unlock(&domlist_update_lock);
    }

    return d;

 fail:
    d->is_dying = DOMDYING_dead;
    atomic_set(&d->refcnt, DOMAIN_DESTROYED);
    if ( init_status & INIT_arch )
        arch_domain_destroy(d);
    if ( init_status & INIT_gnttab )
        grant_table_destroy(d);
    if ( init_status & INIT_evtchn )
        evtchn_destroy(d);
    if ( init_status & INIT_rangeset )
        rangeset_domain_destroy(d);
    if ( init_status & INIT_xsm )
        xsm_free_security_domain(d);
    xfree(d->pirq_mask);
    xfree(d->pirq_to_evtchn);
    free_domain_struct(d);
    return NULL;
}
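
/*
 * Look up a domain by id and take a reference. Returns NULL if no such
 * domain exists or if it could not be referenced (already destroyed).
 */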
struct domain *get_domain_by_id(domid_t dom)
{
    struct domain *d;

    rcu_read_lock(&domlist_read_lock);

    for ( d = rcu_dereference(domain_hash[DOMAIN_HASH(dom)]);
          d != NULL;
          d = rcu_dereference(d->next_in_hashbucket) )
    {
        if ( d->domain_id == dom )
        {
            if ( unlikely(!get_domain(d)) )
                d = NULL;
            break;
        }
    }

    rcu_read_unlock(&domlist_read_lock);

    return d;
}
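
/*
 * Look up a domain by id under the RCU read lock. On success the lock is
 * left held; the caller must release it with rcu_unlock_domain().
 */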
struct domain *rcu_lock_domain_by_id(domid_t dom)
{
    struct domain *d;

    rcu_read_lock(&domlist_read_lock);

    for ( d = rcu_dereference(domain_hash[DOMAIN_HASH(dom)]);
          d != NULL;
          d = rcu_dereference(d->next_in_hashbucket) )
    {
        if ( d->domain_id == dom )
            return d;
    }

    rcu_read_unlock(&domlist_read_lock);

    return NULL;
}
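
/*
 * As rcu_lock_domain_by_id(), but also accepts DOMID_SELF and fails with
 * -EPERM if the current domain is not privileged for the target.
 */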
int rcu_lock_target_domain_by_id(domid_t dom, struct domain **d)
{
    if ( dom == DOMID_SELF )
    {
        *d = rcu_lock_current_domain();
        return 0;
    }

    if ( (*d = rcu_lock_domain_by_id(dom)) == NULL )
        return -ESRCH;

    if ( !IS_PRIV_FOR(current->domain, *d) )
    {
        rcu_unlock_domain(*d);
        return -EPERM;
    }

    return 0;
}
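
/*
 * Begin tearing down @d and relinquish its resources. May return -EAGAIN,
 * in which case the caller should retry the operation.
 */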
int domain_kill(struct domain *d)
{
    int rc = 0;

    if ( d == current->domain )
        return -EINVAL;

    /* Protected by domctl_lock. */
    switch ( d->is_dying )
    {
    case DOMDYING_alive:
        domain_pause(d);
        d->is_dying = DOMDYING_dying;
        spin_barrier(&d->domain_lock);
        evtchn_destroy(d);
        gnttab_release_mappings(d);
        /* fallthrough */
    case DOMDYING_dying:
        rc = domain_relinquish_resources(d);
        page_scrub_kick();
        if ( rc != 0 )
        {
            BUG_ON(rc != -EAGAIN);
            break;
        }
        d->is_dying = DOMDYING_dead;
        put_domain(d);
        send_guest_global_virq(dom0, VIRQ_DOM_EXC);
        /* fallthrough */
    case DOMDYING_dead:
        break;
    }

    return rc;
}
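
/* Log the crash and shut @d down with reason SHUTDOWN_crash. */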
void __domain_crash(struct domain *d)
{
    if ( d->is_shutting_down )
    {
        /* Print nothing: the domain is already shutting down. */
    }
    else if ( d == current->domain )
    {
        printk("Domain %d (vcpu#%d) crashed on cpu#%d:\n",
               d->domain_id, current->vcpu_id, smp_processor_id());
        show_execution_state(guest_cpu_user_regs());
    }
    else
    {
        printk("Domain %d reported crashed by domain %d on cpu#%d:\n",
               d->domain_id, current->domain->domain_id, smp_processor_id());
    }

    domain_shutdown(d, SHUTDOWN_crash);
}

void __domain_crash_synchronous(void)
{
    __domain_crash(current->domain);

    /*
     * Flush multicall state before dying if a multicall is in progress.
     * This shouldn't be necessary, but some architectures are calling
     * domain_crash_synchronous() when they really shouldn't (i.e., from
     * within hypercall context).
     */
    if ( this_cpu(mc_state).flags != 0 )
    {
        dprintk(XENLOG_ERR,
                "FIXME: synchronous domain crash during a multicall!\n");
        this_cpu(mc_state).flags = 0;
    }

    vcpu_end_shutdown_deferral(current);

    for ( ; ; )
        do_softirq();
}
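
/*
 * Shut @d down with the given @reason, pausing every VCPU that has not
 * deferred shutdown. A dom0 shutdown is handled specially by dom0_shutdown().
 */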
void domain_shutdown(struct domain *d, u8 reason)
{
    struct vcpu *v;

    if ( d->domain_id == 0 )
        dom0_shutdown(reason);

    spin_lock(&d->shutdown_lock);

    if ( d->is_shutting_down )
    {
        spin_unlock(&d->shutdown_lock);
        return;
    }

    d->is_shutting_down = 1;
    d->shutdown_code = reason;

    smp_mb(); /* set shutdown status /then/ check for per-cpu deferrals */

    for_each_vcpu ( d, v )
    {
        if ( reason == SHUTDOWN_crash )
            v->defer_shutdown = 0;
        else if ( v->defer_shutdown )
            continue;
        vcpu_pause_nosync(v);
        v->paused_for_shutdown = 1;
    }

    __domain_finalise_shutdown(d);

    spin_unlock(&d->shutdown_lock);
}
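
/* Undo a shutdown: clear the shutdown state and unpause the VCPUs. */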
void domain_resume(struct domain *d)
{
    struct vcpu *v;

    /*
     * Some code paths assume that shutdown status does not get reset under
     * their feet (e.g., some assertions make this assumption).
     */
    domain_pause(d);

    spin_lock(&d->shutdown_lock);

    d->is_shutting_down = d->is_shut_down = 0;

    for_each_vcpu ( d, v )
    {
        if ( v->paused_for_shutdown )
            vcpu_unpause(v);
        v->paused_for_shutdown = 0;
    }

    spin_unlock(&d->shutdown_lock);

    domain_unpause(d);
}
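
/*
 * Ask that @v not be paused when its domain shuts down. Returns 1 if the
 * deferral is in effect, 0 if a shutdown has already claimed the VCPU.
 */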
int vcpu_start_shutdown_deferral(struct vcpu *v)
{
    if ( v->defer_shutdown )
        return 1;

    v->defer_shutdown = 1;
    smp_mb(); /* set deferral status /then/ check for shutdown */
    if ( unlikely(v->domain->is_shutting_down) )
        vcpu_check_shutdown(v);

    return v->defer_shutdown;
}

void vcpu_end_shutdown_deferral(struct vcpu *v)
{
    v->defer_shutdown = 0;
    smp_mb(); /* clear deferral status /then/ check for shutdown */
    if ( unlikely(v->domain->is_shutting_down) )
        vcpu_check_shutdown(v);
}
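
/* Pause the current domain and notify the debugger via VIRQ_DEBUGGER. */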
void domain_pause_for_debugger(void)
{
    struct domain *d = current->domain;
    struct vcpu *v;

    atomic_inc(&d->pause_count);
    if ( test_and_set_bool(d->is_paused_by_controller) )
        domain_unpause(d); /* race-free atomic_dec(&d->pause_count) */

    for_each_vcpu ( d, v )
        vcpu_sleep_nosync(v);

    send_guest_global_virq(dom0, VIRQ_DEBUGGER);
}

/* Complete domain destroy after RCU readers are not holding old references. */
static void complete_domain_destroy(struct rcu_head *head)
{
    struct domain *d = container_of(head, struct domain, rcu);
    struct vcpu *v;
    int i;

    for ( i = MAX_VIRT_CPUS-1; i >= 0; i-- )
    {
        if ( (v = d->vcpu[i]) == NULL )
            continue;
        vcpu_destroy(v);
        sched_destroy_vcpu(v);
    }

    grant_table_destroy(d);

    if ( d->tmem != NULL )
        tmem_destroy(d->tmem);

    arch_domain_destroy(d);

    rangeset_domain_destroy(d);

    sched_destroy_domain(d);

    /* Free page used by xen oprofile buffer. */
    free_xenoprof_pages(d);

    for ( i = MAX_VIRT_CPUS-1; i >= 0; i-- )
        if ( (v = d->vcpu[i]) != NULL )
            free_vcpu_struct(v);

    if ( d->target != NULL )
        put_domain(d->target);

    xfree(d->pirq_mask);
    xfree(d->pirq_to_evtchn);

    xsm_free_security_domain(d);
    free_domain_struct(d);

    send_guest_global_virq(dom0, VIRQ_DOM_EXC);
}

/* Release resources belonging to domain @d. */
void domain_destroy(struct domain *d)
{
    struct domain **pd;
    atomic_t old, new;

    BUG_ON(!d->is_dying);

    /* May be already destroyed, or get_domain() can race us. */
    _atomic_set(old, 0);
    _atomic_set(new, DOMAIN_DESTROYED);
    old = atomic_compareandswap(old, new, &d->refcnt);
    if ( _atomic_read(old) != 0 )
        return;

    /* Delete from task list and task hashtable. */
    TRACE_1D(TRC_SCHED_DOM_REM, d->domain_id);
    spin_lock(&domlist_update_lock);
    pd = &domain_list;
    while ( *pd != d )
        pd = &(*pd)->next_in_list;
    rcu_assign_pointer(*pd, d->next_in_list);
    pd = &domain_hash[DOMAIN_HASH(d->domain_id)];
    while ( *pd != d )
        pd = &(*pd)->next_in_hashbucket;
    rcu_assign_pointer(*pd, d->next_in_hashbucket);
    spin_unlock(&domlist_update_lock);

    /* Schedule RCU asynchronous completion of domain destroy. */
    call_rcu(&d->rcu, complete_domain_destroy);
}
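
/* Synchronously pause @v: it is fully descheduled before this returns. */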
void vcpu_pause(struct vcpu *v)
{
    ASSERT(v != current);
    atomic_inc(&v->pause_count);
    vcpu_sleep_sync(v);
}

void vcpu_pause_nosync(struct vcpu *v)
{
    atomic_inc(&v->pause_count);
    vcpu_sleep_nosync(v);
}

void vcpu_unpause(struct vcpu *v)
{
    if ( atomic_dec_and_test(&v->pause_count) )
        vcpu_wake(v);
}

void domain_pause(struct domain *d)
{
    struct vcpu *v;

    ASSERT(d != current->domain);

    atomic_inc(&d->pause_count);

    for_each_vcpu( d, v )
        vcpu_sleep_sync(v);
}

void domain_unpause(struct domain *d)
{
    struct vcpu *v;

    if ( atomic_dec_and_test(&d->pause_count) )
        for_each_vcpu( d, v )
            vcpu_wake(v);
}

void domain_pause_by_systemcontroller(struct domain *d)
{
    domain_pause(d);
    if ( test_and_set_bool(d->is_paused_by_controller) )
        domain_unpause(d);
}

void domain_unpause_by_systemcontroller(struct domain *d)
{
    if ( test_and_clear_bool(d->is_paused_by_controller) )
        domain_unpause(d);
}
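
/*
 * Initialise VCPU @vcpuid of @d from guest context @ctxt. The VCPU must
 * not already be initialised.
 */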
int boot_vcpu(struct domain *d, int vcpuid, vcpu_guest_context_u ctxt)
{
    struct vcpu *v = d->vcpu[vcpuid];

    BUG_ON(v->is_initialised);

    return arch_set_info_guest(v, ctxt);
}

void vcpu_reset(struct vcpu *v)
{
    struct domain *d = v->domain;

    vcpu_pause(v);
    domain_lock(d);

    arch_vcpu_reset(v);

    set_bit(_VPF_down, &v->pause_flags);

    clear_bit(v->vcpu_id, d->poll_mask);
    v->poll_evtchn = 0;

    v->fpu_initialised = 0;
    v->fpu_dirtied = 0;
    v->is_initialised = 0;
    v->nmi_pending = 0;
    v->mce_pending = 0;
    v->old_trap_priority = VCPU_TRAP_NONE;
    v->trap_priority = VCPU_TRAP_NONE;
    clear_bit(_VPF_blocked, &v->pause_flags);

    domain_unlock(v->domain);
    vcpu_unpause(v);
}
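
/* Dispatcher for the VCPUOP_* hypercalls. */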
long do_vcpu_op(int cmd, int vcpuid, XEN_GUEST_HANDLE(void) arg)
{
    struct domain *d = current->domain;
    struct vcpu *v;
    struct vcpu_guest_context *ctxt;
    long rc = 0;

    if ( (vcpuid < 0) || (vcpuid >= MAX_VIRT_CPUS) )
        return -EINVAL;

    if ( (v = d->vcpu[vcpuid]) == NULL )
        return -ENOENT;

    switch ( cmd )
    {
    case VCPUOP_initialise:
        if ( (ctxt = xmalloc(struct vcpu_guest_context)) == NULL )
            return -ENOMEM;

        if ( copy_from_guest(ctxt, arg, 1) )
        {
            xfree(ctxt);
            return -EFAULT;
        }

        domain_lock(d);
        rc = -EEXIST;
        if ( !v->is_initialised )
            rc = boot_vcpu(d, vcpuid, ctxt);
        domain_unlock(d);

        xfree(ctxt);
        break;

    case VCPUOP_up:
        if ( !v->is_initialised )
            return -EINVAL;

        if ( test_and_clear_bit(_VPF_down, &v->pause_flags) )
            vcpu_wake(v);

        break;

    case VCPUOP_down:
        if ( !test_and_set_bit(_VPF_down, &v->pause_flags) )
            vcpu_sleep_nosync(v);
        break;

    case VCPUOP_is_up:
        rc = !test_bit(_VPF_down, &v->pause_flags);
        break;

    case VCPUOP_get_runstate_info:
    {
        struct vcpu_runstate_info runstate;
        vcpu_runstate_get(v, &runstate);
        if ( copy_to_guest(arg, &runstate, 1) )
            rc = -EFAULT;
        break;
    }

    case VCPUOP_set_periodic_timer:
    {
        struct vcpu_set_periodic_timer set;

        if ( copy_from_guest(&set, arg, 1) )
            return -EFAULT;

        if ( set.period_ns < MILLISECS(1) )
            return -EINVAL;

        v->periodic_period = set.period_ns;
        vcpu_force_reschedule(v);

        break;
    }

    case VCPUOP_stop_periodic_timer:
        v->periodic_period = 0;
        vcpu_force_reschedule(v);
        break;

    case VCPUOP_set_singleshot_timer:
    {
        struct vcpu_set_singleshot_timer set;

        if ( v != current )
            return -EINVAL;

        if ( copy_from_guest(&set, arg, 1) )
            return -EFAULT;

        if ( (set.flags & VCPU_SSHOTTMR_future) &&
             (set.timeout_abs_ns < NOW()) )
            return -ETIME;

        if ( v->singleshot_timer.cpu != smp_processor_id() )
        {
            stop_timer(&v->singleshot_timer);
            v->singleshot_timer.cpu = smp_processor_id();
        }

        set_timer(&v->singleshot_timer, set.timeout_abs_ns);

        break;
    }

    case VCPUOP_stop_singleshot_timer:
        if ( v != current )
            return -EINVAL;

        stop_timer(&v->singleshot_timer);

        break;

    case VCPUOP_send_nmi:
        if ( !guest_handle_is_null(arg) )
            return -EINVAL;

        if ( !test_and_set_bool(v->nmi_pending) )
            vcpu_kick(v);

        break;

    default:
        rc = arch_do_vcpu_op(cmd, v, arg);
        break;
    }

    return rc;
}
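
/* Enable or disable VM assist @type for domain @p. */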
long vm_assist(struct domain *p, unsigned int cmd, unsigned int type)
{
    if ( type > MAX_VMASST_TYPE )
        return -EINVAL;

    switch ( cmd )
    {
    case VMASST_CMD_enable:
        set_bit(type, &p->vm_assist);
        return 0;
    case VMASST_CMD_disable:
        clear_bit(type, &p->vm_assist);
        return 0;
    }

    return -ENOSYS;
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */