debuggers.hg
changeset 10679:462d6e4cb29a
[XEN] Separate domain creation from vcpu creation.
Creating a domain no longer creates vcpu0 -- that is now
done later.
Signed-off-by: Keir Fraser <keir@xensource.com>
author   | kfraser@localhost.localdomain
date     | Wed Jul 05 14:27:27 2006 +0100 (2006-07-05)
parents  | 222b492cc063
children | 4db818a7dc3f
files    | xen/arch/ia64/xen/xensetup.c xen/arch/x86/domain.c xen/arch/x86/setup.c xen/arch/x86/traps.c xen/common/dom0_ops.c xen/common/domain.c xen/common/event_channel.c xen/common/sched_sedf.c xen/drivers/char/console.c xen/include/xen/sched.h
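For illustration only, a minimal caller-side sketch of the pattern this changeset introduces (the helper name example_create_domain is hypothetical; the body mirrors the setup.c/xensetup.c hunks below):

/* Sketch only (not part of the changeset): after this change, callers must
 * allocate vcpu0 explicitly once domain_create() has succeeded. */
static void example_create_domain(domid_t dom)
{
    struct domain *d;

    d = domain_create(dom);              /* no longer allocates vcpu0 */
    if ( (d == NULL) || (alloc_vcpu(d, 0, 0) == NULL) )
        panic("Error creating domain\n");
}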
line diff
--- a/xen/arch/ia64/xen/xensetup.c  Wed Jul 05 11:31:33 2006 +0100
+++ b/xen/arch/ia64/xen/xensetup.c  Wed Jul 05 14:27:27 2006 +0100
@@ -425,8 +425,9 @@ void start_kernel(void)
 
     scheduler_init();
     idle_vcpu[0] = (struct vcpu*) ia64_r13;
-    idle_domain = domain_create(IDLE_DOMAIN_ID, 0);
-    BUG_ON(idle_domain == NULL);
+    idle_domain = domain_create(IDLE_DOMAIN_ID);
+    if ( (idle_domain == NULL) || (alloc_vcpu(idle_domain, 0, 0) == NULL) )
+        BUG();
 
     late_setup_arch(&cmdline);
     alloc_dom_xen_and_dom_io();
@@ -503,9 +504,8 @@ printk("num_online_cpus=%d, max_cpus=%d\
     }
 
     /* Create initial domain 0. */
-    dom0 = domain_create(0, 0);
-
-    if ( dom0 == NULL )
+    dom0 = domain_create(0);
+    if ( (dom0 == NULL) || (alloc_vcpu(dom0, 0, 0) == NULL) )
         panic("Error creating domain 0\n");
 
     set_bit(_DOMF_privileged, &dom0->domain_flags);
--- a/xen/arch/x86/domain.c  Wed Jul 05 11:31:33 2006 +0100
+++ b/xen/arch/x86/domain.c  Wed Jul 05 14:27:27 2006 +0100
@@ -951,7 +951,7 @@ void domain_relinquish_resources(struct
         }
     }
 
-    if ( hvm_guest(d->vcpu[0]) )
+    if ( d->vcpu[0] && hvm_guest(d->vcpu[0]) )
         hvm_relinquish_guest_resources(d);
 
     shadow_mode_disable(d);
--- a/xen/arch/x86/setup.c  Wed Jul 05 11:31:33 2006 +0100
+++ b/xen/arch/x86/setup.c  Wed Jul 05 14:27:27 2006 +0100
@@ -439,8 +439,9 @@ void __init __start_xen(multiboot_info_t
 
     scheduler_init();
 
-    idle_domain = domain_create(IDLE_DOMAIN_ID, 0);
-    BUG_ON(idle_domain == NULL);
+    idle_domain = domain_create(IDLE_DOMAIN_ID);
+    if ( (idle_domain == NULL) || (alloc_vcpu(idle_domain, 0, 0) == NULL) )
+        BUG();
 
     set_current(idle_domain->vcpu[0]);
     this_cpu(curr_vcpu) = idle_domain->vcpu[0];
@@ -537,8 +538,8 @@ void __init __start_xen(multiboot_info_t
     acm_init(&initrdidx, mbi, initial_images_start);
 
     /* Create initial domain 0. */
-    dom0 = domain_create(0, 0);
-    if ( dom0 == NULL )
+    dom0 = domain_create(0);
+    if ( (dom0 == NULL) || (alloc_vcpu(dom0, 0, 0) == NULL) )
         panic("Error creating domain 0\n");
 
     set_bit(_DOMF_privileged, &dom0->domain_flags);
--- a/xen/arch/x86/traps.c  Wed Jul 05 11:31:33 2006 +0100
+++ b/xen/arch/x86/traps.c  Wed Jul 05 14:27:27 2006 +0100
@@ -1397,13 +1397,14 @@ static void nmi_softirq(void)
 static void nmi_dom0_report(unsigned int reason_idx)
 {
     struct domain *d;
+    struct vcpu *v;
 
-    if ( (d = dom0) == NULL )
+    if ( ((d = dom0) == NULL) || ((v = d->vcpu[0]) == NULL) )
         return;
 
     set_bit(reason_idx, &d->shared_info->arch.nmi_reason);
 
-    if ( test_and_set_bit(_VCPUF_nmi_pending, &d->vcpu[0]->vcpu_flags) )
+    if ( test_and_set_bit(_VCPUF_nmi_pending, &v->vcpu_flags) )
         raise_softirq(NMI_SOFTIRQ); /* not safe to wake up a vcpu here */
 }
--- a/xen/common/dom0_ops.c  Wed Jul 05 11:31:33 2006 +0100
+++ b/xen/common/dom0_ops.c  Wed Jul 05 14:27:27 2006 +0100
@@ -90,6 +90,44 @@ static void getdomaininfo(struct domain
     memcpy(info->handle, d->handle, sizeof(xen_domain_handle_t));
 }
 
+static unsigned int default_vcpu0_location(void)
+{
+    struct domain *d;
+    struct vcpu *v;
+    unsigned int i, cpu, cnt[NR_CPUS] = { 0 };
+    cpumask_t cpu_exclude_map;
+
+    /* Do an initial CPU placement. Pick the least-populated CPU. */
+    read_lock(&domlist_lock);
+    for_each_domain ( d )
+        for_each_vcpu ( d, v )
+            if ( !test_bit(_VCPUF_down, &v->vcpu_flags) )
+                cnt[v->processor]++;
+    read_unlock(&domlist_lock);
+
+    /*
+     * If we're on a HT system, we only auto-allocate to a non-primary HT. We
+     * favour high numbered CPUs in the event of a tie.
+     */
+    cpu = first_cpu(cpu_sibling_map[0]);
+    if ( cpus_weight(cpu_sibling_map[0]) > 1 )
+        cpu = next_cpu(cpu, cpu_sibling_map[0]);
+    cpu_exclude_map = cpu_sibling_map[0];
+    for_each_online_cpu ( i )
+    {
+        if ( cpu_isset(i, cpu_exclude_map) )
+            continue;
+        if ( (i == first_cpu(cpu_sibling_map[i])) &&
+             (cpus_weight(cpu_sibling_map[i]) > 1) )
+            continue;
+        cpus_or(cpu_exclude_map, cpu_exclude_map, cpu_sibling_map[i]);
+        if ( cnt[i] <= cnt[cpu] )
+            cpu = i;
+    }
+
+    return cpu;
+}
+
 long do_dom0_op(XEN_GUEST_HANDLE(dom0_op_t) u_dom0_op)
 {
     long ret = 0;
@@ -150,7 +188,7 @@ long do_dom0_op(XEN_GUEST_HANDLE(dom0_op
         if ( d != NULL )
         {
             ret = -EINVAL;
-            if ( (d != current->domain) &&
+            if ( (d != current->domain) && (d->vcpu[0] != NULL) &&
                  test_bit(_VCPUF_initialised, &d->vcpu[0]->vcpu_flags) )
             {
                 domain_unpause_by_systemcontroller(d);
@@ -164,11 +202,7 @@ long do_dom0_op(XEN_GUEST_HANDLE(dom0_op
     case DOM0_CREATEDOMAIN:
     {
         struct domain *d;
-        unsigned int   pro;
         domid_t        dom;
-        struct vcpu   *v;
-        unsigned int   i, cnt[NR_CPUS] = { 0 };
-        cpumask_t      cpu_exclude_map;
         static domid_t rover = 0;
 
         /*
@@ -202,36 +236,8 @@ long do_dom0_op(XEN_GUEST_HANDLE(dom0_op
             rover = dom;
         }
 
-        /* Do an initial CPU placement. Pick the least-populated CPU. */
-        read_lock(&domlist_lock);
-        for_each_domain ( d )
-            for_each_vcpu ( d, v )
-                if ( !test_bit(_VCPUF_down, &v->vcpu_flags) )
-                    cnt[v->processor]++;
-        read_unlock(&domlist_lock);
-
-        /*
-         * If we're on a HT system, we only auto-allocate to a non-primary HT.
-         * We favour high numbered CPUs in the event of a tie.
-         */
-        pro = first_cpu(cpu_sibling_map[0]);
-        if ( cpus_weight(cpu_sibling_map[0]) > 1 )
-            pro = next_cpu(pro, cpu_sibling_map[0]);
-        cpu_exclude_map = cpu_sibling_map[0];
-        for_each_online_cpu ( i )
-        {
-            if ( cpu_isset(i, cpu_exclude_map) )
-                continue;
-            if ( (i == first_cpu(cpu_sibling_map[i])) &&
-                 (cpus_weight(cpu_sibling_map[i]) > 1) )
-                continue;
-            cpus_or(cpu_exclude_map, cpu_exclude_map, cpu_sibling_map[i]);
-            if ( cnt[i] <= cnt[pro] )
-                pro = i;
-        }
-
         ret = -ENOMEM;
-        if ( (d = domain_create(dom, pro)) == NULL )
+        if ( (d = domain_create(dom)) == NULL )
             break;
 
         memcpy(d->handle, op->u.createdomain.handle,
@@ -258,14 +264,8 @@ long do_dom0_op(XEN_GUEST_HANDLE(dom0_op
         if ( (d = find_domain_by_id(op->u.max_vcpus.domain)) == NULL )
             break;
 
-        /*
-         * Can only create new VCPUs while the domain is not fully constructed
-         * (and hence not runnable). Xen needs auditing for races before
-         * removing this check.
-         */
-        ret = -EINVAL;
-        if ( test_bit(_VCPUF_initialised, &d->vcpu[0]->vcpu_flags) )
-            goto maxvcpu_out;
+        /* Needed, for example, to ensure writable p.t. state is synced. */
+        domain_pause(d);
 
         /* We cannot reduce maximum VCPUs. */
         ret = -EINVAL;
@@ -275,17 +275,21 @@ long do_dom0_op(XEN_GUEST_HANDLE(dom0_op
         ret = -ENOMEM;
         for ( i = 0; i < max; i++ )
         {
-            if ( d->vcpu[i] == NULL )
-            {
-                cpu = (d->vcpu[i-1]->processor + 1) % num_online_cpus();
-                if ( alloc_vcpu(d, i, cpu) == NULL )
-                    goto maxvcpu_out;
-            }
+            if ( d->vcpu[i] != NULL )
+                continue;
+
+            cpu = (i == 0) ?
+                default_vcpu0_location() :
+                (d->vcpu[i-1]->processor + 1) % num_online_cpus();
+
+            if ( alloc_vcpu(d, i, cpu) == NULL )
+                goto maxvcpu_out;
         }
 
         ret = 0;
 
     maxvcpu_out:
+        domain_unpause(d);
         put_domain(d);
     }
     break;
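For illustration only, a condensed sketch of the new DOM0_MAX_VCPUS path shown above (the helper name example_set_max_vcpus is hypothetical and error handling is simplified). The point of the rework is that vcpu0 placement now happens here, via default_vcpu0_location(), rather than inside domain_create():

/* Sketch only: not part of the changeset; the real handler uses
 * goto maxvcpu_out and operates on the dom0_op argument block. */
static int example_set_max_vcpus(struct domain *d, unsigned int max)
{
    unsigned int i, cpu;
    int ret = 0;

    domain_pause(d);                      /* e.g. syncs writable p.t. state */
    for ( i = 0; i < max; i++ )
    {
        if ( d->vcpu[i] != NULL )
            continue;
        cpu = (i == 0) ? default_vcpu0_location()
                       : (d->vcpu[i-1]->processor + 1) % num_online_cpus();
        if ( alloc_vcpu(d, i, cpu) == NULL )
        {
            ret = -ENOMEM;
            break;
        }
    }
    domain_unpause(d);
    return ret;
}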
--- a/xen/common/domain.c  Wed Jul 05 11:31:33 2006 +0100
+++ b/xen/common/domain.c  Wed Jul 05 14:27:27 2006 +0100
@@ -46,6 +46,7 @@ struct domain *alloc_domain(domid_t domi
     atomic_set(&d->refcnt, 1);
     spin_lock_init(&d->big_lock);
     spin_lock_init(&d->page_alloc_lock);
+    spin_lock_init(&d->pause_lock);
     INIT_LIST_HEAD(&d->page_list);
     INIT_LIST_HEAD(&d->xenpage_list);
 
@@ -81,8 +82,8 @@ struct vcpu *alloc_vcpu(
     v->domain = d;
     v->vcpu_id = vcpu_id;
     v->processor = cpu_id;
-    atomic_set(&v->pausecnt, 0);
     v->vcpu_info = &d->shared_info->vcpu_info[vcpu_id];
+    spin_lock_init(&v->pause_lock);
 
     v->cpu_affinity = is_idle_domain(d) ?
         cpumask_of_cpu(cpu_id) : CPU_MASK_ALL;
@@ -110,30 +111,22 @@ struct vcpu *alloc_idle_vcpu(unsigned in
 {
     struct domain *d;
     struct vcpu *v;
-    unsigned int vcpu_id;
+    unsigned int vcpu_id = cpu_id % MAX_VIRT_CPUS;
 
-    if ((vcpu_id = cpu_id % MAX_VIRT_CPUS) == 0)
-    {
-        d = domain_create(IDLE_DOMAIN_ID, cpu_id);
-        BUG_ON(d == NULL);
-        v = d->vcpu[0];
-    }
-    else
-    {
-        d = idle_vcpu[cpu_id - vcpu_id]->domain;
-        BUG_ON(d == NULL);
-        v = alloc_vcpu(d, vcpu_id, cpu_id);
-    }
+    d = (vcpu_id == 0) ?
+        domain_create(IDLE_DOMAIN_ID) :
+        idle_vcpu[cpu_id - vcpu_id]->domain;
+    BUG_ON(d == NULL);
 
+    v = alloc_vcpu(d, vcpu_id, cpu_id);
     idle_vcpu[cpu_id] = v;
 
     return v;
 }
 
-struct domain *domain_create(domid_t domid, unsigned int cpu)
+struct domain *domain_create(domid_t domid)
 {
     struct domain *d, **pd;
-    struct vcpu *v;
 
     if ( (d = alloc_domain(domid)) == NULL )
         return NULL;
@@ -152,13 +145,10 @@ struct domain *domain_create(domid_t dom
     if ( arch_domain_create(d) != 0 )
         goto fail3;
 
-    if ( (v = alloc_vcpu(d, 0, cpu)) == NULL )
-        goto fail4;
-
     d->iomem_caps = rangeset_new(d, "I/O Memory", RANGESETF_prettyprint_hex);
     d->irq_caps   = rangeset_new(d, "Interrupts", 0);
     if ( (d->iomem_caps == NULL) || (d->irq_caps == NULL) )
-        goto fail4; /* NB. alloc_vcpu() is undone in free_domain() */
+        goto fail4;
 
     if ( !is_idle_domain(d) )
     {
@@ -327,11 +317,12 @@ void domain_shutdown(struct domain *d, u
     d->shutdown_code = reason;
 
     /* Put every vcpu to sleep, but don't wait (avoids inter-vcpu deadlock). */
+    spin_lock(&d->pause_lock);
+    d->pause_count++;
+    set_bit(_DOMF_paused, &d->domain_flags);
+    spin_unlock(&d->pause_lock);
     for_each_vcpu ( d, v )
-    {
-        atomic_inc(&v->pausecnt);
         vcpu_sleep_nosync(v);
-    }
 
     get_knownalive_domain(d);
     domain_shuttingdown[smp_processor_id()] = d;
@@ -398,34 +389,65 @@ void domain_destroy(struct domain *d)
 
 void vcpu_pause(struct vcpu *v)
 {
-    BUG_ON(v == current);
-    atomic_inc(&v->pausecnt);
+    ASSERT(v != current);
+
+    spin_lock(&v->pause_lock);
+    if ( v->pause_count++ == 0 )
+        set_bit(_VCPUF_paused, &v->vcpu_flags);
+    spin_unlock(&v->pause_lock);
+
     vcpu_sleep_sync(v);
 }
 
+void vcpu_unpause(struct vcpu *v)
+{
+    int wake;
+
+    ASSERT(v != current);
+
+    spin_lock(&v->pause_lock);
+    wake = (--v->pause_count == 0);
+    if ( wake )
+        clear_bit(_VCPUF_paused, &v->vcpu_flags);
+    spin_unlock(&v->pause_lock);
+
+    if ( wake )
+        vcpu_wake(v);
+}
+
 void domain_pause(struct domain *d)
 {
     struct vcpu *v;
 
+    ASSERT(d != current->domain);
+
+    spin_lock(&d->pause_lock);
+    if ( d->pause_count++ == 0 )
+        set_bit(_DOMF_paused, &d->domain_flags);
+    spin_unlock(&d->pause_lock);
+
     for_each_vcpu( d, v )
-        vcpu_pause(v);
+        vcpu_sleep_sync(v);
 
     sync_pagetable_state(d);
 }
 
-void vcpu_unpause(struct vcpu *v)
-{
-    BUG_ON(v == current);
-    if ( atomic_dec_and_test(&v->pausecnt) )
-        vcpu_wake(v);
-}
-
 void domain_unpause(struct domain *d)
 {
     struct vcpu *v;
+    int wake;
 
-    for_each_vcpu( d, v )
-        vcpu_unpause(v);
+    ASSERT(d != current->domain);
+
+    spin_lock(&d->pause_lock);
+    wake = (--d->pause_count == 0);
+    if ( wake )
+        clear_bit(_DOMF_paused, &d->domain_flags);
+    spin_unlock(&d->pause_lock);
+
+    if ( wake )
+        for_each_vcpu( d, v )
+            vcpu_wake(v);
 }
 
 void domain_pause_by_systemcontroller(struct domain *d)
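For illustration only, a sketch of the counted pause semantics introduced above (the helper name example_nested_pause is hypothetical): pause requests now nest, and only the unpause that drops the count back to zero wakes the domain's vcpus.

/* Sketch only: not part of the changeset. */
static void example_nested_pause(struct domain *d)
{
    domain_pause(d);      /* pause_count 0 -> 1, sets _DOMF_paused        */
    domain_pause(d);      /* pause_count 1 -> 2, flag already set         */
    domain_unpause(d);    /* pause_count 2 -> 1, domain stays paused      */
    domain_unpause(d);    /* pause_count 1 -> 0, clears flag, wakes vcpus */
}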
--- a/xen/common/event_channel.c  Wed Jul 05 11:31:33 2006 +0100
+++ b/xen/common/event_channel.c  Wed Jul 05 14:27:27 2006 +0100
@@ -525,11 +525,16 @@ void send_guest_vcpu_virq(struct vcpu *v
 void send_guest_global_virq(struct domain *d, int virq)
 {
     int port;
+    struct vcpu *v;
     struct evtchn *chn;
 
     ASSERT(virq_is_global(virq));
 
-    port = d->vcpu[0]->virq_to_evtchn[virq];
+    v = d->vcpu[0];
+    if ( unlikely(v == NULL) )
+        return;
+
+    port = v->virq_to_evtchn[virq];
     if ( unlikely(port == 0) )
         return;
--- a/xen/common/sched_sedf.c  Wed Jul 05 11:31:33 2006 +0100
+++ b/xen/common/sched_sedf.c  Wed Jul 05 14:27:27 2006 +0100
@@ -1429,6 +1429,8 @@ static int sedf_adjdom(struct domain *p,
     }
     else if ( cmd->direction == SCHED_INFO_GET )
     {
+        if ( p->vcpu[0] == NULL )
+            return -EINVAL;
         cmd->u.sedf.period    = EDOM_INFO(p->vcpu[0])->period;
         cmd->u.sedf.slice     = EDOM_INFO(p->vcpu[0])->slice;
         cmd->u.sedf.extratime = EDOM_INFO(p->vcpu[0])->status & EXTRA_AWARE;
--- a/xen/drivers/char/console.c  Wed Jul 05 11:31:33 2006 +0100
+++ b/xen/drivers/char/console.c  Wed Jul 05 14:27:27 2006 +0100
@@ -279,7 +279,7 @@ static void switch_serial_input(void)
 {
     static char *input_str[2] = { "DOM0", "Xen" };
     xen_rx = !xen_rx;
-    if ( SWITCH_CODE != 0 )
+    if ( (SWITCH_CODE != 0) && (dom0 != NULL) )
     {
         printk("*** Serial input -> %s "
                "(type 'CTRL-%c' three times to switch input to %s).\n",
--- a/xen/include/xen/sched.h  Wed Jul 05 11:31:33 2006 +0100
+++ b/xen/include/xen/sched.h  Wed Jul 05 14:27:27 2006 +0100
@@ -78,9 +78,10 @@ struct vcpu
 
     unsigned long    vcpu_flags;
 
-    u16              virq_to_evtchn[NR_VIRQS];
+    spinlock_t       pause_lock;
+    unsigned int     pause_count;
 
-    atomic_t         pausecnt;
+    u16              virq_to_evtchn[NR_VIRQS];
 
     /* Bitmask of CPUs on which this VCPU may run. */
     cpumask_t        cpu_affinity;
@@ -141,6 +142,10 @@ struct domain
     struct rangeset *irq_caps;
 
     unsigned long    domain_flags;
+
+    spinlock_t       pause_lock;
+    unsigned int     pause_count;
+
     unsigned long    vm_assist;
 
     atomic_t         refcnt;
@@ -220,8 +225,7 @@ static inline void get_knownalive_domain
     ASSERT(!(atomic_read(&d->refcnt) & DOMAIN_DESTROYED));
 }
 
-extern struct domain *domain_create(
-    domid_t domid, unsigned int cpu);
+extern struct domain *domain_create(domid_t domid);
 extern int construct_dom0(
     struct domain *d,
     unsigned long image_start, unsigned long image_len,
@@ -368,6 +372,9 @@ extern struct domain *domain_list;
  /* VCPU is polling a set of event channels (SCHEDOP_poll). */
 #define _VCPUF_polling         10
 #define VCPUF_polling          (1UL<<_VCPUF_polling)
+ /* VCPU is paused by the hypervisor? */
+#define _VCPUF_paused          11
+#define VCPUF_paused           (1UL<<_VCPUF_paused)
 
 /*
  * Per-domain flags (domain_flags).
@@ -390,12 +397,16 @@ extern struct domain *domain_list;
  /* Are any VCPUs polling event channels (SCHEDOP_poll)? */
 #define _DOMF_polling          5
 #define DOMF_polling           (1UL<<_DOMF_polling)
+ /* Domain is paused by the hypervisor? */
+#define _DOMF_paused           6
+#define DOMF_paused            (1UL<<_DOMF_paused)
 
 static inline int vcpu_runnable(struct vcpu *v)
 {
-    return ( (atomic_read(&v->pausecnt) == 0) &&
-             !(v->vcpu_flags & (VCPUF_blocked|VCPUF_down)) &&
-             !(v->domain->domain_flags & (DOMF_shutdown|DOMF_ctrl_pause)) );
+    return ( !(v->vcpu_flags &
+               (VCPUF_blocked|VCPUF_down|VCPUF_paused)) &&
+             !(v->domain->domain_flags &
+               (DOMF_shutdown|DOMF_ctrl_pause|DOMF_paused)) );
 }
 
 void vcpu_pause(struct vcpu *v);