debuggers.hg
changeset 14695:3681f91a91e8
Merge
author | Tim Deegan <Tim.Deegan@xensource.com> |
---|---|
date | Thu Mar 29 16:28:34 2007 +0000 (2007-03-29) |
parents | e3dc8cea5bc0 d93a48816328 |
children | f830c5719e74 |
files | xen/arch/x86/hvm/hvm.c |
line diff
1.1 --- a/tools/python/xen/xm/main.py Thu Mar 29 16:27:52 2007 +0000 1.2 +++ b/tools/python/xen/xm/main.py Thu Mar 29 16:28:34 2007 +0000 1.3 @@ -560,11 +560,21 @@ def err(msg): 1.4 def get_single_vm(dom): 1.5 if serverType == SERVER_XEN_API: 1.6 uuids = server.xenapi.VM.get_by_name_label(dom) 1.7 - n = len(uuids) 1.8 - if n > 0: 1.9 + if len(uuids) > 0: 1.10 return uuids[0] 1.11 - else: 1.12 - raise OptionError("Domain '%s' not found." % dom) 1.13 + 1.14 + try: 1.15 + domid = int(dom) 1.16 + uuids = [server.xenapi.VM.get_domid(vm_ref) 1.17 + for vm_ref in server.xenapi.VM.get_all() 1.18 + if int(server.xenapi.VM.get_domid(vm_ref)) == domid] 1.19 + except: 1.20 + pass 1.21 + 1.22 + if len(uuids) > 0: 1.23 + return uuids[0] 1.24 + 1.25 + raise OptionError("Domain '%s' not found." % dom) 1.26 else: 1.27 dominfo = server.xend.domain(dom, False) 1.28 return dominfo['uuid']
2.1 --- a/xen/arch/ia64/vmx/vlsapic.c Thu Mar 29 16:27:52 2007 +0000 2.2 +++ b/xen/arch/ia64/vmx/vlsapic.c Thu Mar 29 16:28:34 2007 +0000 2.3 @@ -692,7 +692,7 @@ static void vlsapic_write_ipi(VCPU *vcpu 2.4 if (targ == NULL) 2.5 panic_domain(NULL, "Unknown IPI cpu\n"); 2.6 2.7 - if (!test_bit(_VCPUF_initialised, &targ->vcpu_flags) || 2.8 + if (!targ->is_initialised || 2.9 test_bit(_VCPUF_down, &targ->vcpu_flags)) { 2.10 2.11 struct pt_regs *targ_regs = vcpu_regs(targ); 2.12 @@ -717,7 +717,7 @@ static void vlsapic_write_ipi(VCPU *vcpu 2.13 printk("arch_boot_vcpu: huh, already awake!"); 2.14 } 2.15 } else { 2.16 - int running = test_bit(_VCPUF_running, &targ->vcpu_flags); 2.17 + int running = targ->is_running; 2.18 vlsapic_deliver_ipi(targ, ((ipi_d_t)value).dm, 2.19 ((ipi_d_t)value).vector); 2.20 vcpu_unblock(targ);
3.1 --- a/xen/arch/ia64/vmx/vmmu.c Thu Mar 29 16:27:52 2007 +0000 3.2 +++ b/xen/arch/ia64/vmx/vmmu.c Thu Mar 29 16:28:34 2007 +0000 3.3 @@ -598,7 +598,7 @@ IA64FAULT vmx_vcpu_ptc_ga(VCPU *vcpu, u6 3.4 vcpu_get_rr(vcpu, va, &args.rid); 3.5 args.ps = ps; 3.6 for_each_vcpu (d, v) { 3.7 - if (!test_bit(_VCPUF_initialised, &v->vcpu_flags)) 3.8 + if (!v->is_initialised) 3.9 continue; 3.10 3.11 args.vcpu = v;
4.1 --- a/xen/arch/ia64/xen/domain.c Thu Mar 29 16:27:52 2007 +0000 4.2 +++ b/xen/arch/ia64/xen/domain.c Thu Mar 29 16:28:34 2007 +0000 4.3 @@ -657,7 +657,7 @@ int arch_set_info_guest(struct vcpu *v, 4.4 v->arch.iva = er->iva; 4.5 } 4.6 4.7 - if (test_bit(_VCPUF_initialised, &v->vcpu_flags)) 4.8 + if (v->is_initialised) 4.9 return 0; 4.10 4.11 if (d->arch.is_vti) { 4.12 @@ -676,10 +676,12 @@ int arch_set_info_guest(struct vcpu *v, 4.13 /* This overrides some registers. */ 4.14 vcpu_init_regs(v); 4.15 4.16 - /* Don't redo final setup. Auto-online VCPU0. */ 4.17 - if (!test_and_set_bit(_VCPUF_initialised, &v->vcpu_flags) && 4.18 - (v->vcpu_id == 0)) 4.19 - clear_bit(_VCPUF_down, &v->vcpu_flags); 4.20 + if (!v->is_initialised) { 4.21 + v->is_initialised = 1; 4.22 + /* Auto-online VCPU0 when it is initialised. */ 4.23 + if (v->vcpu_id == 0) 4.24 + clear_bit(_VCPUF_down, &v->vcpu_flags); 4.25 + } 4.26 4.27 return 0; 4.28 } 4.29 @@ -1067,7 +1069,7 @@ int construct_dom0(struct domain *d, 4.30 /* Sanity! */ 4.31 BUG_ON(d != dom0); 4.32 BUG_ON(d->vcpu[0] == NULL); 4.33 - BUG_ON(test_bit(_VCPUF_initialised, &v->vcpu_flags)); 4.34 + BUG_ON(v->is_initialised); 4.35 4.36 printk("*** LOADING DOMAIN 0 ***\n"); 4.37 4.38 @@ -1188,7 +1190,7 @@ int construct_dom0(struct domain *d, 4.39 4.40 printk("Dom0: 0x%lx\n", (u64)dom0); 4.41 4.42 - set_bit(_VCPUF_initialised, &v->vcpu_flags); 4.43 + v->is_initialised = 1; 4.44 clear_bit(_VCPUF_down, &v->vcpu_flags); 4.45 4.46 /* Build firmware.
5.1 --- a/xen/arch/ia64/xen/hypercall.c Thu Mar 29 16:27:52 2007 +0000 5.2 +++ b/xen/arch/ia64/xen/hypercall.c Thu Mar 29 16:28:34 2007 +0000 5.3 @@ -81,11 +81,11 @@ fw_hypercall_ipi (struct pt_regs *regs) 5.4 return; 5.5 5.6 if (vector == XEN_SAL_BOOT_RENDEZ_VEC 5.7 - && (!test_bit(_VCPUF_initialised, &targ->vcpu_flags) 5.8 + && (!targ->is_initialised 5.9 || test_bit(_VCPUF_down, &targ->vcpu_flags))) { 5.10 5.11 /* First start: initialize vpcu. */ 5.12 - if (!test_bit(_VCPUF_initialised, &targ->vcpu_flags)) { 5.13 + if (!targ->is_initialised) { 5.14 struct vcpu_guest_context c; 5.15 5.16 memset (&c, 0, sizeof (c)); 5.17 @@ -112,9 +112,7 @@ fw_hypercall_ipi (struct pt_regs *regs) 5.18 printk ("arch_boot_vcpu: huu, already awaken!\n"); 5.19 } 5.20 else { 5.21 - int running = test_bit(_VCPUF_running, 5.22 - &targ->vcpu_flags); 5.23 - 5.24 + int running = targ->is_running; 5.25 vcpu_pend_interrupt(targ, vector); 5.26 vcpu_unblock(targ); 5.27 if (running)
6.1 --- a/xen/arch/ia64/xen/vhpt.c Thu Mar 29 16:27:52 2007 +0000 6.2 +++ b/xen/arch/ia64/xen/vhpt.c Thu Mar 29 16:28:34 2007 +0000 6.3 @@ -184,7 +184,7 @@ domain_purge_swtc_entries(struct domain 6.4 { 6.5 struct vcpu* v; 6.6 for_each_vcpu(d, v) { 6.7 - if (!test_bit(_VCPUF_initialised, &v->vcpu_flags)) 6.8 + if (!v->is_initialised) 6.9 continue; 6.10 6.11 /* Purge TC entries. 6.12 @@ -202,7 +202,7 @@ domain_purge_swtc_entries_vcpu_dirty_mas 6.13 6.14 for_each_vcpu_mask(vcpu, vcpu_dirty_mask) { 6.15 struct vcpu* v = d->vcpu[vcpu]; 6.16 - if (!test_bit(_VCPUF_initialised, &v->vcpu_flags)) 6.17 + if (!v->is_initialised) 6.18 continue; 6.19 6.20 /* Purge TC entries. 6.21 @@ -263,7 +263,7 @@ void domain_flush_vtlb_all(struct domain 6.22 struct vcpu *v; 6.23 6.24 for_each_vcpu(d, v) { 6.25 - if (!test_bit(_VCPUF_initialised, &v->vcpu_flags)) 6.26 + if (!v->is_initialised) 6.27 continue; 6.28 6.29 if (v->processor == cpu) 6.30 @@ -341,7 +341,7 @@ void domain_flush_vtlb_range (struct dom 6.31 smp_mb(); 6.32 6.33 for_each_vcpu (d, v) { 6.34 - if (!test_bit(_VCPUF_initialised, &v->vcpu_flags)) 6.35 + if (!v->is_initialised) 6.36 continue; 6.37 6.38 if (HAS_PERVCPU_VHPT(d)) { 6.39 @@ -407,7 +407,7 @@ void 6.40 if (HAS_PERVCPU_VHPT(d)) { 6.41 for_each_vcpu_mask(vcpu, entry->vcpu_dirty_mask) { 6.42 v = d->vcpu[vcpu]; 6.43 - if (!test_bit(_VCPUF_initialised, &v->vcpu_flags)) 6.44 + if (!v->is_initialised) 6.45 continue; 6.46 6.47 /* Invalidate VHPT entries. */
7.1 --- a/xen/arch/powerpc/domain.c Thu Mar 29 16:27:52 2007 +0000 7.2 +++ b/xen/arch/powerpc/domain.c Thu Mar 29 16:28:34 2007 +0000 7.3 @@ -168,10 +168,13 @@ int arch_set_info_guest(struct vcpu *v, 7.4 d->shared_info->wc_nsec = dom0->shared_info->wc_nsec; 7.5 d->shared_info->arch.boot_timebase = dom0->shared_info->arch.boot_timebase; 7.6 7.7 - /* Auto-online VCPU0 when it is initialised. */ 7.8 - if ( !test_and_set_bit(_VCPUF_initialised, &v->vcpu_flags) && 7.9 - (v->vcpu_id == 0) ) 7.10 - clear_bit(_VCPUF_down, &v->vcpu_flags); 7.11 + if ( !v->is_initialised ) 7.12 + { 7.13 + v->is_initialised = 1; 7.14 + /* Auto-online VCPU0 when it is initialised. */ 7.15 + if ( v->vcpu_id == 0 ) 7.16 + clear_bit(_VCPUF_down, &v->vcpu_flags); 7.17 + } 7.18 7.19 cpu_init_vcpu(v); 7.20
8.1 --- a/xen/arch/powerpc/domain_build.c Thu Mar 29 16:27:52 2007 +0000 8.2 +++ b/xen/arch/powerpc/domain_build.c Thu Mar 29 16:28:34 2007 +0000 8.3 @@ -273,7 +273,7 @@ int construct_dom0(struct domain *d, 8.4 8.5 ofd_dom0_fixup(d, *ofh_tree + rma, cmdline, shared_info_addr); 8.6 8.7 - set_bit(_VCPUF_initialised, &v->vcpu_flags); 8.8 + v->is_initialised = 1; 8.9 clear_bit(_VCPUF_down, &v->vcpu_flags); 8.10 8.11 rc = 0;
9.1 --- a/xen/arch/x86/domain.c Thu Mar 29 16:27:52 2007 +0000 9.2 +++ b/xen/arch/x86/domain.c Thu Mar 29 16:28:34 2007 +0000 9.3 @@ -563,9 +563,7 @@ int arch_set_info_guest( 9.4 #endif 9.5 } 9.6 9.7 - clear_bit(_VCPUF_fpu_initialised, &v->vcpu_flags); 9.8 - if ( flags & VGCF_I387_VALID ) 9.9 - set_bit(_VCPUF_fpu_initialised, &v->vcpu_flags); 9.10 + v->fpu_initialised = !!(flags & VGCF_I387_VALID); 9.11 9.12 v->arch.flags &= ~TF_kernel_mode; 9.13 if ( (flags & VGCF_in_kernel) || is_hvm_vcpu(v)/*???*/ ) 9.14 @@ -600,7 +598,7 @@ int arch_set_info_guest( 9.15 hvm_load_cpu_guest_regs(v, &v->arch.guest_context.user_regs); 9.16 } 9.17 9.18 - if ( test_bit(_VCPUF_initialised, &v->vcpu_flags) ) 9.19 + if ( v->is_initialised ) 9.20 goto out; 9.21 9.22 memset(v->arch.guest_context.debugreg, 0, 9.23 @@ -699,7 +697,7 @@ int arch_set_info_guest( 9.24 update_domain_wallclock_time(d); 9.25 9.26 /* Don't redo final setup */ 9.27 - set_bit(_VCPUF_initialised, &v->vcpu_flags); 9.28 + v->is_initialised = 1; 9.29 9.30 if ( paging_mode_enabled(d) ) 9.31 paging_update_paging_modes(v);
10.1 --- a/xen/arch/x86/domain_build.c Thu Mar 29 16:27:52 2007 +0000 10.2 +++ b/xen/arch/x86/domain_build.c Thu Mar 29 16:28:34 2007 +0000 10.3 @@ -254,7 +254,7 @@ int construct_dom0(struct domain *d, 10.4 /* Sanity! */ 10.5 BUG_ON(d->domain_id != 0); 10.6 BUG_ON(d->vcpu[0] == NULL); 10.7 - BUG_ON(test_bit(_VCPUF_initialised, &v->vcpu_flags)); 10.8 + BUG_ON(v->is_initialised); 10.9 10.10 printk("*** LOADING DOMAIN 0 ***\n"); 10.11 10.12 @@ -901,7 +901,7 @@ int construct_dom0(struct domain *d, 10.13 10.14 update_domain_wallclock_time(d); 10.15 10.16 - set_bit(_VCPUF_initialised, &v->vcpu_flags); 10.17 + v->is_initialised = 1; 10.18 clear_bit(_VCPUF_down, &v->vcpu_flags); 10.19 10.20 /*
11.1 --- a/xen/arch/x86/domctl.c Thu Mar 29 16:27:52 2007 +0000 11.2 +++ b/xen/arch/x86/domctl.c Thu Mar 29 16:28:34 2007 +0000 11.3 @@ -448,7 +448,7 @@ void arch_get_info_guest(struct vcpu *v, 11.4 #endif 11.5 11.6 c(flags &= ~(VGCF_i387_valid|VGCF_in_kernel)); 11.7 - if ( test_bit(_VCPUF_fpu_initialised, &v->vcpu_flags) ) 11.8 + if ( v->fpu_initialised ) 11.9 c(flags |= VGCF_i387_valid); 11.10 if ( !test_bit(_VCPUF_down, &v->vcpu_flags) ) 11.11 c(flags |= VGCF_online);
12.1 --- a/xen/arch/x86/hvm/hvm.c Thu Mar 29 16:27:52 2007 +0000 12.2 +++ b/xen/arch/x86/hvm/hvm.c Thu Mar 29 16:28:34 2007 +0000 12.3 @@ -85,7 +85,7 @@ void hvm_disable(void) 12.4 void hvm_stts(struct vcpu *v) 12.5 { 12.6 /* FPU state already dirty? Then no need to setup_fpu() lazily. */ 12.7 - if ( !test_bit(_VCPUF_fpu_dirtied, &v->vcpu_flags) ) 12.8 + if ( !v->fpu_dirtied ) 12.9 hvm_funcs.stts(v); 12.10 } 12.11 12.12 @@ -332,10 +332,10 @@ void hvm_vcpu_reset(struct vcpu *v) 12.13 hvm_funcs.vcpu_initialise(v); 12.14 12.15 set_bit(_VCPUF_down, &v->vcpu_flags); 12.16 - clear_bit(_VCPUF_initialised, &v->vcpu_flags); 12.17 - clear_bit(_VCPUF_fpu_initialised, &v->vcpu_flags); 12.18 - clear_bit(_VCPUF_fpu_dirtied, &v->vcpu_flags); 12.19 clear_bit(_VCPUF_blocked, &v->vcpu_flags); 12.20 + v->fpu_initialised = 0; 12.21 + v->fpu_dirtied = 0; 12.22 + v->is_initialised = 0; 12.23 12.24 vcpu_unpause(v); 12.25 } 12.26 @@ -723,7 +723,7 @@ int hvm_bringup_ap(int vcpuid, int tramp 12.27 12.28 LOCK_BIGLOCK(d); 12.29 rc = -EEXIST; 12.30 - if ( !test_bit(_VCPUF_initialised, &v->vcpu_flags) ) 12.31 + if ( !v->is_initialised ) 12.32 rc = boot_vcpu(d, vcpuid, ctxt); 12.33 UNLOCK_BIGLOCK(d); 12.34
13.1 --- a/xen/arch/x86/hvm/vlapic.c Thu Mar 29 16:27:52 2007 +0000 13.2 +++ b/xen/arch/x86/hvm/vlapic.c Thu Mar 29 16:28:34 2007 +0000 13.3 @@ -303,7 +303,7 @@ static int vlapic_accept_irq(struct vcpu 13.4 if ( trig_mode && !(level & APIC_INT_ASSERT) ) 13.5 break; 13.6 /* FIXME How to check the situation after vcpu reset? */ 13.7 - if ( test_bit(_VCPUF_initialised, &v->vcpu_flags) ) 13.8 + if ( v->is_initialised ) 13.9 hvm_vcpu_reset(v); 13.10 v->arch.hvm_vcpu.init_sipi_sipi_state = 13.11 HVM_VCPU_INIT_SIPI_SIPI_STATE_WAIT_SIPI; 13.12 @@ -318,7 +318,7 @@ static int vlapic_accept_irq(struct vcpu 13.13 v->arch.hvm_vcpu.init_sipi_sipi_state = 13.14 HVM_VCPU_INIT_SIPI_SIPI_STATE_NORM; 13.15 13.16 - if ( test_bit(_VCPUF_initialised, &v->vcpu_flags) ) 13.17 + if ( v->is_initialised ) 13.18 { 13.19 gdprintk(XENLOG_ERR, "SIPI for initialized vcpu %x\n", v->vcpu_id); 13.20 goto exit_and_crash;
14.1 --- a/xen/arch/x86/i387.c Thu Mar 29 16:27:52 2007 +0000 14.2 +++ b/xen/arch/x86/i387.c Thu Mar 29 16:28:34 2007 +0000 14.3 @@ -21,7 +21,7 @@ void init_fpu(void) 14.4 __asm__ __volatile__ ( "fninit" ); 14.5 if ( cpu_has_xmm ) 14.6 load_mxcsr(0x1f80); 14.7 - set_bit(_VCPUF_fpu_initialised, ¤t->vcpu_flags); 14.8 + current->fpu_initialised = 1; 14.9 } 14.10 14.11 void save_init_fpu(struct vcpu *v) 14.12 @@ -76,7 +76,7 @@ void save_init_fpu(struct vcpu *v) 14.13 : "=m" (*fpu_ctxt) ); 14.14 } 14.15 14.16 - clear_bit(_VCPUF_fpu_dirtied, &v->vcpu_flags); 14.17 + v->fpu_dirtied = 0; 14.18 write_cr0(cr0|X86_CR0_TS); 14.19 } 14.20
15.1 --- a/xen/arch/x86/mm.c Thu Mar 29 16:27:52 2007 +0000 15.2 +++ b/xen/arch/x86/mm.c Thu Mar 29 16:28:34 2007 +0000 15.3 @@ -1089,7 +1089,7 @@ static int alloc_l3_table(struct page_in 15.4 */ 15.5 if ( (pfn >= 0x100000) && 15.6 unlikely(!VM_ASSIST(d, VMASST_TYPE_pae_extended_cr3)) && 15.7 - d->vcpu[0] && test_bit(_VCPUF_initialised, &d->vcpu[0]->vcpu_flags) ) 15.8 + d->vcpu[0] && d->vcpu[0]->is_initialised ) 15.9 { 15.10 MEM_LOG("PAE pgd must be below 4GB (0x%lx >= 0x100000)", pfn); 15.11 return 0;
16.1 --- a/xen/arch/x86/mm/hap/hap.c Thu Mar 29 16:27:52 2007 +0000 16.2 +++ b/xen/arch/x86/mm/hap/hap.c Thu Mar 29 16:28:34 2007 +0000 16.3 @@ -569,7 +569,8 @@ void hap_update_cr3(struct vcpu *v, int 16.4 16.5 HERE_I_AM; 16.6 /* Don't do anything on an uninitialised vcpu */ 16.7 - if ( !is_hvm_domain(d) && !test_bit(_VCPUF_initialised, &v->vcpu_flags) ) { 16.8 + if ( !is_hvm_domain(d) && !v->is_initialised ) 16.9 + { 16.10 ASSERT(v->arch.cr3 == 0); 16.11 return; 16.12 }
17.1 --- a/xen/arch/x86/mm/shadow/multi.c Thu Mar 29 16:27:52 2007 +0000 17.2 +++ b/xen/arch/x86/mm/shadow/multi.c Thu Mar 29 16:28:34 2007 +0000 17.3 @@ -3427,7 +3427,7 @@ sh_update_cr3(struct vcpu *v, int do_loc 17.4 #endif 17.5 17.6 /* Don't do anything on an uninitialised vcpu */ 17.7 - if ( !is_hvm_domain(d) && !test_bit(_VCPUF_initialised, &v->vcpu_flags) ) 17.8 + if ( !is_hvm_domain(d) && !v->is_initialised ) 17.9 { 17.10 ASSERT(v->arch.cr3 == 0); 17.11 return;
18.1 --- a/xen/arch/x86/traps.c Thu Mar 29 16:27:52 2007 +0000 18.2 +++ b/xen/arch/x86/traps.c Thu Mar 29 16:28:34 2007 +0000 18.3 @@ -1030,7 +1030,7 @@ long do_fpu_taskswitch(int set) 18.4 else 18.5 { 18.6 v->arch.guest_context.ctrlreg[0] &= ~X86_CR0_TS; 18.7 - if ( test_bit(_VCPUF_fpu_dirtied, &v->vcpu_flags) ) 18.8 + if ( v->fpu_dirtied ) 18.9 clts(); 18.10 } 18.11
19.1 --- a/xen/common/compat/domain.c Thu Mar 29 16:27:52 2007 +0000 19.2 +++ b/xen/common/compat/domain.c Thu Mar 29 16:28:34 2007 +0000 19.3 @@ -44,7 +44,7 @@ int compat_vcpu_op(int cmd, int vcpuid, 19.4 19.5 LOCK_BIGLOCK(d); 19.6 rc = -EEXIST; 19.7 - if ( !test_bit(_VCPUF_initialised, &v->vcpu_flags) ) 19.8 + if ( !v->is_initialised ) 19.9 rc = boot_vcpu(d, vcpuid, cmp_ctxt); 19.10 UNLOCK_BIGLOCK(d); 19.11
20.1 --- a/xen/common/domain.c Thu Mar 29 16:27:52 2007 +0000 20.2 +++ b/xen/common/domain.c Thu Mar 29 16:28:34 2007 +0000 20.3 @@ -484,7 +484,7 @@ int boot_vcpu(struct domain *d, int vcpu 20.4 { 20.5 struct vcpu *v = d->vcpu[vcpuid]; 20.6 20.7 - BUG_ON(test_bit(_VCPUF_initialised, &v->vcpu_flags)); 20.8 + BUG_ON(v->is_initialised); 20.9 20.10 return arch_set_info_guest(v, ctxt); 20.11 } 20.12 @@ -503,13 +503,13 @@ int vcpu_reset(struct vcpu *v) 20.13 20.14 set_bit(_VCPUF_down, &v->vcpu_flags); 20.15 20.16 - clear_bit(_VCPUF_fpu_initialised, &v->vcpu_flags); 20.17 - clear_bit(_VCPUF_fpu_dirtied, &v->vcpu_flags); 20.18 + v->fpu_initialised = 0; 20.19 + v->fpu_dirtied = 0; 20.20 + v->is_polling = 0; 20.21 + v->is_initialised = 0; 20.22 clear_bit(_VCPUF_blocked, &v->vcpu_flags); 20.23 - clear_bit(_VCPUF_initialised, &v->vcpu_flags); 20.24 clear_bit(_VCPUF_nmi_pending, &v->vcpu_flags); 20.25 clear_bit(_VCPUF_nmi_masked, &v->vcpu_flags); 20.26 - clear_bit(_VCPUF_polling, &v->vcpu_flags); 20.27 20.28 out: 20.29 UNLOCK_BIGLOCK(v->domain); 20.30 @@ -546,7 +546,7 @@ long do_vcpu_op(int cmd, int vcpuid, XEN 20.31 20.32 LOCK_BIGLOCK(d); 20.33 rc = -EEXIST; 20.34 - if ( !test_bit(_VCPUF_initialised, &v->vcpu_flags) ) 20.35 + if ( !v->is_initialised ) 20.36 rc = boot_vcpu(d, vcpuid, ctxt); 20.37 UNLOCK_BIGLOCK(d); 20.38 20.39 @@ -554,7 +554,7 @@ long do_vcpu_op(int cmd, int vcpuid, XEN 20.40 break; 20.41 20.42 case VCPUOP_up: 20.43 - if ( !test_bit(_VCPUF_initialised, &v->vcpu_flags) ) 20.44 + if ( !v->is_initialised ) 20.45 return -EINVAL; 20.46 20.47 if ( test_and_clear_bit(_VCPUF_down, &v->vcpu_flags) )
21.1 --- a/xen/common/domctl.c Thu Mar 29 16:27:52 2007 +0000 21.2 +++ b/xen/common/domctl.c Thu Mar 29 16:28:34 2007 +0000 21.3 @@ -105,7 +105,7 @@ void getdomaininfo(struct domain *d, str 21.4 { 21.5 if ( !(v->vcpu_flags & VCPUF_blocked) ) 21.6 flags &= ~XEN_DOMINF_blocked; 21.7 - if ( v->vcpu_flags & VCPUF_running ) 21.8 + if ( v->is_running ) 21.9 flags |= XEN_DOMINF_running; 21.10 info->nr_online_vcpus++; 21.11 } 21.12 @@ -517,7 +517,7 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domc 21.13 goto getvcpucontext_out; 21.14 21.15 ret = -ENODATA; 21.16 - if ( !test_bit(_VCPUF_initialised, &v->vcpu_flags) ) 21.17 + if ( !v->is_initialised ) 21.18 goto getvcpucontext_out; 21.19 21.20 #ifdef CONFIG_COMPAT 21.21 @@ -576,7 +576,7 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domc 21.22 21.23 op->u.getvcpuinfo.online = !test_bit(_VCPUF_down, &v->vcpu_flags); 21.24 op->u.getvcpuinfo.blocked = test_bit(_VCPUF_blocked, &v->vcpu_flags); 21.25 - op->u.getvcpuinfo.running = test_bit(_VCPUF_running, &v->vcpu_flags); 21.26 + op->u.getvcpuinfo.running = v->is_running; 21.27 op->u.getvcpuinfo.cpu_time = runstate.time[RUNSTATE_running]; 21.28 op->u.getvcpuinfo.cpu = v->processor; 21.29 ret = 0;
22.1 --- a/xen/common/event_channel.c Thu Mar 29 16:27:52 2007 +0000 22.2 +++ b/xen/common/event_channel.c Thu Mar 29 16:28:34 2007 +0000 22.3 @@ -529,11 +529,17 @@ void evtchn_set_pending(struct vcpu *v, 22.4 } 22.5 22.6 /* Check if some VCPU might be polling for this event. */ 22.7 - if ( unlikely(d->is_polling) && likely(xchg(&d->is_polling, 0)) ) 22.8 + if ( unlikely(d->is_polling) ) 22.9 { 22.10 + d->is_polling = 0; 22.11 + smp_mb(); /* check vcpu poll-flags /after/ clearing domain poll-flag */ 22.12 for_each_vcpu ( d, v ) 22.13 - if ( test_and_clear_bit(_VCPUF_polling, &v->vcpu_flags) ) 22.14 - vcpu_unblock(v); 22.15 + { 22.16 + if ( !v->is_polling ) 22.17 + continue; 22.18 + v->is_polling = 0; 22.19 + vcpu_unblock(v); 22.20 + } 22.21 } 22.22 } 22.23
23.1 --- a/xen/common/keyhandler.c Thu Mar 29 16:27:52 2007 +0000 23.2 +++ b/xen/common/keyhandler.c Thu Mar 29 16:28:34 2007 +0000 23.3 @@ -188,7 +188,7 @@ static void dump_domains(unsigned char k 23.4 printk(" VCPU%d: CPU%d [has=%c] flags=%lx " 23.5 "upcall_pend = %02x, upcall_mask = %02x ", 23.6 v->vcpu_id, v->processor, 23.7 - test_bit(_VCPUF_running, &v->vcpu_flags) ? 'T':'F', 23.8 + v->is_running ? 'T':'F', 23.9 v->vcpu_flags, 23.10 vcpu_info(v, evtchn_upcall_pending), 23.11 vcpu_info(v, evtchn_upcall_mask));
24.1 --- a/xen/common/sched_credit.c Thu Mar 29 16:27:52 2007 +0000 24.2 +++ b/xen/common/sched_credit.c Thu Mar 29 16:28:34 2007 +0000 24.3 @@ -411,8 +411,7 @@ static inline int 24.4 * Don't pick up work that's in the peer's scheduling tail. Also only pick 24.5 * up work that's allowed to run on our CPU. 24.6 */ 24.7 - return !test_bit(_VCPUF_running, &vc->vcpu_flags) && 24.8 - cpu_isset(dest_cpu, vc->cpu_affinity); 24.9 + return !vc->is_running && cpu_isset(dest_cpu, vc->cpu_affinity); 24.10 } 24.11 24.12 static int
25.1 --- a/xen/common/sched_sedf.c Thu Mar 29 16:27:52 2007 +0000 25.2 +++ b/xen/common/sched_sedf.c Thu Mar 29 16:28:34 2007 +0000 25.3 @@ -1189,7 +1189,7 @@ void sedf_wake(struct vcpu *d) 25.4 static void sedf_dump_domain(struct vcpu *d) 25.5 { 25.6 printk("%i.%i has=%c ", d->domain->domain_id, d->vcpu_id, 25.7 - test_bit(_VCPUF_running, &d->vcpu_flags) ? 'T':'F'); 25.8 + d->is_running ? 'T':'F'); 25.9 printk("p=%"PRIu64" sl=%"PRIu64" ddl=%"PRIu64" w=%hu" 25.10 " sc=%i xtr(%s)=%"PRIu64" ew=%hu", 25.11 EDOM_INFO(d)->period, EDOM_INFO(d)->slice, EDOM_INFO(d)->deadl_abs,
26.1 --- a/xen/common/schedule.c Thu Mar 29 16:27:52 2007 +0000 26.2 +++ b/xen/common/schedule.c Thu Mar 29 16:28:34 2007 +0000 26.3 @@ -123,7 +123,7 @@ int sched_init_vcpu(struct vcpu *v, unsi 26.4 { 26.5 per_cpu(schedule_data, v->processor).curr = v; 26.6 per_cpu(schedule_data, v->processor).idle = v; 26.7 - set_bit(_VCPUF_running, &v->vcpu_flags); 26.8 + v->is_running = 1; 26.9 } 26.10 26.11 TRACE_2D(TRC_SCHED_DOM_ADD, v->domain->domain_id, v->vcpu_id); 26.12 @@ -172,7 +172,7 @@ void vcpu_sleep_sync(struct vcpu *v) 26.13 { 26.14 vcpu_sleep_nosync(v); 26.15 26.16 - while ( !vcpu_runnable(v) && test_bit(_VCPUF_running, &v->vcpu_flags) ) 26.17 + while ( !vcpu_runnable(v) && v->is_running ) 26.18 cpu_relax(); 26.19 26.20 sync_vcpu_execstate(v); 26.21 @@ -208,7 +208,12 @@ static void vcpu_migrate(struct vcpu *v) 26.22 26.23 vcpu_schedule_lock_irqsave(v, flags); 26.24 26.25 - if ( test_bit(_VCPUF_running, &v->vcpu_flags) || 26.26 + /* 26.27 + * NB. Check of v->running happens /after/ setting migration flag 26.28 + * because they both happen in (different) spinlock regions, and those 26.29 + * regions are strictly serialised. 26.30 + */ 26.31 + if ( v->is_running || 26.32 !test_and_clear_bit(_VCPUF_migrating, &v->vcpu_flags) ) 26.33 { 26.34 vcpu_schedule_unlock_irqrestore(v, flags); 26.35 @@ -234,7 +239,7 @@ static void vcpu_migrate(struct vcpu *v) 26.36 void vcpu_force_reschedule(struct vcpu *v) 26.37 { 26.38 vcpu_schedule_lock_irq(v); 26.39 - if ( test_bit(_VCPUF_running, &v->vcpu_flags) ) 26.40 + if ( v->is_running ) 26.41 set_bit(_VCPUF_migrating, &v->vcpu_flags); 26.42 vcpu_schedule_unlock_irq(v); 26.43 26.44 @@ -310,14 +315,13 @@ static long do_poll(struct sched_poll *s 26.45 if ( !guest_handle_okay(sched_poll->ports, sched_poll->nr_ports) ) 26.46 return -EFAULT; 26.47 26.48 - /* These operations must occur in order. */ 26.49 set_bit(_VCPUF_blocked, &v->vcpu_flags); 26.50 - set_bit(_VCPUF_polling, &v->vcpu_flags); 26.51 - smp_wmb(); 26.52 + v->is_polling = 1; 26.53 d->is_polling = 1; 26.54 + 26.55 + /* Check for events /after/ setting flags: avoids wakeup waiting race. */ 26.56 smp_wmb(); 26.57 26.58 - /* Check for events /after/ setting flags: avoids wakeup waiting race. */ 26.59 for ( i = 0; i < sched_poll->nr_ports; i++ ) 26.60 { 26.61 rc = -EFAULT; 26.62 @@ -342,7 +346,7 @@ static long do_poll(struct sched_poll *s 26.63 return 0; 26.64 26.65 out: 26.66 - clear_bit(_VCPUF_polling, &v->vcpu_flags); 26.67 + v->is_polling = 0; 26.68 clear_bit(_VCPUF_blocked, &v->vcpu_flags); 26.69 return rc; 26.70 } 26.71 @@ -651,8 +655,8 @@ static void schedule(void) 26.72 ASSERT(next->runstate.state != RUNSTATE_running); 26.73 vcpu_runstate_change(next, RUNSTATE_running, now); 26.74 26.75 - ASSERT(!test_bit(_VCPUF_running, &next->vcpu_flags)); 26.76 - set_bit(_VCPUF_running, &next->vcpu_flags); 26.77 + ASSERT(!next->is_running); 26.78 + next->is_running = 1; 26.79 26.80 spin_unlock_irq(&sd->schedule_lock); 26.81 26.82 @@ -673,7 +677,13 @@ static void schedule(void) 26.83 26.84 void context_saved(struct vcpu *prev) 26.85 { 26.86 - clear_bit(_VCPUF_running, &prev->vcpu_flags); 26.87 + /* Clear running flag /after/ writing context to memory. */ 26.88 + smp_wmb(); 26.89 + 26.90 + prev->is_running = 0; 26.91 + 26.92 + /* Check for migration request /after/ clearing running flag. */ 26.93 + smp_mb(); 26.94 26.95 if ( unlikely(test_bit(_VCPUF_migrating, &prev->vcpu_flags)) ) 26.96 vcpu_migrate(prev); 26.97 @@ -704,8 +714,12 @@ static void vcpu_singleshot_timer_fn(voi 26.98 static void poll_timer_fn(void *data) 26.99 { 26.100 struct vcpu *v = data; 26.101 - if ( test_and_clear_bit(_VCPUF_polling, &v->vcpu_flags) ) 26.102 - vcpu_unblock(v); 26.103 + 26.104 + if ( !v->is_polling ) 26.105 + return; 26.106 + 26.107 + v->is_polling = 0; 26.108 + vcpu_unblock(v); 26.109 } 26.110 26.111 /* Initialise the data structures. */
27.1 --- a/xen/include/asm-ia64/event.h Thu Mar 29 16:27:52 2007 +0000 27.2 +++ b/xen/include/asm-ia64/event.h Thu Mar 29 16:28:34 2007 +0000 27.3 @@ -20,10 +20,10 @@ static inline void vcpu_kick(struct vcpu 27.4 * locks) but the key insight is that each change will cause 27.5 * evtchn_upcall_pending to be polled. 27.6 * 27.7 - * NB2. We save VCPUF_running across the unblock to avoid a needless 27.8 + * NB2. We save the running flag across the unblock to avoid a needless 27.9 * IPI for domains that we IPI'd to unblock. 27.10 */ 27.11 - int running = test_bit(_VCPUF_running, &v->vcpu_flags); 27.12 + int running = v->is_running; 27.13 vcpu_unblock(v); 27.14 if ( running ) 27.15 smp_send_event_check_cpu(v->processor);
28.1 --- a/xen/include/asm-powerpc/event.h Thu Mar 29 16:27:52 2007 +0000 28.2 +++ b/xen/include/asm-powerpc/event.h Thu Mar 29 16:28:34 2007 +0000 28.3 @@ -27,7 +27,7 @@ 28.4 static inline void evtchn_notify(struct vcpu *v) 28.5 { 28.6 #ifdef XXX_NO_SMP_YET 28.7 - int running = test_bit(_VCPUF_running, &v->vcpu_flags); 28.8 + int running = v->is_running; 28.9 vcpu_unblock(v); 28.10 if (running) 28.11 smp_send_event_check_cpu(v->processor); 28.12 @@ -73,10 +73,10 @@ static inline void vcpu_kick(struct vcpu 28.13 * locks) but the key insight is that each change will cause 28.14 * evtchn_upcall_pending to be polled. 28.15 * 28.16 - * NB2. We save VCPUF_running across the unblock to avoid a needless 28.17 + * NB2. We save the running flag across the unblock to avoid a needless 28.18 * IPI for domains that we IPI'd to unblock. 28.19 */ 28.20 - int running = test_bit(_VCPUF_running, &v->vcpu_flags); 28.21 + int running = v->is_running; 28.22 vcpu_unblock(v); 28.23 if (running) 28.24 smp_send_event_check_cpu(v->processor);
29.1 --- a/xen/include/asm-x86/event.h Thu Mar 29 16:27:52 2007 +0000 29.2 +++ b/xen/include/asm-x86/event.h Thu Mar 29 16:28:34 2007 +0000 29.3 @@ -20,10 +20,10 @@ static inline void vcpu_kick(struct vcpu 29.4 * locks) but the key insight is that each change will cause 29.5 * evtchn_upcall_pending to be polled. 29.6 * 29.7 - * NB2. We save VCPUF_running across the unblock to avoid a needless 29.8 + * NB2. We save the running flag across the unblock to avoid a needless 29.9 * IPI for domains that we IPI'd to unblock. 29.10 */ 29.11 - int running = test_bit(_VCPUF_running, &v->vcpu_flags); 29.12 + int running = v->is_running; 29.13 vcpu_unblock(v); 29.14 if ( running ) 29.15 smp_send_event_check_cpu(v->processor);
30.1 --- a/xen/include/asm-x86/i387.h Thu Mar 29 16:27:52 2007 +0000 30.2 +++ b/xen/include/asm-x86/i387.h Thu Mar 29 16:28:34 2007 +0000 30.3 @@ -18,9 +18,9 @@ extern void init_fpu(void); 30.4 extern void save_init_fpu(struct vcpu *v); 30.5 extern void restore_fpu(struct vcpu *v); 30.6 30.7 -#define unlazy_fpu(v) do { \ 30.8 - if ( test_bit(_VCPUF_fpu_dirtied, &(v)->vcpu_flags) ) \ 30.9 - save_init_fpu(v); \ 30.10 +#define unlazy_fpu(v) do { \ 30.11 + if ( (v)->fpu_dirtied ) \ 30.12 + save_init_fpu(v); \ 30.13 } while ( 0 ) 30.14 30.15 #define load_mxcsr(val) do { \ 30.16 @@ -33,9 +33,10 @@ static inline void setup_fpu(struct vcpu 30.17 /* Avoid recursion. */ 30.18 clts(); 30.19 30.20 - if ( !test_and_set_bit(_VCPUF_fpu_dirtied, &v->vcpu_flags) ) 30.21 + if ( !v->fpu_dirtied ) 30.22 { 30.23 - if ( test_bit(_VCPUF_fpu_initialised, &v->vcpu_flags) ) 30.24 + v->fpu_dirtied = 1; 30.25 + if ( v->fpu_initialised ) 30.26 restore_fpu(v); 30.27 else 30.28 init_fpu();
31.1 --- a/xen/include/xen/sched.h Thu Mar 29 16:27:52 2007 +0000 31.2 +++ b/xen/include/xen/sched.h Thu Mar 29 16:28:34 2007 +0000 31.3 @@ -100,6 +100,17 @@ struct vcpu 31.4 } runstate_guest; /* guest address */ 31.5 #endif 31.6 31.7 + /* Has the FPU been initialised? */ 31.8 + bool_t fpu_initialised; 31.9 + /* Has the FPU been used since it was last saved? */ 31.10 + bool_t fpu_dirtied; 31.11 + /* Is this VCPU polling any event channels (SCHEDOP_poll)? */ 31.12 + bool_t is_polling; 31.13 + /* Initialization completed for this VCPU? */ 31.14 + bool_t is_initialised; 31.15 + /* Currently running on a CPU? */ 31.16 + bool_t is_running; 31.17 + 31.18 unsigned long vcpu_flags; 31.19 31.20 spinlock_t pause_lock; 31.21 @@ -423,41 +434,26 @@ extern struct domain *domain_list; 31.22 /* 31.23 * Per-VCPU flags (vcpu_flags). 31.24 */ 31.25 - /* Has the FPU been initialised? */ 31.26 -#define _VCPUF_fpu_initialised 0 31.27 -#define VCPUF_fpu_initialised (1UL<<_VCPUF_fpu_initialised) 31.28 - /* Has the FPU been used since it was last saved? */ 31.29 -#define _VCPUF_fpu_dirtied 1 31.30 -#define VCPUF_fpu_dirtied (1UL<<_VCPUF_fpu_dirtied) 31.31 /* Domain is blocked waiting for an event. */ 31.32 -#define _VCPUF_blocked 2 31.33 +#define _VCPUF_blocked 0 31.34 #define VCPUF_blocked (1UL<<_VCPUF_blocked) 31.35 - /* Currently running on a CPU? */ 31.36 -#define _VCPUF_running 3 31.37 -#define VCPUF_running (1UL<<_VCPUF_running) 31.38 - /* Initialization completed. */ 31.39 -#define _VCPUF_initialised 4 31.40 -#define VCPUF_initialised (1UL<<_VCPUF_initialised) 31.41 /* VCPU is offline. */ 31.42 -#define _VCPUF_down 5 31.43 +#define _VCPUF_down 1 31.44 #define VCPUF_down (1UL<<_VCPUF_down) 31.45 /* NMI callback pending for this VCPU? */ 31.46 -#define _VCPUF_nmi_pending 8 31.47 +#define _VCPUF_nmi_pending 2 31.48 #define VCPUF_nmi_pending (1UL<<_VCPUF_nmi_pending) 31.49 /* Avoid NMI reentry by allowing NMIs to be masked for short periods. */ 31.50 -#define _VCPUF_nmi_masked 9 31.51 +#define _VCPUF_nmi_masked 3 31.52 #define VCPUF_nmi_masked (1UL<<_VCPUF_nmi_masked) 31.53 - /* VCPU is polling a set of event channels (SCHEDOP_poll). */ 31.54 -#define _VCPUF_polling 10 31.55 -#define VCPUF_polling (1UL<<_VCPUF_polling) 31.56 /* VCPU is paused by the hypervisor? */ 31.57 -#define _VCPUF_paused 11 31.58 +#define _VCPUF_paused 4 31.59 #define VCPUF_paused (1UL<<_VCPUF_paused) 31.60 /* VCPU is blocked awaiting an event to be consumed by Xen. */ 31.61 -#define _VCPUF_blocked_in_xen 12 31.62 +#define _VCPUF_blocked_in_xen 5 31.63 #define VCPUF_blocked_in_xen (1UL<<_VCPUF_blocked_in_xen) 31.64 /* VCPU affinity has changed: migrating to a new CPU. */ 31.65 -#define _VCPUF_migrating 13 31.66 +#define _VCPUF_migrating 6 31.67 #define VCPUF_migrating (1UL<<_VCPUF_migrating) 31.68 31.69 static inline int vcpu_runnable(struct vcpu *v)