debuggers.hg
changeset 20864:508f457aa439
x86 hvm: Pre-allocate per-cpu HVM memory before bringing CPUs online
after boot. Avoids doing the allocations on the CPU itself, while in a
not-fully-online state and with irqs disabled. This way we avoid
assertions about irqs being disabled in e.g., tlb flush logic.
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author | Keir Fraser <keir.fraser@citrix.com> |
---|---|
date | Mon Jan 18 10:35:36 2010 +0000 (2010-01-18) |
parents | 7a8cee80597e |
children | 618b3597603c |
files | xen/arch/x86/hvm/svm/svm.c xen/arch/x86/hvm/vmx/vmcs.c xen/arch/x86/hvm/vmx/vmx.c xen/arch/x86/smpboot.c xen/include/asm-x86/hvm/hvm.h xen/include/asm-x86/hvm/vmx/vmcs.h |
line diff
1.1 --- a/xen/arch/x86/hvm/svm/svm.c Sun Jan 17 18:20:04 2010 +0000 1.2 +++ b/xen/arch/x86/hvm/svm/svm.c Mon Jan 18 10:35:36 2010 +0000 1.3 @@ -809,6 +809,16 @@ static int svm_do_pmu_interrupt(struct c 1.4 return 0; 1.5 } 1.6 1.7 +static int svm_cpu_prepare(unsigned int cpu) 1.8 +{ 1.9 + if ( ((hsa[cpu] == NULL) && 1.10 + ((hsa[cpu] = alloc_host_save_area()) == NULL)) || 1.11 + ((root_vmcb[cpu] == NULL) && 1.12 + ((root_vmcb[cpu] = alloc_vmcb()) == NULL)) ) 1.13 + return -ENOMEM; 1.14 + return 0; 1.15 +} 1.16 + 1.17 static int svm_cpu_up(struct cpuinfo_x86 *c) 1.18 { 1.19 u32 eax, edx, phys_hsa_lo, phys_hsa_hi; 1.20 @@ -823,10 +833,7 @@ static int svm_cpu_up(struct cpuinfo_x86 1.21 return 0; 1.22 } 1.23 1.24 - if ( ((hsa[cpu] == NULL) && 1.25 - ((hsa[cpu] = alloc_host_save_area()) == NULL)) || 1.26 - ((root_vmcb[cpu] == NULL) && 1.27 - ((root_vmcb[cpu] = alloc_vmcb()) == NULL)) ) 1.28 + if ( svm_cpu_prepare(cpu) != 0 ) 1.29 return 0; 1.30 1.31 write_efer(read_efer() | EFER_SVME); 1.32 @@ -1231,6 +1238,7 @@ static void svm_invlpg_intercept(unsigne 1.33 1.34 static struct hvm_function_table __read_mostly svm_function_table = { 1.35 .name = "SVM", 1.36 + .cpu_prepare = svm_cpu_prepare, 1.37 .cpu_down = svm_cpu_down, 1.38 .domain_initialise = svm_domain_initialise, 1.39 .domain_destroy = svm_domain_destroy,
2.1 --- a/xen/arch/x86/hvm/vmx/vmcs.c Sun Jan 17 18:20:04 2010 +0000 2.2 +++ b/xen/arch/x86/hvm/vmx/vmcs.c Mon Jan 18 10:35:36 2010 +0000 2.3 @@ -320,6 +320,19 @@ static void vmx_load_vmcs(struct vcpu *v 2.4 local_irq_restore(flags); 2.5 } 2.6 2.7 +int vmx_cpu_prepare(unsigned int cpu) 2.8 +{ 2.9 + if ( per_cpu(host_vmcs, cpu) != NULL ) 2.10 + return 0; 2.11 + 2.12 + per_cpu(host_vmcs, cpu) = vmx_alloc_vmcs(); 2.13 + if ( per_cpu(host_vmcs, cpu) != NULL ) 2.14 + return 0; 2.15 + 2.16 + printk("CPU%d: Could not allocate host VMCS\n", cpu); 2.17 + return -ENOMEM; 2.18 +} 2.19 + 2.20 int vmx_cpu_up(void) 2.21 { 2.22 u32 eax, edx; 2.23 @@ -367,15 +380,8 @@ int vmx_cpu_up(void) 2.24 2.25 INIT_LIST_HEAD(&this_cpu(active_vmcs_list)); 2.26 2.27 - if ( this_cpu(host_vmcs) == NULL ) 2.28 - { 2.29 - this_cpu(host_vmcs) = vmx_alloc_vmcs(); 2.30 - if ( this_cpu(host_vmcs) == NULL ) 2.31 - { 2.32 - printk("CPU%d: Could not allocate host VMCS\n", cpu); 2.33 - return 0; 2.34 - } 2.35 - } 2.36 + if ( vmx_cpu_prepare(cpu) != 0 ) 2.37 + return 0; 2.38 2.39 switch ( __vmxon(virt_to_maddr(this_cpu(host_vmcs))) ) 2.40 {
3.1 --- a/xen/arch/x86/hvm/vmx/vmx.c Sun Jan 17 18:20:04 2010 +0000 3.2 +++ b/xen/arch/x86/hvm/vmx/vmx.c Mon Jan 18 10:35:36 2010 +0000 3.3 @@ -1377,6 +1377,7 @@ static void vmx_set_info_guest(struct vc 3.4 3.5 static struct hvm_function_table __read_mostly vmx_function_table = { 3.6 .name = "VMX", 3.7 + .cpu_prepare = vmx_cpu_prepare, 3.8 .domain_initialise = vmx_domain_initialise, 3.9 .domain_destroy = vmx_domain_destroy, 3.10 .vcpu_initialise = vmx_vcpu_initialise,
4.1 --- a/xen/arch/x86/smpboot.c Sun Jan 17 18:20:04 2010 +0000 4.2 +++ b/xen/arch/x86/smpboot.c Mon Jan 18 10:35:36 2010 +0000 4.3 @@ -1518,7 +1518,11 @@ int cpu_add(uint32_t apic_id, uint32_t a 4.4 4.5 int __devinit __cpu_up(unsigned int cpu) 4.6 { 4.7 - int ret = 0; 4.8 + int ret; 4.9 + 4.10 + ret = hvm_cpu_prepare(cpu); 4.11 + if (ret) 4.12 + return ret; 4.13 4.14 /* 4.15 * We do warm boot only on cpus that had booted earlier
5.1 --- a/xen/include/asm-x86/hvm/hvm.h Sun Jan 17 18:20:04 2010 +0000 5.2 +++ b/xen/include/asm-x86/hvm/hvm.h Mon Jan 18 10:35:36 2010 +0000 5.3 @@ -111,6 +111,7 @@ struct hvm_function_table { 5.4 int (*event_pending)(struct vcpu *v); 5.5 int (*do_pmu_interrupt)(struct cpu_user_regs *regs); 5.6 5.7 + int (*cpu_prepare)(unsigned int cpu); 5.8 int (*cpu_up)(void); 5.9 void (*cpu_down)(void); 5.10 5.11 @@ -290,11 +291,15 @@ uint8_t hvm_combine_hw_exceptions(uint8_ 5.12 void hvm_set_rdtsc_exiting(struct domain *d, bool_t enable); 5.13 int hvm_gtsc_need_scale(struct domain *d); 5.14 5.15 +static inline int 5.16 +hvm_cpu_prepare(unsigned int cpu) 5.17 +{ 5.18 + return (hvm_funcs.cpu_prepare ? hvm_funcs.cpu_prepare(cpu) : 0); 5.19 +} 5.20 + 5.21 static inline int hvm_cpu_up(void) 5.22 { 5.23 - if ( hvm_funcs.cpu_up ) 5.24 - return hvm_funcs.cpu_up(); 5.25 - return 1; 5.26 + return (hvm_funcs.cpu_up ? hvm_funcs.cpu_up() : 1); 5.27 } 5.28 5.29 static inline void hvm_cpu_down(void)
6.1 --- a/xen/include/asm-x86/hvm/vmx/vmcs.h Sun Jan 17 18:20:04 2010 +0000 6.2 +++ b/xen/include/asm-x86/hvm/vmx/vmcs.h Mon Jan 18 10:35:36 2010 +0000 6.3 @@ -26,6 +26,7 @@ 6.4 extern void start_vmx(void); 6.5 extern void vmcs_dump_vcpu(struct vcpu *v); 6.6 extern void setup_vmcs_dump(void); 6.7 +extern int vmx_cpu_prepare(unsigned int cpu); 6.8 extern int vmx_cpu_up(void); 6.9 extern void vmx_cpu_down(void); 6.10