debuggers.hg
changeset 11257:80c5350a68f1
[XEN] Clean up some x86 bootstrap code. Replace some CPU iterators
with for_each_cpu() -- we want to ensure that per_cpu areas are
accessed only for cpus in cpu_possible_map.
Signed-off-by: Keir Fraser <keir@xensource.com>
| author | kfraser@localhost.localdomain |
|---|---|
| date | Tue Aug 22 11:19:48 2006 +0100 (2006-08-22) |
| parents | f681ffc9b01a |
| children | 56b05c672033 |
| files | xen/arch/x86/domain.c xen/arch/x86/setup.c xen/arch/x86/smpboot.c xen/arch/x86/x86_32/mm.c xen/arch/x86/x86_64/mm.c xen/common/sched_sedf.c xen/common/schedule.c xen/common/timer.c xen/include/asm-x86/page.h |
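The schedule.c and timer.c hunks below replace raw `0 .. NR_CPUS-1` loops with `for_each_cpu()`, so per-CPU structures are only touched for CPUs in `cpu_possible_map`. As a rough illustration of that distinction, here is a minimal standalone C sketch; the bitmap value, the `for_each_possible_cpu` macro and the `per_cpu_counter` array are hypothetical stand-ins, not the actual Xen definitions.

```c
#include <stdio.h>

#define NR_CPUS 8

/*
 * Hypothetical stand-ins for Xen's cpu_possible_map and a per-CPU array;
 * the names and values are illustrative only, not taken from the Xen tree.
 */
static unsigned long cpu_possible_map = 0x5UL;   /* only CPUs 0 and 2 exist */
static int per_cpu_counter[NR_CPUS];

/* Minimal analogue of for_each_cpu(): visit only CPUs in the possible map. */
#define for_each_possible_cpu(cpu)                        \
    for ( (cpu) = 0; (cpu) < NR_CPUS; (cpu)++ )           \
        if ( cpu_possible_map & (1UL << (cpu)) )

int main(void)
{
    int cpu;

    /* Old pattern: touches per-CPU state for every CPU number up to
     * NR_CPUS, including CPUs that can never come online. */
    for ( cpu = 0; cpu < NR_CPUS; cpu++ )
        per_cpu_counter[cpu] = 0;

    /* New pattern: only CPUs present in the possible map are visited, so
     * per-CPU state is touched exactly where it is actually set up. */
    for_each_possible_cpu ( cpu )
        printf("initialising per-CPU state for cpu %d\n", cpu);

    return 0;
}
```

The motivation is presumably that per_cpu areas for CPUs outside the possible map may never be set up (or may be reclaimed, cf. percpu_free_unused_areas() in the setup.c hunk), so a raw NR_CPUS loop could index memory that is not valid per-CPU state.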
line diff
```diff
--- a/xen/arch/x86/domain.c    Mon Aug 21 12:05:11 2006 -0400
+++ b/xen/arch/x86/domain.c    Tue Aug 22 11:19:48 2006 +0100
@@ -125,8 +125,15 @@ struct vcpu *alloc_vcpu_struct(struct do
 
     v->arch.flags = TF_kernel_mode;
 
-    v->arch.schedule_tail = is_idle_domain(d) ?
-        continue_idle_domain : continue_nonidle_domain;
+    if ( is_idle_domain(d) )
+    {
+        v->arch.schedule_tail = continue_idle_domain;
+        v->arch.cr3           = __pa(idle_pg_table);
+    }
+    else
+    {
+        v->arch.schedule_tail = continue_nonidle_domain;
+    }
 
     v->arch.ctxt_switch_from = paravirt_ctxt_switch_from;
     v->arch.ctxt_switch_to   = paravirt_ctxt_switch_to;
```
```diff
--- a/xen/arch/x86/setup.c    Mon Aug 21 12:05:11 2006 -0400
+++ b/xen/arch/x86/setup.c    Tue Aug 22 11:19:48 2006 +0100
@@ -190,10 +190,26 @@ static void percpu_free_unused_areas(voi
                   __pa(__per_cpu_end));
 }
 
+static void init_idle_domain(void)
+{
+    struct domain *idle_domain;
+
+    /* Domain creation requires that scheduler structures are initialised. */
+    scheduler_init();
+
+    idle_domain = domain_create(IDLE_DOMAIN_ID);
+    if ( (idle_domain == NULL) || (alloc_vcpu(idle_domain, 0, 0) == NULL) )
+        BUG();
+
+    set_current(idle_domain->vcpu[0]);
+    idle_vcpu[0] = this_cpu(curr_vcpu) = current;
+
+    setup_idle_pagetable();
+}
+
 void __init __start_xen(multiboot_info_t *mbi)
 {
     char __cmdline[] = "", *cmdline = __cmdline;
-    struct domain *idle_domain;
     unsigned long _initrd_start = 0, _initrd_len = 0;
     unsigned int initrdidx = 1;
     module_t *mod = (module_t *)__va(mbi->mods_addr);
@@ -212,6 +228,7 @@ void __init __start_xen(multiboot_info_t
     cmdline_parse(cmdline);
 
     set_current((struct vcpu *)0xfffff000); /* debug sanity */
+    idle_vcpu[0] = current;
     set_processor_id(0); /* needed early, for smp_processor_id() */
 
     smp_prepare_boot_cpu();
@@ -437,16 +454,6 @@ void __init __start_xen(multiboot_info_t
 
     early_cpu_init();
 
-    scheduler_init();
-
-    idle_domain = domain_create(IDLE_DOMAIN_ID);
-    if ( (idle_domain == NULL) || (alloc_vcpu(idle_domain, 0, 0) == NULL) )
-        BUG();
-
-    set_current(idle_domain->vcpu[0]);
-    this_cpu(curr_vcpu) = idle_domain->vcpu[0];
-    idle_vcpu[0] = current;
-
     paging_init();
 
     /* Unmap the first page of CPU0's stack. */
@@ -477,6 +484,8 @@ void __init __start_xen(multiboot_info_t
 
     init_IRQ();
 
+    init_idle_domain();
+
     trap_init();
 
     timer_init();
```
```diff
--- a/xen/arch/x86/smpboot.c    Mon Aug 21 12:05:11 2006 -0400
+++ b/xen/arch/x86/smpboot.c    Tue Aug 22 11:19:48 2006 +0100
@@ -896,8 +896,6 @@ static int __devinit do_boot_cpu(int api
     v = alloc_idle_vcpu(cpu);
     BUG_ON(v == NULL);
 
-    v->arch.cr3 = __pa(idle_pg_table);
-
     /* start_eip had better be page-aligned! */
     start_eip = setup_trampoline();
 
```
```diff
--- a/xen/arch/x86/x86_32/mm.c    Mon Aug 21 12:05:11 2006 -0400
+++ b/xen/arch/x86/x86_32/mm.c    Tue Aug 22 11:19:48 2006 +0100
@@ -75,8 +75,6 @@ void __init paging_init(void)
     printk("PAE disabled.\n");
 #endif
 
-    idle_vcpu[0]->arch.cr3 = __pa(idle_pg_table);
-
     if ( cpu_has_pge )
     {
         /* Suitable Xen mapping can be GLOBAL. */
@@ -120,8 +118,12 @@ void __init paging_init(void)
         idle_pg_table_l2[l2_linear_offset(IOREMAP_VIRT_START) + i] =
             l2e_from_page(virt_to_page(ioremap_pt), __PAGE_HYPERVISOR);
     }
+}
 
-    /* Install per-domain mappings for idle domain. */
+void __init setup_idle_pagetable(void)
+{
+    int i;
+
     for ( i = 0; i < PDPT_L2_ENTRIES; i++ )
         idle_pg_table_l2[l2_linear_offset(PERDOMAIN_VIRT_START) + i] =
             l2e_from_page(virt_to_page(idle_vcpu[0]->domain->
```
```diff
--- a/xen/arch/x86/x86_64/mm.c    Mon Aug 21 12:05:11 2006 -0400
+++ b/xen/arch/x86/x86_64/mm.c    Tue Aug 22 11:19:48 2006 +0100
@@ -81,8 +81,6 @@ void __init paging_init(void)
     l2_pgentry_t *l2_ro_mpt;
     struct page_info *pg;
 
-    idle_vcpu[0]->arch.cr3 = __pa(idle_pg_table);
-
     /* Create user-accessible L2 directory to map the MPT for guests. */
     l3_ro_mpt = alloc_xenheap_page();
     clear_page(l3_ro_mpt);
@@ -121,7 +119,10 @@ void __init paging_init(void)
     /* Set up linear page table mapping. */
     idle_pg_table[l4_table_offset(LINEAR_PT_VIRT_START)] =
         l4e_from_paddr(__pa(idle_pg_table), __PAGE_HYPERVISOR);
+}
 
+void __init setup_idle_pagetable(void)
+{
     /* Install per-domain mappings for idle domain. */
     idle_pg_table[l4_table_offset(PERDOMAIN_VIRT_START)] =
         l4e_from_page(
```
```diff
--- a/xen/common/sched_sedf.c    Mon Aug 21 12:05:11 2006 -0400
+++ b/xen/common/sched_sedf.c    Tue Aug 22 11:19:48 2006 +0100
@@ -1301,16 +1301,9 @@ static int sedf_adjust_weights(struct sc
 {
     struct vcpu *p;
     struct domain *d;
-    int sumw[NR_CPUS];
-    s_time_t sumt[NR_CPUS];
-    int cpu;
+    int sumw[NR_CPUS] = { 0 };
+    s_time_t sumt[NR_CPUS] = { 0 };
 
-    for ( cpu = 0; cpu < NR_CPUS; cpu++ )
-    {
-        sumw[cpu] = 0;
-        sumt[cpu] = 0;
-    }
-
     /* Sum across all weights. */
     for_each_domain( d )
     {
```
```diff
--- a/xen/common/schedule.c    Mon Aug 21 12:05:11 2006 -0400
+++ b/xen/common/schedule.c    Tue Aug 22 11:19:48 2006 +0100
@@ -633,7 +633,7 @@ void __init scheduler_init(void)
 
     open_softirq(SCHEDULE_SOFTIRQ, __enter_scheduler);
 
-    for ( i = 0; i < NR_CPUS; i++ )
+    for_each_cpu ( i )
     {
         spin_lock_init(&per_cpu(schedule_data, i).schedule_lock);
         init_timer(&per_cpu(schedule_data, i).s_timer, s_timer_fn, NULL, i);
```
```diff
--- a/xen/common/timer.c    Mon Aug 21 12:05:11 2006 -0400
+++ b/xen/common/timer.c    Tue Aug 22 11:19:48 2006 +0100
@@ -382,7 +382,7 @@ void __init timer_init(void)
     SET_HEAP_SIZE(&dummy_heap, 0);
     SET_HEAP_LIMIT(&dummy_heap, 0);
 
-    for ( i = 0; i < NR_CPUS; i++ )
+    for_each_cpu ( i )
     {
         spin_lock_init(&per_cpu(timers, i).lock);
         per_cpu(timers, i).heap = &dummy_heap;
```
```diff
--- a/xen/include/asm-x86/page.h    Mon Aug 21 12:05:11 2006 -0400
+++ b/xen/include/asm-x86/page.h    Tue Aug 22 11:19:48 2006 +0100
@@ -255,7 +255,8 @@ extern l2_pgentry_t idle_pg_table_l2[R
 extern root_pgentry_t idle_pg_table[ROOT_PAGETABLE_ENTRIES];
 extern l2_pgentry_t   idle_pg_table_l2[ROOT_PAGETABLE_ENTRIES];
 #endif
-extern void paging_init(void);
+void paging_init(void);
+void setup_idle_pagetable(void);
 #endif
 
 #define __pge_off() \
```