debuggers.hg
changeset 11260:cd4e7ace4e58
merge.
author    Ian Campbell <ian.campbell@xensource.com>
date      Tue Aug 22 11:34:46 2006 +0100 (2006-08-22)
parents   a58ffedb59ce 56b05c672033
children  a4550b748840
files     xen/arch/x86/domain.c xen/arch/x86/setup.c xen/arch/x86/smpboot.c
          xen/arch/x86/x86_32/mm.c xen/arch/x86/x86_64/mm.c
          xen/common/sched_sedf.c xen/common/schedule.c xen/common/timer.c
          xen/include/asm-x86/page.h
line diff
--- a/xen/arch/x86/domain.c Tue Aug 22 11:30:13 2006 +0100
+++ b/xen/arch/x86/domain.c Tue Aug 22 11:34:46 2006 +0100
@@ -125,8 +125,15 @@ struct vcpu *alloc_vcpu_struct(struct do
 
     v->arch.flags = TF_kernel_mode;
 
-    v->arch.schedule_tail = is_idle_domain(d) ?
-        continue_idle_domain : continue_nonidle_domain;
+    if ( is_idle_domain(d) )
+    {
+        v->arch.schedule_tail = continue_idle_domain;
+        v->arch.cr3 = __pa(idle_pg_table);
+    }
+    else
+    {
+        v->arch.schedule_tail = continue_nonidle_domain;
+    }
 
     v->arch.ctxt_switch_from = paravirt_ctxt_switch_from;
     v->arch.ctxt_switch_to   = paravirt_ctxt_switch_to;
--- a/xen/arch/x86/setup.c Tue Aug 22 11:30:13 2006 +0100
+++ b/xen/arch/x86/setup.c Tue Aug 22 11:34:46 2006 +0100
@@ -160,19 +160,29 @@ void discard_initial_images(void)
 
 extern char __per_cpu_start[], __per_cpu_data_end[], __per_cpu_end[];
 
-static void percpu_init_areas(void)
+static void __init percpu_init_areas(void)
 {
     unsigned int i, data_size = __per_cpu_data_end - __per_cpu_start;
 
     BUG_ON(data_size > PERCPU_SIZE);
 
-    for ( i = 1; i < NR_CPUS; i++ )
-        memcpy(__per_cpu_start + (i << PERCPU_SHIFT),
-               __per_cpu_start,
-               data_size);
+    for_each_cpu ( i )
+    {
+        memguard_unguard_range(__per_cpu_start + (i << PERCPU_SHIFT),
+                               1 << PERCPU_SHIFT);
+        if ( i != 0 )
+            memcpy(__per_cpu_start + (i << PERCPU_SHIFT),
+                   __per_cpu_start,
+                   data_size);
+    }
 }
 
-static void percpu_free_unused_areas(void)
+static void __init percpu_guard_areas(void)
+{
+    memguard_guard_range(__per_cpu_start, __per_cpu_end - __per_cpu_start);
+}
+
+static void __init percpu_free_unused_areas(void)
 {
     unsigned int i, first_unused;
 
@@ -186,14 +196,32 @@ static void percpu_free_unused_areas(voi
     for ( ; i < NR_CPUS; i++ )
         BUG_ON(cpu_online(i));
 
+#ifndef MEMORY_GUARD
     init_xenheap_pages(__pa(__per_cpu_start) + (first_unused << PERCPU_SHIFT),
                        __pa(__per_cpu_end));
+#endif
+}
+
+static void __init init_idle_domain(void)
+{
+    struct domain *idle_domain;
+
+    /* Domain creation requires that scheduler structures are initialised. */
+    scheduler_init();
+
+    idle_domain = domain_create(IDLE_DOMAIN_ID);
+    if ( (idle_domain == NULL) || (alloc_vcpu(idle_domain, 0, 0) == NULL) )
+        BUG();
+
+    set_current(idle_domain->vcpu[0]);
+    idle_vcpu[0] = this_cpu(curr_vcpu) = current;
+
+    setup_idle_pagetable();
 }
 
 void __init __start_xen(multiboot_info_t *mbi)
 {
     char __cmdline[] = "", *cmdline = __cmdline;
-    struct domain *idle_domain;
     unsigned long _initrd_start = 0, _initrd_len = 0;
     unsigned int initrdidx = 1;
     module_t *mod = (module_t *)__va(mbi->mods_addr);
@@ -212,6 +240,7 @@ void __init __start_xen(multiboot_info_t
     cmdline_parse(cmdline);
 
     set_current((struct vcpu *)0xfffff000); /* debug sanity */
+    idle_vcpu[0] = current;
     set_processor_id(0); /* needed early, for smp_processor_id() */
 
     smp_prepare_boot_cpu();
@@ -243,8 +272,6 @@ void __init __start_xen(multiboot_info_t
         EARLY_FAIL();
     }
 
-    percpu_init_areas();
-
     xenheap_phys_end = opt_xenheap_megabytes << 20;
 
     if ( mbi->flags & MBI_MEMMAP )
@@ -382,6 +409,7 @@ void __init __start_xen(multiboot_info_t
     }
 
     memguard_init();
+    percpu_guard_areas();
 
     printk("System RAM: %luMB (%lukB)\n",
            nr_pages >> (20 - PAGE_SHIFT),
@@ -437,16 +465,6 @@ void __init __start_xen(multiboot_info_t
 
     early_cpu_init();
 
-    scheduler_init();
-
-    idle_domain = domain_create(IDLE_DOMAIN_ID);
-    if ( (idle_domain == NULL) || (alloc_vcpu(idle_domain, 0, 0) == NULL) )
-        BUG();
-
-    set_current(idle_domain->vcpu[0]);
-    this_cpu(curr_vcpu) = idle_domain->vcpu[0];
-    idle_vcpu[0] = current;
-
     paging_init();
 
     /* Unmap the first page of CPU0's stack. */
@@ -470,13 +488,17 @@ void __init __start_xen(multiboot_info_t
     acpi_boot_table_init();
     acpi_boot_init();
 
-    if ( smp_found_config )
+    if ( smp_found_config )
        get_smp_config();
 
    init_apic_mappings();
 
    init_IRQ();
 
+    percpu_init_areas();
+
+    init_idle_domain();
+
    trap_init();
 
    timer_init();
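Note: the percpu_init_areas() rework above relies on the per-CPU data layout, in which each CPU owns a slot of 1 << PERCPU_SHIFT bytes starting at __per_cpu_start, and CPU0's slot is the master image that the other slots are memcpy'd from. Below is a minimal, self-contained sketch of that addressing scheme only; the buffer, the PERCPU_SHIFT value and the percpu_ptr() helper are assumptions for illustration, since Xen's real per-CPU area is laid out by the linker between __per_cpu_start and __per_cpu_end.

    /* Illustrative model of per-CPU slot addressing; not Xen code. */
    #include <stdio.h>
    #include <string.h>

    #define PERCPU_SHIFT 12                     /* assumed slot size: 4kB */
    #define PERCPU_SIZE  (1UL << PERCPU_SHIFT)
    #define NR_CPUS      4

    static char per_cpu_area[NR_CPUS << PERCPU_SHIFT];

    /* Start of CPU 'cpu's private copy of the per-CPU data block. */
    static void *percpu_ptr(unsigned int cpu)
    {
        return per_cpu_area + ((unsigned long)cpu << PERCPU_SHIFT);
    }

    int main(void)
    {
        unsigned int cpu;

        /* CPU0's slot holds the initial image; replicate it into every
         * other slot, as percpu_init_areas() does with memcpy(). */
        strcpy(percpu_ptr(0), "initial per-cpu data");
        for ( cpu = 1; cpu < NR_CPUS; cpu++ )
            memcpy(percpu_ptr(cpu), percpu_ptr(0), PERCPU_SIZE);

        for ( cpu = 0; cpu < NR_CPUS; cpu++ )
            printf("cpu%u: %s\n", cpu, (char *)percpu_ptr(cpu));
        return 0;
    }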
--- a/xen/arch/x86/smpboot.c Tue Aug 22 11:30:13 2006 +0100
+++ b/xen/arch/x86/smpboot.c Tue Aug 22 11:34:46 2006 +0100
@@ -896,8 +896,6 @@ static int __devinit do_boot_cpu(int api
     v = alloc_idle_vcpu(cpu);
     BUG_ON(v == NULL);
 
-    v->arch.cr3 = __pa(idle_pg_table);
-
     /* start_eip had better be page-aligned! */
     start_eip = setup_trampoline();
 
--- a/xen/arch/x86/x86_32/mm.c Tue Aug 22 11:30:13 2006 +0100
+++ b/xen/arch/x86/x86_32/mm.c Tue Aug 22 11:34:46 2006 +0100
@@ -75,8 +75,6 @@ void __init paging_init(void)
     printk("PAE disabled.\n");
 #endif
 
-    idle_vcpu[0]->arch.cr3 = __pa(idle_pg_table);
-
     if ( cpu_has_pge )
     {
         /* Suitable Xen mapping can be GLOBAL. */
@@ -120,8 +118,12 @@ void __init paging_init(void)
         idle_pg_table_l2[l2_linear_offset(IOREMAP_VIRT_START) + i] =
             l2e_from_page(virt_to_page(ioremap_pt), __PAGE_HYPERVISOR);
     }
+}
 
-    /* Install per-domain mappings for idle domain. */
+void __init setup_idle_pagetable(void)
+{
+    int i;
+
     for ( i = 0; i < PDPT_L2_ENTRIES; i++ )
         idle_pg_table_l2[l2_linear_offset(PERDOMAIN_VIRT_START) + i] =
             l2e_from_page(virt_to_page(idle_vcpu[0]->domain->
--- a/xen/arch/x86/x86_64/mm.c Tue Aug 22 11:30:13 2006 +0100
+++ b/xen/arch/x86/x86_64/mm.c Tue Aug 22 11:34:46 2006 +0100
@@ -81,8 +81,6 @@ void __init paging_init(void)
     l2_pgentry_t *l2_ro_mpt;
     struct page_info *pg;
 
-    idle_vcpu[0]->arch.cr3 = __pa(idle_pg_table);
-
     /* Create user-accessible L2 directory to map the MPT for guests. */
     l3_ro_mpt = alloc_xenheap_page();
     clear_page(l3_ro_mpt);
@@ -121,7 +119,10 @@ void __init paging_init(void)
     /* Set up linear page table mapping. */
     idle_pg_table[l4_table_offset(LINEAR_PT_VIRT_START)] =
         l4e_from_paddr(__pa(idle_pg_table), __PAGE_HYPERVISOR);
+}
 
+void __init setup_idle_pagetable(void)
+{
     /* Install per-domain mappings for idle domain. */
     idle_pg_table[l4_table_offset(PERDOMAIN_VIRT_START)] =
         l4e_from_page(
--- a/xen/common/sched_sedf.c Tue Aug 22 11:30:13 2006 +0100
+++ b/xen/common/sched_sedf.c Tue Aug 22 11:34:46 2006 +0100
@@ -1301,16 +1301,9 @@ static int sedf_adjust_weights(struct sc
 {
     struct vcpu *p;
     struct domain *d;
-    int sumw[NR_CPUS];
-    s_time_t sumt[NR_CPUS];
-    int cpu;
+    int sumw[NR_CPUS] = { 0 };
+    s_time_t sumt[NR_CPUS] = { 0 };
 
-    for ( cpu = 0; cpu < NR_CPUS; cpu++ )
-    {
-        sumw[cpu] = 0;
-        sumt[cpu] = 0;
-    }
-
     /* Sum across all weights. */
     for_each_domain( d )
     {
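Aside on the sched_sedf.c hunk: `= { 0 }` zero-initializes the whole array, not just the first element, because any aggregate members left without an explicit initializer are implicitly set to zero in C, which is what makes the removed zeroing loop redundant. A tiny standalone check of that language rule (the array size below is a stand-in, not NR_CPUS):

    #include <assert.h>

    #define N 8   /* stand-in for NR_CPUS, purely for illustration */

    int main(void)
    {
        int  sumw[N] = { 0 };   /* element 0 explicit, the rest implicitly 0 */
        long sumt[N] = { 0 };

        for ( int i = 0; i < N; i++ )
            assert(sumw[i] == 0 && sumt[i] == 0);
        return 0;
    }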
--- a/xen/common/schedule.c Tue Aug 22 11:30:13 2006 +0100
+++ b/xen/common/schedule.c Tue Aug 22 11:34:46 2006 +0100
@@ -633,7 +633,7 @@ void __init scheduler_init(void)
 
     open_softirq(SCHEDULE_SOFTIRQ, __enter_scheduler);
 
-    for ( i = 0; i < NR_CPUS; i++ )
+    for_each_cpu ( i )
     {
         spin_lock_init(&per_cpu(schedule_data, i).schedule_lock);
         init_timer(&per_cpu(schedule_data, i).s_timer, s_timer_fn, NULL, i);
--- a/xen/common/timer.c Tue Aug 22 11:30:13 2006 +0100
+++ b/xen/common/timer.c Tue Aug 22 11:34:46 2006 +0100
@@ -382,7 +382,7 @@ void __init timer_init(void)
     SET_HEAP_SIZE(&dummy_heap, 0);
     SET_HEAP_LIMIT(&dummy_heap, 0);
 
-    for ( i = 0; i < NR_CPUS; i++ )
+    for_each_cpu ( i )
     {
         spin_lock_init(&per_cpu(timers, i).lock);
         per_cpu(timers, i).heap = &dummy_heap;
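The schedule.c and timer.c hunks above replace plain 0..NR_CPUS-1 loops with for_each_cpu ( i ), so per-CPU setup visits only the CPUs tracked in a CPU mask rather than every possible index. The sketch below shows how such an iterator can be built over a bitmask; the macro name, mask variable and values are assumptions for illustration, not Xen's actual definitions (those live in xen/include/xen/cpumask.h).

    /* Hypothetical for_each_cpu-style iterator over a CPU bitmask. */
    #include <stdio.h>

    #define NR_CPUS 8
    static unsigned long cpu_mask = 0x0FUL;       /* assume CPUs 0-3 present */

    #define for_each_cpu_in_mask(c, mask)         \
        for ( (c) = 0; (c) < NR_CPUS; (c)++ )     \
            if ( (mask) & (1UL << (c)) )

    int main(void)
    {
        unsigned int cpu;

        /* Only CPUs set in the mask are visited, unlike the old
         * "for ( i = 0; i < NR_CPUS; i++ )" loops being replaced above. */
        for_each_cpu_in_mask ( cpu, cpu_mask )
            printf("initialise per-cpu state for cpu%u\n", cpu);
        return 0;
    }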
--- a/xen/include/asm-x86/page.h Tue Aug 22 11:30:13 2006 +0100
+++ b/xen/include/asm-x86/page.h Tue Aug 22 11:34:46 2006 +0100
@@ -255,7 +255,8 @@ extern l2_pgentry_t idle_pg_table_l2[R
 extern root_pgentry_t idle_pg_table[ROOT_PAGETABLE_ENTRIES];
 extern l2_pgentry_t   idle_pg_table_l2[ROOT_PAGETABLE_ENTRIES];
 #endif
-extern void paging_init(void);
+void paging_init(void);
+void setup_idle_pagetable(void);
 #endif
 
 #define __pge_off() \
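The page.h hunk also drops the redundant `extern` from the paging_init() prototype: function declarations at file scope have external linkage by default, so the two forms below mean the same thing (a general C fact, not specific to this header).

    extern void paging_init(void);   /* explicit extern                */
    void paging_init(void);          /* identical meaning without it   */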