debuggers.hg
changeset 16601:d4a3479e68ce
x86: Respect e820 map even below 16MB.
NB. Even with this patch, x86/32 still statically allocates the range
1MB-12MB. This can be changed if there really are platforms that need
stuff to persist in that range after the OS starts to boot.
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author | Keir Fraser <keir.fraser@citrix.com> |
---|---|
date | Fri Dec 07 18:24:33 2007 +0000 (2007-12-07) |
parents | 35890b260971 |
children | 180e4a77e805 |
files | xen/arch/x86/setup.c |
line diff
1.1 --- a/xen/arch/x86/setup.c Fri Dec 07 17:05:15 2007 +0000 1.2 +++ b/xen/arch/x86/setup.c Fri Dec 07 18:24:33 2007 +0000 1.3 @@ -346,6 +346,32 @@ static void __init parse_video_info(void 1.4 } 1.5 } 1.6 1.7 +void __init kexec_reserve_area(struct e820map *e820) 1.8 +{ 1.9 + unsigned long kdump_start = kexec_crash_area.start; 1.10 + unsigned long kdump_size = kexec_crash_area.size; 1.11 + static int is_reserved = 0; 1.12 + 1.13 + kdump_size = (kdump_size + PAGE_SIZE - 1) & PAGE_MASK; 1.14 + 1.15 + if ( (kdump_start == 0) || (kdump_size == 0) || is_reserved ) 1.16 + return; 1.17 + 1.18 + is_reserved = 1; 1.19 + 1.20 + if ( !reserve_e820_ram(e820, kdump_start, kdump_size) ) 1.21 + { 1.22 + printk("Kdump: DISABLED (failed to reserve %luMB (%lukB) at 0x%lx)" 1.23 + "\n", kdump_size >> 20, kdump_size >> 10, kdump_start); 1.24 + kexec_crash_area.start = kexec_crash_area.size = 0; 1.25 + } 1.26 + else 1.27 + { 1.28 + printk("Kdump: %luMB (%lukB) at 0x%lx\n", 1.29 + kdump_size >> 20, kdump_size >> 10, kdump_start); 1.30 + } 1.31 +} 1.32 + 1.33 void init_done(void) 1.34 { 1.35 extern char __init_begin[], __init_end[]; 1.36 @@ -571,27 +597,11 @@ void __init __start_xen(unsigned long mb 1.37 /* Sanitise the raw E820 map to produce a final clean version. */ 1.38 max_page = init_e820(memmap_type, e820_raw, &e820_raw_nr); 1.39 1.40 - /* 1.41 - * Create a temporary copy of the E820 map. Truncate it to above 16MB 1.42 - * as anything below that is already mapped and has a statically-allocated 1.43 - * purpose. 1.44 - */ 1.45 + /* Create a temporary copy of the E820 map. 
*/ 1.46 memcpy(&boot_e820, &e820, sizeof(e820)); 1.47 - for ( i = 0; i < boot_e820.nr_map; i++ ) 1.48 - { 1.49 - uint64_t s, e, min = 16 << 20; /* 16MB */ 1.50 - s = boot_e820.map[i].addr; 1.51 - e = boot_e820.map[i].addr + boot_e820.map[i].size; 1.52 - if ( s >= min ) 1.53 - continue; 1.54 - if ( e > min ) 1.55 - { 1.56 - boot_e820.map[i].addr = min; 1.57 - boot_e820.map[i].size = e - min; 1.58 - } 1.59 - else 1.60 - boot_e820.map[i].type = E820_RESERVED; 1.61 - } 1.62 + 1.63 + /* Early kexec reservation (explicit static start address). */ 1.64 + kexec_reserve_area(&boot_e820); 1.65 1.66 /* 1.67 * Iterate backwards over all superpage-aligned RAM regions. 1.68 @@ -611,9 +621,10 @@ void __init __start_xen(unsigned long mb 1.69 { 1.70 uint64_t s, e, mask = (1UL << L2_PAGETABLE_SHIFT) - 1; 1.71 1.72 - /* Superpage-aligned chunks up to BOOTSTRAP_DIRECTMAP_END, please. */ 1.73 + /* Superpage-aligned chunks from 16MB to BOOTSTRAP_DIRECTMAP_END. */ 1.74 s = (boot_e820.map[i].addr + mask) & ~mask; 1.75 e = (boot_e820.map[i].addr + boot_e820.map[i].size) & ~mask; 1.76 + s = max_t(uint64_t, s, 16 << 20); 1.77 e = min_t(uint64_t, e, BOOTSTRAP_DIRECTMAP_END); 1.78 if ( (boot_e820.map[i].type != E820_RAM) || (s >= e) ) 1.79 continue; 1.80 @@ -716,10 +727,7 @@ void __init __start_xen(unsigned long mb 1.81 EARLY_FAIL("Not enough memory to relocate the dom0 kernel image.\n"); 1.82 reserve_e820_ram(&boot_e820, initial_images_start, initial_images_end); 1.83 1.84 - /* 1.85 - * With modules (and Xen itself, on x86/64) relocated out of the way, we 1.86 - * can now initialise the boot allocator with some memory. 1.87 - */ 1.88 + /* Initialise Xen heap and boot heap. 
*/ 1.89 xenheap_phys_start = init_boot_allocator(__pa(&_end)); 1.90 xenheap_phys_end = opt_xenheap_megabytes << 20; 1.91 #if defined(CONFIG_X86_64) 1.92 @@ -728,30 +736,10 @@ void __init __start_xen(unsigned long mb 1.93 xenheap_phys_end += xen_phys_start; 1.94 reserve_e820_ram(&boot_e820, xen_phys_start, 1.95 xen_phys_start + (opt_xenheap_megabytes<<20)); 1.96 - init_boot_pages(1<<20, 16<<20); /* Initial seed: 15MB */ 1.97 -#else 1.98 - init_boot_pages(xenheap_phys_end, 16<<20); /* Initial seed: 4MB */ 1.99 #endif 1.100 1.101 - if ( kexec_crash_area.size != 0 ) 1.102 - { 1.103 - unsigned long kdump_start = kexec_crash_area.start; 1.104 - unsigned long kdump_size = kexec_crash_area.size; 1.105 - 1.106 - kdump_size = (kdump_size + PAGE_SIZE - 1) & PAGE_MASK; 1.107 - 1.108 - if ( !reserve_e820_ram(&boot_e820, kdump_start, kdump_size) ) 1.109 - { 1.110 - printk("Kdump: DISABLED (failed to reserve %luMB (%lukB) at 0x%lx)" 1.111 - "\n", kdump_size >> 20, kdump_size >> 10, kdump_start); 1.112 - kexec_crash_area.start = kexec_crash_area.size = 0; 1.113 - } 1.114 - else 1.115 - { 1.116 - printk("Kdump: %luMB (%lukB) at 0x%lx\n", 1.117 - kdump_size >> 20, kdump_size >> 10, kdump_start); 1.118 - } 1.119 - } 1.120 + /* Late kexec reservation (dynamic start address). */ 1.121 + kexec_reserve_area(&boot_e820); 1.122 1.123 /* 1.124 * With the boot allocator now seeded, we can walk every RAM region and 1.125 @@ -760,25 +748,40 @@ void __init __start_xen(unsigned long mb 1.126 */ 1.127 for ( i = 0; i < boot_e820.nr_map; i++ ) 1.128 { 1.129 - uint64_t s, e, map_e, mask = PAGE_SIZE - 1; 1.130 + uint64_t s, e, map_s, map_e, mask = PAGE_SIZE - 1; 1.131 1.132 /* Only page alignment required now. 
*/ 1.133 s = (boot_e820.map[i].addr + mask) & ~mask; 1.134 e = (boot_e820.map[i].addr + boot_e820.map[i].size) & ~mask; 1.135 +#if defined(CONFIG_X86_32) 1.136 + s = max_t(uint64_t, s, xenheap_phys_end); 1.137 +#else 1.138 + s = max_t(uint64_t, s, 1<<20); 1.139 +#endif 1.140 if ( (boot_e820.map[i].type != E820_RAM) || (s >= e) ) 1.141 continue; 1.142 1.143 - /* Perform the mapping (truncated in 32-bit mode). */ 1.144 + /* Need to create mappings above 16MB. */ 1.145 + map_s = max_t(uint64_t, s, 16<<20); 1.146 map_e = e; 1.147 -#if defined(CONFIG_X86_32) 1.148 +#if defined(CONFIG_X86_32) /* mappings are truncated on x86_32 */ 1.149 map_e = min_t(uint64_t, map_e, BOOTSTRAP_DIRECTMAP_END); 1.150 #endif 1.151 - if ( s < map_e ) 1.152 + 1.153 + /* Pass mapped memory to allocator /before/ creating new mappings. */ 1.154 + if ( s < map_s ) 1.155 + init_boot_pages(s, map_s); 1.156 + 1.157 + /* Create new mappings /before/ passing memory to the allocator. */ 1.158 + if ( map_s < map_e ) 1.159 map_pages_to_xen( 1.160 - (unsigned long)maddr_to_bootstrap_virt(s), 1.161 - s >> PAGE_SHIFT, (map_e-s) >> PAGE_SHIFT, PAGE_HYPERVISOR); 1.162 + (unsigned long)maddr_to_bootstrap_virt(map_s), 1.163 + map_s >> PAGE_SHIFT, (map_e-map_s) >> PAGE_SHIFT, 1.164 + PAGE_HYPERVISOR); 1.165 1.166 - init_boot_pages(s, e); 1.167 + /* Pass remainder of this memory chunk to the allocator. */ 1.168 + if ( map_s < e ) 1.169 + init_boot_pages(map_s, e); 1.170 } 1.171 1.172 memguard_init();