
changeset 19952:d6c1d7992f43

Replace boot-time free-pages bitmap with a region list.

Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Wed Jul 08 22:08:31 2009 +0100 (2009-07-08)
parents ef38784f9f85
children 4b6e4bb7b7b4
files xen/arch/ia64/xen/xenmem.c xen/arch/ia64/xen/xensetup.c xen/arch/x86/mm.c xen/arch/x86/setup.c xen/arch/x86/tboot.c xen/arch/x86/x86_64/mm.c xen/common/kexec.c xen/common/page_alloc.c xen/include/asm-x86/mm.h xen/include/xen/mm.h
line diff
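
This patch drops the boot allocator's page-granularity bitmap (one bit per page, sized max_page/8 and marked all-allocated by default) in favour of a sorted, disjoint list of free-MFN regions. The list occupies a single page taken from the first free range passed in, so it is bounded at PAGE_SIZE / sizeof(struct bootmem_region) entries, but there is no longer a bitmap to size and place before the memory map is known, and init_boot_allocator() disappears entirely. Allocation carves from the highest suitable region, splitting it if necessary; end_boot_allocator() then hands each surviving region to the heap allocator in a single call and finally frees the page holding the list itself. A minimal userspace model of the new bookkeeping, condensed from the page_alloc.c hunks below (the fixed-size array, main() and printf() are illustrative stand-ins, not part of the patch):

    /* Userspace model of the bootmem region list introduced by this patch.
     * Regions are kept sorted by start MFN and never overlap. */
    #include <assert.h>
    #include <stdio.h>
    #include <string.h>

    struct bootmem_region { unsigned long s, e; }; /* MFNs [s, e) are free */

    static struct bootmem_region list[32];         /* the patch uses one page */
    static unsigned int nr;

    /* Insert [s, e) keeping the list sorted and disjoint. */
    static void region_add(unsigned long s, unsigned long e)
    {
        unsigned int i;
        if (s >= e)
            return;
        for (i = 0; i < nr; i++)
            if (s < list[i].e)
                break;
        assert(i == nr || e <= list[i].s);         /* no overlap allowed */
        memmove(&list[i + 1], &list[i], (nr - i) * sizeof(*list));
        list[i] = (struct bootmem_region){ s, e };
        nr++;
    }

    /* Allocate nr_pfns aligned pages from the highest suitable region,
     * splitting that region around the carved-out range, as
     * alloc_boot_pages() now does. */
    static unsigned long region_alloc(unsigned long nr_pfns, unsigned long align)
    {
        int i;
        for (i = (int)nr - 1; i >= 0; i--) {
            unsigned long pg = (list[i].e - nr_pfns) & ~(align - 1);
            if (pg < list[i].s)
                continue;
            unsigned long e = list[i].e;
            list[i].e = pg;                        /* keep the low remainder */
            region_add(pg + nr_pfns, e);           /* re-add the high remainder */
            return pg;
        }
        return ~0UL;                               /* the patch BOOT_BUG_ON()s here */
    }

    int main(void)
    {
        region_add(0x100, 0x200);
        region_add(0x400, 0x800);
        unsigned long pg = region_alloc(4, 4);
        printf("allocated MFN %#lx, %u regions remain\n", pg, nr);
        return 0;
    }
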
     1.1 --- a/xen/arch/ia64/xen/xenmem.c	Wed Jul 08 16:47:58 2009 +0100
     1.2 +++ b/xen/arch/ia64/xen/xenmem.c	Wed Jul 08 22:08:31 2009 +0100
     1.3 @@ -87,8 +87,6 @@ alloc_dir_page(void)
     1.4  {
     1.5  	unsigned long mfn = alloc_boot_pages(1, 1);
     1.6  	unsigned long dir;
     1.7 -	if (!mfn)
     1.8 -		panic("Not enough memory for virtual frame table!\n");
     1.9  	++table_size;
    1.10  	dir = mfn << PAGE_SHIFT;
    1.11  	clear_page(__va(dir));
    1.12 @@ -101,8 +99,6 @@ alloc_table_page(unsigned long fill)
    1.13  	unsigned long mfn = alloc_boot_pages(1, 1);
    1.14  	unsigned long *table;
    1.15  	unsigned long i;
    1.16 -	if (!mfn)
    1.17 -		panic("Not enough memory for virtual frame table!\n");
    1.18  	++table_size;
    1.19  	table = (unsigned long *)__va((mfn << PAGE_SHIFT));
    1.20  	for (i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++)
    1.21 @@ -245,8 +241,6 @@ void __init init_frametable(void)
    1.22  	 * address is identity mapped */
    1.23  	pfn = alloc_boot_pages(
    1.24              frame_table_size >> PAGE_SHIFT, FT_ALIGN_SIZE >> PAGE_SHIFT);
    1.25 -	if (pfn == 0)
    1.26 -		panic("Not enough memory for frame table.\n");
    1.27  
    1.28  	frame_table = __va(pfn << PAGE_SHIFT);
    1.29  	memset(frame_table, 0, frame_table_size);
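
The ia64 hunks above, together with the x86 init_frametable() and x86-64 alloc_xen_pagetable() hunks further down, all delete the same dead check: alloc_boot_pages() can no longer return 0, because on exhaustion it now panics internally via BOOT_BUG_ON(1) (hunk 8.231 below). The caller pattern after the patch, shown as a fragment assuming the surrounding Xen boot context:

    /* Fragment (kernel context): the result is used unconditionally;
     * on exhaustion alloc_boot_pages() now panics inside the allocator
     * instead of returning 0. */
    unsigned long mfn = alloc_boot_pages(1, 1);
    clear_page(__va(mfn << PAGE_SHIFT));    /* no mfn == 0 check needed */
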
     2.1 --- a/xen/arch/ia64/xen/xensetup.c	Wed Jul 08 16:47:58 2009 +0100
     2.2 +++ b/xen/arch/ia64/xen/xensetup.c	Wed Jul 08 22:08:31 2009 +0100
     2.3 @@ -514,9 +514,7 @@ skip_move:
     2.4      efi_print();
     2.5      
     2.6      xen_heap_start = memguard_init(ia64_imva(&_end));
     2.7 -    printk("Before xen_heap_start: %p\n", xen_heap_start);
     2.8 -    xen_heap_start = __va(init_boot_allocator(__pa(xen_heap_start)));
     2.9 -    printk("After xen_heap_start: %p\n", xen_heap_start);
    2.10 +    printk("xen_heap_start: %p\n", xen_heap_start);
    2.11  
    2.12      efi_memmap_walk(filter_rsvd_memory, init_boot_pages);
    2.13      efi_memmap_walk(xen_count_pages, &nr_pages);
     3.1 --- a/xen/arch/x86/mm.c	Wed Jul 08 16:47:58 2009 +0100
     3.2 +++ b/xen/arch/x86/mm.c	Wed Jul 08 22:08:31 2009 +0100
     3.3 @@ -199,8 +199,6 @@ void __init init_frametable(void)
     3.4          while (nr_pages + 4 - i < page_step)
     3.5              page_step >>= PAGETABLE_ORDER;
     3.6          mfn = alloc_boot_pages(page_step, page_step);
     3.7 -        if ( mfn == 0 )
     3.8 -            panic("Not enough memory for frame table\n");
     3.9          map_pages_to_xen(
    3.10              FRAMETABLE_VIRT_START + (i << PAGE_SHIFT),
    3.11              mfn, page_step, PAGE_HYPERVISOR);
     4.1 --- a/xen/arch/x86/setup.c	Wed Jul 08 16:47:58 2009 +0100
     4.2 +++ b/xen/arch/x86/setup.c	Wed Jul 08 22:08:31 2009 +0100
     4.3 @@ -97,7 +97,6 @@ int early_boot = 1;
     4.4  cpumask_t cpu_present_map;
     4.5  
     4.6  unsigned long xen_phys_start;
     4.7 -unsigned long allocator_bitmap_end;
     4.8  
     4.9  #ifdef CONFIG_X86_32
    4.10  /* Limits of Xen heap, used to initialise the allocator. */
    4.11 @@ -764,24 +763,21 @@ void __init __start_xen(unsigned long mb
    4.12          EARLY_FAIL("Not enough memory to relocate the dom0 kernel image.\n");
    4.13      reserve_e820_ram(&boot_e820, initial_images_base, initial_images_end);
    4.14  
    4.15 -    /* Initialise boot heap. */
    4.16 -    allocator_bitmap_end = init_boot_allocator(__pa(&_end));
    4.17  #if defined(CONFIG_X86_32)
    4.18 -    xenheap_initial_phys_start = allocator_bitmap_end;
    4.19 +    xenheap_initial_phys_start = __pa(&_end);
    4.20      xenheap_phys_end = DIRECTMAP_MBYTES << 20;
    4.21  #else
    4.22      if ( !xen_phys_start )
    4.23          EARLY_FAIL("Not enough memory to relocate Xen.\n");
    4.24 -    reserve_e820_ram(&boot_e820, __pa(&_start), allocator_bitmap_end);
    4.25 +    reserve_e820_ram(&boot_e820, __pa(&_start), __pa(&_end));
    4.26  #endif
    4.27  
    4.28      /* Late kexec reservation (dynamic start address). */
    4.29      kexec_reserve_area(&boot_e820);
    4.30  
    4.31      /*
    4.32 -     * With the boot allocator now initialised, we can walk every RAM region
    4.33 -     * and map it in its entirety (on x86/64, at least) and notify it to the
    4.34 -     * boot allocator.
    4.35 +     * Walk every RAM region and map it in its entirety (on x86/64, at least)
    4.36 +     * and notify it to the boot allocator.
    4.37       */
    4.38      for ( i = 0; i < boot_e820.nr_map; i++ )
    4.39      {
    4.40 @@ -1132,7 +1128,6 @@ int xen_in_range(paddr_t start, paddr_t 
    4.41      if ( !xen_regions[0].s )
    4.42      {
    4.43          extern char __init_begin[], __bss_start[];
    4.44 -        extern unsigned long allocator_bitmap_end;
    4.45  
    4.46          /* S3 resume code (and other real mode trampoline code) */
    4.47          xen_regions[0].s = bootsym_phys(trampoline_start);
    4.48 @@ -1144,9 +1139,9 @@ int xen_in_range(paddr_t start, paddr_t 
    4.49          xen_regions[2].s = __pa(&__per_cpu_start);
    4.50          xen_regions[2].e = xen_regions[2].s +
    4.51              (((paddr_t)last_cpu(cpu_possible_map) + 1) << PERCPU_SHIFT);
    4.52 -        /* bss + boot allocator bitmap */
    4.53 +        /* bss */
    4.54          xen_regions[3].s = __pa(&__bss_start);
    4.55 -        xen_regions[3].e = allocator_bitmap_end;
    4.56 +        xen_regions[3].e = __pa(&_end);
    4.57      }
    4.58  
    4.59      for ( i = 0; i < ARRAY_SIZE(xen_regions); i++ )
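
With no bitmap tail past the image, the x86 boot accounting shrinks to the linker symbols alone: 32-bit Xen's heap starts at __pa(&_end), 64-bit Xen reserves just [__pa(&_start), __pa(&_end)] in the e820 map, and the last fixed region probed by xen_in_range() is the bss by itself:

    /* Fragment: the fourth fixed region now ends at the image's _end
     * symbol; the bitmap pages that used to follow bss are gone. */
    xen_regions[3].s = __pa(&__bss_start);
    xen_regions[3].e = __pa(&_end);
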
     5.1 --- a/xen/arch/x86/tboot.c	Wed Jul 08 16:47:58 2009 +0100
     5.2 +++ b/xen/arch/x86/tboot.c	Wed Jul 08 22:08:31 2009 +0100
     5.3 @@ -47,7 +47,6 @@ static uint64_t sinit_base, sinit_size;
     5.4  #define TXTCR_HEAP_SIZE             0x0308
     5.5  
     5.6  extern char __init_begin[], __per_cpu_start[], __bss_start[];
     5.7 -extern unsigned long allocator_bitmap_end;
     5.8  
     5.9  #define SHA1_SIZE      20
    5.10  typedef uint8_t   sha1_hash_t[SHA1_SIZE];
    5.11 @@ -299,7 +298,7 @@ void tboot_shutdown(uint32_t shutdown_ty
    5.12          /*
    5.13           * Xen regions for tboot to MAC
    5.14           */
    5.15 -        g_tboot_shared->num_mac_regions = 5;
    5.16 +        g_tboot_shared->num_mac_regions = 4;
    5.17          /* S3 resume code (and other real mode trampoline code) */
    5.18          g_tboot_shared->mac_regions[0].start = bootsym_phys(trampoline_start);
    5.19          g_tboot_shared->mac_regions[0].size = bootsym_phys(trampoline_end) -
    5.20 @@ -315,10 +314,6 @@ void tboot_shutdown(uint32_t shutdown_ty
    5.21          /* bss */
    5.22          g_tboot_shared->mac_regions[3].start = (uint64_t)__pa(&__bss_start);
    5.23          g_tboot_shared->mac_regions[3].size = __pa(&_end) - __pa(&__bss_start);
    5.24 -        /* boot allocator bitmap */
    5.25 -        g_tboot_shared->mac_regions[4].start = (uint64_t)__pa(&_end);
    5.26 -        g_tboot_shared->mac_regions[4].size = allocator_bitmap_end -
    5.27 -                                              __pa(&_end);
    5.28  
    5.29          /*
    5.30           * MAC domains and other Xen memory
     6.1 --- a/xen/arch/x86/x86_64/mm.c	Wed Jul 08 16:47:58 2009 +0100
     6.2 +++ b/xen/arch/x86/x86_64/mm.c	Wed Jul 08 22:08:31 2009 +0100
     6.3 @@ -68,7 +68,6 @@ void *alloc_xen_pagetable(void)
     6.4      }
     6.5  
     6.6      mfn = alloc_boot_pages(1, 1);
     6.7 -    BUG_ON(mfn == 0);
     6.8      return mfn_to_virt(mfn);
     6.9  }
    6.10  
     7.1 --- a/xen/common/kexec.c	Wed Jul 08 16:47:58 2009 +0100
     7.2 +++ b/xen/common/kexec.c	Wed Jul 08 22:08:31 2009 +0100
     7.3 @@ -334,7 +334,6 @@ static void crash_save_vmcoreinfo(void)
     7.4  
     7.5      VMCOREINFO_SYMBOL(domain_list);
     7.6      VMCOREINFO_SYMBOL(frame_table);
     7.7 -    VMCOREINFO_SYMBOL(alloc_bitmap);
     7.8      VMCOREINFO_SYMBOL(max_page);
     7.9  
    7.10      VMCOREINFO_STRUCT_SIZE(page_info);
     8.1 --- a/xen/common/page_alloc.c	Wed Jul 08 16:47:58 2009 +0100
     8.2 +++ b/xen/common/page_alloc.c	Wed Jul 08 22:08:31 2009 +0100
     8.3 @@ -69,117 +69,78 @@ PAGE_LIST_HEAD(page_offlined_list);
     8.4  /* Broken page list, protected by heap_lock. */
     8.5  PAGE_LIST_HEAD(page_broken_list);
     8.6  
     8.7 -/*********************
     8.8 - * ALLOCATION BITMAP
     8.9 - *  One bit per page of memory. Bit set => page is allocated.
    8.10 - */
    8.11 -
    8.12 -unsigned long *alloc_bitmap;
    8.13 -#define PAGES_PER_MAPWORD (sizeof(unsigned long) * 8)
    8.14 -
    8.15 -#define allocated_in_map(_pn)                       \
    8.16 -({  unsigned long ___pn = (_pn);                    \
    8.17 -    !!(alloc_bitmap[___pn/PAGES_PER_MAPWORD] &      \
    8.18 -       (1UL<<(___pn&(PAGES_PER_MAPWORD-1)))); })
    8.19 -
    8.20 -/*
    8.21 - * Hint regarding bitwise arithmetic in map_{alloc,free}:
    8.22 - *  -(1<<n)  sets all bits >= n. 
    8.23 - *  (1<<n)-1 sets all bits <  n.
    8.24 - * Variable names in map_{alloc,free}:
    8.25 - *  *_idx == Index into `alloc_bitmap' array.
    8.26 - *  *_off == Bit offset within an element of the `alloc_bitmap' array.
    8.27 - */
    8.28 -
    8.29 -static void map_alloc(unsigned long first_page, unsigned long nr_pages)
    8.30 -{
    8.31 -    unsigned long start_off, end_off, curr_idx, end_idx;
    8.32 -
    8.33 -#ifndef NDEBUG
    8.34 -    unsigned long i;
    8.35 -    /* Check that the block isn't already allocated. */
    8.36 -    for ( i = 0; i < nr_pages; i++ )
    8.37 -        ASSERT(!allocated_in_map(first_page + i));
    8.38 -#endif
    8.39 -
    8.40 -    curr_idx  = first_page / PAGES_PER_MAPWORD;
    8.41 -    start_off = first_page & (PAGES_PER_MAPWORD-1);
    8.42 -    end_idx   = (first_page + nr_pages) / PAGES_PER_MAPWORD;
    8.43 -    end_off   = (first_page + nr_pages) & (PAGES_PER_MAPWORD-1);
    8.44 -
    8.45 -    if ( curr_idx == end_idx )
    8.46 -    {
    8.47 -        alloc_bitmap[curr_idx] |= ((1UL<<end_off)-1) & -(1UL<<start_off);
    8.48 -    }
    8.49 -    else 
    8.50 -    {
    8.51 -        alloc_bitmap[curr_idx] |= -(1UL<<start_off);
    8.52 -        while ( ++curr_idx < end_idx ) alloc_bitmap[curr_idx] = ~0UL;
    8.53 -        alloc_bitmap[curr_idx] |= (1UL<<end_off)-1;
    8.54 -    }
    8.55 -}
    8.56 -
    8.57 -static void map_free(unsigned long first_page, unsigned long nr_pages)
    8.58 -{
    8.59 -    unsigned long start_off, end_off, curr_idx, end_idx;
    8.60 -
    8.61 -#ifndef NDEBUG
    8.62 -    unsigned long i;
    8.63 -    /* Check that the block isn't already freed. */
    8.64 -    for ( i = 0; i < nr_pages; i++ )
    8.65 -        ASSERT(allocated_in_map(first_page + i));
    8.66 -#endif
    8.67 -
    8.68 -    curr_idx  = first_page / PAGES_PER_MAPWORD;
    8.69 -    start_off = first_page & (PAGES_PER_MAPWORD-1);
    8.70 -    end_idx   = (first_page + nr_pages) / PAGES_PER_MAPWORD;
    8.71 -    end_off   = (first_page + nr_pages) & (PAGES_PER_MAPWORD-1);
    8.72 -
    8.73 -    if ( curr_idx == end_idx )
    8.74 -    {
    8.75 -        alloc_bitmap[curr_idx] &= -(1UL<<end_off) | ((1UL<<start_off)-1);
    8.76 -    }
    8.77 -    else 
    8.78 -    {
    8.79 -        alloc_bitmap[curr_idx] &= (1UL<<start_off)-1;
    8.80 -        while ( ++curr_idx != end_idx ) alloc_bitmap[curr_idx] = 0;
    8.81 -        alloc_bitmap[curr_idx] &= -(1UL<<end_off);
    8.82 -    }
    8.83 -}
    8.84 -
    8.85 -
    8.86 -
    8.87  /*************************
    8.88   * BOOT-TIME ALLOCATOR
    8.89   */
    8.90  
    8.91 -static unsigned long first_valid_mfn = ~0UL;
    8.92 +static unsigned long __initdata first_valid_mfn = ~0UL;
    8.93 +
    8.94 +static struct bootmem_region {
    8.95 +    unsigned long s, e; /* MFNs @s through @e-1 inclusive are free */
    8.96 +} *__initdata bootmem_region_list;
    8.97 +static unsigned int __initdata nr_bootmem_regions;
    8.98 +
    8.99 +static void __init boot_bug(int line)
   8.100 +{
   8.101 +    panic("Boot BUG at %s:%d\n", __FILE__, line);
   8.102 +}
   8.103 +#define BOOT_BUG_ON(p) if ( p ) boot_bug(__LINE__);
   8.104  
   8.105 -/* Initialise allocator to handle up to @max_page pages. */
   8.106 -paddr_t __init init_boot_allocator(paddr_t bitmap_start)
   8.107 +static void __init bootmem_region_add(unsigned long s, unsigned long e)
   8.108  {
   8.109 -    unsigned long bitmap_size;
   8.110 +    unsigned int i;
   8.111 +
   8.112 +    if ( (bootmem_region_list == NULL) && (s < e) )
   8.113 +        bootmem_region_list = mfn_to_virt(s++);
   8.114 +
   8.115 +    if ( s >= e )
   8.116 +        return;
   8.117  
   8.118 -    bitmap_start = round_pgup(bitmap_start);
   8.119 +    for ( i = 0; i < nr_bootmem_regions; i++ )
   8.120 +        if ( s < bootmem_region_list[i].e )
   8.121 +            break;
   8.122 +
   8.123 +    BOOT_BUG_ON((i < nr_bootmem_regions) && (e > bootmem_region_list[i].s));
   8.124 +    BOOT_BUG_ON(nr_bootmem_regions ==
   8.125 +                (PAGE_SIZE / sizeof(struct bootmem_region)));
   8.126  
   8.127 -    /*
   8.128 -     * Allocate space for the allocation bitmap. Include an extra longword
   8.129 -     * of padding for possible overrun in map_alloc and map_free.
   8.130 -     */
   8.131 -    bitmap_size  = max_page / 8;
   8.132 -    bitmap_size += sizeof(unsigned long);
   8.133 -    bitmap_size  = round_pgup(bitmap_size);
   8.134 -    alloc_bitmap = (unsigned long *)maddr_to_virt(bitmap_start);
   8.135 +    memmove(&bootmem_region_list[i+1], &bootmem_region_list[i],
   8.136 +            (nr_bootmem_regions - i) * sizeof(*bootmem_region_list));
   8.137 +    bootmem_region_list[i] = (struct bootmem_region) { s, e };
   8.138 +    nr_bootmem_regions++;
   8.139 +}
   8.140 +
   8.141 +static void __init bootmem_region_zap(unsigned long s, unsigned long e)
   8.142 +{
   8.143 +    unsigned int i;
   8.144  
   8.145 -    /* All allocated by default. */
   8.146 -    memset(alloc_bitmap, ~0, bitmap_size);
   8.147 -
   8.148 -    return bitmap_start + bitmap_size;
   8.149 +    for ( i = 0; i < nr_bootmem_regions; i++ )
   8.150 +    {
   8.151 +        struct bootmem_region *r = &bootmem_region_list[i];
   8.152 +        if ( e <= r->s )
   8.153 +            break;
   8.154 +        if ( s >= r->e )
   8.155 +            continue;
   8.156 +        if ( s <= r->s )
   8.157 +        {
   8.158 +            r->s = min(e, r->e);
   8.159 +        }
   8.160 +        else if ( e >= r->e )
   8.161 +        {
   8.162 +            r->e = s;
   8.163 +        }
   8.164 +        else
   8.165 +        {
   8.166 +            unsigned long _e = r->e;
   8.167 +            r->e = s;
   8.168 +            bootmem_region_add(e, _e);
   8.169 +        }
   8.170 +    }
   8.171  }
   8.172  
   8.173  void __init init_boot_pages(paddr_t ps, paddr_t pe)
   8.174  {
   8.175 -    unsigned long bad_spfn, bad_epfn, i;
   8.176 +    unsigned long bad_spfn, bad_epfn;
   8.177      const char *p;
   8.178  
   8.179      ps = round_pgup(ps);
   8.180 @@ -189,7 +150,7 @@ void __init init_boot_pages(paddr_t ps, 
   8.181  
   8.182      first_valid_mfn = min_t(unsigned long, ps >> PAGE_SHIFT, first_valid_mfn);
   8.183  
   8.184 -    map_free(ps >> PAGE_SHIFT, (pe - ps) >> PAGE_SHIFT);
   8.185 +    bootmem_region_add(ps >> PAGE_SHIFT, pe >> PAGE_SHIFT);
   8.186  
   8.187      /* Check new pages against the bad-page list. */
   8.188      p = opt_badpage;
   8.189 @@ -217,32 +178,29 @@ void __init init_boot_pages(paddr_t ps, 
   8.190              printk("Marking pages %lx through %lx as bad\n",
   8.191                     bad_spfn, bad_epfn);
   8.192  
   8.193 -        for ( i = bad_spfn; i <= bad_epfn; i++ )
   8.194 -            if ( (i < max_page) && !allocated_in_map(i) )
   8.195 -                map_alloc(i, 1);
   8.196 +        bootmem_region_zap(bad_spfn, bad_epfn+1);
   8.197      }
   8.198  }
   8.199  
   8.200  unsigned long __init alloc_boot_pages(
   8.201      unsigned long nr_pfns, unsigned long pfn_align)
   8.202  {
   8.203 -    unsigned long pg, i;
   8.204 +    unsigned long pg, _e;
   8.205 +    int i;
   8.206  
   8.207 -    /* Search backwards to obtain highest available range. */
   8.208 -    for ( pg = (max_page - nr_pfns) & ~(pfn_align - 1);
   8.209 -          pg >= first_valid_mfn;
   8.210 -          pg = (pg + i - nr_pfns) & ~(pfn_align - 1) )
   8.211 +    for ( i = nr_bootmem_regions - 1; i >= 0; i-- )
   8.212      {
   8.213 -        for ( i = 0; i < nr_pfns; i++ )
   8.214 -            if ( allocated_in_map(pg+i) )
   8.215 -                break;
   8.216 -        if ( i == nr_pfns )
   8.217 -        {
   8.218 -            map_alloc(pg, nr_pfns);
   8.219 -            return pg;
   8.220 -        }
   8.221 +        struct bootmem_region *r = &bootmem_region_list[i];
   8.222 +        pg = (r->e - nr_pfns) & ~(pfn_align - 1);
   8.223 +        if ( pg < r->s )
   8.224 +            continue;
   8.225 +        _e = r->e;
   8.226 +        r->e = pg;
   8.227 +        bootmem_region_add(pg + nr_pfns, _e);
   8.228 +        return pg;
   8.229      }
   8.230  
   8.231 +    BOOT_BUG_ON(1);
   8.232      return 0;
   8.233  }
   8.234  
   8.235 @@ -660,12 +618,7 @@ int offline_page(unsigned long mfn, int 
   8.236      *status = 0;
   8.237      pg = mfn_to_page(mfn);
   8.238  
   8.239 -#if defined(__x86_64__)
   8.240 -     /* Xen's txt mfn in x86_64 is reserved in e820 */
   8.241      if ( is_xen_fixed_mfn(mfn) )
   8.242 -#elif defined(__i386__)
   8.243 -    if ( is_xen_heap_mfn(mfn) )
   8.244 -#endif
   8.245      {
   8.246          *status = PG_OFFLINE_XENPAGE | PG_OFFLINE_FAILED |
   8.247            (DOMID_XEN << PG_OFFLINE_OWNER_SHIFT);
   8.248 @@ -673,14 +626,14 @@ int offline_page(unsigned long mfn, int 
   8.249      }
   8.250  
   8.251      /*
   8.252 -     * N.B. xen's txt in x86_64 is marked reserved and handled already
   8.253 -     *  Also kexec range is reserved
   8.254 +     * N.B. xen's txt in x86_64 is marked reserved and handled already.
   8.255 +     * Also kexec range is reserved.
   8.256       */
   8.257 -     if ( !page_is_ram_type(mfn, RAM_TYPE_CONVENTIONAL) )
   8.258 -     {
   8.259 +    if ( !page_is_ram_type(mfn, RAM_TYPE_CONVENTIONAL) )
   8.260 +    {
   8.261          *status = PG_OFFLINE_FAILED | PG_OFFLINE_NOT_CONV_RAM;
   8.262          return -EINVAL;
   8.263 -     }
   8.264 +    }
   8.265  
   8.266      spin_lock(&heap_lock);
   8.267  
   8.268 @@ -703,7 +656,7 @@ int offline_page(unsigned long mfn, int 
   8.269              /* Release the reference since it will not be allocated anymore */
   8.270              put_page(pg);
   8.271      }
   8.272 -    else if ( old_info & PGC_xen_heap)
   8.273 +    else if ( old_info & PGC_xen_heap )
   8.274      {
   8.275          *status = PG_OFFLINE_XENPAGE | PG_OFFLINE_PENDING |
   8.276            (DOMID_XEN << PG_OFFLINE_OWNER_SHIFT);
   8.277 @@ -880,31 +833,18 @@ static unsigned long avail_heap_pages(
   8.278      return free_pages;
   8.279  }
   8.280  
   8.281 -#define avail_for_domheap(mfn) !(allocated_in_map(mfn) || is_xen_heap_mfn(mfn))
   8.282  void __init end_boot_allocator(void)
   8.283  {
   8.284 -    unsigned long i, nr = 0;
   8.285 -    int curr_free, next_free;
   8.286 +    unsigned int i;
   8.287  
   8.288      /* Pages that are free now go to the domain sub-allocator. */
   8.289 -    if ( (curr_free = next_free = avail_for_domheap(first_valid_mfn)) )
   8.290 -        map_alloc(first_valid_mfn, 1);
   8.291 -    for ( i = first_valid_mfn; i < max_page; i++ )
   8.292 +    for ( i = 0; i < nr_bootmem_regions; i++ )
   8.293      {
   8.294 -        curr_free = next_free;
   8.295 -        next_free = avail_for_domheap(i+1);
   8.296 -        if ( next_free )
   8.297 -            map_alloc(i+1, 1); /* prevent merging in free_heap_pages() */
   8.298 -        if ( curr_free )
   8.299 -            ++nr;
   8.300 -        else if ( nr )
   8.301 -        {
   8.302 -            init_heap_pages(mfn_to_page(i - nr), nr);
   8.303 -            nr = 0;
   8.304 -        }
   8.305 +        struct bootmem_region *r = &bootmem_region_list[i];
   8.306 +        if ( r->s < r->e )
   8.307 +            init_heap_pages(mfn_to_page(r->s), r->e - r->s);
   8.308      }
   8.309 -    if ( nr )
   8.310 -        init_heap_pages(mfn_to_page(i - nr), nr);
   8.311 +    init_heap_pages(virt_to_page(bootmem_region_list), 1);
   8.312  
   8.313      if ( !dma_bitsize && (num_online_nodes() > 1) )
   8.314      {
   8.315 @@ -923,7 +863,6 @@ void __init end_boot_allocator(void)
   8.316          printk(" DMA width %u bits", dma_bitsize);
   8.317      printk("\n");
   8.318  }
   8.319 -#undef avail_for_domheap
   8.320  
   8.321  /*
   8.322   * Scrub all unallocated pages in all heap zones. This function is more
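
The bad-page path changes shape too: instead of setting bitmap bits one page at a time, init_boot_pages() now punches the bad range out of whatever free regions it intersects. A model of bootmem_region_zap(), reusing the list, nr and region_add() from the sketch near the top of this page (illustrative, userspace):

    /* Remove [s, e) from every region it intersects; a hit in the
     * middle splits the region in two via region_add(). */
    static void region_zap(unsigned long s, unsigned long e)
    {
        unsigned int i;
        for (i = 0; i < nr; i++) {
            struct bootmem_region *r = &list[i];
            if (e <= r->s)       /* list is sorted: nothing further can hit */
                break;
            if (s >= r->e)
                continue;
            if (s <= r->s)
                r->s = (e < r->e) ? e : r->e;    /* clip from the left */
            else if (e >= r->e)
                r->e = s;                        /* clip from the right */
            else {
                unsigned long old_e = r->e;      /* hole in the middle: split */
                r->e = s;
                region_add(e, old_e);
            }
        }
    }

For example, region_zap(0x500, 0x600) applied to a single free region [0x400, 0x800) leaves two regions, [0x400, 0x500) and [0x600, 0x800), matching what the bad-page loop used to achieve bit by bit.
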
     9.1 --- a/xen/include/asm-x86/mm.h	Wed Jul 08 16:47:58 2009 +0100
     9.2 +++ b/xen/include/asm-x86/mm.h	Wed Jul 08 22:08:31 2009 +0100
     9.3 @@ -219,14 +219,14 @@ struct page_info
     9.4      unsigned long _mfn = (mfn);                         \
     9.5      (_mfn < paddr_to_pfn(xenheap_phys_end));            \
     9.6  })
     9.7 +#define is_xen_fixed_mfn(mfn) is_xen_heap_mfn(mfn)
     9.8  #else
     9.9 -extern unsigned long allocator_bitmap_end;
    9.10  #define is_xen_heap_page(page) ((page)->count_info & PGC_xen_heap)
    9.11  #define is_xen_heap_mfn(mfn) \
    9.12      (__mfn_valid(mfn) && is_xen_heap_page(__mfn_to_page(mfn)))
    9.13 -#define is_xen_fixed_mfn(mfn) \
    9.14 -    ( (mfn << PAGE_SHIFT) >= __pa(&_start) &&    \
    9.15 -          (mfn << PAGE_SHIFT) <= allocator_bitmap_end )
    9.16 +#define is_xen_fixed_mfn(mfn)                     \
    9.17 +    ((((mfn) << PAGE_SHIFT) >= __pa(&_start)) &&  \
    9.18 +     (((mfn) << PAGE_SHIFT) <= __pa(&_end)))
    9.19  #endif
    9.20  
    9.21  #if defined(__i386__)
    10.1 --- a/xen/include/xen/mm.h	Wed Jul 08 16:47:58 2009 +0100
    10.2 +++ b/xen/include/xen/mm.h	Wed Jul 08 22:08:31 2009 +0100
    10.3 @@ -37,7 +37,6 @@ struct domain;
    10.4  struct page_info;
    10.5  
    10.6  /* Boot-time allocator. Turns into generic allocator after bootstrap. */
    10.7 -paddr_t init_boot_allocator(paddr_t bitmap_start);
    10.8  void init_boot_pages(paddr_t ps, paddr_t pe);
    10.9  unsigned long alloc_boot_pages(
   10.10      unsigned long nr_pfns, unsigned long pfn_align);
   10.11 @@ -307,9 +306,7 @@ int guest_remove_page(struct domain *d, 
   10.12  #define RAM_TYPE_RESERVED     0x00000002
   10.13  #define RAM_TYPE_UNUSABLE     0x00000004
   10.14  #define RAM_TYPE_ACPI         0x00000008
   10.15 -/* Returns TRUE if the whole page at @mfn is of the requested RAM type(s) above. */
   10.16 +/* TRUE if the whole page at @mfn is of the requested RAM type(s) above. */
   10.17  int page_is_ram_type(unsigned long mfn, unsigned long mem_type);
   10.18  
   10.19 -extern unsigned long *alloc_bitmap;	/* for vmcoreinfo */
   10.20 -
   10.21  #endif /* __XEN_MM_H__ */