debuggers.hg

changeset 20011:5adc108c0085

page allocator: add mfn_valid() check to free_heap_pages() and scrub_heap_pages()

The changesets 19913:ef38784f9f85 and 19914:d6c1d7992f43 eliminate the
boot allocator bitmap, which was also used as the buddy allocator bitmap.
With those patches, xen/ia64 doesn't boot because the page allocator
touches struct page_info entries which don't exist.
That happens because memory is populated sparsely on ia64,
and the struct page_info array is likewise sparse.

This patch fixes the ia64 boot failure.
In fact, this is also a potential bug on x86. max_page seems
to be well aligned, so the MAX_ORDER loop check prevented
the bug from appearing.

- fix free_heap_pages().
When merging chunks, the buddy's struct page_info doesn't always exist,
so check it with mfn_valid().

- fix scrub_heap_pages().
On ia64 the struct page_info array is sparsely populated, so struct
page_info doesn't always exist. Check it with mfn_valid().

- offline_pages(), online_pages() and query_page_offline()
Also replace "< max_page" check with mfn_valid() for consistency.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author Keir Fraser <keir.fraser@citrix.com>
date Wed Jul 22 14:06:21 2009 +0100 (2009-07-22)
parents 091036b8dbb9
children 9ced12c28e05
files xen/common/page_alloc.c
line diff
     1.1 --- a/xen/common/page_alloc.c	Wed Jul 22 14:05:26 2009 +0100
     1.2 +++ b/xen/common/page_alloc.c	Wed Jul 22 14:06:21 2009 +0100
     1.3 @@ -507,7 +507,8 @@ static void free_heap_pages(
     1.4          if ( (page_to_mfn(pg) & mask) )
     1.5          {
     1.6              /* Merge with predecessor block? */
     1.7 -            if ( !page_state_is(pg-mask, free) ||
     1.8 +            if ( !mfn_valid(page_to_mfn(pg-mask)) ||
     1.9 +                 !page_state_is(pg-mask, free) ||
    1.10                   (PFN_ORDER(pg-mask) != order) )
    1.11                  break;
    1.12              pg -= mask;
    1.13 @@ -516,7 +517,8 @@ static void free_heap_pages(
    1.14          else
    1.15          {
    1.16              /* Merge with successor block? */
    1.17 -            if ( !page_state_is(pg+mask, free) ||
    1.18 +            if ( !mfn_valid(page_to_mfn(pg+mask)) ||
    1.19 +                 !page_state_is(pg+mask, free) ||
    1.20                   (PFN_ORDER(pg+mask) != order) )
    1.21                  break;
    1.22              page_list_del(pg + mask, &heap(node, zone, order));
    1.23 @@ -608,7 +610,7 @@ int offline_page(unsigned long mfn, int 
    1.24      int ret = 0;
    1.25      struct page_info *pg;
    1.26  
    1.27 -    if ( mfn > max_page )
     1.28 +    if ( !mfn_valid(mfn) )
    1.29      {
    1.30          dprintk(XENLOG_WARNING,
    1.31                  "try to offline page out of range %lx\n", mfn);
    1.32 @@ -694,7 +696,7 @@ unsigned int online_page(unsigned long m
    1.33      struct page_info *pg;
    1.34      int ret;
    1.35  
    1.36 -    if ( mfn > max_page )
    1.37 +    if ( !mfn_valid(mfn) )
    1.38      {
    1.39          dprintk(XENLOG_WARNING, "call expand_pages() first\n");
    1.40          return -EINVAL;
    1.41 @@ -745,7 +747,7 @@ int query_page_offline(unsigned long mfn
    1.42  {
    1.43      struct page_info *pg;
    1.44  
    1.45 -    if ( (mfn > max_page) || !page_is_ram_type(mfn, RAM_TYPE_CONVENTIONAL) )
    1.46 +    if ( !mfn_valid(mfn) || !page_is_ram_type(mfn, RAM_TYPE_CONVENTIONAL) )
    1.47      {
    1.48          dprintk(XENLOG_WARNING, "call expand_pages() first\n");
    1.49          return -EINVAL;
    1.50 @@ -886,7 +888,7 @@ void __init scrub_heap_pages(void)
    1.51          pg = mfn_to_page(mfn);
    1.52  
    1.53          /* Quick lock-free check. */
    1.54 -        if ( !page_state_is(pg, free) )
    1.55 +        if ( !mfn_valid(mfn) || !page_state_is(pg, free) )
    1.56              continue;
    1.57  
    1.58          /* Every 100MB, print a progress dot. */