debuggers.hg

changeset 20849:50bd4235f486

x86: Fix xen_in_range() for fragmented percpu data area.

Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Thu Jan 14 10:03:44 2010 +0000 (2010-01-14)
parents aaf34d74b622
children c3d3e3c8e5f4
files xen/arch/x86/setup.c
line diff
     1.1 --- a/xen/arch/x86/setup.c	Thu Jan 14 09:44:08 2010 +0000
     1.2 +++ b/xen/arch/x86/setup.c	Thu Jan 14 10:03:44 2010 +0000
     1.3 @@ -230,7 +230,7 @@ static void __init percpu_free_unused_ar
     1.4      /* Free all unused per-cpu data areas. */
     1.5      free_xen_data(&__per_cpu_start[first_unused << PERCPU_SHIFT], __bss_start);
     1.6  
     1.7 -    data_size = (data_size + PAGE_SIZE + 1) & PAGE_MASK;
     1.8 +    data_size = (data_size + PAGE_SIZE - 1) & PAGE_MASK;
     1.9      if ( data_size != PERCPU_SIZE )
    1.10          for ( i = 0; i < first_unused; i++ )
    1.11              free_xen_data(&__per_cpu_start[(i << PERCPU_SHIFT) + data_size],
    1.12 @@ -1198,33 +1198,37 @@ void arch_get_xen_caps(xen_capabilities_
    1.13  int xen_in_range(paddr_t start, paddr_t end)
    1.14  {
    1.15      int i;
    1.16 +
    1.17 +    enum { region_s3, region_text, region_percpu, region_bss, nr_regions };
    1.18      static struct {
    1.19          paddr_t s, e;
    1.20 -    } xen_regions[4];
    1.21 +    } xen_regions[nr_regions];
    1.22 +    static unsigned int percpu_data_size;
    1.23  
    1.24      /* initialize first time */
    1.25      if ( !xen_regions[0].s )
    1.26      {
    1.27          /* S3 resume code (and other real mode trampoline code) */
    1.28 -        xen_regions[0].s = bootsym_phys(trampoline_start);
    1.29 -        xen_regions[0].e = bootsym_phys(trampoline_end);
    1.30 +        xen_regions[region_s3].s = bootsym_phys(trampoline_start);
    1.31 +        xen_regions[region_s3].e = bootsym_phys(trampoline_end);
    1.32          /* hypervisor code + data */
    1.33 -        xen_regions[1].s =__pa(&_stext);
    1.34 -        xen_regions[1].e = __pa(&__init_begin);
    1.35 +        xen_regions[region_text].s = __pa(&_stext);
    1.36 +        xen_regions[region_text].e = __pa(&__init_begin);
    1.37          /* per-cpu data */
    1.38 -        xen_regions[2].s = __pa(&__per_cpu_start);
    1.39 -        xen_regions[2].e = xen_regions[2].s +
    1.40 +        xen_regions[region_percpu].s = __pa(&__per_cpu_start);
    1.41 +        xen_regions[region_percpu].e = xen_regions[region_percpu].s +
    1.42              (((paddr_t)last_cpu(cpu_possible_map) + 1) << PERCPU_SHIFT);
    1.43 +        percpu_data_size = __per_cpu_data_end - __per_cpu_start;
    1.44 +        percpu_data_size = (percpu_data_size + PAGE_SIZE - 1) & PAGE_MASK;
    1.45          /* bss */
    1.46 -        xen_regions[3].s = __pa(&__bss_start);
    1.47 -        xen_regions[3].e = __pa(&_end);
    1.48 +        xen_regions[region_bss].s = __pa(&__bss_start);
    1.49 +        xen_regions[region_bss].e = __pa(&_end);
    1.50      }
    1.51  
    1.52 -    for ( i = 0; i < ARRAY_SIZE(xen_regions); i++ )
    1.53 -    {
    1.54 +    for ( i = 0; i < nr_regions; i++ )
    1.55          if ( (start < xen_regions[i].e) && (end > xen_regions[i].s) )
    1.56 -            return 1;
    1.57 -    }
    1.58 +            return ((i != region_percpu) ||
    1.59 +                    ((start & (PERCPU_SIZE - 1)) < percpu_data_size));
    1.60  
    1.61      return 0;
    1.62  }