debuggers.hg

changeset 20663:283a5357d196

memory hotadd 4/7: Set up frametable for hot-added memory

We can't use alloc_boot_pages for memory hot-add, so change it to use
the page range passed in.

One change worth noting: when memory hotplug is in use, we have to set
up the initial frametable aligned to PDX group boundaries (i.e.
pdx_group_valid), to make sure mfn_valid() still works once max_page
is no longer the maximum.
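
For illustration, a minimal sketch (not part of the patch) of the group
rounding described above. PDX_GROUP_COUNT is the number of frametable
entries covered by one pdx_group_valid bit; the helper name is
hypothetical:

    /* Hypothetical helper: widen a pfn range to whole PDX groups,
     * mirroring the sidx/eidx arithmetic in extend_frame_table(). */
    static void example_group_bounds(unsigned long spfn, unsigned long epfn,
                                     unsigned long *sidx, unsigned long *eidx)
    {
        *sidx = pfn_to_pdx(spfn) / PDX_GROUP_COUNT;          /* round down */
        *eidx = (pfn_to_pdx(epfn) + PDX_GROUP_COUNT - 1) /
                PDX_GROUP_COUNT;                             /* round up */
    }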

Signed-off-by: Jiang, Yunhong <yunhong.jiang@intel.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Dec 11 08:56:04 2009 +0000 (2009-12-11)
parents 0ca5a5f477be
children 611f49efe955
files xen/arch/x86/mm.c xen/arch/x86/setup.c xen/arch/x86/x86_64/mm.c xen/include/asm-x86/page.h
--- a/xen/arch/x86/mm.c	Fri Dec 11 08:55:08 2009 +0000
+++ b/xen/arch/x86/mm.c	Fri Dec 11 08:56:04 2009 +0000
@@ -219,8 +219,17 @@ void __init init_frametable(void)
         init_frametable_chunk(pdx_to_page(sidx * PDX_GROUP_COUNT),
                               pdx_to_page(eidx * PDX_GROUP_COUNT));
     }
-    init_frametable_chunk(pdx_to_page(sidx * PDX_GROUP_COUNT),
-                          pdx_to_page(max_pdx - 1) + 1);
+    if ( !mem_hotplug )
+        init_frametable_chunk(pdx_to_page(sidx * PDX_GROUP_COUNT),
+                              pdx_to_page(max_pdx - 1) + 1);
+    else
+    {
+        init_frametable_chunk(pdx_to_page(sidx * PDX_GROUP_COUNT),
+                              pdx_to_page(max_idx * PDX_GROUP_COUNT));
+        /* Poison the unused tail of the last PDX group. */
+        memset(pdx_to_page(max_pdx), -1, (unsigned long)pdx_to_page(max_idx) -
+               (unsigned long)pdx_to_page(max_pdx));
+    }
 }
 
 void __init arch_init_memory(void)
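
The -1 fill matters because the frametable is now populated out to a PDX
group boundary beyond max_pdx, and validity is decided per group rather
than by max_page alone. A rough sketch of the shape of that test (the
exact mfn_valid() definition lives elsewhere in the tree and may differ
in detail):

    /* Sketch only: an mfn is valid if it is below max_page and its PDX
     * group is marked present, so the 0xFF-filled tail past max_pdx is
     * never treated as live page_info state. */
    static inline int sketch_mfn_valid(unsigned long mfn)
    {
        return (mfn < max_page) &&
               test_bit(pfn_to_pdx(mfn) / PDX_GROUP_COUNT, pdx_group_valid);
    }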
--- a/xen/arch/x86/setup.c	Fri Dec 11 08:55:08 2009 +0000
+++ b/xen/arch/x86/setup.c	Fri Dec 11 08:56:04 2009 +0000
@@ -304,7 +304,7 @@ static void __init setup_max_pdx(void)
 #endif
 }
 
-static void __init set_pdx_range(unsigned long smfn, unsigned long emfn)
+void set_pdx_range(unsigned long smfn, unsigned long emfn)
 {
     unsigned long idx, eidx;
 
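
The hunk only changes the function's linkage so the hot-add code can
call it. For context, a reconstruction of what set_pdx_range() does
(inferred from its callers; the actual body in setup.c may differ in
detail):

    /* Sketch: mark every PDX group touched by [smfn, emfn) as valid,
     * so mfn_valid() will accept mfns in that range. */
    void set_pdx_range(unsigned long smfn, unsigned long emfn)
    {
        unsigned long idx, eidx;

        idx = pfn_to_pdx(smfn) / PDX_GROUP_COUNT;
        eidx = (pfn_to_pdx(emfn - 1) + PDX_GROUP_COUNT) / PDX_GROUP_COUNT;
        for ( ; idx < eidx; ++idx )
            __set_bit(idx, pdx_group_valid);
    }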
--- a/xen/arch/x86/x86_64/mm.c	Fri Dec 11 08:55:08 2009 +0000
+++ b/xen/arch/x86/x86_64/mm.c	Fri Dec 11 08:56:04 2009 +0000
@@ -801,6 +801,122 @@ int __cpuinit setup_compat_arg_xlat(unsi
     return 0;
 }
 
+void cleanup_frame_table(struct mem_hotadd_info *info)
+{
+    unsigned long sva, eva;
+    l3_pgentry_t l3e;
+    l2_pgentry_t l2e;
+    unsigned long spfn, epfn;
+
+    spfn = info->spfn;
+    epfn = info->epfn;
+
+    sva = (unsigned long)pdx_to_page(pfn_to_pdx(spfn));
+    eva = (unsigned long)pdx_to_page(pfn_to_pdx(epfn));
+
+    /* Mark the range's page_info entries as invalid again. */
+    memset(mfn_to_page(spfn), -1,
+           (unsigned long)mfn_to_page(epfn) - (unsigned long)mfn_to_page(spfn));
+
+    while ( sva < eva )
+    {
+        l3e = l4e_to_l3e(idle_pg_table[l4_table_offset(sva)])[
+            l3_table_offset(sva)];
+        if ( !(l3e_get_flags(l3e) & _PAGE_PRESENT) ||
+             (l3e_get_flags(l3e) & _PAGE_PSE) )
+        {
+            sva = (sva & ~((1UL << L3_PAGETABLE_SHIFT) - 1)) +
+                  (1UL << L3_PAGETABLE_SHIFT);
+            continue;
+        }
+
+        l2e = l3e_to_l2e(l3e)[l2_table_offset(sva)];
+        ASSERT(l2e_get_flags(l2e) & _PAGE_PRESENT);
+
+        if ( (l2e_get_flags(l2e) & (_PAGE_PRESENT | _PAGE_PSE)) ==
+             (_PAGE_PRESENT | _PAGE_PSE) )
+        {
+            if ( hotadd_mem_valid(l2e_get_pfn(l2e), info) )
+                destroy_xen_mappings(sva & ~((1UL << L2_PAGETABLE_SHIFT) - 1),
+                                     ((sva & ~((1UL << L2_PAGETABLE_SHIFT) - 1)) +
+                                      (1UL << L2_PAGETABLE_SHIFT) - 1));
+
+            sva = (sva & ~((1UL << L2_PAGETABLE_SHIFT) - 1)) +
+                  (1UL << L2_PAGETABLE_SHIFT);
+            continue;
+        }
+
+        ASSERT(l1e_get_flags(l2e_to_l1e(l2e)[l1_table_offset(sva)]) &
+               _PAGE_PRESENT);
+        sva = (sva & ~((1UL << PAGE_SHIFT) - 1)) +
+              (1UL << PAGE_SHIFT);
+    }
+
+    /* Brute-force flush of all TLBs. */
+    flush_tlb_all();
+}
+
+/* Should we be paranoid about failure in map_pages_to_xen()? */
+static int setup_frametable_chunk(void *start, void *end,
+                                  struct mem_hotadd_info *info)
+{
+    unsigned long s = (unsigned long)start;
+    unsigned long e = (unsigned long)end;
+    unsigned long mfn;
+
+    ASSERT(!(s & ((1UL << L2_PAGETABLE_SHIFT) - 1)));
+    ASSERT(!(e & ((1UL << L2_PAGETABLE_SHIFT) - 1)));
+
+    /* Map the range with 2MB pages taken from the hot-added region. */
+    for ( ; s < e; s += (1UL << L2_PAGETABLE_SHIFT) )
+    {
+        mfn = alloc_hotadd_mfn(info);
+        map_pages_to_xen(s, mfn, 1UL << PAGETABLE_ORDER, PAGE_HYPERVISOR);
+    }
+    /* Poison the new page_info entries until they are properly set up. */
+    memset(start, -1, s - (unsigned long)start);
+
+    return 0;
+}
+
+int extend_frame_table(struct mem_hotadd_info *info)
+{
+    unsigned long cidx, nidx, eidx, spfn, epfn;
+
+    spfn = info->spfn;
+    epfn = info->epfn;
+
+    eidx = (pfn_to_pdx(epfn) + PDX_GROUP_COUNT - 1) / PDX_GROUP_COUNT;
+    nidx = cidx = pfn_to_pdx(spfn) / PDX_GROUP_COUNT;
+
+    ASSERT( pfn_to_pdx(epfn) <= (DIRECTMAP_SIZE >> PAGE_SHIFT) &&
+            (pfn_to_pdx(epfn) <= FRAMETABLE_SIZE / sizeof(struct page_info)) );
+
+    if ( test_bit(cidx, pdx_group_valid) )
+        cidx = find_next_zero_bit(pdx_group_valid, eidx, cidx);
+
+    if ( cidx >= eidx )
+        return 0;
+
+    /* Populate only the PDX groups that are not yet covered. */
+    while ( cidx < eidx )
+    {
+        nidx = find_next_bit(pdx_group_valid, eidx, cidx);
+        if ( nidx >= eidx )
+            nidx = eidx;
+        setup_frametable_chunk(pdx_to_page(cidx * PDX_GROUP_COUNT),
+                               pdx_to_page(nidx * PDX_GROUP_COUNT),
+                               info);
+
+        cidx = find_next_zero_bit(pdx_group_valid, eidx, nidx);
+    }
+
+    /* Now zero the page_info entries for the hot-added range proper. */
+    memset(mfn_to_page(spfn), 0,
+           (unsigned long)mfn_to_page(epfn) - (unsigned long)mfn_to_page(spfn));
+    return 0;
+}
+
 void __init subarch_init_memory(void)
 {
     unsigned long i, n, v, m2p_start_mfn;
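
Each iteration of the loop in setup_frametable_chunk() consumes one 2MB
superpage worth of hot-added memory (1UL << PAGETABLE_ORDER contiguous
pages) to back 2MB of frametable virtual space. A hedged sketch of the
resulting cost, using a hypothetical helper name:

    /* Hypothetical helper: how many 2MB allocations a frametable chunk
     * costs. Both ends are asserted 2MB-aligned, so this divides evenly. */
    static unsigned long sketch_chunk_superpages(void *start, void *end)
    {
        return ((unsigned long)end - (unsigned long)start)
               >> L2_PAGETABLE_SHIFT;
    }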
--- a/xen/include/asm-x86/page.h	Fri Dec 11 08:55:08 2009 +0000
+++ b/xen/include/asm-x86/page.h	Fri Dec 11 08:56:04 2009 +0000
@@ -360,6 +360,8 @@ l2_pgentry_t *virt_to_xen_l2e(unsigned l
 l3_pgentry_t *virt_to_xen_l3e(unsigned long v);
 #endif
 
+extern void set_pdx_range(unsigned long smfn, unsigned long emfn);
+
 /* Map machine page range in Xen virtual address space. */
 #define MAP_SMALL_PAGES _PAGE_AVAIL0 /* don't use superpages for the mapping */
 int map_pages_to_xen(
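
Exporting set_pdx_range() lets a hot-add path combine the pieces above.
A hedged sketch of such a caller (the mem_hotadd_info layout, its .cur
field, and the surrounding flow are assumptions here; later patches in
the series do the actual wiring):

    /* Hypothetical caller, for illustration only. */
    static int sketch_hotadd_frametable(unsigned long spfn, unsigned long epfn)
    {
        struct mem_hotadd_info info = { .spfn = spfn, .epfn = epfn,
                                        .cur = spfn };
        int ret;

        set_pdx_range(spfn, epfn);       /* make mfn_valid() cover the range */
        ret = extend_frame_table(&info); /* populate page_info for it */
        if ( ret )
            cleanup_frame_table(&info);  /* undo on failure */
        return ret;
    }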