debuggers.hg

changeset 20661:adb62ca21d31

memory hotadd 2/7: Destroy m2p table for hot-added memory when hot-add failed.

Since the m2p table being destroyed should not be in use at this point, we
do not need to consider cleaning up the head/tail mappings that may have
existed before the hot-add.

Signed-off-by: Jiang, Yunhong <yunhong.jiang@intel.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Dec 11 08:54:37 2009 +0000 (2009-12-11)
parents b7cf749e14fc
children 0ca5a5f477be
files xen/arch/x86/x86_64/mm.c
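
For context: the two helpers added below are teardown-only routines; the hot-add path that calls them when adding memory fails is introduced later in this 7-patch series. The following is a minimal sketch of the intended call pattern, assuming a memory_add()-style entry point, a setup_m2p_table() helper and a `cur` field from the rest of the series; none of these names are part of this changeset.

/*
 * Illustrative caller sketch -- not part of this changeset.  Everything
 * except destroy_m2p_mapping(), spfn and epfn is assumed from the rest
 * of the memory-hotadd series.
 */
struct mem_hotadd_info
{
    unsigned long spfn;   /* first newly added page frame number */
    unsigned long epfn;   /* one past the last newly added PFN */
    unsigned long cur;    /* allocation cursor within [spfn, epfn) (assumed) */
};

static int memory_add_sketch(unsigned long spfn, unsigned long epfn)
{
    struct mem_hotadd_info info = { .spfn = spfn, .epfn = epfn, .cur = spfn };

    /* Build the M2P (and compat M2P) entries covering the new range. */
    if ( setup_m2p_table(&info) )          /* helper name assumed */
    {
        /*
         * Setup failed part-way through: tear down any M2P page tables
         * that were backed by pages taken from the new range itself.
         */
        destroy_m2p_mapping(&info);
        return -ENOMEM;
    }

    return 0;
}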
line diff
--- a/xen/arch/x86/x86_64/mm.c	Fri Dec 11 08:53:57 2009 +0000
+++ b/xen/arch/x86/x86_64/mm.c	Fri Dec 11 08:54:37 2009 +0000
@@ -249,6 +249,104 @@ static int m2p_mapped(unsigned long spfn
     return M2P_NO_MAPPED;
 }
 
+static void destroy_compat_m2p_mapping(struct mem_hotadd_info *info)
+{
+    unsigned long i, va, rwva, pt_pfn;
+    unsigned long smap = info->spfn, emap = info->epfn;
+
+    l3_pgentry_t *l3_ro_mpt;
+    l2_pgentry_t *l2_ro_mpt;
+
+    if ( smap > ((RDWR_COMPAT_MPT_VIRT_END - RDWR_COMPAT_MPT_VIRT_START) >> 2) )
+        return;
+
+    if ( emap > ((RDWR_COMPAT_MPT_VIRT_END - RDWR_COMPAT_MPT_VIRT_START) >> 2) )
+        emap = (RDWR_COMPAT_MPT_VIRT_END - RDWR_COMPAT_MPT_VIRT_START) >> 2;
+
+    l3_ro_mpt = l4e_to_l3e(idle_pg_table[l4_table_offset(HIRO_COMPAT_MPT_VIRT_START)]);
+
+    ASSERT(l3e_get_flags(l3_ro_mpt[l3_table_offset(HIRO_COMPAT_MPT_VIRT_START)]) & _PAGE_PRESENT);
+
+    l2_ro_mpt = l3e_to_l2e(l3_ro_mpt[l3_table_offset(HIRO_COMPAT_MPT_VIRT_START)]);
+
+    for ( i = smap; i < emap; )
+    {
+        va = HIRO_COMPAT_MPT_VIRT_START +
+              i * sizeof(*compat_machine_to_phys_mapping);
+        rwva = RDWR_COMPAT_MPT_VIRT_START +
+             i * sizeof(*compat_machine_to_phys_mapping);
+        if ( l2e_get_flags(l2_ro_mpt[l2_table_offset(va)]) & _PAGE_PRESENT )
+        {
+            pt_pfn = l2e_get_pfn(l2_ro_mpt[l2_table_offset(va)]);
+            if ( hotadd_mem_valid(pt_pfn, info) )
+            {
+                destroy_xen_mappings(rwva, rwva +
+                        (1UL << L2_PAGETABLE_SHIFT));
+                l2e_write(&l2_ro_mpt[l2_table_offset(va)], l2e_empty());
+            }
+        }
+
+        i += 1UL << (L2_PAGETABLE_SHIFT - 2);
+    }
+
+    return;
+}
+
+void destroy_m2p_mapping(struct mem_hotadd_info *info)
+{
+    l3_pgentry_t *l3_ro_mpt;
+    unsigned long i, va, rwva;
+    unsigned long smap = info->spfn, emap = info->epfn;
+
+    l3_ro_mpt = l4e_to_l3e(idle_pg_table[l4_table_offset(RO_MPT_VIRT_START)]);
+
+    /*
+     * No need to clean m2p structure existing before the hotplug
+     */
+    for (i = smap; i < emap;)
+    {
+        unsigned long pt_pfn;
+        l2_pgentry_t *l2_ro_mpt;
+
+        va = RO_MPT_VIRT_START + i * sizeof(*machine_to_phys_mapping);
+        rwva = RDWR_MPT_VIRT_START + i * sizeof(*machine_to_phys_mapping);
+
+        /* 1G mapping should not be created by mem hotadd */
+        if (!(l3e_get_flags(l3_ro_mpt[l3_table_offset(va)]) & _PAGE_PRESENT) ||
+            (l3e_get_flags(l3_ro_mpt[l3_table_offset(va)]) & _PAGE_PSE))
+        {
+            i = ( i & ~((1UL << (L3_PAGETABLE_SHIFT - 3)) - 1)) +
+                (1UL << (L3_PAGETABLE_SHIFT - 3) );
+            continue;
+        }
+
+        l2_ro_mpt = l3e_to_l2e(l3_ro_mpt[l3_table_offset(va)]);
+        if (!(l2e_get_flags(l2_ro_mpt[l2_table_offset(va)]) & _PAGE_PRESENT))
+        {
+            i = ( i & ~((1UL << (L2_PAGETABLE_SHIFT - 3)) - 1)) +
+                    (1UL << (L2_PAGETABLE_SHIFT - 3)) ;
+            continue;
+        }
+
+        pt_pfn = l2e_get_pfn(l2_ro_mpt[l2_table_offset(va)]);
+        if ( hotadd_mem_valid(pt_pfn, info) )
+        {
+            destroy_xen_mappings(rwva, rwva + (1UL << L2_PAGETABLE_SHIFT));
+
+            l2_ro_mpt = l3e_to_l2e(l3_ro_mpt[l3_table_offset(va)]);
+            l2e_write(&l2_ro_mpt[l2_table_offset(va)], l2e_empty());
+        }
+        i = ( i & ~((1UL << (L2_PAGETABLE_SHIFT - 3)) - 1)) +
+              (1UL << (L2_PAGETABLE_SHIFT - 3));
+    }
+
+    destroy_compat_m2p_mapping(info);
+
+    /* Brute-Force flush all TLB */
+    flush_tlb_all();
+    return;
+}
+
 /*
  * Allocate and map the compatibility mode machine-to-phys table.
  * spfn/epfn: the pfn ranges to be setup
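
Both helpers above clear an L2 entry only when hotadd_mem_valid() reports that the page-table page backing it was allocated from the hot-added range itself; that is what leaves any head/tail M2P mappings that existed before the hot-add untouched, as the description notes. hotadd_mem_valid() is defined elsewhere in this series; a minimal sketch of such a range check, for illustration only:

/* Sketch of the range predicate used above -- the real helper lives
 * elsewhere in this patch series, not in this changeset. */
static int hotadd_mem_valid(unsigned long pfn, struct mem_hotadd_info *info)
{
    /* True iff pfn lies inside the newly added [spfn, epfn) range. */
    return (pfn >= info->spfn) && (pfn < info->epfn);
}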