changeset 20662:0ca5a5f477be

memory hotadd 3/7: Function to share m2p tables with guest.

The m2p tables should be shared with the guest, as the guest will map
them read-only. The logic is similar to what happens in
subarch_init_memory(), but here we must check that the mapping has just
been set up, i.e. that it belongs to the newly hot-added range.
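
The "just set up" test is done by hotadd_mem_valid(), which is
introduced earlier in this series. As a minimal sketch of the idea,
assuming mem_hotadd_info records the start and end frame numbers of the
hot-added range (the field names below are assumptions, not necessarily
the in-tree definitions):

    /* Sketch only: assumed shape of the hot-add descriptor. */
    struct mem_hotadd_info {
        unsigned long spfn;  /* first frame of the hot-added range */
        unsigned long epfn;  /* one past the last frame */
        unsigned long cur;   /* allocation cursor for new page tables */
    };

    /* Share only frames that belong to the just-added range. */
    static int hotadd_mem_valid(unsigned long mfn,
                                struct mem_hotadd_info *info)
    {
        return (mfn >= info->spfn) && (mfn < info->epfn);
    }

This check is what distinguishes the hot-add path from
subarch_init_memory(), which can share every present m2p frame because
at boot they all belong to the initial mapping.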

Signed-off-by: Jiang, Yunhong <yunhong.jiang@intel.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Dec 11 08:55:08 2009 +0000 (2009-12-11)
parents adb62ca21d31
children 283a5357d196
files xen/arch/x86/x86_64/mm.c
line diff
--- a/xen/arch/x86/x86_64/mm.c	Fri Dec 11 08:54:37 2009 +0000
+++ b/xen/arch/x86/x86_64/mm.c	Fri Dec 11 08:55:08 2009 +0000
@@ -249,6 +249,67 @@ static int m2p_mapped(unsigned long spfn
     return M2P_NO_MAPPED;
 }
 
+int share_hotadd_m2p_table(struct mem_hotadd_info *info)
+{
+    unsigned long i, n, v, m2p_start_mfn = 0;
+    l3_pgentry_t l3e;
+    l2_pgentry_t l2e;
+
+    /* M2P table is mappable read-only by privileged domains. */
+    for ( v  = RDWR_MPT_VIRT_START;
+          v != RDWR_MPT_VIRT_END;
+          v += n << PAGE_SHIFT )
+    {
+        n = L2_PAGETABLE_ENTRIES * L1_PAGETABLE_ENTRIES;
+        l3e = l4e_to_l3e(idle_pg_table[l4_table_offset(v)])[
+            l3_table_offset(v)];
+        if ( !(l3e_get_flags(l3e) & _PAGE_PRESENT) )
+            continue;
+        if ( !(l3e_get_flags(l3e) & _PAGE_PSE) )
+        {
+            n = L1_PAGETABLE_ENTRIES;
+            l2e = l3e_to_l2e(l3e)[l2_table_offset(v)];
+            if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) )
+                continue;
+            m2p_start_mfn = l2e_get_pfn(l2e);
+        }
+        else
+            continue;
+
+        for ( i = 0; i < n; i++ )
+        {
+            struct page_info *page = mfn_to_page(m2p_start_mfn + i);
+            if (hotadd_mem_valid(m2p_start_mfn + i, info))
+                share_xen_page_with_privileged_guests(page, XENSHARE_readonly);
+        }
+    }
+
+    for ( v  = RDWR_COMPAT_MPT_VIRT_START;
+          v != RDWR_COMPAT_MPT_VIRT_END;
+          v += 1 << L2_PAGETABLE_SHIFT )
+    {
+        l3e = l4e_to_l3e(idle_pg_table[l4_table_offset(v)])[
+            l3_table_offset(v)];
+        if ( !(l3e_get_flags(l3e) & _PAGE_PRESENT) )
+            continue;
+        l2e = l3e_to_l2e(l3e)[l2_table_offset(v)];
+        if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) )
+            continue;
+        m2p_start_mfn = l2e_get_pfn(l2e);
+
+        for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
+        {
+            struct page_info *page = mfn_to_page(m2p_start_mfn + i);
+            if (hotadd_mem_valid(m2p_start_mfn + i, info))
+            {
+                printk("now share page %lx\n", m2p_start_mfn + i);
+                share_xen_page_with_privileged_guests(page, XENSHARE_readonly);
+            }
+        }
+    }
+    return 0;
+}
+
 static void destroy_compat_m2p_mapping(struct mem_hotadd_info *info)
 {
     unsigned long i, va, rwva, pt_pfn;
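
A note on the loop strides above: the walk over the RDWR_MPT range
advances by n << PAGE_SHIFT bytes per iteration, where n covers a whole
L3 slot (512 * 512 pages, i.e. 1GB) when the entry is absent or a PSE
superpage, and a single L2 slot (512 pages, i.e. 2MB) when the walk
descends to a present 2MB mapping; the compat loop always advances
1 << L2_PAGETABLE_SHIFT. A standalone sketch that prints the two
strides (illustrative constants mirroring x86-64 4-level paging, not
Xen code):

    #include <stdio.h>

    #define PAGE_SHIFT           12
    #define L1_PAGETABLE_ENTRIES 512
    #define L2_PAGETABLE_ENTRIES 512

    int main(void)
    {
        /* L3 slot skipped whole: 512 * 512 frames = 1GB of m2p space. */
        unsigned long n_l3 = (unsigned long)L2_PAGETABLE_ENTRIES
                             * L1_PAGETABLE_ENTRIES;
        /* Present 2MB L2 mapping: 512 frames shared, then advance 2MB. */
        unsigned long n_l2 = L1_PAGETABLE_ENTRIES;

        printf("L3 stride: %lu MB\n", (n_l3 << PAGE_SHIFT) >> 20); /* 1024 */
        printf("L2 stride: %lu MB\n", (n_l2 << PAGE_SHIFT) >> 20); /* 2 */
        return 0;
    }

Note that the first loop deliberately skips PSE (1GB) L3 entries via
the else-continue branch, so only 2MB-mapped stretches of the m2p table
are shared by this function.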