
changeset 20664:611f49efe955

memory hotadd 5/7: Sync the page fault handler with mapping changes caused
by memory hotplug.

For compat guests, the compat m2p table is copied into the guest's page
tables rather than being directly mapped at L3, so we have to sync it
explicitly. The direct mapping range may also change, and we need to sync
it with the guest's tables.

Signed-off-by: Jiang, Yunhong <yunhong.jiang@intel.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Dec 11 08:56:50 2009 +0000 (2009-12-11)
parents 283a5357d196
children 7d7e221370ea
files xen/arch/x86/traps.c xen/arch/x86/x86_64/mm.c xen/include/asm-x86/mm.h
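
A minimal standalone sketch of the scheme the patch implements (illustrative
toy code with made-up types and sizes, not Xen code; only the idea of lazily
copying an entry from the hypervisor's idle compat L2 table into the faulting
guest's page tables is taken from the patch):

/* Toy model of sync-on-fault for the compat M2P area: the hypervisor
 * keeps a master ("idle") L2 table up to date as memory is hot-added;
 * a guest's private copy is brought in sync one entry at a time when
 * the guest faults on the hot-added range, instead of patching every
 * guest's page tables eagerly at hot-add time. */
#include <stdio.h>
#include <string.h>

#define SLOTS         8            /* toy number of compat M2P L2 slots */
#define PAGE_PRESENT  0x1UL

typedef unsigned long l2_entry;    /* stand-in for l2_pgentry_t */

static l2_entry idle_l2[SLOTS];    /* master copy owned by the hypervisor */
static l2_entry guest_l2[SLOTS];   /* one guest's lazily populated copy */

/* Return 1 if the fault was fixed by copying from the master table. */
static int handle_fault(unsigned long slot)
{
    if ( guest_l2[slot] & PAGE_PRESENT )
        return 0;                  /* already mapped: not our kind of fault */
    if ( !(idle_l2[slot] & PAGE_PRESENT) )
        return 0;                  /* master table has nothing there either */
    memcpy(&guest_l2[slot], &idle_l2[slot], sizeof(l2_entry));
    return 1;
}

int main(void)
{
    /* Memory hot-add grows the master table by one 2MB mapping... */
    idle_l2[5] = (0x1234UL << 12) | PAGE_PRESENT;

    /* ...and a guest later faults on the corresponding slot. */
    printf("fault on slot 5: %s\n",
           handle_fault(5) ? "fixed from idle table" : "not fixed");
    printf("fault on slot 6: %s\n",
           handle_fault(6) ? "fixed from idle table" : "not fixed");
    return 0;
}

In the diff below, the guest's L2 is reached by walking the page tables
referenced by the current CR3, and a successful fix is reported via
EXCRET_fault_fixed so the faulting instruction is retried.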
line diff
     1.1 --- a/xen/arch/x86/traps.c	Fri Dec 11 08:56:04 2009 +0000
     1.2 +++ b/xen/arch/x86/traps.c	Fri Dec 11 08:56:50 2009 +0000
     1.3 @@ -1230,6 +1230,10 @@ static int fixup_page_fault(unsigned lon
     1.4          return ret;
     1.5      }
     1.6  
     1.7 +    if ( !(regs->error_code & PFEC_page_present) &&
     1.8 +          (pagefault_by_memadd(addr, regs)) )
     1.9 +        return handle_memadd_fault(addr, regs);
    1.10 +
    1.11      if ( unlikely(IN_HYPERVISOR_RANGE(addr)) )
    1.12      {
    1.13          if ( !(regs->error_code & PFEC_reserved_bit) &&
     2.1 --- a/xen/arch/x86/x86_64/mm.c	Fri Dec 11 08:56:04 2009 +0000
     2.2 +++ b/xen/arch/x86/x86_64/mm.c	Fri Dec 11 08:56:50 2009 +0000
     2.3 @@ -1182,6 +1182,87 @@ int check_descriptor(const struct domain
     2.4      return 0;
     2.5  }
     2.6  
     2.7 +int pagefault_by_memadd(unsigned long addr, struct cpu_user_regs *regs)
     2.8 +{
     2.9 +    struct domain *d = current->domain;
    2.10 +
    2.11 +    if (guest_mode(regs) &&
    2.12 +        is_pv_32bit_domain(d) &&
    2.13 +        ((addr >= HYPERVISOR_COMPAT_VIRT_START(d)) &&
    2.14 +             (addr < MACH2PHYS_COMPAT_VIRT_END)) )
    2.15 +            return 1;
    2.16 +    return 0;
    2.17 +}
    2.18 +
    2.19 +int handle_memadd_fault(unsigned long addr, struct cpu_user_regs *regs)
    2.20 +{
    2.21 +    struct domain *d = current->domain;
    2.22 +    l4_pgentry_t *pl4e = NULL;
    2.23 +    l4_pgentry_t l4e;
    2.24 +    l3_pgentry_t  *pl3e = NULL;
    2.25 +    l3_pgentry_t l3e;
    2.26 +    l2_pgentry_t *pl2e = NULL;
    2.27 +    l2_pgentry_t l2e, idle_l2e;
    2.28 +    unsigned long mfn, idle_index;
    2.29 +    int ret = 0;
    2.30 +
    2.31 +    if (!is_pv_32on64_domain(d))
    2.32 +        return 0;
    2.33 +
    2.34 +    if ((addr < HYPERVISOR_COMPAT_VIRT_START(d)) ||
    2.35 +             (addr > MACH2PHYS_COMPAT_VIRT_END) )
    2.36 +        return 0;
    2.37 +
    2.38 +    mfn = (read_cr3()) >> PAGE_SHIFT;
    2.39 +
    2.40 +    pl4e = map_domain_page(mfn);
    2.41 +
     2.42 +    l4e = pl4e[0]; /* compat range lies below 4GB: L4 slot 0 */
    2.43 +
    2.44 +    if (!(l4e_get_flags(l4e) & _PAGE_PRESENT))
    2.45 +        goto unmap;
    2.46 +
    2.47 +    mfn = l4e_get_pfn(l4e);
     2.48 +    /* No need to get the page type here since this is the current CR3 */
    2.49 +    pl3e = map_domain_page(mfn);
    2.50 +
    2.51 +    l3e = pl3e[3];
    2.52 +
    2.53 +    if ( !(l3e_get_flags(l3e) & _PAGE_PRESENT) )
    2.54 +        goto unmap;
    2.55 +
    2.56 +    mfn = l3e_get_pfn(l3e);
    2.57 +    pl2e = map_domain_page(mfn);
    2.58 +
    2.59 +    l2e = pl2e[l2_table_offset(addr)];
    2.60 +
    2.61 +    if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT))
    2.62 +        goto unmap;
    2.63 +
    2.64 +    idle_index = (l2_table_offset(addr) -
    2.65 +                        COMPAT_L2_PAGETABLE_FIRST_XEN_SLOT(d))/
    2.66 +                  sizeof(l2_pgentry_t);
    2.67 +    idle_l2e = compat_idle_pg_table_l2[idle_index];
    2.68 +    if (!(l2e_get_flags(idle_l2e) & _PAGE_PRESENT))
    2.69 +        goto unmap;
    2.70 +
    2.71 +    memcpy(&pl2e[l2_table_offset(addr)],
    2.72 +            &compat_idle_pg_table_l2[idle_index],
    2.73 +            sizeof(l2_pgentry_t));
    2.74 +
    2.75 +    ret = EXCRET_fault_fixed;
    2.76 +
    2.77 +unmap:
    2.78 +    if ( pl4e )
    2.79 +        unmap_domain_page(pl4e);
    2.80 +    if ( pl3e )
    2.81 +        unmap_domain_page(pl3e);
    2.82 +    if ( pl2e )
    2.83 +        unmap_domain_page(pl2e);
    2.84 +
    2.85 +    return ret;
    2.86 +}
    2.87 +
    2.88  void domain_set_alloc_bitsize(struct domain *d)
    2.89  {
    2.90      if ( !is_pv_32on64_domain(d) ||
     3.1 --- a/xen/include/asm-x86/mm.h	Fri Dec 11 08:56:04 2009 +0000
     3.2 +++ b/xen/include/asm-x86/mm.h	Fri Dec 11 08:56:50 2009 +0000
     3.3 @@ -476,6 +476,21 @@ int  ptwr_do_page_fault(struct vcpu *, u
     3.4  
     3.5  int audit_adjust_pgtables(struct domain *d, int dir, int noisy);
     3.6  
     3.7 +#ifdef CONFIG_X86_64
     3.8 +extern int pagefault_by_memadd(unsigned long addr, struct cpu_user_regs *regs);
     3.9 +extern int handle_memadd_fault(unsigned long addr, struct cpu_user_regs *regs);
    3.10 +#else
     3.11 +static inline int pagefault_by_memadd(unsigned long addr, struct cpu_user_regs *regs)
    3.12 +{
    3.13 +    return 0;
    3.14 +}
    3.15 +
     3.16 +static inline int handle_memadd_fault(unsigned long addr, struct cpu_user_regs *regs)
    3.17 +{
    3.18 +    return 0;
    3.19 +}
    3.20 +#endif
    3.21 +
    3.22  #ifndef NDEBUG
    3.23  
    3.24  #define AUDIT_SHADOW_ALREADY_LOCKED ( 1u << 0 )