debuggers.hg

changeset 21986:e7afe98afd43

Nested Virtualization: p2m infrastructure

Change the p2m infrastructure to operate per-p2m instead of per-domain.
This allows multiple p2m tables to be used per domain.
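As an illustrative sketch of the calling-convention change (hypothetical helper; it uses gfn_to_mfn() and p2m_get_hostp2m() as they appear in the hunks below, and is not itself part of the patch), a typical caller now names the p2m table explicitly instead of passing the domain:

    /* Minimal sketch, assuming Xen's asm-x86/p2m.h as modified by this patch:
     * translation helpers take the p2m table to operate on, with
     * p2m_get_hostp2m(d) returning the domain's host (default) table. */
    static unsigned long sketch_lookup(struct domain *d, unsigned long gfn)
    {
        struct p2m_domain *p2m = p2m_get_hostp2m(d);
        p2m_type_t p2mt;

        /* Old form: mfn = mfn_x(gfn_to_mfn(d, gfn, &p2mt));
         * New form: the table is an explicit argument, so a nested p2m
         * could later be passed here instead of the host p2m. */
        return mfn_x(gfn_to_mfn(p2m, gfn, &p2mt));
    }

The hunks below apply this transformation throughout the tree and push struct p2m_domain * into the p2m/EPT internals (ept_set_entry(), ept_get_entry(), the PoD helpers, and so on), so per-table state such as max_mapped_pfn and the PoD counts lives in the p2m rather than the domain.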

Signed-off-by: Christoph Egger <Christoph.Egger@amd.com>
Acked-by: Tim Deegan <Tim.Deegan@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Mon Aug 09 16:46:42 2010 +0100 (2010-08-09)
parents 9e58c46ee63b
children 2f4a89ad2528
files
	xen/arch/x86/debug.c
	xen/arch/x86/domain.c
	xen/arch/x86/domctl.c
	xen/arch/x86/hvm/emulate.c
	xen/arch/x86/hvm/hvm.c
	xen/arch/x86/hvm/mtrr.c
	xen/arch/x86/hvm/stdvga.c
	xen/arch/x86/hvm/svm/svm.c
	xen/arch/x86/hvm/vmx/vmx.c
	xen/arch/x86/mm.c
	xen/arch/x86/mm/guest_walk.c
	xen/arch/x86/mm/hap/guest_walk.c
	xen/arch/x86/mm/hap/hap.c
	xen/arch/x86/mm/hap/p2m-ept.c
	xen/arch/x86/mm/mem_event.c
	xen/arch/x86/mm/mem_paging.c
	xen/arch/x86/mm/mem_sharing.c
	xen/arch/x86/mm/p2m.c
	xen/arch/x86/mm/shadow/common.c
	xen/arch/x86/mm/shadow/multi.c
	xen/common/grant_table.c
	xen/common/memory.c
	xen/common/tmem_xen.c
	xen/include/asm-x86/guest_pt.h
	xen/include/asm-x86/mem_sharing.h
	xen/include/asm-x86/p2m.h
     1.1 --- a/xen/arch/x86/debug.c	Mon Aug 09 16:40:18 2010 +0100
     1.2 +++ b/xen/arch/x86/debug.c	Mon Aug 09 16:46:42 2010 +0100
     1.3 @@ -61,7 +61,7 @@ dbg_hvm_va2mfn(dbgva_t vaddr, struct dom
     1.4          return INVALID_MFN;
     1.5      }
     1.6  
     1.7 -    mfn = mfn_x(gfn_to_mfn(dp, gfn, &gfntype)); 
     1.8 +    mfn = mfn_x(gfn_to_mfn(p2m_get_hostp2m(dp), gfn, &gfntype)); 
     1.9      if ( p2m_is_readonly(gfntype) && toaddr )
    1.10      {
    1.11          DBGP2("kdb:p2m_is_readonly: gfntype:%x\n", gfntype);
     2.1 --- a/xen/arch/x86/domain.c	Mon Aug 09 16:40:18 2010 +0100
     2.2 +++ b/xen/arch/x86/domain.c	Mon Aug 09 16:46:42 2010 +0100
     2.3 @@ -151,7 +151,7 @@ void dump_pageframe_info(struct domain *
     2.4  
     2.5      if ( is_hvm_domain(d) )
     2.6      {
     2.7 -        p2m_pod_dump_data(d);
     2.8 +        p2m_pod_dump_data(p2m_get_hostp2m(d));
     2.9      }
    2.10  
    2.11      spin_lock(&d->page_alloc_lock);
     3.1 --- a/xen/arch/x86/domctl.c	Mon Aug 09 16:40:18 2010 +0100
     3.2 +++ b/xen/arch/x86/domctl.c	Mon Aug 09 16:46:42 2010 +0100
     3.3 @@ -982,7 +982,7 @@ long arch_do_domctl(
     3.4  
     3.5              ret = iomem_permit_access(d, mfn, mfn + nr_mfns - 1);
     3.6              for ( i = 0; i < nr_mfns; i++ )
     3.7 -                set_mmio_p2m_entry(d, gfn+i, _mfn(mfn+i));
     3.8 +                set_mmio_p2m_entry(p2m_get_hostp2m(d), gfn+i, _mfn(mfn+i));
     3.9          }
    3.10          else
    3.11          {
    3.12 @@ -991,7 +991,7 @@ long arch_do_domctl(
    3.13                   gfn, mfn, nr_mfns);
    3.14  
    3.15              for ( i = 0; i < nr_mfns; i++ )
    3.16 -                clear_mmio_p2m_entry(d, gfn+i);
    3.17 +                clear_mmio_p2m_entry(p2m_get_hostp2m(d), gfn+i);
    3.18              ret = iomem_deny_access(d, mfn, mfn + nr_mfns - 1);
    3.19          }
    3.20  
     4.1 --- a/xen/arch/x86/hvm/emulate.c	Mon Aug 09 16:40:18 2010 +0100
     4.2 +++ b/xen/arch/x86/hvm/emulate.c	Mon Aug 09 16:46:42 2010 +0100
     4.3 @@ -55,6 +55,7 @@ int hvmemul_do_io(
     4.4      paddr_t value = ram_gpa;
     4.5      int value_is_ptr = (p_data == NULL);
     4.6      struct vcpu *curr = current;
     4.7 +    struct p2m_domain *p2m = p2m_get_hostp2m(curr->domain);
     4.8      ioreq_t *p = get_ioreq(curr);
     4.9      unsigned long ram_gfn = paddr_to_pfn(ram_gpa);
    4.10      p2m_type_t p2mt;
    4.11 @@ -62,10 +63,10 @@ int hvmemul_do_io(
    4.12      int rc;
    4.13  
    4.14      /* Check for paged out page */
    4.15 -    ram_mfn = gfn_to_mfn_unshare(current->domain, ram_gfn, &p2mt, 0);
    4.16 +    ram_mfn = gfn_to_mfn_unshare(p2m, ram_gfn, &p2mt, 0);
    4.17      if ( p2m_is_paging(p2mt) )
    4.18      {
    4.19 -        p2m_mem_paging_populate(curr->domain, ram_gfn);
    4.20 +        p2m_mem_paging_populate(p2m, ram_gfn);
    4.21          return X86EMUL_RETRY;
    4.22      }
    4.23      if ( p2m_is_shared(p2mt) )
    4.24 @@ -638,6 +639,7 @@ static int hvmemul_rep_movs(
    4.25      unsigned long saddr, daddr, bytes;
    4.26      paddr_t sgpa, dgpa;
    4.27      uint32_t pfec = PFEC_page_present;
    4.28 +    struct p2m_domain *p2m = p2m_get_hostp2m(current->domain);
    4.29      p2m_type_t p2mt;
    4.30      int rc, df = !!(ctxt->regs->eflags & X86_EFLAGS_DF);
    4.31      char *buf;
    4.32 @@ -668,12 +670,12 @@ static int hvmemul_rep_movs(
    4.33      if ( rc != X86EMUL_OKAY )
    4.34          return rc;
    4.35  
    4.36 -    (void)gfn_to_mfn_current(sgpa >> PAGE_SHIFT, &p2mt);
    4.37 +    (void)gfn_to_mfn(p2m, sgpa >> PAGE_SHIFT, &p2mt);
    4.38      if ( !p2m_is_ram(p2mt) && !p2m_is_grant(p2mt) )
    4.39          return hvmemul_do_mmio(
    4.40              sgpa, reps, bytes_per_rep, dgpa, IOREQ_READ, df, NULL);
    4.41  
    4.42 -    (void)gfn_to_mfn_current(dgpa >> PAGE_SHIFT, &p2mt);
    4.43 +    (void)gfn_to_mfn(p2m, dgpa >> PAGE_SHIFT, &p2mt);
    4.44      if ( !p2m_is_ram(p2mt) && !p2m_is_grant(p2mt) )
    4.45          return hvmemul_do_mmio(
    4.46              dgpa, reps, bytes_per_rep, sgpa, IOREQ_WRITE, df, NULL);
     5.1 --- a/xen/arch/x86/hvm/hvm.c	Mon Aug 09 16:40:18 2010 +0100
     5.2 +++ b/xen/arch/x86/hvm/hvm.c	Mon Aug 09 16:46:42 2010 +0100
     5.3 @@ -335,16 +335,17 @@ static int hvm_set_ioreq_page(
     5.4      struct domain *d, struct hvm_ioreq_page *iorp, unsigned long gmfn)
     5.5  {
     5.6      struct page_info *page;
     5.7 +    struct p2m_domain *p2m = p2m_get_hostp2m(d);
     5.8      p2m_type_t p2mt;
     5.9      unsigned long mfn;
    5.10      void *va;
    5.11  
    5.12 -    mfn = mfn_x(gfn_to_mfn_unshare(d, gmfn, &p2mt, 0));
    5.13 +    mfn = mfn_x(gfn_to_mfn_unshare(p2m, gmfn, &p2mt, 0));
    5.14      if ( !p2m_is_ram(p2mt) )
    5.15          return -EINVAL;
    5.16      if ( p2m_is_paging(p2mt) )
    5.17      {
    5.18 -        p2m_mem_paging_populate(d, gmfn);
    5.19 +        p2m_mem_paging_populate(p2m, gmfn);
    5.20          return -ENOENT;
    5.21      }
    5.22      if ( p2m_is_shared(p2mt) )
    5.23 @@ -968,8 +969,10 @@ bool_t hvm_hap_nested_page_fault(unsigne
    5.24  {
    5.25      p2m_type_t p2mt;
    5.26      mfn_t mfn;
    5.27 -
    5.28 -    mfn = gfn_to_mfn_type_current(gfn, &p2mt, p2m_guest);
    5.29 +    struct vcpu *v = current;
    5.30 +    struct p2m_domain *p2m = p2m_get_hostp2m(v->domain);
    5.31 +
    5.32 +    mfn = gfn_to_mfn_guest(p2m, gfn, &p2mt);
    5.33  
    5.34      /*
    5.35       * If this GFN is emulated MMIO or marked as read-only, pass the fault
    5.36 @@ -985,12 +988,12 @@ bool_t hvm_hap_nested_page_fault(unsigne
    5.37  #ifdef __x86_64__
    5.38      /* Check if the page has been paged out */
    5.39      if ( p2m_is_paged(p2mt) || (p2mt == p2m_ram_paging_out) )
    5.40 -        p2m_mem_paging_populate(current->domain, gfn);
    5.41 +        p2m_mem_paging_populate(p2m, gfn);
    5.42  
    5.43      /* Mem sharing: unshare the page and try again */
    5.44      if ( p2mt == p2m_ram_shared )
    5.45      {
    5.46 -        mem_sharing_unshare_page(current->domain, gfn, 0);
    5.47 +        mem_sharing_unshare_page(p2m, gfn, 0);
    5.48          return 1;
    5.49      }
    5.50  #endif
    5.51 @@ -1003,8 +1006,8 @@ bool_t hvm_hap_nested_page_fault(unsigne
    5.52           * a large page, we do not change other pages type within that large
    5.53           * page.
    5.54           */
    5.55 -        paging_mark_dirty(current->domain, mfn_x(mfn));
    5.56 -        p2m_change_type(current->domain, gfn, p2m_ram_logdirty, p2m_ram_rw);
    5.57 +        paging_mark_dirty(v->domain, mfn_x(mfn));
    5.58 +        p2m_change_type(p2m, gfn, p2m_ram_logdirty, p2m_ram_rw);
    5.59          return 1;
    5.60      }
    5.61  
    5.62 @@ -1088,6 +1091,7 @@ int hvm_set_cr0(unsigned long value)
    5.63  {
    5.64      struct vcpu *v = current;
    5.65      p2m_type_t p2mt;
    5.66 +    struct p2m_domain *p2m = p2m_get_hostp2m(v->domain);
    5.67      unsigned long gfn, mfn, old_value = v->arch.hvm_vcpu.guest_cr[0];
    5.68  
    5.69      HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR0 value = %lx", value);
    5.70 @@ -1126,7 +1130,7 @@ int hvm_set_cr0(unsigned long value)
    5.71          {
    5.72              /* The guest CR3 must be pointing to the guest physical. */
    5.73              gfn = v->arch.hvm_vcpu.guest_cr[3]>>PAGE_SHIFT;
    5.74 -            mfn = mfn_x(gfn_to_mfn_current(gfn, &p2mt));
    5.75 +            mfn = mfn_x(gfn_to_mfn(p2m, gfn, &p2mt));
    5.76              if ( !p2m_is_ram(p2mt) || !mfn_valid(mfn) ||
    5.77                   !get_page(mfn_to_page(mfn), v->domain))
    5.78              {
    5.79 @@ -1213,7 +1217,8 @@ int hvm_set_cr3(unsigned long value)
    5.80      {
    5.81          /* Shadow-mode CR3 change. Check PDBR and update refcounts. */
    5.82          HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR3 value = %lx", value);
    5.83 -        mfn = mfn_x(gfn_to_mfn_current(value >> PAGE_SHIFT, &p2mt));
    5.84 +        mfn = mfn_x(gfn_to_mfn(p2m_get_hostp2m(v->domain),
    5.85 +            value >> PAGE_SHIFT, &p2mt));
    5.86          if ( !p2m_is_ram(p2mt) || !mfn_valid(mfn) ||
    5.87               !get_page(mfn_to_page(mfn), v->domain) )
    5.88                goto bad_cr3;
    5.89 @@ -1356,6 +1361,8 @@ static void *hvm_map_entry(unsigned long
    5.90      unsigned long gfn, mfn;
    5.91      p2m_type_t p2mt;
    5.92      uint32_t pfec;
    5.93 +    struct vcpu *v = current;
    5.94 +    struct p2m_domain *p2m = p2m_get_hostp2m(v->domain);
    5.95  
    5.96      if ( ((va & ~PAGE_MASK) + 8) > PAGE_SIZE )
    5.97      {
    5.98 @@ -1372,10 +1379,10 @@ static void *hvm_map_entry(unsigned long
    5.99      gfn = paging_gva_to_gfn(current, va, &pfec);
   5.100      if ( pfec == PFEC_page_paged || pfec == PFEC_page_shared )
   5.101          return NULL;
   5.102 -    mfn = mfn_x(gfn_to_mfn_unshare(current->domain, gfn, &p2mt, 0));
   5.103 +    mfn = mfn_x(gfn_to_mfn_unshare(p2m, gfn, &p2mt, 0));
   5.104      if ( p2m_is_paging(p2mt) )
   5.105      {
   5.106 -        p2m_mem_paging_populate(current->domain, gfn);
   5.107 +        p2m_mem_paging_populate(p2m, gfn);
   5.108          return NULL;
   5.109      }
   5.110      if ( p2m_is_shared(p2mt) )
   5.111 @@ -1742,6 +1749,7 @@ static enum hvm_copy_result __hvm_copy(
   5.112      void *buf, paddr_t addr, int size, unsigned int flags, uint32_t pfec)
   5.113  {
   5.114      struct vcpu *curr = current;
   5.115 +    struct p2m_domain *p2m = p2m_get_hostp2m(curr->domain);
   5.116      unsigned long gfn, mfn;
   5.117      p2m_type_t p2mt;
   5.118      char *p;
   5.119 @@ -1770,11 +1778,11 @@ static enum hvm_copy_result __hvm_copy(
   5.120              gfn = addr >> PAGE_SHIFT;
   5.121          }
   5.122  
   5.123 -        mfn = mfn_x(gfn_to_mfn_unshare(current->domain, gfn, &p2mt, 0));
   5.124 +        mfn = mfn_x(gfn_to_mfn_unshare(p2m, gfn, &p2mt, 0));
   5.125  
   5.126          if ( p2m_is_paging(p2mt) )
   5.127          {
   5.128 -            p2m_mem_paging_populate(curr->domain, gfn);
   5.129 +            p2m_mem_paging_populate(p2m, gfn);
   5.130              return HVMCOPY_gfn_paged_out;
   5.131          }
   5.132          if ( p2m_is_shared(p2mt) )
   5.133 @@ -3031,6 +3039,7 @@ long do_hvm_op(unsigned long op, XEN_GUE
   5.134      {
   5.135          struct xen_hvm_modified_memory a;
   5.136          struct domain *d;
   5.137 +        struct p2m_domain *p2m;
   5.138          unsigned long pfn;
   5.139  
   5.140          if ( copy_from_guest(&a, arg, 1) )
   5.141 @@ -3058,13 +3067,14 @@ long do_hvm_op(unsigned long op, XEN_GUE
   5.142          if ( !paging_mode_log_dirty(d) )
   5.143              goto param_fail3;
   5.144  
   5.145 +        p2m = p2m_get_hostp2m(d);
   5.146          for ( pfn = a.first_pfn; pfn < a.first_pfn + a.nr; pfn++ )
   5.147          {
   5.148              p2m_type_t t;
   5.149 -            mfn_t mfn = gfn_to_mfn(d, pfn, &t);
   5.150 +            mfn_t mfn = gfn_to_mfn(p2m, pfn, &t);
   5.151              if ( p2m_is_paging(t) )
   5.152              {
   5.153 -                p2m_mem_paging_populate(d, pfn);
   5.154 +                p2m_mem_paging_populate(p2m, pfn);
   5.155  
   5.156                  rc = -EINVAL;
   5.157                  goto param_fail3;
   5.158 @@ -3091,6 +3101,7 @@ long do_hvm_op(unsigned long op, XEN_GUE
   5.159      {
   5.160          struct xen_hvm_set_mem_type a;
   5.161          struct domain *d;
   5.162 +        struct p2m_domain *p2m;
   5.163          unsigned long pfn;
   5.164          
   5.165          /* Interface types to internal p2m types */
   5.166 @@ -3120,15 +3131,16 @@ long do_hvm_op(unsigned long op, XEN_GUE
   5.167          if ( a.hvmmem_type >= ARRAY_SIZE(memtype) )
   5.168              goto param_fail4;
   5.169  
   5.170 +        p2m = p2m_get_hostp2m(d);
   5.171          for ( pfn = a.first_pfn; pfn < a.first_pfn + a.nr; pfn++ )
   5.172          {
   5.173              p2m_type_t t;
   5.174              p2m_type_t nt;
   5.175              mfn_t mfn;
   5.176 -            mfn = gfn_to_mfn_unshare(d, pfn, &t, 0);
   5.177 +            mfn = gfn_to_mfn_unshare(p2m, pfn, &t, 0);
   5.178              if ( p2m_is_paging(t) )
   5.179              {
   5.180 -                p2m_mem_paging_populate(d, pfn);
   5.181 +                p2m_mem_paging_populate(p2m, pfn);
   5.182  
   5.183                  rc = -EINVAL;
   5.184                  goto param_fail4;
   5.185 @@ -3147,7 +3159,7 @@ long do_hvm_op(unsigned long op, XEN_GUE
   5.186              }
   5.187              else
   5.188              {
   5.189 -                nt = p2m_change_type(d, pfn, t, memtype[a.hvmmem_type]);
   5.190 +                nt = p2m_change_type(p2m, pfn, t, memtype[a.hvmmem_type]);
   5.191                  if ( nt != t )
   5.192                  {
   5.193                      gdprintk(XENLOG_WARNING,
     6.1 --- a/xen/arch/x86/hvm/mtrr.c	Mon Aug 09 16:40:18 2010 +0100
     6.2 +++ b/xen/arch/x86/hvm/mtrr.c	Mon Aug 09 16:46:42 2010 +0100
     6.3 @@ -399,7 +399,7 @@ uint32_t get_pat_flags(struct vcpu *v,
     6.4      {
     6.5          struct domain *d = v->domain;
     6.6          p2m_type_t p2mt;
     6.7 -        gfn_to_mfn(d, paddr_to_pfn(gpaddr), &p2mt);
     6.8 +        gfn_to_mfn(p2m_get_hostp2m(d), paddr_to_pfn(gpaddr), &p2mt);
     6.9          if (p2m_is_ram(p2mt))
    6.10              gdprintk(XENLOG_WARNING,
    6.11                      "Conflict occurs for a given guest l1e flags:%x "
     7.1 --- a/xen/arch/x86/hvm/stdvga.c	Mon Aug 09 16:40:18 2010 +0100
     7.2 +++ b/xen/arch/x86/hvm/stdvga.c	Mon Aug 09 16:46:42 2010 +0100
     7.3 @@ -469,6 +469,7 @@ static int mmio_move(struct hvm_hw_stdvg
     7.4      int i;
     7.5      int sign = p->df ? -1 : 1;
     7.6      p2m_type_t p2mt;
     7.7 +    struct p2m_domain *p2m = p2m_get_hostp2m(current->domain);
     7.8  
     7.9      if ( p->data_is_ptr )
    7.10      {
    7.11 @@ -481,7 +482,7 @@ static int mmio_move(struct hvm_hw_stdvg
    7.12                  if ( hvm_copy_to_guest_phys(data, &tmp, p->size) !=
    7.13                       HVMCOPY_okay )
    7.14                  {
    7.15 -                    (void)gfn_to_mfn_current(data >> PAGE_SHIFT, &p2mt);
    7.16 +                    (void)gfn_to_mfn(p2m, data >> PAGE_SHIFT, &p2mt);
    7.17                      /*
    7.18                       * The only case we handle is vga_mem <-> vga_mem.
    7.19                       * Anything else disables caching and leaves it to qemu-dm.
    7.20 @@ -503,7 +504,7 @@ static int mmio_move(struct hvm_hw_stdvg
    7.21                  if ( hvm_copy_from_guest_phys(&tmp, data, p->size) !=
    7.22                       HVMCOPY_okay )
    7.23                  {
    7.24 -                    (void)gfn_to_mfn_current(data >> PAGE_SHIFT, &p2mt);
    7.25 +                    (void)gfn_to_mfn(p2m, data >> PAGE_SHIFT, &p2mt);
    7.26                      if ( (p2mt != p2m_mmio_dm) || (data < VGA_MEM_BASE) ||
    7.27                           ((data + p->size) > (VGA_MEM_BASE + VGA_MEM_SIZE)) )
    7.28                          return 0;
     8.1 --- a/xen/arch/x86/hvm/svm/svm.c	Mon Aug 09 16:40:18 2010 +0100
     8.2 +++ b/xen/arch/x86/hvm/svm/svm.c	Mon Aug 09 16:46:42 2010 +0100
     8.3 @@ -232,7 +232,7 @@ static int svm_vmcb_restore(struct vcpu 
     8.4      {
     8.5          if ( c->cr0 & X86_CR0_PG )
     8.6          {
     8.7 -            mfn = mfn_x(gfn_to_mfn(v->domain, c->cr3 >> PAGE_SHIFT, &p2mt));
     8.8 +            mfn = mfn_x(gfn_to_mfn(p2m, c->cr3 >> PAGE_SHIFT, &p2mt));
     8.9              if ( !p2m_is_ram(p2mt) || !get_page(mfn_to_page(mfn), v->domain) )
    8.10              {
    8.11                  gdprintk(XENLOG_ERR, "Invalid CR3 value=0x%"PRIx64"\n",
    8.12 @@ -946,6 +946,9 @@ static void svm_do_nested_pgfault(paddr_
    8.13      unsigned long gfn = gpa >> PAGE_SHIFT;
    8.14      mfn_t mfn;
    8.15      p2m_type_t p2mt;
    8.16 +    struct p2m_domain *p2m;
    8.17 +
    8.18 +    p2m = p2m_get_hostp2m(current->domain);
    8.19  
    8.20      if ( tb_init_done )
    8.21      {
    8.22 @@ -958,7 +961,7 @@ static void svm_do_nested_pgfault(paddr_
    8.23  
    8.24          _d.gpa = gpa;
    8.25          _d.qualification = 0;
    8.26 -        _d.mfn = mfn_x(gfn_to_mfn_query(current->domain, gfn, &_d.p2mt));
    8.27 +        _d.mfn = mfn_x(gfn_to_mfn_query(p2m, gfn, &_d.p2mt));
    8.28          
    8.29          __trace_var(TRC_HVM_NPF, 0, sizeof(_d), (unsigned char *)&_d);
    8.30      }
    8.31 @@ -967,7 +970,7 @@ static void svm_do_nested_pgfault(paddr_
    8.32          return;
    8.33  
    8.34      /* Everything else is an error. */
    8.35 -    mfn = gfn_to_mfn_type_current(gfn, &p2mt, p2m_guest);
    8.36 +    mfn = gfn_to_mfn_guest(p2m, gfn, &p2mt);
    8.37      gdprintk(XENLOG_ERR, "SVM violation gpa %#"PRIpaddr", mfn %#lx, type %i\n",
    8.38               gpa, mfn_x(mfn), p2mt);
    8.39      domain_crash(current->domain);
     9.1 --- a/xen/arch/x86/hvm/vmx/vmx.c	Mon Aug 09 16:40:18 2010 +0100
     9.2 +++ b/xen/arch/x86/hvm/vmx/vmx.c	Mon Aug 09 16:46:42 2010 +0100
     9.3 @@ -486,7 +486,8 @@ static int vmx_restore_cr0_cr3(
     9.4      {
     9.5          if ( cr0 & X86_CR0_PG )
     9.6          {
     9.7 -            mfn = mfn_x(gfn_to_mfn(v->domain, cr3 >> PAGE_SHIFT, &p2mt));
     9.8 +            mfn = mfn_x(gfn_to_mfn(p2m_get_hostp2m(v->domain),
     9.9 +                cr3 >> PAGE_SHIFT, &p2mt));
    9.10              if ( !p2m_is_ram(p2mt) || !get_page(mfn_to_page(mfn), v->domain) )
    9.11              {
    9.12                  gdprintk(XENLOG_ERR, "Invalid CR3 value=0x%lx\n", cr3);
    9.13 @@ -1002,7 +1003,8 @@ static void vmx_load_pdptrs(struct vcpu 
    9.14      if ( cr3 & 0x1fUL )
    9.15          goto crash;
    9.16  
    9.17 -    mfn = mfn_x(gfn_to_mfn(v->domain, cr3 >> PAGE_SHIFT, &p2mt));
    9.18 +    mfn = mfn_x(gfn_to_mfn(p2m_get_hostp2m(v->domain),
    9.19 +        cr3 >> PAGE_SHIFT, &p2mt));
    9.20      if ( !p2m_is_ram(p2mt) )
    9.21          goto crash;
    9.22  
    9.23 @@ -1221,7 +1223,7 @@ void ept_sync_domain(struct domain *d)
    9.24          return;
    9.25  
    9.26      ASSERT(local_irq_is_enabled());
    9.27 -    ASSERT(p2m_locked_by_me(d->arch.p2m));
    9.28 +    ASSERT(p2m_locked_by_me(p2m_get_hostp2m(d)));
    9.29  
    9.30      /*
    9.31       * Flush active cpus synchronously. Flush others the next time this domain
    9.32 @@ -1340,7 +1342,7 @@ static void vmx_set_uc_mode(struct vcpu 
    9.33  {
    9.34      if ( paging_mode_hap(v->domain) )
    9.35          ept_change_entry_emt_with_range(
    9.36 -            v->domain, 0, v->domain->arch.p2m->max_mapped_pfn);
    9.37 +            v->domain, 0, p2m_get_hostp2m(v->domain)->max_mapped_pfn);
    9.38      hvm_asid_flush_vcpu(v);
    9.39  }
    9.40  
    9.41 @@ -1893,7 +1895,8 @@ static int vmx_alloc_vlapic_mapping(stru
    9.42          return -ENOMEM;
    9.43      share_xen_page_with_guest(virt_to_page(apic_va), d, XENSHARE_writable);
    9.44      set_mmio_p2m_entry(
    9.45 -        d, paddr_to_pfn(APIC_DEFAULT_PHYS_BASE), _mfn(virt_to_mfn(apic_va)));
    9.46 +        p2m_get_hostp2m(d), paddr_to_pfn(APIC_DEFAULT_PHYS_BASE),
    9.47 +        _mfn(virt_to_mfn(apic_va)));
    9.48      d->arch.hvm_domain.vmx.apic_access_mfn = virt_to_mfn(apic_va);
    9.49  
    9.50      return 0;
    9.51 @@ -2098,6 +2101,7 @@ static void ept_handle_violation(unsigne
    9.52      unsigned long gla, gfn = gpa >> PAGE_SHIFT;
    9.53      mfn_t mfn;
    9.54      p2m_type_t p2mt;
    9.55 +    struct p2m_domain *p2m = p2m_get_hostp2m(current->domain);
    9.56  
    9.57      if ( tb_init_done )
    9.58      {
    9.59 @@ -2110,7 +2114,7 @@ static void ept_handle_violation(unsigne
    9.60  
    9.61          _d.gpa = gpa;
    9.62          _d.qualification = qualification;
    9.63 -        _d.mfn = mfn_x(gfn_to_mfn_query(current->domain, gfn, &_d.p2mt));
    9.64 +        _d.mfn = mfn_x(gfn_to_mfn_query(p2m, gfn, &_d.p2mt));
    9.65          
    9.66          __trace_var(TRC_HVM_NPF, 0, sizeof(_d), (unsigned char *)&_d);
    9.67      }
    9.68 @@ -2120,7 +2124,7 @@ static void ept_handle_violation(unsigne
    9.69          return;
    9.70  
    9.71      /* Everything else is an error. */
    9.72 -    mfn = gfn_to_mfn_type_current(gfn, &p2mt, p2m_guest);
    9.73 +    mfn = gfn_to_mfn_guest(p2m, gfn, &p2mt);
    9.74      gdprintk(XENLOG_ERR, "EPT violation %#lx (%c%c%c/%c%c%c), "
    9.75               "gpa %#"PRIpaddr", mfn %#lx, type %i.\n", 
    9.76               qualification, 
    10.1 --- a/xen/arch/x86/mm.c	Mon Aug 09 16:40:18 2010 +0100
    10.2 +++ b/xen/arch/x86/mm.c	Mon Aug 09 16:46:42 2010 +0100
    10.3 @@ -398,7 +398,7 @@ int page_is_ram_type(unsigned long mfn, 
    10.4  unsigned long domain_get_maximum_gpfn(struct domain *d)
    10.5  {
    10.6      if ( is_hvm_domain(d) )
    10.7 -        return d->arch.p2m->max_mapped_pfn;
    10.8 +        return p2m_get_hostp2m(d)->max_mapped_pfn;
    10.9      /* NB. PV guests specify nr_pfns rather than max_pfn so we adjust here. */
   10.10      return arch_get_max_pfn(d) - 1;
   10.11  }
   10.12 @@ -1741,7 +1741,8 @@ static int mod_l1_entry(l1_pgentry_t *pl
   10.13      if ( l1e_get_flags(nl1e) & _PAGE_PRESENT )
   10.14      {
   10.15          /* Translate foreign guest addresses. */
   10.16 -        mfn = mfn_x(gfn_to_mfn(pg_dom, l1e_get_pfn(nl1e), &p2mt));
   10.17 +        mfn = mfn_x(gfn_to_mfn(p2m_get_hostp2m(pg_dom),
   10.18 +            l1e_get_pfn(nl1e), &p2mt));
   10.19          if ( !p2m_is_ram(p2mt) || unlikely(mfn == INVALID_MFN) )
   10.20              return 0;
   10.21          ASSERT((mfn & ~(PADDR_MASK >> PAGE_SHIFT)) == 0);
   10.22 @@ -3318,8 +3319,8 @@ int do_mmu_update(
   10.23      struct page_info *page;
   10.24      int rc = 0, okay = 1, i = 0;
   10.25      unsigned int cmd, done = 0, pt_dom;
   10.26 -    struct domain *d = current->domain, *pt_owner = d, *pg_owner;
   10.27      struct vcpu *v = current;
   10.28 +    struct domain *d = v->domain, *pt_owner = d, *pg_owner;
   10.29      struct domain_mmap_cache mapcache;
   10.30  
   10.31      if ( unlikely(count & MMU_UPDATE_PREEMPTED) )
   10.32 @@ -3403,13 +3404,13 @@ int do_mmu_update(
   10.33  
   10.34              req.ptr -= cmd;
   10.35              gmfn = req.ptr >> PAGE_SHIFT;
   10.36 -            mfn = mfn_x(gfn_to_mfn(pt_owner, gmfn, &p2mt));
   10.37 +            mfn = mfn_x(gfn_to_mfn(p2m_get_hostp2m(pt_owner), gmfn, &p2mt));
   10.38              if ( !p2m_is_valid(p2mt) )
   10.39                mfn = INVALID_MFN;
   10.40  
   10.41              if ( p2m_is_paged(p2mt) )
   10.42              {
   10.43 -                p2m_mem_paging_populate(pg_owner, gmfn);
   10.44 +                p2m_mem_paging_populate(p2m_get_hostp2m(pg_owner), gmfn);
   10.45  
   10.46                  rc = -ENOENT;
   10.47                  break;
   10.48 @@ -3434,12 +3435,13 @@ int do_mmu_update(
   10.49                  {
   10.50                      l1_pgentry_t l1e = l1e_from_intpte(req.val);
   10.51                      p2m_type_t l1e_p2mt;
   10.52 -                    gfn_to_mfn(pg_owner, l1e_get_pfn(l1e), &l1e_p2mt);
   10.53 +                    gfn_to_mfn(p2m_get_hostp2m(pg_owner),
   10.54 +                        l1e_get_pfn(l1e), &l1e_p2mt);
   10.55  
   10.56                      if ( p2m_is_paged(l1e_p2mt) )
   10.57                      {
   10.58 -                        p2m_mem_paging_populate(pg_owner, l1e_get_pfn(l1e));
   10.59 -
   10.60 +                        p2m_mem_paging_populate(p2m_get_hostp2m(pg_owner),
   10.61 +                            l1e_get_pfn(l1e));
   10.62                          rc = -ENOENT;
   10.63                          break;
   10.64                      }
   10.65 @@ -3457,7 +3459,7 @@ int do_mmu_update(
   10.66                          /* Unshare the page for RW foreign mappings */
   10.67                          if ( l1e_get_flags(l1e) & _PAGE_RW )
   10.68                          {
   10.69 -                            rc = mem_sharing_unshare_page(pg_owner, 
   10.70 +                            rc = mem_sharing_unshare_page(p2m_get_hostp2m(pg_owner), 
   10.71                                                            l1e_get_pfn(l1e), 
   10.72                                                            0);
   10.73                              if ( rc )
   10.74 @@ -3475,12 +3477,12 @@ int do_mmu_update(
   10.75                  {
   10.76                      l2_pgentry_t l2e = l2e_from_intpte(req.val);
   10.77                      p2m_type_t l2e_p2mt;
   10.78 -                    gfn_to_mfn(pg_owner, l2e_get_pfn(l2e), &l2e_p2mt);
   10.79 +                    gfn_to_mfn(p2m_get_hostp2m(pg_owner), l2e_get_pfn(l2e), &l2e_p2mt);
   10.80  
   10.81                      if ( p2m_is_paged(l2e_p2mt) )
   10.82                      {
   10.83 -                        p2m_mem_paging_populate(pg_owner, l2e_get_pfn(l2e));
   10.84 -
   10.85 +                        p2m_mem_paging_populate(p2m_get_hostp2m(pg_owner),
   10.86 +                            l2e_get_pfn(l2e));
   10.87                          rc = -ENOENT;
   10.88                          break;
   10.89                      }
   10.90 @@ -3505,12 +3507,12 @@ int do_mmu_update(
   10.91                  {
   10.92                      l3_pgentry_t l3e = l3e_from_intpte(req.val);
   10.93                      p2m_type_t l3e_p2mt;
   10.94 -                    gfn_to_mfn(pg_owner, l3e_get_pfn(l3e), &l3e_p2mt);
   10.95 +                    gfn_to_mfn(p2m_get_hostp2m(pg_owner), l3e_get_pfn(l3e), &l3e_p2mt);
   10.96  
   10.97                      if ( p2m_is_paged(l3e_p2mt) )
   10.98                      {
   10.99 -                        p2m_mem_paging_populate(pg_owner, l3e_get_pfn(l3e));
  10.100 -
  10.101 +                        p2m_mem_paging_populate(p2m_get_hostp2m(pg_owner),
  10.102 +                            l3e_get_pfn(l3e));
  10.103                          rc = -ENOENT;
  10.104                          break;
  10.105                      }
  10.106 @@ -3536,12 +3538,13 @@ int do_mmu_update(
  10.107                  {
  10.108                      l4_pgentry_t l4e = l4e_from_intpte(req.val);
  10.109                      p2m_type_t l4e_p2mt;
  10.110 -                    gfn_to_mfn(pg_owner, l4e_get_pfn(l4e), &l4e_p2mt);
  10.111 +                    gfn_to_mfn(p2m_get_hostp2m(pg_owner),
  10.112 +                        l4e_get_pfn(l4e), &l4e_p2mt);
  10.113  
  10.114                      if ( p2m_is_paged(l4e_p2mt) )
  10.115                      {
  10.116 -                        p2m_mem_paging_populate(pg_owner, l4e_get_pfn(l4e));
  10.117 -
  10.118 +                        p2m_mem_paging_populate(p2m_get_hostp2m(pg_owner),
  10.119 +                            l4e_get_pfn(l4e));
  10.120                          rc = -ENOENT;
  10.121                          break;
  10.122                      }
  10.123 @@ -3923,8 +3926,8 @@ static int create_grant_p2m_mapping(uint
  10.124          p2mt = p2m_grant_map_ro;
  10.125      else
  10.126          p2mt = p2m_grant_map_rw;
  10.127 -    rc = guest_physmap_add_entry(current->domain, addr >> PAGE_SHIFT,
  10.128 -                                 frame, 0, p2mt);
  10.129 +    rc = guest_physmap_add_entry(p2m_get_hostp2m(current->domain),
  10.130 +                                 addr >> PAGE_SHIFT, frame, 0, p2mt);
  10.131      if ( rc )
  10.132          return GNTST_general_error;
  10.133      else
  10.134 @@ -3962,11 +3965,12 @@ static int replace_grant_p2m_mapping(
  10.135      unsigned long gfn = (unsigned long)(addr >> PAGE_SHIFT);
  10.136      p2m_type_t type;
  10.137      mfn_t old_mfn;
  10.138 +    struct domain *d = current->domain;
  10.139  
  10.140      if ( new_addr != 0 || (flags & GNTMAP_contains_pte) )
  10.141          return GNTST_general_error;
  10.142  
  10.143 -    old_mfn = gfn_to_mfn_current(gfn, &type);
  10.144 +    old_mfn = gfn_to_mfn(p2m_get_hostp2m(d), gfn, &type);
  10.145      if ( !p2m_is_grant(type) || mfn_x(old_mfn) != frame )
  10.146      {
  10.147          gdprintk(XENLOG_WARNING,
  10.148 @@ -3974,7 +3978,7 @@ static int replace_grant_p2m_mapping(
  10.149                   type, mfn_x(old_mfn), frame);
  10.150          return GNTST_general_error;
  10.151      }
  10.152 -    guest_physmap_remove_page(current->domain, gfn, frame, 0);
  10.153 +    guest_physmap_remove_page(d, gfn, frame, 0);
  10.154  
  10.155      return GNTST_okay;
  10.156  }
  10.157 @@ -4581,7 +4585,8 @@ long arch_memory_op(int op, XEN_GUEST_HA
  10.158          {
  10.159              p2m_type_t p2mt;
  10.160  
  10.161 -            xatp.idx = mfn_x(gfn_to_mfn_unshare(d, xatp.idx, &p2mt, 0));
  10.162 +            xatp.idx = mfn_x(gfn_to_mfn_unshare(p2m_get_hostp2m(d),
  10.163 +                                                xatp.idx, &p2mt, 0));
  10.164              /* If the page is still shared, exit early */
  10.165              if ( p2m_is_shared(p2mt) )
  10.166              {
  10.167 @@ -4771,6 +4776,7 @@ long arch_memory_op(int op, XEN_GUEST_HA
  10.168      {
  10.169          xen_pod_target_t target;
  10.170          struct domain *d;
  10.171 +        struct p2m_domain *p2m;
  10.172  
  10.173          /* Support DOMID_SELF? */
  10.174          if ( !IS_PRIV(current->domain) )
  10.175 @@ -4794,9 +4800,10 @@ long arch_memory_op(int op, XEN_GUEST_HA
  10.176              rc = p2m_pod_set_mem_target(d, target.target_pages);
  10.177          }
  10.178  
  10.179 +        p2m = p2m_get_hostp2m(d);
  10.180          target.tot_pages       = d->tot_pages;
  10.181 -        target.pod_cache_pages = d->arch.p2m->pod.count;
  10.182 -        target.pod_entries     = d->arch.p2m->pod.entry_count;
  10.183 +        target.pod_cache_pages = p2m->pod.count;
  10.184 +        target.pod_entries     = p2m->pod.entry_count;
  10.185  
  10.186          if ( copy_to_guest(arg, &target, 1) )
  10.187          {
    11.1 --- a/xen/arch/x86/mm/guest_walk.c	Mon Aug 09 16:40:18 2010 +0100
    11.2 +++ b/xen/arch/x86/mm/guest_walk.c	Mon Aug 09 16:46:42 2010 +0100
    11.3 @@ -86,17 +86,17 @@ static uint32_t set_ad_bits(void *guest_
    11.4      return 0;
    11.5  }
    11.6  
    11.7 -static inline void *map_domain_gfn(struct domain *d,
    11.8 +static inline void *map_domain_gfn(struct p2m_domain *p2m,
    11.9                                     gfn_t gfn, 
   11.10                                     mfn_t *mfn,
   11.11                                     p2m_type_t *p2mt,
   11.12                                     uint32_t *rc) 
   11.13  {
   11.14      /* Translate the gfn, unsharing if shared */
   11.15 -    *mfn = gfn_to_mfn_unshare(d, gfn_x(gfn), p2mt, 0);
   11.16 +    *mfn = gfn_to_mfn_unshare(p2m, gfn_x(gfn), p2mt, 0);
   11.17      if ( p2m_is_paging(*p2mt) )
   11.18      {
   11.19 -        p2m_mem_paging_populate(d, gfn_x(gfn));
   11.20 +        p2m_mem_paging_populate(p2m, gfn_x(gfn));
   11.21  
   11.22          *rc = _PAGE_PAGED;
   11.23          return NULL;
   11.24 @@ -119,7 +119,8 @@ static inline void *map_domain_gfn(struc
   11.25  
   11.26  /* Walk the guest pagetables, after the manner of a hardware walker. */
   11.27  uint32_t
   11.28 -guest_walk_tables(struct vcpu *v, unsigned long va, walk_t *gw, 
   11.29 +guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m,
   11.30 +                  unsigned long va, walk_t *gw, 
   11.31                    uint32_t pfec, mfn_t top_mfn, void *top_map)
   11.32  {
   11.33      struct domain *d = v->domain;
   11.34 @@ -154,7 +155,7 @@ guest_walk_tables(struct vcpu *v, unsign
   11.35      if ( rc & _PAGE_PRESENT ) goto out;
   11.36  
   11.37      /* Map the l3 table */
   11.38 -    l3p = map_domain_gfn(d, 
   11.39 +    l3p = map_domain_gfn(p2m, 
   11.40                           guest_l4e_get_gfn(gw->l4e), 
   11.41                           &gw->l3mfn,
   11.42                           &p2mt, 
   11.43 @@ -181,7 +182,7 @@ guest_walk_tables(struct vcpu *v, unsign
   11.44  #endif /* PAE or 64... */
   11.45  
   11.46      /* Map the l2 table */
   11.47 -    l2p = map_domain_gfn(d, 
   11.48 +    l2p = map_domain_gfn(p2m, 
   11.49                           guest_l3e_get_gfn(gw->l3e), 
   11.50                           &gw->l2mfn,
   11.51                           &p2mt, 
   11.52 @@ -237,7 +238,7 @@ guest_walk_tables(struct vcpu *v, unsign
   11.53      else 
   11.54      {
   11.55          /* Not a superpage: carry on and find the l1e. */
   11.56 -        l1p = map_domain_gfn(d, 
   11.57 +        l1p = map_domain_gfn(p2m, 
   11.58                               guest_l2e_get_gfn(gw->l2e), 
   11.59                               &gw->l1mfn,
   11.60                               &p2mt,
    12.1 --- a/xen/arch/x86/mm/hap/guest_walk.c	Mon Aug 09 16:40:18 2010 +0100
    12.2 +++ b/xen/arch/x86/mm/hap/guest_walk.c	Mon Aug 09 16:46:42 2010 +0100
    12.3 @@ -43,13 +43,14 @@ unsigned long hap_gva_to_gfn(GUEST_PAGIN
    12.4      void *top_map;
    12.5      p2m_type_t p2mt;
    12.6      walk_t gw;
    12.7 +    struct p2m_domain *p2m = p2m_get_hostp2m(v->domain);
    12.8  
    12.9      /* Get the top-level table's MFN */
   12.10      cr3 = v->arch.hvm_vcpu.guest_cr[3];
   12.11 -    top_mfn = gfn_to_mfn_unshare(v->domain, cr3 >> PAGE_SHIFT, &p2mt, 0);
   12.12 +    top_mfn = gfn_to_mfn_unshare(p2m, cr3 >> PAGE_SHIFT, &p2mt, 0);
   12.13      if ( p2m_is_paging(p2mt) )
   12.14      {
   12.15 -        p2m_mem_paging_populate(v->domain, cr3 >> PAGE_SHIFT);
   12.16 +        p2m_mem_paging_populate(p2m, cr3 >> PAGE_SHIFT);
   12.17  
   12.18          pfec[0] = PFEC_page_paged;
   12.19          return INVALID_GFN;
   12.20 @@ -71,17 +72,17 @@ unsigned long hap_gva_to_gfn(GUEST_PAGIN
   12.21  #if GUEST_PAGING_LEVELS == 3
   12.22      top_map += (cr3 & ~(PAGE_MASK | 31));
   12.23  #endif
   12.24 -    missing = guest_walk_tables(v, gva, &gw, pfec[0], top_mfn, top_map);
   12.25 +    missing = guest_walk_tables(v, p2m, gva, &gw, pfec[0], top_mfn, top_map);
   12.26      unmap_domain_page(top_map);
   12.27  
   12.28      /* Interpret the answer */
   12.29      if ( missing == 0 )
   12.30      {
   12.31          gfn_t gfn = guest_l1e_get_gfn(gw.l1e);
   12.32 -        gfn_to_mfn_unshare(v->domain, gfn_x(gfn), &p2mt, 0);
   12.33 +        gfn_to_mfn_unshare(p2m, gfn_x(gfn), &p2mt, 0);
   12.34          if ( p2m_is_paging(p2mt) )
   12.35          {
   12.36 -            p2m_mem_paging_populate(v->domain, gfn_x(gfn));
   12.37 +            p2m_mem_paging_populate(p2m, gfn_x(gfn));
   12.38  
   12.39              pfec[0] = PFEC_page_paged;
   12.40              return INVALID_GFN;
   12.41 @@ -130,4 +131,3 @@ unsigned long hap_gva_to_gfn(GUEST_PAGIN
   12.42   * indent-tabs-mode: nil
   12.43   * End:
   12.44   */
   12.45 -
    13.1 --- a/xen/arch/x86/mm/hap/hap.c	Mon Aug 09 16:40:18 2010 +0100
    13.2 +++ b/xen/arch/x86/mm/hap/hap.c	Mon Aug 09 16:46:42 2010 +0100
    13.3 @@ -70,7 +70,7 @@ static int hap_enable_vram_tracking(stru
    13.4  
    13.5      /* set l1e entries of P2M table to be read-only. */
    13.6      for (i = dirty_vram->begin_pfn; i < dirty_vram->end_pfn; i++)
    13.7 -        p2m_change_type(d, i, p2m_ram_rw, p2m_ram_logdirty);
    13.8 +        p2m_change_type(p2m_get_hostp2m(d), i, p2m_ram_rw, p2m_ram_logdirty);
    13.9  
   13.10      flush_tlb_mask(&d->domain_dirty_cpumask);
   13.11      return 0;
   13.12 @@ -90,7 +90,7 @@ static int hap_disable_vram_tracking(str
   13.13  
   13.14      /* set l1e entries of P2M table with normal mode */
   13.15      for (i = dirty_vram->begin_pfn; i < dirty_vram->end_pfn; i++)
   13.16 -        p2m_change_type(d, i, p2m_ram_logdirty, p2m_ram_rw);
   13.17 +        p2m_change_type(p2m_get_hostp2m(d), i, p2m_ram_logdirty, p2m_ram_rw);
   13.18  
   13.19      flush_tlb_mask(&d->domain_dirty_cpumask);
   13.20      return 0;
   13.21 @@ -106,7 +106,7 @@ static void hap_clean_vram_tracking(stru
   13.22  
   13.23      /* set l1e entries of P2M table to be read-only. */
   13.24      for (i = dirty_vram->begin_pfn; i < dirty_vram->end_pfn; i++)
   13.25 -        p2m_change_type(d, i, p2m_ram_rw, p2m_ram_logdirty);
   13.26 +        p2m_change_type(p2m_get_hostp2m(d), i, p2m_ram_rw, p2m_ram_logdirty);
   13.27  
   13.28      flush_tlb_mask(&d->domain_dirty_cpumask);
   13.29  }
   13.30 @@ -200,7 +200,8 @@ static int hap_enable_log_dirty(struct d
   13.31      hap_unlock(d);
   13.32  
   13.33      /* set l1e entries of P2M table to be read-only. */
   13.34 -    p2m_change_entry_type_global(d, p2m_ram_rw, p2m_ram_logdirty);
   13.35 +    p2m_change_entry_type_global(p2m_get_hostp2m(d),
   13.36 +        p2m_ram_rw, p2m_ram_logdirty);
   13.37      flush_tlb_mask(&d->domain_dirty_cpumask);
   13.38      return 0;
   13.39  }
   13.40 @@ -212,14 +213,16 @@ static int hap_disable_log_dirty(struct 
   13.41      hap_unlock(d);
   13.42  
   13.43      /* set l1e entries of P2M table with normal mode */
   13.44 -    p2m_change_entry_type_global(d, p2m_ram_logdirty, p2m_ram_rw);
   13.45 +    p2m_change_entry_type_global(p2m_get_hostp2m(d),
   13.46 +        p2m_ram_logdirty, p2m_ram_rw);
   13.47      return 0;
   13.48  }
   13.49  
   13.50  static void hap_clean_dirty_bitmap(struct domain *d)
   13.51  {
   13.52      /* set l1e entries of P2M table to be read-only. */
   13.53 -    p2m_change_entry_type_global(d, p2m_ram_rw, p2m_ram_logdirty);
   13.54 +    p2m_change_entry_type_global(p2m_get_hostp2m(d),
   13.55 +        p2m_ram_rw, p2m_ram_logdirty);
   13.56      flush_tlb_mask(&d->domain_dirty_cpumask);
   13.57  }
   13.58  
   13.59 @@ -273,8 +276,9 @@ static void hap_free(struct domain *d, m
   13.60      page_list_add_tail(pg, &d->arch.paging.hap.freelist);
   13.61  }
   13.62  
   13.63 -static struct page_info *hap_alloc_p2m_page(struct domain *d)
   13.64 +static struct page_info *hap_alloc_p2m_page(struct p2m_domain *p2m)
   13.65  {
   13.66 +    struct domain *d = p2m->domain;
   13.67      struct page_info *pg;
   13.68  
   13.69      hap_lock(d);
   13.70 @@ -312,8 +316,9 @@ static struct page_info *hap_alloc_p2m_p
   13.71      return pg;
   13.72  }
   13.73  
   13.74 -static void hap_free_p2m_page(struct domain *d, struct page_info *pg)
   13.75 +static void hap_free_p2m_page(struct p2m_domain *p2m, struct page_info *pg)
   13.76  {
   13.77 +    struct domain *d = p2m->domain;
   13.78      hap_lock(d);
   13.79      ASSERT(page_get_owner(pg) == d);
   13.80      /* Should have just the one ref we gave it in alloc_p2m_page() */
   13.81 @@ -594,7 +599,8 @@ int hap_enable(struct domain *d, u32 mod
   13.82      /* allocate P2m table */
   13.83      if ( mode & PG_translate )
   13.84      {
   13.85 -        rv = p2m_alloc_table(d, hap_alloc_p2m_page, hap_free_p2m_page);
   13.86 +        rv = p2m_alloc_table(p2m_get_hostp2m(d),
   13.87 +            hap_alloc_p2m_page, hap_free_p2m_page);
   13.88          if ( rv != 0 )
   13.89              goto out;
   13.90      }
   13.91 @@ -611,7 +617,7 @@ void hap_final_teardown(struct domain *d
   13.92      if ( d->arch.paging.hap.total_pages != 0 )
   13.93          hap_teardown(d);
   13.94  
   13.95 -    p2m_teardown(d);
   13.96 +    p2m_teardown(p2m_get_hostp2m(d));
   13.97      ASSERT(d->arch.paging.hap.p2m_pages == 0);
   13.98  }
   13.99  
  13.100 @@ -711,9 +717,11 @@ void hap_vcpu_init(struct vcpu *v)
  13.101  static int hap_page_fault(struct vcpu *v, unsigned long va,
  13.102                            struct cpu_user_regs *regs)
  13.103  {
  13.104 +    struct domain *d = v->domain;
  13.105 +
  13.106      HAP_ERROR("Intercepted a guest #PF (%u:%u) with HAP enabled.\n",
  13.107 -              v->domain->domain_id, v->vcpu_id);
  13.108 -    domain_crash(v->domain);
  13.109 +              d->domain_id, v->vcpu_id);
  13.110 +    domain_crash(d);
  13.111      return 0;
  13.112  }
  13.113  
  13.114 @@ -882,5 +890,3 @@ static const struct paging_mode hap_pagi
  13.115   * indent-tabs-mode: nil
  13.116   * End:
  13.117   */
  13.118 -
  13.119 -
    14.1 --- a/xen/arch/x86/mm/hap/p2m-ept.c	Mon Aug 09 16:40:18 2010 +0100
    14.2 +++ b/xen/arch/x86/mm/hap/p2m-ept.c	Mon Aug 09 16:46:42 2010 +0100
    14.3 @@ -36,23 +36,23 @@
    14.4  #define is_epte_superpage(ept_entry)    ((ept_entry)->sp)
    14.5  
    14.6  /* Non-ept "lock-and-check" wrapper */
    14.7 -static int ept_pod_check_and_populate(struct domain *d, unsigned long gfn,
    14.8 +static int ept_pod_check_and_populate(struct p2m_domain *p2m, unsigned long gfn,
    14.9                                        ept_entry_t *entry, int order,
   14.10                                        p2m_query_t q)
   14.11  {
   14.12      int r;
   14.13 -    p2m_lock(d->arch.p2m);
   14.14 +    p2m_lock(p2m);
   14.15  
   14.16      /* Check to make sure this is still PoD */
   14.17      if ( entry->avail1 != p2m_populate_on_demand )
   14.18      {
   14.19 -        p2m_unlock(d->arch.p2m);
   14.20 +        p2m_unlock(p2m);
   14.21          return 0;
   14.22      }
   14.23  
   14.24 -    r = p2m_pod_demand_populate(d, gfn, order, q);
   14.25 +    r = p2m_pod_demand_populate(p2m, gfn, order, q);
   14.26  
   14.27 -    p2m_unlock(d->arch.p2m);
   14.28 +    p2m_unlock(p2m);
   14.29  
   14.30      return r;
   14.31  }
   14.32 @@ -98,11 +98,11 @@ static void ept_p2m_type_to_flags(ept_en
   14.33  #define GUEST_TABLE_POD_PAGE    3
   14.34  
   14.35  /* Fill in middle levels of ept table */
   14.36 -static int ept_set_middle_entry(struct domain *d, ept_entry_t *ept_entry)
   14.37 +static int ept_set_middle_entry(struct p2m_domain *p2m, ept_entry_t *ept_entry)
   14.38  {
   14.39      struct page_info *pg;
   14.40  
   14.41 -    pg = p2m_alloc_ptp(d, 0);
   14.42 +    pg = p2m_alloc_ptp(p2m, 0);
   14.43      if ( pg == NULL )
   14.44          return 0;
   14.45  
   14.46 @@ -119,7 +119,7 @@ static int ept_set_middle_entry(struct d
   14.47  }
   14.48  
   14.49  /* free ept sub tree behind an entry */
   14.50 -void ept_free_entry(struct domain *d, ept_entry_t *ept_entry, int level)
   14.51 +void ept_free_entry(struct p2m_domain *p2m, ept_entry_t *ept_entry, int level)
   14.52  {
   14.53      /* End if the entry is a leaf entry. */
   14.54      if ( level == 0 || !is_epte_present(ept_entry) ||
   14.55 @@ -130,14 +130,14 @@ void ept_free_entry(struct domain *d, ep
   14.56      {
   14.57          ept_entry_t *epte = map_domain_page(ept_entry->mfn);
   14.58          for ( int i = 0; i < EPT_PAGETABLE_ENTRIES; i++ )
   14.59 -            ept_free_entry(d, epte + i, level - 1);
   14.60 +            ept_free_entry(p2m, epte + i, level - 1);
   14.61          unmap_domain_page(epte);
   14.62      }
   14.63  
   14.64 -    d->arch.p2m->free_page(d, mfn_to_page(ept_entry->mfn));
   14.65 +    p2m->free_page(p2m, mfn_to_page(ept_entry->mfn));
   14.66  }
   14.67  
   14.68 -static int ept_split_super_page(struct domain *d, ept_entry_t *ept_entry,
   14.69 +static int ept_split_super_page(struct p2m_domain *p2m, ept_entry_t *ept_entry,
   14.70                                  int level, int target)
   14.71  {
   14.72      ept_entry_t new_ept, *table;
   14.73 @@ -150,7 +150,7 @@ static int ept_split_super_page(struct d
   14.74  
   14.75      ASSERT(is_epte_superpage(ept_entry));
   14.76  
   14.77 -    if ( !ept_set_middle_entry(d, &new_ept) )
   14.78 +    if ( !ept_set_middle_entry(p2m, &new_ept) )
   14.79          return 0;
   14.80  
   14.81      table = map_domain_page(new_ept.mfn);
   14.82 @@ -174,7 +174,7 @@ static int ept_split_super_page(struct d
   14.83  
   14.84          ASSERT(is_epte_superpage(epte));
   14.85  
   14.86 -        if ( !(rv = ept_split_super_page(d, epte, level - 1, target)) )
   14.87 +        if ( !(rv = ept_split_super_page(p2m, epte, level - 1, target)) )
   14.88              break;
   14.89      }
   14.90  
   14.91 @@ -200,7 +200,7 @@ static int ept_split_super_page(struct d
   14.92   *  GUEST_TABLE_POD:
   14.93   *   The next entry is marked populate-on-demand.
   14.94   */
   14.95 -static int ept_next_level(struct domain *d, bool_t read_only,
   14.96 +static int ept_next_level(struct p2m_domain *p2m, bool_t read_only,
   14.97                            ept_entry_t **table, unsigned long *gfn_remainder,
   14.98                            int next_level)
   14.99  {
  14.100 @@ -225,7 +225,7 @@ static int ept_next_level(struct domain 
  14.101          if ( read_only )
  14.102              return GUEST_TABLE_MAP_FAILED;
  14.103  
  14.104 -        if ( !ept_set_middle_entry(d, ept_entry) )
  14.105 +        if ( !ept_set_middle_entry(p2m, ept_entry) )
  14.106              return GUEST_TABLE_MAP_FAILED;
  14.107      }
  14.108  
  14.109 @@ -245,7 +245,7 @@ static int ept_next_level(struct domain 
  14.110   * by observing whether any gfn->mfn translations are modified.
  14.111   */
  14.112  static int
  14.113 -ept_set_entry(struct domain *d, unsigned long gfn, mfn_t mfn,
  14.114 +ept_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn, 
  14.115                unsigned int order, p2m_type_t p2mt)
  14.116  {
  14.117      ept_entry_t *table, *ept_entry;
  14.118 @@ -259,6 +259,7 @@ ept_set_entry(struct domain *d, unsigned
  14.119      uint8_t ipat = 0;
  14.120      int need_modify_vtd_table = 1;
  14.121      int needs_sync = 1;
  14.122 +    struct domain *d = p2m->domain;
  14.123  
  14.124      /*
  14.125       * the caller must make sure:
  14.126 @@ -281,7 +282,7 @@ ept_set_entry(struct domain *d, unsigned
  14.127  
  14.128      for ( i = ept_get_wl(d); i > target; i-- )
  14.129      {
  14.130 -        ret = ept_next_level(d, 0, &table, &gfn_remainder, i);
  14.131 +        ret = ept_next_level(p2m, 0, &table, &gfn_remainder, i);
  14.132          if ( !ret )
  14.133              goto out;
  14.134          else if ( ret != GUEST_TABLE_NORMAL_PAGE )
  14.135 @@ -311,7 +312,7 @@ ept_set_entry(struct domain *d, unsigned
  14.136          if ( mfn_valid(mfn_x(mfn)) || direct_mmio || p2m_is_paged(p2mt) ||
  14.137               (p2mt == p2m_ram_paging_in_start) )
  14.138          {
  14.139 -            ept_entry->emt = epte_get_entry_emt(d, gfn, mfn, &ipat,
  14.140 +            ept_entry->emt = epte_get_entry_emt(p2m->domain, gfn, mfn, &ipat,
  14.141                                                  direct_mmio);
  14.142              ept_entry->ipat = ipat;
  14.143              ept_entry->sp = order ? 1 : 0;
  14.144 @@ -337,9 +338,9 @@ ept_set_entry(struct domain *d, unsigned
  14.145  
  14.146          split_ept_entry = *ept_entry;
  14.147  
  14.148 -        if ( !ept_split_super_page(d, &split_ept_entry, i, target) )
  14.149 +        if ( !ept_split_super_page(p2m, &split_ept_entry, i, target) )
  14.150          {
  14.151 -            ept_free_entry(d, &split_ept_entry, i);
  14.152 +            ept_free_entry(p2m, &split_ept_entry, i);
  14.153              goto out;
  14.154          }
  14.155  
  14.156 @@ -349,7 +350,7 @@ ept_set_entry(struct domain *d, unsigned
  14.157  
  14.158          /* then move to the level we want to make real changes */
  14.159          for ( ; i > target; i-- )
  14.160 -            ept_next_level(d, 0, &table, &gfn_remainder, i);
  14.161 +            ept_next_level(p2m, 0, &table, &gfn_remainder, i);
  14.162  
  14.163          ASSERT(i == target);
  14.164  
  14.165 @@ -374,8 +375,8 @@ ept_set_entry(struct domain *d, unsigned
  14.166  
  14.167      /* Track the highest gfn for which we have ever had a valid mapping */
  14.168      if ( mfn_valid(mfn_x(mfn)) &&
  14.169 -         (gfn + (1UL << order) - 1 > d->arch.p2m->max_mapped_pfn) )
  14.170 -        d->arch.p2m->max_mapped_pfn = gfn + (1UL << order) - 1;
  14.171 +         (gfn + (1UL << order) - 1 > p2m->max_mapped_pfn) )
  14.172 +        p2m->max_mapped_pfn = gfn + (1UL << order) - 1;
  14.173  
  14.174      /* Success */
  14.175      rv = 1;
  14.176 @@ -384,10 +385,10 @@ out:
  14.177      unmap_domain_page(table);
  14.178  
  14.179      if ( needs_sync )
  14.180 -        ept_sync_domain(d);
  14.181 +        ept_sync_domain(p2m->domain);
  14.182  
  14.183      /* Now the p2m table is not shared with vt-d page table */
  14.184 -    if ( rv && iommu_enabled && need_iommu(d) && need_modify_vtd_table )
  14.185 +    if ( rv && iommu_enabled && need_iommu(p2m->domain) && need_modify_vtd_table )
  14.186      {
  14.187          if ( p2mt == p2m_ram_rw )
  14.188          {
  14.189 @@ -395,22 +396,22 @@ out:
  14.190              {
  14.191                  for ( i = 0; i < (1 << order); i++ )
  14.192                      iommu_map_page(
  14.193 -                        d, gfn - offset + i, mfn_x(mfn) - offset + i,
  14.194 +                        p2m->domain, gfn - offset + i, mfn_x(mfn) - offset + i,
  14.195                          IOMMUF_readable | IOMMUF_writable);
  14.196              }
  14.197              else if ( !order )
  14.198                  iommu_map_page(
  14.199 -                    d, gfn, mfn_x(mfn), IOMMUF_readable | IOMMUF_writable);
  14.200 +                    p2m->domain, gfn, mfn_x(mfn), IOMMUF_readable | IOMMUF_writable);
  14.201          }
  14.202          else
  14.203          {
  14.204              if ( order == EPT_TABLE_ORDER )
  14.205              {
  14.206                  for ( i = 0; i < (1 << order); i++ )
  14.207 -                    iommu_unmap_page(d, gfn - offset + i);
  14.208 +                    iommu_unmap_page(p2m->domain, gfn - offset + i);
  14.209              }
  14.210              else if ( !order )
  14.211 -                iommu_unmap_page(d, gfn);
  14.212 +                iommu_unmap_page(p2m->domain, gfn);
  14.213          }
  14.214      }
  14.215  
  14.216 @@ -418,9 +419,11 @@ out:
  14.217  }
  14.218  
  14.219  /* Read ept p2m entries */
  14.220 -static mfn_t ept_get_entry(struct domain *d, unsigned long gfn, p2m_type_t *t,
  14.221 +static mfn_t ept_get_entry(struct p2m_domain *p2m,
  14.222 +                           unsigned long gfn, p2m_type_t *t,
  14.223                             p2m_query_t q)
  14.224  {
  14.225 +    struct domain *d = p2m->domain;
  14.226      ept_entry_t *table = map_domain_page(ept_get_asr(d));
  14.227      unsigned long gfn_remainder = gfn;
  14.228      ept_entry_t *ept_entry;
  14.229 @@ -432,7 +435,7 @@ static mfn_t ept_get_entry(struct domain
  14.230      *t = p2m_mmio_dm;
  14.231  
  14.232      /* This pfn is higher than the highest the p2m map currently holds */
  14.233 -    if ( gfn > d->arch.p2m->max_mapped_pfn )
  14.234 +    if ( gfn > p2m->max_mapped_pfn )
  14.235          goto out;
  14.236  
  14.237      /* Should check if gfn obeys GAW here. */
  14.238 @@ -440,7 +443,7 @@ static mfn_t ept_get_entry(struct domain
  14.239      for ( i = ept_get_wl(d); i > 0; i-- )
  14.240      {
  14.241      retry:
  14.242 -        ret = ept_next_level(d, 1, &table, &gfn_remainder, i);
  14.243 +        ret = ept_next_level(p2m, 1, &table, &gfn_remainder, i);
  14.244          if ( !ret )
  14.245              goto out;
  14.246          else if ( ret == GUEST_TABLE_POD_PAGE )
  14.247 @@ -457,7 +460,7 @@ static mfn_t ept_get_entry(struct domain
  14.248              index = gfn_remainder >> ( i * EPT_TABLE_ORDER);
  14.249              ept_entry = table + index;
  14.250  
  14.251 -            if ( !ept_pod_check_and_populate(d, gfn,
  14.252 +            if ( !ept_pod_check_and_populate(p2m, gfn,
  14.253                                               ept_entry, 9, q) )
  14.254                  goto retry;
  14.255              else
  14.256 @@ -480,7 +483,7 @@ static mfn_t ept_get_entry(struct domain
  14.257  
  14.258          ASSERT(i == 0);
  14.259          
  14.260 -        if ( ept_pod_check_and_populate(d, gfn,
  14.261 +        if ( ept_pod_check_and_populate(p2m, gfn,
  14.262                                          ept_entry, 0, q) )
  14.263              goto out;
  14.264      }
  14.265 @@ -511,9 +514,10 @@ out:
  14.266  /* WARNING: Only caller doesn't care about PoD pages.  So this function will
  14.267   * always return 0 for PoD pages, not populate them.  If that becomes necessary,
  14.268   * pass a p2m_query_t type along to distinguish. */
  14.269 -static ept_entry_t ept_get_entry_content(struct domain *d, unsigned long gfn, int *level)
  14.270 +static ept_entry_t ept_get_entry_content(struct p2m_domain *p2m,
  14.271 +    unsigned long gfn, int *level)
  14.272  {
  14.273 -    ept_entry_t *table = map_domain_page(ept_get_asr(d));
  14.274 +    ept_entry_t *table = map_domain_page(ept_get_asr(p2m->domain));
  14.275      unsigned long gfn_remainder = gfn;
  14.276      ept_entry_t *ept_entry;
  14.277      ept_entry_t content = { .epte = 0 };
  14.278 @@ -522,12 +526,12 @@ static ept_entry_t ept_get_entry_content
  14.279      int ret=0;
  14.280  
  14.281      /* This pfn is higher than the highest the p2m map currently holds */
  14.282 -    if ( gfn > d->arch.p2m->max_mapped_pfn )
  14.283 +    if ( gfn > p2m->max_mapped_pfn )
  14.284          goto out;
  14.285  
  14.286 -    for ( i = ept_get_wl(d); i > 0; i-- )
  14.287 +    for ( i = ept_get_wl(p2m->domain); i > 0; i-- )
  14.288      {
  14.289 -        ret = ept_next_level(d, 1, &table, &gfn_remainder, i);
  14.290 +        ret = ept_next_level(p2m, 1, &table, &gfn_remainder, i);
  14.291          if ( !ret || ret == GUEST_TABLE_POD_PAGE )
  14.292              goto out;
  14.293          else if ( ret == GUEST_TABLE_SUPER_PAGE )
  14.294 @@ -546,6 +550,7 @@ static ept_entry_t ept_get_entry_content
  14.295  
  14.296  void ept_walk_table(struct domain *d, unsigned long gfn)
  14.297  {
  14.298 +    struct p2m_domain *p2m = p2m_get_hostp2m(d);
  14.299      ept_entry_t *table = map_domain_page(ept_get_asr(d));
  14.300      unsigned long gfn_remainder = gfn;
  14.301  
  14.302 @@ -555,10 +560,10 @@ void ept_walk_table(struct domain *d, un
  14.303             d->domain_id, gfn);
  14.304  
  14.305      /* This pfn is higher than the highest the p2m map currently holds */
  14.306 -    if ( gfn > d->arch.p2m->max_mapped_pfn )
  14.307 +    if ( gfn > p2m->max_mapped_pfn )
  14.308      {
  14.309          gdprintk(XENLOG_ERR, " gfn exceeds max_mapped_pfn %lx\n",
  14.310 -               d->arch.p2m->max_mapped_pfn);
  14.311 +               p2m->max_mapped_pfn);
  14.312          goto out;
  14.313      }
  14.314  
  14.315 @@ -593,17 +598,18 @@ out:
  14.316      return;
  14.317  }
  14.318  
  14.319 -static mfn_t ept_get_entry_current(unsigned long gfn, p2m_type_t *t,
  14.320 +static mfn_t ept_get_entry_current(struct p2m_domain *p2m,
  14.321 +                                   unsigned long gfn, p2m_type_t *t,
  14.322                                     p2m_query_t q)
  14.323  {
  14.324 -    return ept_get_entry(current->domain, gfn, t, q);
  14.325 +    return ept_get_entry(p2m, gfn, t, q);
  14.326  }
  14.327  
  14.328  /*
  14.329   * To test if the new emt type is the same with old,
  14.330   * return 1 to not to reset ept entry.
  14.331   */
  14.332 -static int need_modify_ept_entry(struct domain *d, unsigned long gfn,
  14.333 +static int need_modify_ept_entry(struct p2m_domain *p2m, unsigned long gfn,
  14.334                                   mfn_t mfn, uint8_t o_ipat, uint8_t o_emt,
  14.335                                   p2m_type_t p2mt)
  14.336  {
  14.337 @@ -611,7 +617,7 @@ static int need_modify_ept_entry(struct 
  14.338      uint8_t emt;
  14.339      bool_t direct_mmio = (p2mt == p2m_mmio_direct);
  14.340  
  14.341 -    emt = epte_get_entry_emt(d, gfn, mfn, &ipat, direct_mmio);
  14.342 +    emt = epte_get_entry_emt(p2m->domain, gfn, mfn, &ipat, direct_mmio);
  14.343  
  14.344      if ( (emt == o_emt) && (ipat == o_ipat) )
  14.345          return 0;
  14.346 @@ -619,21 +625,23 @@ static int need_modify_ept_entry(struct 
  14.347      return 1;
  14.348  }
  14.349  
  14.350 -void ept_change_entry_emt_with_range(struct domain *d, unsigned long start_gfn,
  14.351 +void ept_change_entry_emt_with_range(struct domain *d,
  14.352 +                                     unsigned long start_gfn,
  14.353                                       unsigned long end_gfn)
  14.354  {
  14.355      unsigned long gfn;
  14.356      ept_entry_t e;
  14.357      mfn_t mfn;
  14.358      int order = 0;
  14.359 +    struct p2m_domain *p2m = p2m_get_hostp2m(d);
  14.360  
  14.361 -    p2m_lock(d->arch.p2m);
  14.362 +    p2m_lock(p2m);
  14.363      for ( gfn = start_gfn; gfn <= end_gfn; gfn++ )
  14.364      {
  14.365          int level = 0;
  14.366          uint64_t trunk = 0;
  14.367  
  14.368 -        e = ept_get_entry_content(d, gfn, &level);
  14.369 +        e = ept_get_entry_content(p2m, gfn, &level);
  14.370          if ( !p2m_has_emt(e.avail1) )
  14.371              continue;
  14.372  
  14.373 @@ -652,9 +660,9 @@ void ept_change_entry_emt_with_range(str
  14.374                       * Set emt for super page.
  14.375                       */
  14.376                      order = level * EPT_TABLE_ORDER;
  14.377 -                    if ( need_modify_ept_entry(d, gfn, mfn, 
  14.378 +                    if ( need_modify_ept_entry(p2m, gfn, mfn, 
  14.379                            e.ipat, e.emt, e.avail1) )
  14.380 -                        ept_set_entry(d, gfn, mfn, order, e.avail1);
  14.381 +                        ept_set_entry(p2m, gfn, mfn, order, e.avail1);
  14.382                      gfn += trunk;
  14.383                      break;
  14.384                  }
  14.385 @@ -663,11 +671,11 @@ void ept_change_entry_emt_with_range(str
  14.386          }
  14.387          else /* gfn assigned with 4k */
  14.388          {
  14.389 -            if ( need_modify_ept_entry(d, gfn, mfn, e.ipat, e.emt, e.avail1) )
  14.390 -                ept_set_entry(d, gfn, mfn, order, e.avail1);
  14.391 +            if ( need_modify_ept_entry(p2m, gfn, mfn, e.ipat, e.emt, e.avail1) )
  14.392 +                ept_set_entry(p2m, gfn, mfn, order, e.avail1);
  14.393          }
  14.394      }
  14.395 -    p2m_unlock(d->arch.p2m);
  14.396 +    p2m_unlock(p2m);
  14.397  }
  14.398  
  14.399  /*
  14.400 @@ -701,9 +709,10 @@ static void ept_change_entry_type_page(m
  14.401      unmap_domain_page(epte);
  14.402  }
  14.403  
  14.404 -static void ept_change_entry_type_global(struct domain *d,
  14.405 +static void ept_change_entry_type_global(struct p2m_domain *p2m,
  14.406                                           p2m_type_t ot, p2m_type_t nt)
  14.407  {
  14.408 +    struct domain *d = p2m->domain;
  14.409      if ( ept_get_asr(d) == 0 )
  14.410          return;
  14.411  
  14.412 @@ -714,10 +723,11 @@ static void ept_change_entry_type_global
  14.413  
  14.414  void ept_p2m_init(struct domain *d)
  14.415  {
  14.416 -    d->arch.p2m->set_entry = ept_set_entry;
  14.417 -    d->arch.p2m->get_entry = ept_get_entry;
  14.418 -    d->arch.p2m->get_entry_current = ept_get_entry_current;
  14.419 -    d->arch.p2m->change_entry_type_global = ept_change_entry_type_global;
  14.420 +    struct p2m_domain *p2m = p2m_get_hostp2m(d);
  14.421 +    p2m->set_entry = ept_set_entry;
  14.422 +    p2m->get_entry = ept_get_entry;
  14.423 +    p2m->get_entry_current = ept_get_entry_current;
  14.424 +    p2m->change_entry_type_global = ept_change_entry_type_global;
  14.425  }
  14.426  
  14.427  static void ept_dump_p2m_table(unsigned char key)
  14.428 @@ -742,7 +752,7 @@ static void ept_dump_p2m_table(unsigned 
  14.429          p2m = p2m_get_hostp2m(d);
  14.430          printk("\ndomain%d EPT p2m table: \n", d->domain_id);
  14.431  
  14.432 -        for ( gfn = 0; gfn <= d->arch.p2m->max_mapped_pfn; gfn += (1 << order) )
  14.433 +        for ( gfn = 0; gfn <= p2m->max_mapped_pfn; gfn += (1 << order) )
  14.434          {
  14.435              gfn_remainder = gfn;
  14.436              mfn = _mfn(INVALID_MFN);
  14.437 @@ -750,7 +760,7 @@ static void ept_dump_p2m_table(unsigned 
  14.438  
  14.439              for ( i = ept_get_wl(d); i > 0; i-- )
  14.440              {
  14.441 -                ret = ept_next_level(d, 1, &table, &gfn_remainder, i);
  14.442 +                ret = ept_next_level(p2m, 1, &table, &gfn_remainder, i);
  14.443                  if ( ret != GUEST_TABLE_NORMAL_PAGE )
  14.444                      break;
  14.445              }
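
Illustrative sketch, not part of the changeset: the p2m-ept.c hunks above change the
EPT handlers to take a struct p2m_domain * directly and to be registered on that p2m
in ept_p2m_init().  A minimal sketch of the resulting handler shape, using only
interfaces visible in this series; my_get_entry and my_p2m_init are made-up names and
the two includes are assumed to be the usual Xen headers.

    #include <xen/sched.h>
    #include <asm/p2m.h>

    static mfn_t my_get_entry(struct p2m_domain *p2m, unsigned long gfn,
                              p2m_type_t *t, p2m_query_t q)
    {
        *t = p2m_mmio_dm;

        /* Range check is against this p2m's own high-water mark. */
        if ( gfn > p2m->max_mapped_pfn )
            return _mfn(INVALID_MFN);

        /* ... walk the table rooted at this p2m; p2m->domain names the owner ... */
        return _mfn(INVALID_MFN);
    }

    void my_p2m_init(struct domain *d)
    {
        /* Lookup callbacks now hang off the (host) p2m rather than the domain. */
        struct p2m_domain *p2m = p2m_get_hostp2m(d);

        p2m->get_entry = my_get_entry;
    }
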
    15.1 --- a/xen/arch/x86/mm/mem_event.c	Mon Aug 09 16:40:18 2010 +0100
    15.2 +++ b/xen/arch/x86/mm/mem_event.c	Mon Aug 09 16:46:42 2010 +0100
    15.3 @@ -235,7 +235,7 @@ int mem_event_domctl(struct domain *d, x
    15.4              /* Get MFN of ring page */
    15.5              guest_get_eff_l1e(v, ring_addr, &l1e);
    15.6              gfn = l1e_get_pfn(l1e);
    15.7 -            ring_mfn = gfn_to_mfn(dom_mem_event, gfn, &p2mt);
    15.8 +            ring_mfn = gfn_to_mfn(p2m_get_hostp2m(dom_mem_event), gfn, &p2mt);
    15.9  
   15.10              rc = -EINVAL;
   15.11              if ( unlikely(!mfn_valid(mfn_x(ring_mfn))) )
   15.12 @@ -244,7 +244,7 @@ int mem_event_domctl(struct domain *d, x
   15.13              /* Get MFN of shared page */
   15.14              guest_get_eff_l1e(v, shared_addr, &l1e);
   15.15              gfn = l1e_get_pfn(l1e);
   15.16 -            shared_mfn = gfn_to_mfn(dom_mem_event, gfn, &p2mt);
   15.17 +            shared_mfn = gfn_to_mfn(p2m_get_hostp2m(dom_mem_event), gfn, &p2mt);
   15.18  
   15.19              rc = -EINVAL;
   15.20              if ( unlikely(!mfn_valid(mfn_x(shared_mfn))) )
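
Illustrative sketch, not part of the changeset: the mem_event.c hunks above show the
caller-side half of the conversion, where code that only holds a struct domain *
fetches the host p2m with p2m_get_hostp2m() and hands it to gfn_to_mfn().  A minimal
sketch of that idiom; ring_gfn_is_valid is a made-up helper and the includes are
assumed.

    #include <xen/sched.h>
    #include <asm/p2m.h>

    static int ring_gfn_is_valid(struct domain *d, unsigned long gfn)
    {
        struct p2m_domain *p2m = p2m_get_hostp2m(d);
        p2m_type_t p2mt;
        mfn_t mfn;

        /* The translation now names the p2m it walks instead of the domain. */
        mfn = gfn_to_mfn(p2m, gfn, &p2mt);

        return mfn_valid(mfn_x(mfn));
    }
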
    16.1 --- a/xen/arch/x86/mm/mem_paging.c	Mon Aug 09 16:40:18 2010 +0100
    16.2 +++ b/xen/arch/x86/mm/mem_paging.c	Mon Aug 09 16:46:42 2010 +0100
    16.3 @@ -29,33 +29,34 @@ int mem_paging_domctl(struct domain *d, 
    16.4                        XEN_GUEST_HANDLE(void) u_domctl)
    16.5  {
    16.6      int rc;
    16.7 +    struct p2m_domain *p2m = p2m_get_hostp2m(d);
    16.8  
    16.9      switch( mec->op )
   16.10      {
   16.11      case XEN_DOMCTL_MEM_EVENT_OP_PAGING_NOMINATE:
   16.12      {
   16.13          unsigned long gfn = mec->gfn;
   16.14 -        rc = p2m_mem_paging_nominate(d, gfn);
   16.15 +        rc = p2m_mem_paging_nominate(p2m, gfn);
   16.16      }
   16.17      break;
   16.18  
   16.19      case XEN_DOMCTL_MEM_EVENT_OP_PAGING_EVICT:
   16.20      {
   16.21          unsigned long gfn = mec->gfn;
   16.22 -        rc = p2m_mem_paging_evict(d, gfn);
   16.23 +        rc = p2m_mem_paging_evict(p2m, gfn);
   16.24      }
   16.25      break;
   16.26  
   16.27      case XEN_DOMCTL_MEM_EVENT_OP_PAGING_PREP:
   16.28      {
   16.29          unsigned long gfn = mec->gfn;
   16.30 -        rc = p2m_mem_paging_prep(d, gfn);
   16.31 +        rc = p2m_mem_paging_prep(p2m, gfn);
   16.32      }
   16.33      break;
   16.34  
   16.35      case XEN_DOMCTL_MEM_EVENT_OP_PAGING_RESUME:
   16.36      {
   16.37 -        p2m_mem_paging_resume(d);
   16.38 +        p2m_mem_paging_resume(p2m);
   16.39          rc = 0;
   16.40      }
   16.41      break;
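
Illustrative sketch, not part of the changeset: the mem_sharing.c hunks below use the
complementary callee-side idiom, where a converted function receives the p2m and
recovers the owning domain with p2m->domain only when it needs domain-wide facilities
such as the page_alloc_lock.  A rough sketch; frob_one_gfn is a made-up name and the
includes are assumed.

    #include <xen/sched.h>
    #include <asm/p2m.h>

    static mfn_t frob_one_gfn(struct p2m_domain *p2m, unsigned long gfn)
    {
        struct domain *d = p2m->domain;  /* only for domain-wide state */
        p2m_type_t t;
        mfn_t mfn;

        mfn = gfn_to_mfn(p2m, gfn, &t);  /* p2m operations stay on the p2m */

        spin_lock(&d->page_alloc_lock);  /* domain-level locks stay on d */
        /* ... e.g. move pages on the domain's page list ... */
        spin_unlock(&d->page_alloc_lock);

        return mfn;
    }
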
    17.1 --- a/xen/arch/x86/mm/mem_sharing.c	Mon Aug 09 16:40:18 2010 +0100
    17.2 +++ b/xen/arch/x86/mm/mem_sharing.c	Mon Aug 09 16:46:42 2010 +0100
    17.3 @@ -251,6 +251,7 @@ static void mem_sharing_audit(void)
    17.4              list_for_each(le, &e->gfns)
    17.5              {
    17.6                  struct domain *d;
    17.7 +                struct p2m_domain *p2m;
    17.8                  p2m_type_t t;
    17.9                  mfn_t mfn;
   17.10  
   17.11 @@ -262,7 +263,8 @@ static void mem_sharing_audit(void)
   17.12                              g->domain, g->gfn, mfn_x(e->mfn));
   17.13                      continue;
   17.14                  }
   17.15 -                mfn = gfn_to_mfn(d, g->gfn, &t); 
   17.16 +                p2m = p2m_get_hostp2m(d);
   17.17 +                mfn = gfn_to_mfn(p2m, g->gfn, &t); 
   17.18                  if(mfn_x(mfn) != mfn_x(e->mfn))
   17.19                      MEM_SHARING_DEBUG("Incorrect P2M for d=%d, PFN=%lx."
   17.20                                        "Expecting MFN=%ld, got %ld\n",
   17.21 @@ -377,7 +379,7 @@ int mem_sharing_debug_gfn(struct domain 
   17.22      mfn_t mfn;
   17.23      struct page_info *page;
   17.24  
   17.25 -    mfn = gfn_to_mfn(d, gfn, &p2mt);
   17.26 +    mfn = gfn_to_mfn(p2m_get_hostp2m(d), gfn, &p2mt);
   17.27      page = mfn_to_page(mfn);
   17.28  
   17.29      printk("Debug for domain=%d, gfn=%lx, ", 
   17.30 @@ -487,7 +489,7 @@ int mem_sharing_debug_gref(struct domain
   17.31      return mem_sharing_debug_gfn(d, gfn); 
   17.32  }
   17.33  
   17.34 -int mem_sharing_nominate_page(struct domain *d, 
   17.35 +int mem_sharing_nominate_page(struct p2m_domain *p2m, 
   17.36                                unsigned long gfn,
   17.37                                int expected_refcnt,
   17.38                                shr_handle_t *phandle)
   17.39 @@ -499,10 +501,11 @@ int mem_sharing_nominate_page(struct dom
   17.40      shr_handle_t handle;
   17.41      shr_hash_entry_t *hash_entry;
   17.42      struct gfn_info *gfn_info;
   17.43 +    struct domain *d = p2m->domain;
   17.44  
   17.45      *phandle = 0UL;
   17.46  
   17.47 -    mfn = gfn_to_mfn(d, gfn, &p2mt);
   17.48 +    mfn = gfn_to_mfn(p2m, gfn, &p2mt);
   17.49  
   17.50      /* Check if mfn is valid */
   17.51      ret = -EINVAL;
   17.52 @@ -536,7 +539,7 @@ int mem_sharing_nominate_page(struct dom
   17.53      }
   17.54  
   17.55      /* Change the p2m type */
   17.56 -    if(p2m_change_type(d, gfn, p2mt, p2m_ram_shared) != p2mt) 
   17.57 +    if(p2m_change_type(p2m, gfn, p2mt, p2m_ram_shared) != p2mt) 
   17.58      {
   17.59          /* This is unlikely, as the type must have changed since we've checked
   17.60           * it a few lines above.
   17.61 @@ -599,7 +602,7 @@ int mem_sharing_share_pages(shr_handle_t
   17.62          list_del(&gfn->list);
   17.63          d = get_domain_by_id(gfn->domain);
   17.64          BUG_ON(!d);
   17.65 -        BUG_ON(set_shared_p2m_entry(d, gfn->gfn, se->mfn) == 0);
   17.66 +        BUG_ON(set_shared_p2m_entry(p2m_get_hostp2m(d), gfn->gfn, se->mfn) == 0);
   17.67          put_domain(d);
   17.68          list_add(&gfn->list, &se->gfns);
   17.69          put_page_and_type(cpage);
   17.70 @@ -618,7 +621,7 @@ err_out:
   17.71      return ret;
   17.72  }
   17.73  
   17.74 -int mem_sharing_unshare_page(struct domain *d, 
   17.75 +int mem_sharing_unshare_page(struct p2m_domain *p2m,
   17.76                               unsigned long gfn, 
   17.77                               uint16_t flags)
   17.78  {
   17.79 @@ -631,8 +634,9 @@ int mem_sharing_unshare_page(struct doma
   17.80      struct gfn_info *gfn_info = NULL;
   17.81      shr_handle_t handle;
   17.82      struct list_head *le;
   17.83 +    struct domain *d = p2m->domain;
   17.84  
   17.85 -    mfn = gfn_to_mfn(d, gfn, &p2mt);
   17.86 +    mfn = gfn_to_mfn(p2m, gfn, &p2mt);
   17.87  
   17.88      page = mfn_to_page(mfn);
   17.89      handle = page->shr_handle;
   17.90 @@ -696,7 +700,7 @@ gfn_found:
   17.91      unmap_domain_page(s);
   17.92      unmap_domain_page(t);
   17.93  
   17.94 -    ASSERT(set_shared_p2m_entry(d, gfn, page_to_mfn(page)) != 0);
   17.95 +    ASSERT(set_shared_p2m_entry(p2m, gfn, page_to_mfn(page)) != 0);
   17.96      put_page_and_type(old_page);
   17.97  
   17.98  private_page_found:    
   17.99 @@ -708,7 +712,7 @@ private_page_found:
  17.100          atomic_dec(&nr_saved_mfns);
  17.101      shr_unlock();
  17.102  
  17.103 -    if(p2m_change_type(d, gfn, p2m_ram_shared, p2m_ram_rw) != 
  17.104 +    if(p2m_change_type(p2m, gfn, p2m_ram_shared, p2m_ram_rw) != 
  17.105                                                  p2m_ram_shared) 
  17.106      {
  17.107          printk("Could not change p2m type.\n");
  17.108 @@ -740,7 +744,7 @@ int mem_sharing_domctl(struct domain *d,
  17.109              shr_handle_t handle;
  17.110              if(!mem_sharing_enabled(d))
  17.111                  return -EINVAL;
  17.112 -            rc = mem_sharing_nominate_page(d, gfn, 0, &handle);
  17.113 +            rc = mem_sharing_nominate_page(p2m_get_hostp2m(d), gfn, 0, &handle);
  17.114              mec->u.nominate.handle = handle;
  17.115              mem_sharing_audit();
  17.116          }
  17.117 @@ -756,7 +760,8 @@ int mem_sharing_domctl(struct domain *d,
  17.118                  return -EINVAL;
  17.119              if(mem_sharing_gref_to_gfn(d, gref, &gfn) < 0)
  17.120                  return -EINVAL;
  17.121 -            rc = mem_sharing_nominate_page(d, gfn, 3, &handle);
  17.122 +            rc = mem_sharing_nominate_page(p2m_get_hostp2m(d),
  17.123 +                gfn, 3, &handle);
  17.124              mec->u.nominate.handle = handle;
  17.125              mem_sharing_audit();
  17.126          }
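
Illustrative sketch, not part of the changeset: the p2m.c hunks below move the
populate-on-demand bookkeeping (pod.count, pod.entry_count, the super/single page
lists) and the p2m lock onto struct p2m_domain itself.  A rough sketch of the
convention the PoD paths follow; clamp_pod_target is a made-up name and the includes
are assumed.

    #include <xen/sched.h>
    #include <asm/p2m.h>

    static unsigned long clamp_pod_target(struct domain *d, unsigned long target)
    {
        struct p2m_domain *p2m = p2m_get_hostp2m(d);

        p2m_lock(p2m);                   /* the lock is per-p2m now */
        ASSERT(p2m_locked_by_me(p2m));

        /* PoD accounting is reached through the p2m handed in, not the domain. */
        if ( target > p2m->pod.entry_count )
            target = p2m->pod.entry_count;

        p2m_unlock(p2m);
        return target;
    }
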
    18.1 --- a/xen/arch/x86/mm/p2m.c	Mon Aug 09 16:40:18 2010 +0100
    18.2 +++ b/xen/arch/x86/mm/p2m.c	Mon Aug 09 16:46:42 2010 +0100
    18.3 @@ -108,9 +108,9 @@ static unsigned long p2m_type_to_flags(p
    18.4  }
    18.5  
    18.6  #if P2M_AUDIT
    18.7 -static void audit_p2m(struct domain *d);
    18.8 +static void audit_p2m(struct p2m_domain *p2m);
    18.9  #else
   18.10 -# define audit_p2m(_d) do { (void)(_d); } while(0)
   18.11 +# define audit_p2m(_p2m) do { (void)(_p2m); } while(0)
   18.12  #endif /* P2M_AUDIT */
   18.13  
   18.14  // Find the next level's P2M entry, checking for out-of-range gfn's...
   18.15 @@ -135,15 +135,17 @@ p2m_find_entry(void *table, unsigned lon
   18.16  }
   18.17  
   18.18  struct page_info *
   18.19 -p2m_alloc_ptp(struct domain *d, unsigned long type)
   18.20 +p2m_alloc_ptp(struct p2m_domain *p2m, unsigned long type)
   18.21  {
   18.22      struct page_info *pg;
   18.23  
   18.24 -    pg = d->arch.p2m->alloc_page(d);
   18.25 +    ASSERT(p2m);
   18.26 +    ASSERT(p2m->alloc_page);
   18.27 +    pg = p2m->alloc_page(p2m);
   18.28      if (pg == NULL)
   18.29          return NULL;
   18.30  
   18.31 -    page_list_add_tail(pg, &d->arch.p2m->pages);
   18.32 +    page_list_add_tail(pg, &p2m->pages);
   18.33      pg->u.inuse.type_info = type | 1 | PGT_validated;
   18.34      pg->count_info |= 1;
   18.35  
   18.36 @@ -154,7 +156,7 @@ p2m_alloc_ptp(struct domain *d, unsigned
   18.37  // Returns 0 on error.
   18.38  //
   18.39  static int
   18.40 -p2m_next_level(struct domain *d, mfn_t *table_mfn, void **table,
   18.41 +p2m_next_level(struct p2m_domain *p2m, mfn_t *table_mfn, void **table,
   18.42                 unsigned long *gfn_remainder, unsigned long gfn, u32 shift,
   18.43                 u32 max, unsigned long type)
   18.44  {
   18.45 @@ -163,7 +165,7 @@ p2m_next_level(struct domain *d, mfn_t *
   18.46      l1_pgentry_t new_entry;
   18.47      void *next;
   18.48      int i;
   18.49 -    ASSERT(d->arch.p2m->alloc_page);
   18.50 +    ASSERT(p2m->alloc_page);
   18.51  
   18.52      if ( !(p2m_entry = p2m_find_entry(*table, gfn_remainder, gfn,
   18.53                                        shift, max)) )
   18.54 @@ -174,7 +176,7 @@ p2m_next_level(struct domain *d, mfn_t *
   18.55      {
   18.56          struct page_info *pg;
   18.57  
   18.58 -        pg = p2m_alloc_ptp(d, type);
   18.59 +        pg = p2m_alloc_ptp(p2m, type);
   18.60          if ( pg == NULL )
   18.61              return 0;
   18.62  
   18.63 @@ -183,7 +185,7 @@ p2m_next_level(struct domain *d, mfn_t *
   18.64  
   18.65          switch ( type ) {
   18.66          case PGT_l3_page_table:
   18.67 -            paging_write_p2m_entry(d, gfn,
   18.68 +            paging_write_p2m_entry(p2m->domain, gfn,
   18.69                                     p2m_entry, *table_mfn, new_entry, 4);
   18.70              break;
   18.71          case PGT_l2_page_table:
   18.72 @@ -191,11 +193,11 @@ p2m_next_level(struct domain *d, mfn_t *
   18.73              /* for PAE mode, PDPE only has PCD/PWT/P bits available */
   18.74              new_entry = l1e_from_pfn(mfn_x(page_to_mfn(pg)), _PAGE_PRESENT);
   18.75  #endif
   18.76 -            paging_write_p2m_entry(d, gfn,
   18.77 +            paging_write_p2m_entry(p2m->domain, gfn,
   18.78                                     p2m_entry, *table_mfn, new_entry, 3);
   18.79              break;
   18.80          case PGT_l1_page_table:
   18.81 -            paging_write_p2m_entry(d, gfn,
   18.82 +            paging_write_p2m_entry(p2m->domain, gfn,
   18.83                                     p2m_entry, *table_mfn, new_entry, 2);
   18.84              break;
   18.85          default:
   18.86 @@ -212,7 +214,7 @@ p2m_next_level(struct domain *d, mfn_t *
   18.87          unsigned long flags, pfn;
   18.88          struct page_info *pg;
   18.89  
   18.90 -        pg = p2m_alloc_ptp(d, PGT_l2_page_table);
   18.91 +        pg = p2m_alloc_ptp(p2m, PGT_l2_page_table);
   18.92          if ( pg == NULL )
   18.93              return 0;
   18.94  
   18.95 @@ -223,13 +225,13 @@ p2m_next_level(struct domain *d, mfn_t *
   18.96          for ( i = 0; i < L2_PAGETABLE_ENTRIES; i++ )
   18.97          {
   18.98              new_entry = l1e_from_pfn(pfn + (i * L1_PAGETABLE_ENTRIES), flags);
   18.99 -            paging_write_p2m_entry(d, gfn, l1_entry+i, *table_mfn, new_entry,
  18.100 -                                   2);
  18.101 +            paging_write_p2m_entry(p2m->domain, gfn,
  18.102 +                                   l1_entry+i, *table_mfn, new_entry, 2);
  18.103          }
  18.104          unmap_domain_page(l1_entry);
  18.105          new_entry = l1e_from_pfn(mfn_x(page_to_mfn(pg)),
  18.106                                   __PAGE_HYPERVISOR|_PAGE_USER); //disable PSE
  18.107 -        paging_write_p2m_entry(d, gfn,
  18.108 +        paging_write_p2m_entry(p2m->domain, gfn,
  18.109                                 p2m_entry, *table_mfn, new_entry, 3);
  18.110      }
  18.111  
  18.112 @@ -240,7 +242,7 @@ p2m_next_level(struct domain *d, mfn_t *
  18.113          unsigned long flags, pfn;
  18.114          struct page_info *pg;
  18.115  
  18.116 -        pg = p2m_alloc_ptp(d, PGT_l1_page_table);
  18.117 +        pg = p2m_alloc_ptp(p2m, PGT_l1_page_table);
  18.118          if ( pg == NULL )
  18.119              return 0;
  18.120  
  18.121 @@ -257,14 +259,14 @@ p2m_next_level(struct domain *d, mfn_t *
  18.122          for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
  18.123          {
  18.124              new_entry = l1e_from_pfn(pfn + i, flags);
  18.125 -            paging_write_p2m_entry(d, gfn,
  18.126 +            paging_write_p2m_entry(p2m->domain, gfn,
  18.127                                     l1_entry+i, *table_mfn, new_entry, 1);
  18.128          }
  18.129          unmap_domain_page(l1_entry);
  18.130          
  18.131          new_entry = l1e_from_pfn(mfn_x(page_to_mfn(pg)),
  18.132                                   __PAGE_HYPERVISOR|_PAGE_USER);
  18.133 -        paging_write_p2m_entry(d, gfn,
  18.134 +        paging_write_p2m_entry(p2m->domain, gfn,
  18.135                                 p2m_entry, *table_mfn, new_entry, 2);
  18.136      }
  18.137  
  18.138 @@ -280,17 +282,17 @@ p2m_next_level(struct domain *d, mfn_t *
  18.139   * Populate-on-demand functionality
  18.140   */
  18.141  static
  18.142 -int set_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn, 
  18.143 +int set_p2m_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn, 
  18.144                    unsigned int page_order, p2m_type_t p2mt);
  18.145  
  18.146  static int
  18.147 -p2m_pod_cache_add(struct domain *d,
  18.148 +p2m_pod_cache_add(struct p2m_domain *p2m,
  18.149                    struct page_info *page,
  18.150                    unsigned long order)
  18.151  {
  18.152      int i;
  18.153      struct page_info *p;
  18.154 -    struct p2m_domain *p2md = d->arch.p2m;
  18.155 +    struct domain *d = p2m->domain;
  18.156  
  18.157  #ifndef NDEBUG
  18.158      mfn_t mfn;
  18.159 @@ -320,7 +322,7 @@ p2m_pod_cache_add(struct domain *d,
  18.160      }
  18.161  #endif
  18.162  
  18.163 -    ASSERT(p2m_locked_by_me(p2md));
  18.164 +    ASSERT(p2m_locked_by_me(p2m));
  18.165  
  18.166      /*
  18.167       * Pages from domain_alloc and returned by the balloon driver aren't
  18.168 @@ -347,12 +349,12 @@ p2m_pod_cache_add(struct domain *d,
  18.169      switch(order)
  18.170      {
  18.171      case 9:
  18.172 -        page_list_add_tail(page, &p2md->pod.super); /* lock: page_alloc */
  18.173 -        p2md->pod.count += 1 << order;
  18.174 +        page_list_add_tail(page, &p2m->pod.super); /* lock: page_alloc */
  18.175 +        p2m->pod.count += 1 << order;
  18.176          break;
  18.177      case 0:
  18.178 -        page_list_add_tail(page, &p2md->pod.single); /* lock: page_alloc */
  18.179 -        p2md->pod.count += 1 ;
  18.180 +        page_list_add_tail(page, &p2m->pod.single); /* lock: page_alloc */
  18.181 +        p2m->pod.count += 1;
  18.182          break;
  18.183      default:
  18.184          BUG();
  18.185 @@ -371,57 +373,56 @@ p2m_pod_cache_add(struct domain *d,
  18.186   * down 2-meg pages into singleton pages automatically.  Returns null if
  18.187   * a superpage is requested and no superpages are available.  Must be called
  18.188   * with the d->page_lock held. */
  18.189 -static struct page_info * p2m_pod_cache_get(struct domain *d,
  18.190 +static struct page_info * p2m_pod_cache_get(struct p2m_domain *p2m,
  18.191                                              unsigned long order)
  18.192  {
  18.193 -    struct p2m_domain *p2md = d->arch.p2m;
  18.194      struct page_info *p = NULL;
  18.195      int i;
  18.196  
  18.197 -    if ( order == 9 && page_list_empty(&p2md->pod.super) )
  18.198 +    if ( order == 9 && page_list_empty(&p2m->pod.super) )
  18.199      {
  18.200          return NULL;
  18.201      }
  18.202 -    else if ( order == 0 && page_list_empty(&p2md->pod.single) )
  18.203 +    else if ( order == 0 && page_list_empty(&p2m->pod.single) )
  18.204      {
  18.205          unsigned long mfn;
  18.206          struct page_info *q;
  18.207  
  18.208 -        BUG_ON( page_list_empty(&p2md->pod.super) );
  18.209 +        BUG_ON( page_list_empty(&p2m->pod.super) );
  18.210  
  18.211          /* Break up a superpage to make single pages. NB count doesn't
  18.212           * need to be adjusted. */
  18.213 -        p = page_list_remove_head(&p2md->pod.super);
  18.214 +        p = page_list_remove_head(&p2m->pod.super);
  18.215          mfn = mfn_x(page_to_mfn(p));
  18.216  
  18.217          for ( i=0; i<SUPERPAGE_PAGES; i++ )
  18.218          {
  18.219              q = mfn_to_page(_mfn(mfn+i));
  18.220 -            page_list_add_tail(q, &p2md->pod.single);
  18.221 +            page_list_add_tail(q, &p2m->pod.single);
  18.222          }
  18.223      }
  18.224  
  18.225      switch ( order )
  18.226      {
  18.227      case 9:
  18.228 -        BUG_ON( page_list_empty(&p2md->pod.super) );
  18.229 -        p = page_list_remove_head(&p2md->pod.super);
  18.230 -        p2md->pod.count -= 1 << order; /* Lock: page_alloc */
  18.231 +        BUG_ON( page_list_empty(&p2m->pod.super) );
  18.232 +        p = page_list_remove_head(&p2m->pod.super);
  18.233 +        p2m->pod.count -= 1 << order; /* Lock: page_alloc */
  18.234          break;
  18.235      case 0:
  18.236 -        BUG_ON( page_list_empty(&p2md->pod.single) );
  18.237 -        p = page_list_remove_head(&p2md->pod.single);
  18.238 -        p2md->pod.count -= 1;
  18.239 +        BUG_ON( page_list_empty(&p2m->pod.single) );
  18.240 +        p = page_list_remove_head(&p2m->pod.single);
  18.241 +        p2m->pod.count -= 1;
  18.242          break;
  18.243      default:
  18.244          BUG();
  18.245      }
  18.246  
  18.247      /* Put the pages back on the domain page_list */
  18.248 -    for ( i = 0 ; i < (1 << order) ; i++ )
  18.249 +    for ( i = 0 ; i < (1 << order); i++ )
  18.250      {
  18.251 -        BUG_ON(page_get_owner(p + i) != d);
  18.252 -        page_list_add_tail(p + i, &d->page_list);
  18.253 +        BUG_ON(page_get_owner(p + i) != p2m->domain);
  18.254 +        page_list_add_tail(p + i, &p2m->domain->page_list);
  18.255      }
  18.256  
  18.257      return p;
  18.258 @@ -429,18 +430,18 @@ static struct page_info * p2m_pod_cache_
  18.259  
  18.260  /* Set the size of the cache, allocating or freeing as necessary. */
  18.261  static int
  18.262 -p2m_pod_set_cache_target(struct domain *d, unsigned long pod_target)
  18.263 +p2m_pod_set_cache_target(struct p2m_domain *p2m, unsigned long pod_target)
  18.264  {
  18.265 -    struct p2m_domain *p2md = d->arch.p2m;
  18.266 +    struct domain *d = p2m->domain;
  18.267      int ret = 0;
  18.268  
  18.269      /* Increasing the target */
  18.270 -    while ( pod_target > p2md->pod.count )
  18.271 +    while ( pod_target > p2m->pod.count )
  18.272      {
  18.273          struct page_info * page;
  18.274          int order;
  18.275  
  18.276 -        if ( (pod_target - p2md->pod.count) >= SUPERPAGE_PAGES )
  18.277 +        if ( (pod_target - p2m->pod.count) >= SUPERPAGE_PAGES )
  18.278              order = 9;
  18.279          else
  18.280              order = 0;
  18.281 @@ -456,18 +457,18 @@ p2m_pod_set_cache_target(struct domain *
  18.282              }   
  18.283              
  18.284              printk("%s: Unable to allocate domheap page for pod cache.  target %lu cachesize %d\n",
  18.285 -                   __func__, pod_target, p2md->pod.count);
  18.286 +                   __func__, pod_target, p2m->pod.count);
  18.287              ret = -ENOMEM;
  18.288              goto out;
  18.289          }
  18.290  
  18.291 -        p2m_pod_cache_add(d, page, order);
  18.292 +        p2m_pod_cache_add(p2m, page, order);
  18.293      }
  18.294  
  18.295      /* Decreasing the target */
  18.296      /* We hold the p2m lock here, so we don't need to worry about
  18.297       * cache disappearing under our feet. */
  18.298 -    while ( pod_target < p2md->pod.count )
  18.299 +    while ( pod_target < p2m->pod.count )
  18.300      {
  18.301          struct page_info * page;
  18.302          int order, i;
  18.303 @@ -476,13 +477,13 @@ p2m_pod_set_cache_target(struct domain *
  18.304           * entries may disappear before we grab the lock. */
  18.305          spin_lock(&d->page_alloc_lock);
  18.306  
  18.307 -        if ( (p2md->pod.count - pod_target) > SUPERPAGE_PAGES
  18.308 -             && !page_list_empty(&p2md->pod.super) )
  18.309 +        if ( (p2m->pod.count - pod_target) > SUPERPAGE_PAGES
  18.310 +             && !page_list_empty(&p2m->pod.super) )
  18.311              order = 9;
  18.312          else
  18.313              order = 0;
  18.314  
  18.315 -        page = p2m_pod_cache_get(d, order);
  18.316 +        page = p2m_pod_cache_get(p2m, order);
  18.317  
  18.318          ASSERT(page != NULL);
  18.319  
  18.320 @@ -553,14 +554,14 @@ int
  18.321  p2m_pod_set_mem_target(struct domain *d, unsigned long target)
  18.322  {
  18.323      unsigned pod_target;
  18.324 -    struct p2m_domain *p2md = d->arch.p2m;
  18.325 +    struct p2m_domain *p2m = p2m_get_hostp2m(d);
  18.326      int ret = 0;
  18.327      unsigned long populated;
  18.328  
  18.329 -    p2m_lock(p2md);
  18.330 +    p2m_lock(p2m);
  18.331  
  18.332      /* P == B: Nothing to do. */
  18.333 -    if ( p2md->pod.entry_count == 0 )
  18.334 +    if ( p2m->pod.entry_count == 0 )
  18.335          goto out;
  18.336  
  18.337      /* Don't do anything if the domain is being torn down */
  18.338 @@ -572,21 +573,21 @@ p2m_pod_set_mem_target(struct domain *d,
  18.339      if ( target < d->tot_pages )
  18.340          goto out;
  18.341  
  18.342 -    populated  = d->tot_pages - p2md->pod.count;
  18.343 +    populated  = d->tot_pages - p2m->pod.count;
  18.344  
  18.345      pod_target = target - populated;
  18.346  
  18.347      /* B < T': Set the cache size equal to # of outstanding entries,
  18.348       * let the balloon driver fill in the rest. */
  18.349 -    if ( pod_target > p2md->pod.entry_count )
  18.350 -        pod_target = p2md->pod.entry_count;
  18.351 -
  18.352 -    ASSERT( pod_target >= p2md->pod.count );
  18.353 -
  18.354 -    ret = p2m_pod_set_cache_target(d, pod_target);
  18.355 +    if ( pod_target > p2m->pod.entry_count )
  18.356 +        pod_target = p2m->pod.entry_count;
  18.357 +
  18.358 +    ASSERT( pod_target >= p2m->pod.count );
  18.359 +
  18.360 +    ret = p2m_pod_set_cache_target(p2m, pod_target);
  18.361  
  18.362  out:
  18.363 -    p2m_unlock(p2md);
  18.364 +    p2m_unlock(p2m);
  18.365  
  18.366      return ret;
  18.367  }
  18.368 @@ -594,16 +595,16 @@ out:
  18.369  void
  18.370  p2m_pod_empty_cache(struct domain *d)
  18.371  {
  18.372 -    struct p2m_domain *p2md = d->arch.p2m;
  18.373 +    struct p2m_domain *p2m = p2m_get_hostp2m(d);
  18.374      struct page_info *page;
  18.375  
  18.376      /* After this barrier no new PoD activities can happen. */
  18.377      BUG_ON(!d->is_dying);
  18.378 -    spin_barrier(&p2md->lock);
  18.379 +    spin_barrier(&p2m->lock);
  18.380  
  18.381      spin_lock(&d->page_alloc_lock);
  18.382  
  18.383 -    while ( (page = page_list_remove_head(&p2md->pod.super)) )
  18.384 +    while ( (page = page_list_remove_head(&p2m->pod.super)) )
  18.385      {
  18.386          int i;
  18.387              
  18.388 @@ -613,18 +614,18 @@ p2m_pod_empty_cache(struct domain *d)
  18.389              page_list_add_tail(page + i, &d->page_list);
  18.390          }
  18.391  
  18.392 -        p2md->pod.count -= SUPERPAGE_PAGES;
  18.393 +        p2m->pod.count -= SUPERPAGE_PAGES;
  18.394      }
  18.395  
  18.396 -    while ( (page = page_list_remove_head(&p2md->pod.single)) )
  18.397 +    while ( (page = page_list_remove_head(&p2m->pod.single)) )
  18.398      {
  18.399          BUG_ON(page_get_owner(page) != d);
  18.400          page_list_add_tail(page, &d->page_list);
  18.401  
  18.402 -        p2md->pod.count -= 1;
  18.403 +        p2m->pod.count -= 1;
  18.404      }
  18.405  
  18.406 -    BUG_ON(p2md->pod.count != 0);
  18.407 +    BUG_ON(p2m->pod.count != 0);
  18.408  
  18.409      spin_unlock(&d->page_alloc_lock);
  18.410  }
  18.411 @@ -642,9 +643,9 @@ p2m_pod_decrease_reservation(struct doma
  18.412                               xen_pfn_t gpfn,
  18.413                               unsigned int order)
  18.414  {
  18.415 -    struct p2m_domain *p2md = d->arch.p2m;
  18.416      int ret=0;
  18.417      int i;
  18.418 +    struct p2m_domain *p2m = p2m_get_hostp2m(d);
  18.419  
  18.420      int steal_for_cache = 0;
  18.421      int pod = 0, nonpod = 0, ram = 0;
  18.422 @@ -652,14 +653,14 @@ p2m_pod_decrease_reservation(struct doma
  18.423  
  18.424      /* If we don't have any outstanding PoD entries, let things take their
  18.425       * course */
  18.426 -    if ( p2md->pod.entry_count == 0 )
  18.427 +    if ( p2m->pod.entry_count == 0 )
  18.428          goto out;
  18.429  
  18.430      /* Figure out if we need to steal some freed memory for our cache */
  18.431 -    steal_for_cache =  ( p2md->pod.entry_count > p2md->pod.count );
  18.432 -
  18.433 -    p2m_lock(p2md);
  18.434 -    audit_p2m(d);
  18.435 +    steal_for_cache =  ( p2m->pod.entry_count > p2m->pod.count );
  18.436 +
  18.437 +    p2m_lock(p2m);
  18.438 +    audit_p2m(p2m);
  18.439  
  18.440      if ( unlikely(d->is_dying) )
  18.441          goto out_unlock;
  18.442 @@ -670,7 +671,7 @@ p2m_pod_decrease_reservation(struct doma
  18.443      {
  18.444          p2m_type_t t;
  18.445  
  18.446 -        gfn_to_mfn_query(d, gpfn + i, &t);
  18.447 +        gfn_to_mfn_query(p2m, gpfn + i, &t);
  18.448  
  18.449          if ( t == p2m_populate_on_demand )
  18.450              pod++;
  18.451 @@ -690,9 +691,9 @@ p2m_pod_decrease_reservation(struct doma
  18.452      {
  18.453          /* All PoD: Mark the whole region invalid and tell caller
  18.454           * we're done. */
  18.455 -        set_p2m_entry(d, gpfn, _mfn(INVALID_MFN), order, p2m_invalid);
  18.456 -        p2md->pod.entry_count-=(1<<order); /* Lock: p2m */
  18.457 -        BUG_ON(p2md->pod.entry_count < 0);
  18.458 +        set_p2m_entry(p2m, gpfn, _mfn(INVALID_MFN), order, p2m_invalid);
  18.459 +        p2m->pod.entry_count-=(1<<order); /* Lock: p2m */
  18.460 +        BUG_ON(p2m->pod.entry_count < 0);
  18.461          ret = 1;
  18.462          goto out_entry_check;
  18.463      }
  18.464 @@ -710,12 +711,12 @@ p2m_pod_decrease_reservation(struct doma
  18.465          mfn_t mfn;
  18.466          p2m_type_t t;
  18.467  
  18.468 -        mfn = gfn_to_mfn_query(d, gpfn + i, &t);
  18.469 +        mfn = gfn_to_mfn_query(p2m, gpfn + i, &t);
  18.470          if ( t == p2m_populate_on_demand )
  18.471          {
  18.472 -            set_p2m_entry(d, gpfn + i, _mfn(INVALID_MFN), 0, p2m_invalid);
  18.473 -            p2md->pod.entry_count--; /* Lock: p2m */
  18.474 -            BUG_ON(p2md->pod.entry_count < 0);
  18.475 +            set_p2m_entry(p2m, gpfn + i, _mfn(INVALID_MFN), 0, p2m_invalid);
  18.476 +            p2m->pod.entry_count--; /* Lock: p2m */
  18.477 +            BUG_ON(p2m->pod.entry_count < 0);
  18.478              pod--;
  18.479          }
  18.480          else if ( steal_for_cache && p2m_is_ram(t) )
  18.481 @@ -726,12 +727,12 @@ p2m_pod_decrease_reservation(struct doma
  18.482  
  18.483              page = mfn_to_page(mfn);
  18.484  
  18.485 -            set_p2m_entry(d, gpfn + i, _mfn(INVALID_MFN), 0, p2m_invalid);
  18.486 +            set_p2m_entry(p2m, gpfn + i, _mfn(INVALID_MFN), 0, p2m_invalid);
  18.487              set_gpfn_from_mfn(mfn_x(mfn), INVALID_M2P_ENTRY);
  18.488  
  18.489 -            p2m_pod_cache_add(d, page, 0);
  18.490 -
  18.491 -            steal_for_cache =  ( p2md->pod.entry_count > p2md->pod.count );
  18.492 +            p2m_pod_cache_add(p2m, page, 0);
  18.493 +
  18.494 +            steal_for_cache =  ( p2m->pod.entry_count > p2m->pod.count );
  18.495  
  18.496              nonpod--;
  18.497              ram--;
  18.498 @@ -745,33 +746,31 @@ p2m_pod_decrease_reservation(struct doma
  18.499  
  18.500  out_entry_check:
  18.501      /* If we've reduced our "liabilities" beyond our "assets", free some */
  18.502 -    if ( p2md->pod.entry_count < p2md->pod.count )
  18.503 +    if ( p2m->pod.entry_count < p2m->pod.count )
  18.504      {
  18.505 -        p2m_pod_set_cache_target(d, p2md->pod.entry_count);
  18.506 +        p2m_pod_set_cache_target(p2m, p2m->pod.entry_count);
  18.507      }
  18.508  
  18.509  out_unlock:
  18.510 -    audit_p2m(d);
  18.511 -    p2m_unlock(p2md);
  18.512 +    audit_p2m(p2m);
  18.513 +    p2m_unlock(p2m);
  18.514  
  18.515  out:
  18.516      return ret;
  18.517  }
  18.518  
  18.519  void
  18.520 -p2m_pod_dump_data(struct domain *d)
  18.521 +p2m_pod_dump_data(struct p2m_domain *p2m)
  18.522  {
  18.523 -    struct p2m_domain *p2md = d->arch.p2m;
  18.524 -    
  18.525      printk("    PoD entries=%d cachesize=%d\n",
  18.526 -           p2md->pod.entry_count, p2md->pod.count);
  18.527 +           p2m->pod.entry_count, p2m->pod.count);
  18.528  }
  18.529  
  18.530  
  18.531  /* Search for all-zero superpages to be reclaimed as superpages for the
  18.532   * PoD cache. Must be called w/ p2m lock held, page_alloc lock not held. */
  18.533  static int
  18.534 -p2m_pod_zero_check_superpage(struct domain *d, unsigned long gfn)
  18.535 +p2m_pod_zero_check_superpage(struct p2m_domain *p2m, unsigned long gfn)
  18.536  {
  18.537      mfn_t mfn, mfn0 = _mfn(INVALID_MFN);
  18.538      p2m_type_t type, type0 = 0;
  18.539 @@ -779,6 +778,7 @@ p2m_pod_zero_check_superpage(struct doma
  18.540      int ret=0, reset = 0;
  18.541      int i, j;
  18.542      int max_ref = 1;
  18.543 +    struct domain *d = p2m->domain;
  18.544  
  18.545      if ( !superpage_aligned(gfn) )
  18.546          goto out;
  18.547 @@ -792,7 +792,7 @@ p2m_pod_zero_check_superpage(struct doma
  18.548      for ( i=0; i<SUPERPAGE_PAGES; i++ )
  18.549      {
  18.550          
  18.551 -        mfn = gfn_to_mfn_query(d, gfn + i, &type);
  18.552 +        mfn = gfn_to_mfn_query(p2m, gfn + i, &type);
  18.553  
  18.554          if ( i == 0 )
  18.555          {
  18.556 @@ -840,7 +840,7 @@ p2m_pod_zero_check_superpage(struct doma
  18.557      }
  18.558  
  18.559      /* Try to remove the page, restoring old mapping if it fails. */
  18.560 -    set_p2m_entry(d, gfn,
  18.561 +    set_p2m_entry(p2m, gfn,
  18.562                    _mfn(POPULATE_ON_DEMAND_MFN), 9,
  18.563                    p2m_populate_on_demand);
  18.564  
  18.565 @@ -892,23 +892,24 @@ p2m_pod_zero_check_superpage(struct doma
  18.566  
  18.567      /* Finally!  We've passed all the checks, and can add the mfn superpage
  18.568       * back on the PoD cache, and account for the new p2m PoD entries */
  18.569 -    p2m_pod_cache_add(d, mfn_to_page(mfn0), 9);
  18.570 -    d->arch.p2m->pod.entry_count += SUPERPAGE_PAGES;
  18.571 +    p2m_pod_cache_add(p2m, mfn_to_page(mfn0), 9);
  18.572 +    p2m->pod.entry_count += SUPERPAGE_PAGES;
  18.573  
  18.574  out_reset:
  18.575      if ( reset )
  18.576 -        set_p2m_entry(d, gfn, mfn0, 9, type0);
  18.577 +        set_p2m_entry(p2m, gfn, mfn0, 9, type0);
  18.578      
  18.579  out:
  18.580      return ret;
  18.581  }
  18.582  
  18.583  static void
  18.584 -p2m_pod_zero_check(struct domain *d, unsigned long *gfns, int count)
  18.585 +p2m_pod_zero_check(struct p2m_domain *p2m, unsigned long *gfns, int count)
  18.586  {
  18.587      mfn_t mfns[count];
  18.588      p2m_type_t types[count];
  18.589      unsigned long * map[count];
  18.590 +    struct domain *d = p2m->domain;
  18.591  
  18.592      int i, j;
  18.593      int max_ref = 1;
  18.594 @@ -920,7 +921,7 @@ p2m_pod_zero_check(struct domain *d, uns
  18.595      /* First, get the gfn list, translate to mfns, and map the pages. */
  18.596      for ( i=0; i<count; i++ )
  18.597      {
  18.598 -        mfns[i] = gfn_to_mfn_query(d, gfns[i], types + i);
  18.599 +        mfns[i] = gfn_to_mfn_query(p2m, gfns[i], types + i);
  18.600          /* If this is ram, and not a pagetable or from the xen heap, and probably not mapped
  18.601             elsewhere, map it; otherwise, skip. */
  18.602          if ( p2m_is_ram(types[i])
  18.603 @@ -952,7 +953,7 @@ p2m_pod_zero_check(struct domain *d, uns
  18.604          }
  18.605  
  18.606          /* Try to remove the page, restoring old mapping if it fails. */
  18.607 -        set_p2m_entry(d, gfns[i],
  18.608 +        set_p2m_entry(p2m, gfns[i],
  18.609                        _mfn(POPULATE_ON_DEMAND_MFN), 0,
  18.610                        p2m_populate_on_demand);
  18.611  
  18.612 @@ -963,7 +964,7 @@ p2m_pod_zero_check(struct domain *d, uns
  18.613              unmap_domain_page(map[i]);
  18.614              map[i] = NULL;
  18.615  
  18.616 -            set_p2m_entry(d, gfns[i], mfns[i], 0, types[i]);
  18.617 +            set_p2m_entry(p2m, gfns[i], mfns[i], 0, types[i]);
  18.618  
  18.619              continue;
  18.620          }
  18.621 @@ -985,7 +986,7 @@ p2m_pod_zero_check(struct domain *d, uns
  18.622           * check timing.  */
  18.623          if ( j < PAGE_SIZE/sizeof(*map[i]) )
  18.624          {
  18.625 -            set_p2m_entry(d, gfns[i], mfns[i], 0, types[i]);
  18.626 +            set_p2m_entry(p2m, gfns[i], mfns[i], 0, types[i]);
  18.627          }
  18.628          else
  18.629          {
  18.630 @@ -1005,8 +1006,8 @@ p2m_pod_zero_check(struct domain *d, uns
  18.631              }
  18.632  
  18.633              /* Add to cache, and account for the new p2m PoD entry */
  18.634 -            p2m_pod_cache_add(d, mfn_to_page(mfns[i]), 0);
  18.635 -            d->arch.p2m->pod.entry_count++;
  18.636 +            p2m_pod_cache_add(p2m, mfn_to_page(mfns[i]), 0);
  18.637 +            p2m->pod.entry_count++;
  18.638          }
  18.639      }
  18.640      
  18.641 @@ -1014,56 +1015,53 @@ p2m_pod_zero_check(struct domain *d, uns
  18.642  
  18.643  #define POD_SWEEP_LIMIT 1024
  18.644  static void
  18.645 -p2m_pod_emergency_sweep_super(struct domain *d)
  18.646 +p2m_pod_emergency_sweep_super(struct p2m_domain *p2m)
  18.647  {
  18.648 -    struct p2m_domain *p2md = d->arch.p2m;
  18.649      unsigned long i, start, limit;
  18.650  
  18.651 -    if ( p2md->pod.reclaim_super == 0 )
  18.652 +    if ( p2m->pod.reclaim_super == 0 )
  18.653      {
  18.654 -        p2md->pod.reclaim_super = (p2md->pod.max_guest>>9)<<9;
  18.655 -        p2md->pod.reclaim_super -= SUPERPAGE_PAGES;
  18.656 +        p2m->pod.reclaim_super = (p2m->pod.max_guest>>9)<<9;
  18.657 +        p2m->pod.reclaim_super -= SUPERPAGE_PAGES;
  18.658      }
  18.659      
  18.660 -    start = p2md->pod.reclaim_super;
  18.661 +    start = p2m->pod.reclaim_super;
  18.662      limit = (start > POD_SWEEP_LIMIT) ? (start - POD_SWEEP_LIMIT) : 0;
  18.663  
  18.664 -    for ( i=p2md->pod.reclaim_super ; i > 0 ; i-=SUPERPAGE_PAGES )
  18.665 +    for ( i=p2m->pod.reclaim_super ; i > 0 ; i -= SUPERPAGE_PAGES )
  18.666      {
  18.667 -        p2m_pod_zero_check_superpage(d, i);
  18.668 +        p2m_pod_zero_check_superpage(p2m, i);
  18.669          /* Stop if we're past our limit and we have found *something*.
  18.670           *
  18.671           * NB that this is a zero-sum game; we're increasing our cache size
  18.672           * by increasing our 'debt'.  Since we hold the p2m lock,
  18.673           * (entry_count - count) must remain the same. */
  18.674 -        if ( !page_list_empty(&p2md->pod.super) &&  i < limit )
  18.675 +        if ( !page_list_empty(&p2m->pod.super) &&  i < limit )
  18.676              break;
  18.677      }
  18.678  
  18.679 -    p2md->pod.reclaim_super = i ? i - SUPERPAGE_PAGES : 0;
  18.680 -
  18.681 +    p2m->pod.reclaim_super = i ? i - SUPERPAGE_PAGES : 0;
  18.682  }
  18.683  
  18.684  #define POD_SWEEP_STRIDE  16
  18.685  static void
  18.686 -p2m_pod_emergency_sweep(struct domain *d)
  18.687 +p2m_pod_emergency_sweep(struct p2m_domain *p2m)
  18.688  {
  18.689 -    struct p2m_domain *p2md = d->arch.p2m;
  18.690      unsigned long gfns[POD_SWEEP_STRIDE];
  18.691      unsigned long i, j=0, start, limit;
  18.692      p2m_type_t t;
  18.693  
  18.694  
  18.695 -    if ( p2md->pod.reclaim_single == 0 )
  18.696 -        p2md->pod.reclaim_single = p2md->pod.max_guest;
  18.697 -
  18.698 -    start = p2md->pod.reclaim_single;
  18.699 +    if ( p2m->pod.reclaim_single == 0 )
  18.700 +        p2m->pod.reclaim_single = p2m->pod.max_guest;
  18.701 +
  18.702 +    start = p2m->pod.reclaim_single;
  18.703      limit = (start > POD_SWEEP_LIMIT) ? (start - POD_SWEEP_LIMIT) : 0;
  18.704  
  18.705      /* FIXME: Figure out how to avoid superpages */
  18.706 -    for ( i=p2md->pod.reclaim_single ; i > 0 ; i-- )
  18.707 +    for ( i=p2m->pod.reclaim_single; i > 0 ; i-- )
  18.708      {
  18.709 -        gfn_to_mfn_query(d, i, &t );
  18.710 +        gfn_to_mfn_query(p2m, i, &t );
  18.711          if ( p2m_is_ram(t) )
  18.712          {
  18.713              gfns[j] = i;
  18.714 @@ -1071,7 +1069,7 @@ p2m_pod_emergency_sweep(struct domain *d
  18.715              BUG_ON(j > POD_SWEEP_STRIDE);
  18.716              if ( j == POD_SWEEP_STRIDE )
  18.717              {
  18.718 -                p2m_pod_zero_check(d, gfns, j);
  18.719 +                p2m_pod_zero_check(p2m, gfns, j);
  18.720                  j = 0;
  18.721              }
  18.722          }
  18.723 @@ -1080,29 +1078,29 @@ p2m_pod_emergency_sweep(struct domain *d
  18.724           * NB that this is a zero-sum game; we're increasing our cache size
  18.725           * by re-increasing our 'debt'.  Since we hold the p2m lock,
  18.726           * (entry_count - count) must remain the same. */
  18.727 -        if ( p2md->pod.count > 0 && i < limit )
  18.728 +        if ( p2m->pod.count > 0 && i < limit )
  18.729              break;
  18.730      }
  18.731  
  18.732      if ( j )
  18.733 -        p2m_pod_zero_check(d, gfns, j);
  18.734 -
  18.735 -    p2md->pod.reclaim_single = i ? i - 1 : i;
  18.736 +        p2m_pod_zero_check(p2m, gfns, j);
  18.737 +
  18.738 +    p2m->pod.reclaim_single = i ? i - 1 : i;
  18.739  
  18.740  }
  18.741  
  18.742  int
  18.743 -p2m_pod_demand_populate(struct domain *d, unsigned long gfn,
  18.744 +p2m_pod_demand_populate(struct p2m_domain *p2m, unsigned long gfn,
  18.745                          unsigned int order,
  18.746                          p2m_query_t q)
  18.747  {
  18.748 +    struct domain *d = p2m->domain;
  18.749      struct page_info *p = NULL; /* Compiler warnings */
  18.750      unsigned long gfn_aligned;
  18.751      mfn_t mfn;
  18.752 -    struct p2m_domain *p2md = d->arch.p2m;
  18.753      int i;
  18.754  
  18.755 -    ASSERT(p2m_locked_by_me(d->arch.p2m));
  18.756 +    ASSERT(p2m_locked_by_me(p2m));
  18.757  
  18.758      /* This check is done with the p2m lock held.  This will make sure that
  18.759       * even if d->is_dying changes under our feet, p2m_pod_empty_cache() 
  18.760 @@ -1120,34 +1118,34 @@ p2m_pod_demand_populate(struct domain *d
  18.761           * set_p2m_entry() should automatically shatter the 1GB page into 
  18.762           * 512 2MB pages. The rest of 511 calls are unnecessary.
  18.763           */
  18.764 -        set_p2m_entry(d, gfn_aligned, _mfn(POPULATE_ON_DEMAND_MFN), 9,
  18.765 +        set_p2m_entry(p2m, gfn_aligned, _mfn(POPULATE_ON_DEMAND_MFN), 9,
  18.766                        p2m_populate_on_demand);
  18.767 -        audit_p2m(d);
  18.768 -        p2m_unlock(p2md);
  18.769 +        audit_p2m(p2m);
  18.770 +        p2m_unlock(p2m);
  18.771          return 0;
  18.772      }
  18.773  
  18.774      /* If we're low, start a sweep */
  18.775 -    if ( order == 9 && page_list_empty(&p2md->pod.super) )
  18.776 -        p2m_pod_emergency_sweep_super(d);
  18.777 -
  18.778 -    if ( page_list_empty(&p2md->pod.single) &&
  18.779 +    if ( order == 9 && page_list_empty(&p2m->pod.super) )
  18.780 +        p2m_pod_emergency_sweep_super(p2m);
  18.781 +
  18.782 +    if ( page_list_empty(&p2m->pod.single) &&
  18.783           ( ( order == 0 )
  18.784 -           || (order == 9 && page_list_empty(&p2md->pod.super) ) ) )
  18.785 -        p2m_pod_emergency_sweep(d);
  18.786 +           || (order == 9 && page_list_empty(&p2m->pod.super) ) ) )
  18.787 +        p2m_pod_emergency_sweep(p2m);
  18.788  
  18.789      /* Keep track of the highest gfn demand-populated by a guest fault */
  18.790 -    if ( q == p2m_guest && gfn > p2md->pod.max_guest )
  18.791 -        p2md->pod.max_guest = gfn;
  18.792 +    if ( q == p2m_guest && gfn > p2m->pod.max_guest )
  18.793 +        p2m->pod.max_guest = gfn;
  18.794  
  18.795      spin_lock(&d->page_alloc_lock);
  18.796  
  18.797 -    if ( p2md->pod.count == 0 )
  18.798 +    if ( p2m->pod.count == 0 )
  18.799          goto out_of_memory;
  18.800  
   18.801      /* Get a page from the cache.  A NULL return value indicates that the
  18.802       * 2-meg range should be marked singleton PoD, and retried */
  18.803 -    if ( (p = p2m_pod_cache_get(d, order)) == NULL )
  18.804 +    if ( (p = p2m_pod_cache_get(p2m, order)) == NULL )
  18.805          goto remap_and_retry;
  18.806  
  18.807      mfn = page_to_mfn(p);
  18.808 @@ -1158,13 +1156,13 @@ p2m_pod_demand_populate(struct domain *d
  18.809  
  18.810      gfn_aligned = (gfn >> order) << order;
  18.811  
  18.812 -    set_p2m_entry(d, gfn_aligned, mfn, order, p2m_ram_rw);
  18.813 -
  18.814 -    for( i = 0 ; i < (1UL << order) ; i++ )
  18.815 +    set_p2m_entry(p2m, gfn_aligned, mfn, order, p2m_ram_rw);
  18.816 +
  18.817 +    for( i = 0; i < (1UL << order); i++ )
  18.818          set_gpfn_from_mfn(mfn_x(mfn) + i, gfn_aligned + i);
  18.819      
  18.820 -    p2md->pod.entry_count -= (1 << order); /* Lock: p2m */
  18.821 -    BUG_ON(p2md->pod.entry_count < 0);
  18.822 +    p2m->pod.entry_count -= (1 << order); /* Lock: p2m */
  18.823 +    BUG_ON(p2m->pod.entry_count < 0);
  18.824  
  18.825      if ( tb_init_done )
  18.826      {
  18.827 @@ -1186,7 +1184,7 @@ out_of_memory:
  18.828      spin_unlock(&d->page_alloc_lock);
  18.829  
  18.830      printk("%s: Out of populate-on-demand memory! tot_pages %" PRIu32 " pod_entries %" PRIi32 "\n",
  18.831 -           __func__, d->tot_pages, p2md->pod.entry_count);
  18.832 +           __func__, d->tot_pages, p2m->pod.entry_count);
  18.833      domain_crash(d);
  18.834  out_fail:
  18.835      return -1;
  18.836 @@ -1197,7 +1195,7 @@ remap_and_retry:
  18.837      /* Remap this 2-meg region in singleton chunks */
  18.838      gfn_aligned = (gfn>>order)<<order;
  18.839      for(i=0; i<(1<<order); i++)
  18.840 -        set_p2m_entry(d, gfn_aligned+i, _mfn(POPULATE_ON_DEMAND_MFN), 0,
  18.841 +        set_p2m_entry(p2m, gfn_aligned+i, _mfn(POPULATE_ON_DEMAND_MFN), 0,
  18.842                        p2m_populate_on_demand);
  18.843      if ( tb_init_done )
  18.844      {
  18.845 @@ -1216,44 +1214,44 @@ remap_and_retry:
  18.846  }
  18.847  
  18.848  /* Non-ept "lock-and-check" wrapper */
  18.849 -static int p2m_pod_check_and_populate(struct domain *d, unsigned long gfn,
  18.850 +static int p2m_pod_check_and_populate(struct p2m_domain *p2m, unsigned long gfn,
  18.851                                        l1_pgentry_t *p2m_entry, int order,
  18.852                                        p2m_query_t q)
  18.853  {
  18.854      /* Only take the lock if we don't already have it.  Otherwise it
  18.855       * wouldn't be safe to do p2m lookups with the p2m lock held */
  18.856 -    int do_locking = !p2m_locked_by_me(d->arch.p2m);
  18.857 +    int do_locking = !p2m_locked_by_me(p2m);
  18.858      int r;
  18.859  
  18.860      if ( do_locking )
  18.861 -        p2m_lock(d->arch.p2m);
  18.862 -
  18.863 -    audit_p2m(d);
  18.864 +        p2m_lock(p2m);
  18.865 +
  18.866 +    audit_p2m(p2m);
  18.867  
  18.868      /* Check to make sure this is still PoD */
  18.869      if ( p2m_flags_to_type(l1e_get_flags(*p2m_entry)) != p2m_populate_on_demand )
  18.870      {
  18.871          if ( do_locking )
  18.872 -            p2m_unlock(d->arch.p2m);
  18.873 +            p2m_unlock(p2m);
  18.874          return 0;
  18.875      }
  18.876  
  18.877 -    r = p2m_pod_demand_populate(d, gfn, order, q);
  18.878 -
  18.879 -    audit_p2m(d);
  18.880 +    r = p2m_pod_demand_populate(p2m, gfn, order, q);
  18.881 +
  18.882 +    audit_p2m(p2m);
  18.883      if ( do_locking )
  18.884 -        p2m_unlock(d->arch.p2m);
  18.885 +        p2m_unlock(p2m);
  18.886  
  18.887      return r;
  18.888  }
  18.889  
  18.890  // Returns 0 on error (out of memory)
  18.891  static int
  18.892 -p2m_set_entry(struct domain *d, unsigned long gfn, mfn_t mfn, 
  18.893 +p2m_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn, 
  18.894                unsigned int page_order, p2m_type_t p2mt)
  18.895  {
  18.896      // XXX -- this might be able to be faster iff current->domain == d
  18.897 -    mfn_t table_mfn = pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d)));
  18.898 +    mfn_t table_mfn = pagetable_get_mfn(p2m_get_pagetable(p2m));
  18.899      void *table =map_domain_page(mfn_x(table_mfn));
  18.900      unsigned long i, gfn_remainder = gfn;
  18.901      l1_pgentry_t *p2m_entry;
  18.902 @@ -1273,14 +1271,14 @@ p2m_set_entry(struct domain *d, unsigned
  18.903          t.gfn = gfn;
  18.904          t.mfn = mfn_x(mfn);
  18.905          t.p2mt = p2mt;
  18.906 -        t.d = d->domain_id;
  18.907 +        t.d = p2m->domain->domain_id;
  18.908          t.order = page_order;
  18.909  
  18.910          __trace_var(TRC_MEM_SET_P2M_ENTRY, 0, sizeof(t), (unsigned char *)&t);
  18.911      }
  18.912  
  18.913  #if CONFIG_PAGING_LEVELS >= 4
  18.914 -    if ( !p2m_next_level(d, &table_mfn, &table, &gfn_remainder, gfn,
  18.915 +    if ( !p2m_next_level(p2m, &table_mfn, &table, &gfn_remainder, gfn,
  18.916                           L4_PAGETABLE_SHIFT - PAGE_SHIFT,
  18.917                           L4_PAGETABLE_ENTRIES, PGT_l3_page_table) )
  18.918          goto out;
  18.919 @@ -1298,14 +1296,15 @@ p2m_set_entry(struct domain *d, unsigned
  18.920               !(l1e_get_flags(*p2m_entry) & _PAGE_PSE) )
  18.921          {
  18.922              P2M_ERROR("configure P2M table L3 entry with large page\n");
  18.923 -            domain_crash(d);
  18.924 +            domain_crash(p2m->domain);
  18.925              goto out;
  18.926          }
  18.927          l3e_content = mfn_valid(mfn) 
  18.928              ? l3e_from_pfn(mfn_x(mfn), p2m_type_to_flags(p2mt) | _PAGE_PSE)
  18.929              : l3e_empty();
  18.930          entry_content.l1 = l3e_content.l3;
  18.931 -        paging_write_p2m_entry(d, gfn, p2m_entry, table_mfn, entry_content, 3);
  18.932 +        paging_write_p2m_entry(p2m->domain, gfn, p2m_entry,
  18.933 +                               table_mfn, entry_content, 3);
  18.934  
  18.935      }
  18.936      /*
  18.937 @@ -1315,17 +1314,17 @@ p2m_set_entry(struct domain *d, unsigned
  18.938       * in Xen's address space for translated PV guests.
  18.939       * When using AMD's NPT on PAE Xen, we are restricted to 4GB.
  18.940       */
  18.941 -    else if ( !p2m_next_level(d, &table_mfn, &table, &gfn_remainder, gfn,
  18.942 +    else if ( !p2m_next_level(p2m, &table_mfn, &table, &gfn_remainder, gfn,
  18.943                                L3_PAGETABLE_SHIFT - PAGE_SHIFT,
  18.944                                ((CONFIG_PAGING_LEVELS == 3)
  18.945 -                               ? (paging_mode_hap(d) ? 4 : 8)
  18.946 +                               ? (paging_mode_hap(p2m->domain) ? 4 : 8)
  18.947                                 : L3_PAGETABLE_ENTRIES),
  18.948                                PGT_l2_page_table) )
  18.949          goto out;
  18.950  
  18.951      if ( page_order == 0 )
  18.952      {
  18.953 -        if ( !p2m_next_level(d, &table_mfn, &table, &gfn_remainder, gfn,
  18.954 +        if ( !p2m_next_level(p2m, &table_mfn, &table, &gfn_remainder, gfn,
  18.955                               L2_PAGETABLE_SHIFT - PAGE_SHIFT,
  18.956                               L2_PAGETABLE_ENTRIES, PGT_l1_page_table) )
  18.957              goto out;
  18.958 @@ -1340,7 +1339,8 @@ p2m_set_entry(struct domain *d, unsigned
  18.959              entry_content = l1e_empty();
  18.960          
  18.961          /* level 1 entry */
  18.962 -        paging_write_p2m_entry(d, gfn, p2m_entry, table_mfn, entry_content, 1);
  18.963 +        paging_write_p2m_entry(p2m->domain, gfn, p2m_entry,
  18.964 +                               table_mfn, entry_content, 1);
  18.965      }
  18.966      else if ( page_order == 9 )
  18.967      {
  18.968 @@ -1354,7 +1354,7 @@ p2m_set_entry(struct domain *d, unsigned
  18.969               !(l1e_get_flags(*p2m_entry) & _PAGE_PSE) )
  18.970          {
  18.971              P2M_ERROR("configure P2M table 4KB L2 entry with large page\n");
  18.972 -            domain_crash(d);
  18.973 +            domain_crash(p2m->domain);
  18.974              goto out;
  18.975          }
  18.976          
  18.977 @@ -1365,23 +1365,24 @@ p2m_set_entry(struct domain *d, unsigned
  18.978              l2e_content = l2e_empty();
  18.979          
  18.980          entry_content.l1 = l2e_content.l2;
  18.981 -        paging_write_p2m_entry(d, gfn, p2m_entry, table_mfn, entry_content, 2);
  18.982 +        paging_write_p2m_entry(p2m->domain, gfn, p2m_entry,
  18.983 +                               table_mfn, entry_content, 2);
  18.984      }
  18.985  
  18.986      /* Track the highest gfn for which we have ever had a valid mapping */
  18.987      if ( mfn_valid(mfn) 
  18.988 -         && (gfn + (1UL << page_order) - 1 > d->arch.p2m->max_mapped_pfn) )
  18.989 -        d->arch.p2m->max_mapped_pfn = gfn + (1UL << page_order) - 1;
  18.990 -
  18.991 -    if ( iommu_enabled && need_iommu(d) )
  18.992 +         && (gfn + (1UL << page_order) - 1 > p2m->max_mapped_pfn) )
  18.993 +        p2m->max_mapped_pfn = gfn + (1UL << page_order) - 1;
  18.994 +
  18.995 +    if ( iommu_enabled && need_iommu(p2m->domain) )
  18.996      {
  18.997          if ( p2mt == p2m_ram_rw )
  18.998              for ( i = 0; i < (1UL << page_order); i++ )
  18.999 -                iommu_map_page(d, gfn+i, mfn_x(mfn)+i,
 18.1000 +                iommu_map_page(p2m->domain, gfn+i, mfn_x(mfn)+i,
 18.1001                                 IOMMUF_readable|IOMMUF_writable);
 18.1002          else
 18.1003              for ( int i = 0; i < (1UL << page_order); i++ )
 18.1004 -                iommu_unmap_page(d, gfn+i);
 18.1005 +                iommu_unmap_page(p2m->domain, gfn+i);
 18.1006      }
 18.1007  
 18.1008      /* Success */
 18.1009 @@ -1393,7 +1394,7 @@ out:
 18.1010  }
 18.1011  
 18.1012  static mfn_t
 18.1013 -p2m_gfn_to_mfn(struct domain *d, unsigned long gfn, p2m_type_t *t,
 18.1014 +p2m_gfn_to_mfn(struct p2m_domain *p2m, unsigned long gfn, p2m_type_t *t,
 18.1015                 p2m_query_t q)
 18.1016  {
 18.1017      mfn_t mfn;
 18.1018 @@ -1401,7 +1402,7 @@ p2m_gfn_to_mfn(struct domain *d, unsigne
 18.1019      l2_pgentry_t *l2e;
 18.1020      l1_pgentry_t *l1e;
 18.1021  
 18.1022 -    ASSERT(paging_mode_translate(d));
 18.1023 +    ASSERT(paging_mode_translate(p2m->domain));
 18.1024  
 18.1025      /* XXX This is for compatibility with the old model, where anything not 
 18.1026       * XXX marked as RAM was considered to be emulated MMIO space.
 18.1027 @@ -1409,9 +1410,9 @@ p2m_gfn_to_mfn(struct domain *d, unsigne
 18.1028       * XXX we will return p2m_invalid for unmapped gfns */
 18.1029      *t = p2m_mmio_dm;
 18.1030  
 18.1031 -    mfn = pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d)));
 18.1032 -
 18.1033 -    if ( gfn > d->arch.p2m->max_mapped_pfn )
 18.1034 +    mfn = pagetable_get_mfn(p2m_get_pagetable(p2m));
 18.1035 +
 18.1036 +    if ( gfn > p2m->max_mapped_pfn )
 18.1037          /* This pfn is higher than the highest the p2m map currently holds */
 18.1038          return _mfn(INVALID_MFN);
 18.1039  
 18.1040 @@ -1447,7 +1448,7 @@ pod_retry_l3:
 18.1041              {
 18.1042                  if ( q != p2m_query )
 18.1043                  {
 18.1044 -                    if ( !p2m_pod_demand_populate(d, gfn, 18, q) )
 18.1045 +                    if ( !p2m_pod_demand_populate(p2m, gfn, 18, q) )
 18.1046                          goto pod_retry_l3;
 18.1047                  }
 18.1048                  else
 18.1049 @@ -1482,8 +1483,8 @@ pod_retry_l2:
 18.1050          if ( p2m_flags_to_type(l2e_get_flags(*l2e)) == p2m_populate_on_demand )
 18.1051          {
 18.1052              if ( q != p2m_query ) {
 18.1053 -                if ( !p2m_pod_check_and_populate(d, gfn,
 18.1054 -                                                       (l1_pgentry_t *)l2e, 9, q) )
 18.1055 +                if ( !p2m_pod_check_and_populate(p2m, gfn,
 18.1056 +                                                 (l1_pgentry_t *)l2e, 9, q) )
 18.1057                      goto pod_retry_l2;
 18.1058              } else
 18.1059                  *t = p2m_populate_on_demand;
 18.1060 @@ -1514,8 +1515,8 @@ pod_retry_l1:
 18.1061          if ( p2m_flags_to_type(l1e_get_flags(*l1e)) == p2m_populate_on_demand )
 18.1062          {
 18.1063              if ( q != p2m_query ) {
 18.1064 -                if ( !p2m_pod_check_and_populate(d, gfn,
 18.1065 -                                                       (l1_pgentry_t *)l1e, 0, q) )
 18.1066 +                if ( !p2m_pod_check_and_populate(p2m, gfn,
 18.1067 +                                                 (l1_pgentry_t *)l1e, 0, q) )
 18.1068                      goto pod_retry_l1;
 18.1069              } else
 18.1070                  *t = p2m_populate_on_demand;
 18.1071 @@ -1533,7 +1534,8 @@ pod_retry_l1:
 18.1072  }
 18.1073  
 18.1074  /* Read the current domain's p2m table (through the linear mapping). */
 18.1075 -static mfn_t p2m_gfn_to_mfn_current(unsigned long gfn, p2m_type_t *t,
 18.1076 +static mfn_t p2m_gfn_to_mfn_current(struct p2m_domain *p2m,
 18.1077 +                                    unsigned long gfn, p2m_type_t *t,
 18.1078                                      p2m_query_t q)
 18.1079  {
 18.1080      mfn_t mfn = _mfn(INVALID_MFN);
 18.1081 @@ -1544,7 +1546,7 @@ static mfn_t p2m_gfn_to_mfn_current(unsi
 18.1082       * XXX Once we start explicitly registering MMIO regions in the p2m 
 18.1083       * XXX we will return p2m_invalid for unmapped gfns */
 18.1084  
 18.1085 -    if ( gfn <= current->domain->arch.p2m->max_mapped_pfn )
 18.1086 +    if ( gfn <= p2m->max_mapped_pfn )
 18.1087      {
 18.1088          l1_pgentry_t l1e = l1e_empty(), *p2m_entry;
 18.1089          l2_pgentry_t l2e = l2e_empty();
 18.1090 @@ -1574,7 +1576,7 @@ static mfn_t p2m_gfn_to_mfn_current(unsi
 18.1091                  /* The read has succeeded, so we know that mapping exists */
 18.1092                  if ( q != p2m_query )
 18.1093                  {
 18.1094 -                    if ( !p2m_pod_demand_populate(current->domain, gfn, 18, q) )
 18.1095 +                    if ( !p2m_pod_demand_populate(p2m, gfn, 18, q) )
 18.1096                          goto pod_retry_l3;
 18.1097                      p2mt = p2m_invalid;
 18.1098                      printk("%s: Allocate 1GB failed!\n", __func__);
 18.1099 @@ -1624,8 +1626,8 @@ static mfn_t p2m_gfn_to_mfn_current(unsi
 18.1100                   * exits at this point.  */
 18.1101                  if ( q != p2m_query )
 18.1102                  {
 18.1103 -                    if ( !p2m_pod_check_and_populate(current->domain, gfn,
 18.1104 -                                                            p2m_entry, 9, q) )
 18.1105 +                    if ( !p2m_pod_check_and_populate(p2m, gfn,
 18.1106 +                                                     p2m_entry, 9, q) )
 18.1107                          goto pod_retry_l2;
 18.1108  
 18.1109                      /* Allocate failed. */
 18.1110 @@ -1680,8 +1682,8 @@ static mfn_t p2m_gfn_to_mfn_current(unsi
 18.1111                   * exits at this point.  */
 18.1112                  if ( q != p2m_query )
 18.1113                  {
 18.1114 -                    if ( !p2m_pod_check_and_populate(current->domain, gfn,
 18.1115 -                                                            (l1_pgentry_t *)p2m_entry, 0, q) )
 18.1116 +                    if ( !p2m_pod_check_and_populate(p2m, gfn,
 18.1117 +                                                     (l1_pgentry_t *)p2m_entry, 0, q) )
 18.1118                          goto pod_retry_l1;
 18.1119  
 18.1120                      /* Allocate failed. */
 18.1121 @@ -1708,22 +1710,15 @@ out:
 18.1122  }
 18.1123  
  18.1124  /* Init the data structures for later use by the p2m code */
 18.1125 -int p2m_init(struct domain *d)
 18.1126 +static void p2m_initialise(struct domain *d, struct p2m_domain *p2m)
 18.1127  {
 18.1128 -    struct p2m_domain *p2m;
 18.1129 -
 18.1130 -    p2m = xmalloc(struct p2m_domain);
 18.1131 -    if ( p2m == NULL )
 18.1132 -        return -ENOMEM;
 18.1133 -
 18.1134 -    d->arch.p2m = p2m;
 18.1135 -
 18.1136      memset(p2m, 0, sizeof(*p2m));
 18.1137      p2m_lock_init(p2m);
 18.1138      INIT_PAGE_LIST_HEAD(&p2m->pages);
 18.1139      INIT_PAGE_LIST_HEAD(&p2m->pod.super);
 18.1140      INIT_PAGE_LIST_HEAD(&p2m->pod.single);
 18.1141  
 18.1142 +    p2m->domain = d;
 18.1143      p2m->set_entry = p2m_set_entry;
 18.1144      p2m->get_entry = p2m_gfn_to_mfn;
 18.1145      p2m->get_entry_current = p2m_gfn_to_mfn_current;
 18.1146 @@ -1732,23 +1727,34 @@ int p2m_init(struct domain *d)
 18.1147      if ( hap_enabled(d) && (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) )
 18.1148          ept_p2m_init(d);
 18.1149  
 18.1150 +    return;
 18.1151 +}
 18.1152 +
 18.1153 +int p2m_init(struct domain *d)
 18.1154 +{
 18.1155 +    struct p2m_domain *p2m;
 18.1156 +
 18.1157 +    p2m_get_hostp2m(d) = p2m = xmalloc(struct p2m_domain);
 18.1158 +    if ( p2m == NULL )
 18.1159 +        return -ENOMEM;
 18.1160 +    p2m_initialise(d, p2m);
 18.1161 +
 18.1162      return 0;
 18.1163  }
 18.1164  
 18.1165 -void p2m_change_entry_type_global(struct domain *d,
 18.1166 +void p2m_change_entry_type_global(struct p2m_domain *p2m,
 18.1167                                    p2m_type_t ot, p2m_type_t nt)
 18.1168  {
 18.1169 -    struct p2m_domain *p2m = d->arch.p2m;
 18.1170 -
 18.1171      p2m_lock(p2m);
 18.1172 -    p2m->change_entry_type_global(d, ot, nt);
 18.1173 +    p2m->change_entry_type_global(p2m, ot, nt);
 18.1174      p2m_unlock(p2m);
 18.1175  }
 18.1176  
 18.1177  static
 18.1178 -int set_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn, 
 18.1179 +int set_p2m_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn, 
 18.1180                      unsigned int page_order, p2m_type_t p2mt)
 18.1181  {
 18.1182 +    struct domain *d = p2m->domain;
 18.1183      unsigned long todo = 1ul << page_order;
 18.1184      unsigned int order;
 18.1185      int rc = 1;
 18.1186 @@ -1763,7 +1769,7 @@ int set_p2m_entry(struct domain *d, unsi
 18.1187          else
 18.1188              order = 0;
 18.1189  
 18.1190 -        if ( !d->arch.p2m->set_entry(d, gfn, mfn, order, p2mt) )
 18.1191 +        if ( !p2m->set_entry(p2m, gfn, mfn, order, p2mt) )
 18.1192              rc = 0;
 18.1193          gfn += 1ul << order;
 18.1194          if ( mfn_x(mfn) != INVALID_MFN )
 18.1195 @@ -1784,16 +1790,14 @@ int set_p2m_entry(struct domain *d, unsi
 18.1196  //
 18.1197  // Returns 0 for success or -errno.
 18.1198  //
 18.1199 -int p2m_alloc_table(struct domain *d,
 18.1200 -                    struct page_info * (*alloc_page)(struct domain *d),
 18.1201 -                    void (*free_page)(struct domain *d, struct page_info *pg))
 18.1202 -
 18.1203 +int p2m_alloc_table(struct p2m_domain *p2m,
 18.1204 +               struct page_info * (*alloc_page)(struct p2m_domain *p2m),
 18.1205 +               void (*free_page)(struct p2m_domain *p2m, struct page_info *pg))
 18.1206  {
 18.1207      mfn_t mfn = _mfn(INVALID_MFN);
 18.1208      struct page_info *page, *p2m_top;
 18.1209      unsigned int page_count = 0;
 18.1210      unsigned long gfn = -1UL;
 18.1211 -    struct p2m_domain *p2m = p2m_get_hostp2m(d);
 18.1212  
 18.1213      p2m_lock(p2m);
 18.1214  
 18.1215 @@ -1809,7 +1813,7 @@ int p2m_alloc_table(struct domain *d,
 18.1216      p2m->alloc_page = alloc_page;
 18.1217      p2m->free_page = free_page;
 18.1218  
 18.1219 -    p2m_top = p2m_alloc_ptp(d,
 18.1220 +    p2m_top = p2m_alloc_ptp(p2m,
 18.1221  #if CONFIG_PAGING_LEVELS == 4
 18.1222          PGT_l4_page_table
 18.1223  #else
 18.1224 @@ -1828,13 +1832,13 @@ int p2m_alloc_table(struct domain *d,
 18.1225      P2M_PRINTK("populating p2m table\n");
 18.1226  
 18.1227      /* Initialise physmap tables for slot zero. Other code assumes this. */
 18.1228 -    if ( !set_p2m_entry(d, 0, _mfn(INVALID_MFN), 0,
 18.1229 +    if ( !set_p2m_entry(p2m, 0, _mfn(INVALID_MFN), 0,
 18.1230                          p2m_invalid) )
 18.1231          goto error;
 18.1232  
 18.1233      /* Copy all existing mappings from the page list and m2p */
 18.1234 -    spin_lock(&d->page_alloc_lock);
 18.1235 -    page_list_for_each(page, &d->page_list)
 18.1236 +    spin_lock(&p2m->domain->page_alloc_lock);
 18.1237 +    page_list_for_each(page, &p2m->domain->page_list)
 18.1238      {
 18.1239          mfn = page_to_mfn(page);
 18.1240          gfn = get_gpfn_from_mfn(mfn_x(mfn));
 18.1241 @@ -1848,17 +1852,17 @@ int p2m_alloc_table(struct domain *d,
 18.1242              (gfn != 0x55555555L)
 18.1243  #endif
 18.1244               && gfn != INVALID_M2P_ENTRY
 18.1245 -            && !set_p2m_entry(d, gfn, mfn, 0, p2m_ram_rw) )
 18.1246 +            && !set_p2m_entry(p2m, gfn, mfn, 0, p2m_ram_rw) )
 18.1247              goto error_unlock;
 18.1248      }
 18.1249 -    spin_unlock(&d->page_alloc_lock);
 18.1250 +    spin_unlock(&p2m->domain->page_alloc_lock);
 18.1251  
 18.1252      P2M_PRINTK("p2m table initialised (%u pages)\n", page_count);
 18.1253      p2m_unlock(p2m);
 18.1254      return 0;
 18.1255  
 18.1256  error_unlock:
 18.1257 -    spin_unlock(&d->page_alloc_lock);
 18.1258 +    spin_unlock(&p2m->domain->page_alloc_lock);
 18.1259   error:
 18.1260      P2M_PRINTK("failed to initialize p2m table, gfn=%05lx, mfn=%"
 18.1261                 PRI_mfn "\n", gfn, mfn_x(mfn));
 18.1262 @@ -1866,12 +1870,11 @@ error_unlock:
 18.1263      return -ENOMEM;
 18.1264  }
 18.1265  
 18.1266 -void p2m_teardown(struct domain *d)
 18.1267 +void p2m_teardown(struct p2m_domain *p2m)
 18.1268  /* Return all the p2m pages to Xen.
 18.1269   * We know we don't have any extra mappings to these pages */
 18.1270  {
 18.1271      struct page_info *pg;
 18.1272 -    struct p2m_domain *p2m = p2m_get_hostp2m(d);
 18.1273  #ifdef __x86_64__
 18.1274      unsigned long gfn;
 18.1275      p2m_type_t t;
 18.1276 @@ -1883,27 +1886,28 @@ void p2m_teardown(struct domain *d)
 18.1277  #ifdef __x86_64__
 18.1278      for ( gfn=0; gfn < p2m->max_mapped_pfn; gfn++ )
 18.1279      {
 18.1280 -        mfn = p2m->get_entry(d, gfn, &t, p2m_query);
 18.1281 +        mfn = p2m->get_entry(p2m, gfn, &t, p2m_query);
 18.1282          if ( mfn_valid(mfn) && (t == p2m_ram_shared) )
 18.1283 -            BUG_ON(mem_sharing_unshare_page(d, gfn, MEM_SHARING_DESTROY_GFN));
 18.1284 +            BUG_ON(mem_sharing_unshare_page(p2m, gfn, MEM_SHARING_DESTROY_GFN));
 18.1285      }
 18.1286  #endif
 18.1287  
 18.1288      p2m->phys_table = pagetable_null();
 18.1289  
 18.1290      while ( (pg = page_list_remove_head(&p2m->pages)) )
 18.1291 -        p2m->free_page(d, pg);
 18.1292 +        p2m->free_page(p2m, pg);
 18.1293      p2m_unlock(p2m);
 18.1294  }
 18.1295  
 18.1296  void p2m_final_teardown(struct domain *d)
 18.1297  {
 18.1298 +    /* Iterate over all p2m tables per domain */
 18.1299      xfree(d->arch.p2m);
 18.1300      d->arch.p2m = NULL;
 18.1301  }
 18.1302  
 18.1303  #if P2M_AUDIT
 18.1304 -static void audit_p2m(struct domain *d)
 18.1305 +static void audit_p2m(struct p2m_domain *p2m)
 18.1306  {
 18.1307      struct page_info *page;
 18.1308      struct domain *od;
 18.1309 @@ -1913,6 +1917,7 @@ static void audit_p2m(struct domain *d)
 18.1310      unsigned long orphans_d = 0, orphans_i = 0, mpbad = 0, pmbad = 0;
 18.1311      int test_linear;
 18.1312      p2m_type_t type;
 18.1313 +    struct domain *d = p2m->domain;
 18.1314  
 18.1315      if ( !paging_mode_translate(d) )
 18.1316          return;
 18.1317 @@ -1967,7 +1972,7 @@ static void audit_p2m(struct domain *d)
 18.1318              continue;
 18.1319          }
 18.1320  
 18.1321 -        p2mfn = gfn_to_mfn_type_foreign(d, gfn, &type, p2m_query);
 18.1322 +        p2mfn = gfn_to_mfn_type_p2m(p2m, gfn, &type, p2m_query);
 18.1323          if ( mfn_x(p2mfn) != mfn )
 18.1324          {
 18.1325              mpbad++;
 18.1326 @@ -1983,9 +1988,9 @@ static void audit_p2m(struct domain *d)
 18.1327              set_gpfn_from_mfn(mfn, INVALID_M2P_ENTRY);
 18.1328          }
 18.1329  
 18.1330 -        if ( test_linear && (gfn <= d->arch.p2m->max_mapped_pfn) )
 18.1331 +        if ( test_linear && (gfn <= p2m->max_mapped_pfn) )
 18.1332          {
 18.1333 -            lp2mfn = mfn_x(gfn_to_mfn_query(d, gfn, &type));
 18.1334 +            lp2mfn = mfn_x(gfn_to_mfn_query(p2m, gfn, &type));
 18.1335              if ( lp2mfn != mfn_x(p2mfn) )
 18.1336              {
 18.1337                  P2M_PRINTK("linear mismatch gfn %#lx -> mfn %#lx "
 18.1338 @@ -2000,7 +2005,7 @@ static void audit_p2m(struct domain *d)
 18.1339      spin_unlock(&d->page_alloc_lock);
 18.1340  
 18.1341      /* Audit part two: walk the domain's p2m table, checking the entries. */
 18.1342 -    if ( pagetable_get_pfn(p2m_get_pagetable(p2m_get_hostp2m(d)) != 0 )
 18.1343 +    if ( pagetable_get_pfn(p2m_get_pagetable(p2m)) != 0 )
 18.1344      {
 18.1345          l2_pgentry_t *l2e;
 18.1346          l1_pgentry_t *l1e;
 18.1347 @@ -2009,12 +2014,12 @@ static void audit_p2m(struct domain *d)
 18.1348  #if CONFIG_PAGING_LEVELS == 4
 18.1349          l4_pgentry_t *l4e;
 18.1350          l3_pgentry_t *l3e;
 18.1351 -        int i3, i4;
 18.1352 -        l4e = map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d)))));
 18.1353 +        int i4, i3;
 18.1354 +        l4e = map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m))));
 18.1355  #else /* CONFIG_PAGING_LEVELS == 3 */
 18.1356          l3_pgentry_t *l3e;
 18.1357          int i3;
 18.1358 -        l3e = map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d)))));
 18.1359 +        l3e = map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m))));
 18.1360  #endif
 18.1361  
 18.1362          gfn = 0;
 18.1363 @@ -2144,11 +2149,11 @@ static void audit_p2m(struct domain *d)
 18.1364  
 18.1365      }
 18.1366  
 18.1367 -    if ( entry_count != d->arch.p2m->pod.entry_count )
 18.1368 +    if ( entry_count != p2m->pod.entry_count )
 18.1369      {
 18.1370          printk("%s: refcounted entry count %d, audit count %d!\n",
 18.1371                 __func__,
 18.1372 -               d->arch.p2m->pod.entry_count,
 18.1373 +               p2m->pod.entry_count,
 18.1374                 entry_count);
 18.1375          BUG();
 18.1376      }
 18.1377 @@ -2166,18 +2171,18 @@ static void audit_p2m(struct domain *d)
 18.1378  
 18.1379  
 18.1380  static void
 18.1381 -p2m_remove_page(struct domain *d, unsigned long gfn, unsigned long mfn,
 18.1382 +p2m_remove_page(struct p2m_domain *p2m, unsigned long gfn, unsigned long mfn,
 18.1383                  unsigned int page_order)
 18.1384  {
 18.1385      unsigned long i;
 18.1386      mfn_t mfn_return;
 18.1387      p2m_type_t t;
 18.1388  
 18.1389 -    if ( !paging_mode_translate(d) )
 18.1390 +    if ( !paging_mode_translate(p2m->domain) )
 18.1391      {
 18.1392 -        if ( need_iommu(d) )
 18.1393 +        if ( need_iommu(p2m->domain) )
 18.1394              for ( i = 0; i < (1 << page_order); i++ )
 18.1395 -                iommu_unmap_page(d, mfn + i);
 18.1396 +                iommu_unmap_page(p2m->domain, mfn + i);
 18.1397          return;
 18.1398      }
 18.1399  
 18.1400 @@ -2185,23 +2190,23 @@ p2m_remove_page(struct domain *d, unsign
 18.1401  
 18.1402      for ( i = 0; i < (1UL << page_order); i++ )
 18.1403      {
 18.1404 -        mfn_return = d->arch.p2m->get_entry(d, gfn + i, &t, p2m_query);
 18.1405 +        mfn_return = p2m->get_entry(p2m, gfn + i, &t, p2m_query);
 18.1406          if ( !p2m_is_grant(t) )
 18.1407              set_gpfn_from_mfn(mfn+i, INVALID_M2P_ENTRY);
 18.1408          ASSERT( !p2m_is_valid(t) || mfn + i == mfn_x(mfn_return) );
 18.1409      }
 18.1410 -    set_p2m_entry(d, gfn, _mfn(INVALID_MFN), page_order, p2m_invalid);
 18.1411 +    set_p2m_entry(p2m, gfn, _mfn(INVALID_MFN), page_order, p2m_invalid);
 18.1412  }
 18.1413  
 18.1414  void
 18.1415 -guest_physmap_remove_page(struct domain *d, unsigned long gfn,
 18.1416 +guest_physmap_remove_entry(struct p2m_domain *p2m, unsigned long gfn,
 18.1417                            unsigned long mfn, unsigned int page_order)
 18.1418  {
 18.1419 -    p2m_lock(d->arch.p2m);
 18.1420 -    audit_p2m(d);
 18.1421 -    p2m_remove_page(d, gfn, mfn, page_order);
 18.1422 -    audit_p2m(d);
 18.1423 -    p2m_unlock(d->arch.p2m);
 18.1424 +    p2m_lock(p2m);
 18.1425 +    audit_p2m(p2m);
 18.1426 +    p2m_remove_page(p2m, gfn, mfn, page_order);
 18.1427 +    audit_p2m(p2m);
 18.1428 +    p2m_unlock(p2m);
 18.1429  }
 18.1430  
 18.1431  #if CONFIG_PAGING_LEVELS == 3
 18.1432 @@ -2232,7 +2237,7 @@ int
 18.1433  guest_physmap_mark_populate_on_demand(struct domain *d, unsigned long gfn,
 18.1434                                        unsigned int order)
 18.1435  {
 18.1436 -    struct p2m_domain *p2md = d->arch.p2m;
 18.1437 +    struct p2m_domain *p2m = p2m_get_hostp2m(d);
 18.1438      unsigned long i;
 18.1439      p2m_type_t ot;
 18.1440      mfn_t omfn;
 18.1441 @@ -2245,15 +2250,15 @@ guest_physmap_mark_populate_on_demand(st
 18.1442      if ( rc != 0 )
 18.1443          return rc;
 18.1444  
 18.1445 -    p2m_lock(p2md);
 18.1446 -    audit_p2m(d);
 18.1447 +    p2m_lock(p2m);
 18.1448 +    audit_p2m(p2m);
 18.1449  
 18.1450      P2M_DEBUG("mark pod gfn=%#lx\n", gfn);
 18.1451  
 18.1452      /* Make sure all gpfns are unused */
 18.1453      for ( i = 0; i < (1UL << order); i++ )
 18.1454      {
 18.1455 -        omfn = gfn_to_mfn_query(d, gfn + i, &ot);
 18.1456 +        omfn = gfn_to_mfn_query(p2m, gfn + i, &ot);
 18.1457          if ( p2m_is_ram(ot) )
 18.1458          {
 18.1459              printk("%s: gfn_to_mfn returned type %d!\n",
 18.1460 @@ -2269,29 +2274,29 @@ guest_physmap_mark_populate_on_demand(st
 18.1461      }
 18.1462  
 18.1463      /* Now, actually do the two-way mapping */
 18.1464 -    if ( !set_p2m_entry(d, gfn, _mfn(POPULATE_ON_DEMAND_MFN), order,
 18.1465 +    if ( !set_p2m_entry(p2m, gfn, _mfn(POPULATE_ON_DEMAND_MFN), order,
 18.1466                          p2m_populate_on_demand) )
 18.1467          rc = -EINVAL;
 18.1468      else
 18.1469      {
 18.1470 -        p2md->pod.entry_count += 1 << order; /* Lock: p2m */
 18.1471 -        p2md->pod.entry_count -= pod_count;
 18.1472 -        BUG_ON(p2md->pod.entry_count < 0);
 18.1473 +        p2m->pod.entry_count += 1 << order; /* Lock: p2m */
 18.1474 +        p2m->pod.entry_count -= pod_count;
 18.1475 +        BUG_ON(p2m->pod.entry_count < 0);
 18.1476      }
 18.1477  
 18.1478 -    audit_p2m(d);
 18.1479 -    p2m_unlock(p2md);
 18.1480 +    audit_p2m(p2m);
 18.1481 +    p2m_unlock(p2m);
 18.1482  
 18.1483  out:
 18.1484      return rc;
 18.1485 -
 18.1486  }
 18.1487  
 18.1488  int
 18.1489 -guest_physmap_add_entry(struct domain *d, unsigned long gfn,
 18.1490 +guest_physmap_add_entry(struct p2m_domain *p2m, unsigned long gfn,
 18.1491                          unsigned long mfn, unsigned int page_order, 
 18.1492                          p2m_type_t t)
 18.1493  {
 18.1494 +    struct domain *d = p2m->domain;
 18.1495      unsigned long i, ogfn;
 18.1496      p2m_type_t ot;
 18.1497      mfn_t omfn;
 18.1498 @@ -2321,20 +2326,20 @@ guest_physmap_add_entry(struct domain *d
 18.1499      if ( rc != 0 )
 18.1500          return rc;
 18.1501  
 18.1502 -    p2m_lock(d->arch.p2m);
 18.1503 -    audit_p2m(d);
 18.1504 +    p2m_lock(p2m);
 18.1505 +    audit_p2m(p2m);
 18.1506  
 18.1507      P2M_DEBUG("adding gfn=%#lx mfn=%#lx\n", gfn, mfn);
 18.1508  
 18.1509      /* First, remove m->p mappings for existing p->m mappings */
 18.1510      for ( i = 0; i < (1UL << page_order); i++ )
 18.1511      {
 18.1512 -        omfn = gfn_to_mfn_query(d, gfn + i, &ot);
 18.1513 +        omfn = gfn_to_mfn_query(p2m, gfn + i, &ot);
 18.1514          if ( p2m_is_grant(ot) )
 18.1515          {
 18.1516              /* Really shouldn't be unmapping grant maps this way */
 18.1517              domain_crash(d);
 18.1518 -            p2m_unlock(d->arch.p2m);
 18.1519 +            p2m_unlock(p2m);
 18.1520              return -EINVAL;
 18.1521          }
 18.1522          else if ( p2m_is_ram(ot) )
 18.1523 @@ -2368,7 +2373,7 @@ guest_physmap_add_entry(struct domain *d
 18.1524               * address */
 18.1525              P2M_DEBUG("aliased! mfn=%#lx, old gfn=%#lx, new gfn=%#lx\n",
 18.1526                        mfn + i, ogfn, gfn + i);
 18.1527 -            omfn = gfn_to_mfn_query(d, ogfn, &ot);
 18.1528 +            omfn = gfn_to_mfn_query(p2m, ogfn, &ot);
 18.1529              /* If we get here, we know the local domain owns the page,
 18.1530                 so it can't have been grant mapped in. */
 18.1531              BUG_ON( p2m_is_grant(ot) );
 18.1532 @@ -2378,7 +2383,7 @@ guest_physmap_add_entry(struct domain *d
 18.1533                  P2M_DEBUG("old gfn=%#lx -> mfn %#lx\n",
 18.1534                            ogfn , mfn_x(omfn));
 18.1535                  if ( mfn_x(omfn) == (mfn + i) )
 18.1536 -                    p2m_remove_page(d, ogfn, mfn + i, 0);
 18.1537 +                    p2m_remove_page(p2m, ogfn, mfn + i, 0);
 18.1538              }
 18.1539          }
 18.1540      }
 18.1541 @@ -2386,7 +2391,7 @@ guest_physmap_add_entry(struct domain *d
 18.1542      /* Now, actually do the two-way mapping */
 18.1543      if ( mfn_valid(_mfn(mfn)) ) 
 18.1544      {
 18.1545 -        if ( !set_p2m_entry(d, gfn, _mfn(mfn), page_order, t) )
 18.1546 +        if ( !set_p2m_entry(p2m, gfn, _mfn(mfn), page_order, t) )
 18.1547              rc = -EINVAL;
 18.1548          if ( !p2m_is_grant(t) )
 18.1549          {
 18.1550 @@ -2398,18 +2403,18 @@ guest_physmap_add_entry(struct domain *d
 18.1551      {
 18.1552          gdprintk(XENLOG_WARNING, "Adding bad mfn to p2m map (%#lx -> %#lx)\n",
 18.1553                   gfn, mfn);
 18.1554 -        if ( !set_p2m_entry(d, gfn, _mfn(INVALID_MFN), page_order, 
 18.1555 +        if ( !set_p2m_entry(p2m, gfn, _mfn(INVALID_MFN), page_order, 
 18.1556                              p2m_invalid) )
 18.1557              rc = -EINVAL;
 18.1558          else
 18.1559          {
 18.1560 -            d->arch.p2m->pod.entry_count -= pod_count; /* Lock: p2m */
 18.1561 -            BUG_ON(d->arch.p2m->pod.entry_count < 0);
 18.1562 +            p2m->pod.entry_count -= pod_count; /* Lock: p2m */
 18.1563 +            BUG_ON(p2m->pod.entry_count < 0);
 18.1564          }
 18.1565      }
 18.1566  
 18.1567 -    audit_p2m(d);
 18.1568 -    p2m_unlock(d->arch.p2m);
 18.1569 +    audit_p2m(p2m);
 18.1570 +    p2m_unlock(p2m);
 18.1571  
 18.1572      return rc;
 18.1573  }
 18.1574 @@ -2417,7 +2422,7 @@ guest_physmap_add_entry(struct domain *d
 18.1575  /* Walk the whole p2m table, changing any entries of the old type
 18.1576   * to the new type.  This is used in hardware-assisted paging to 
  18.1577   * quickly enable or disable log-dirty tracking */
 18.1578 -void p2m_change_type_global(struct domain *d, p2m_type_t ot, p2m_type_t nt)
 18.1579 +void p2m_change_type_global(struct p2m_domain *p2m, p2m_type_t ot, p2m_type_t nt)
 18.1580  {
 18.1581      unsigned long mfn, gfn, flags;
 18.1582      l1_pgentry_t l1e_content;
 18.1583 @@ -2430,17 +2435,16 @@ void p2m_change_type_global(struct domai
 18.1584      l4_pgentry_t *l4e;
 18.1585      unsigned long i4;
 18.1586  #endif /* CONFIG_PAGING_LEVELS == 4 */
 18.1587 -    struct p2m_domain *p2m = p2m_get_hostp2m(d);
 18.1588  
 18.1589      BUG_ON(p2m_is_grant(ot) || p2m_is_grant(nt));
 18.1590  
 18.1591 -    if ( !paging_mode_translate(d) )
 18.1592 +    if ( !paging_mode_translate(p2m->domain) )
 18.1593          return;
 18.1594  
 18.1595      if ( pagetable_get_pfn(p2m_get_pagetable(p2m)) == 0 )
 18.1596          return;
 18.1597  
 18.1598 -    ASSERT(p2m_locked_by_me(d->arch.p2m));
 18.1599 +    ASSERT(p2m_locked_by_me(p2m));
 18.1600  
 18.1601  #if CONFIG_PAGING_LEVELS == 4
 18.1602      l4e = map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m))));
 18.1603 @@ -2476,7 +2480,8 @@ void p2m_change_type_global(struct domai
 18.1604                  gfn = get_gpfn_from_mfn(mfn);
 18.1605                  flags = p2m_type_to_flags(nt);
 18.1606                  l1e_content = l1e_from_pfn(mfn, flags | _PAGE_PSE);
 18.1607 -                paging_write_p2m_entry(d, gfn, (l1_pgentry_t *)&l3e[i3],
 18.1608 +                paging_write_p2m_entry(p2m->domain, gfn,
 18.1609 +                                       (l1_pgentry_t *)&l3e[i3],
 18.1610                                         l3mfn, l1e_content, 3);
 18.1611                  continue;
 18.1612              }
 18.1613 @@ -2506,7 +2511,8 @@ void p2m_change_type_global(struct domai
 18.1614                             * L2_PAGETABLE_ENTRIES) * L1_PAGETABLE_ENTRIES; 
 18.1615                      flags = p2m_type_to_flags(nt);
 18.1616                      l1e_content = l1e_from_pfn(mfn, flags | _PAGE_PSE);
 18.1617 -                    paging_write_p2m_entry(d, gfn, (l1_pgentry_t *)&l2e[i2],
 18.1618 +                    paging_write_p2m_entry(p2m->domain, gfn,
 18.1619 +                                           (l1_pgentry_t *)&l2e[i2],
 18.1620                                             l2mfn, l1e_content, 2);
 18.1621                      continue;
 18.1622                  }
 18.1623 @@ -2529,7 +2535,7 @@ void p2m_change_type_global(struct domai
 18.1624                      /* create a new 1le entry with the new type */
 18.1625                      flags = p2m_type_to_flags(nt);
 18.1626                      l1e_content = l1e_from_pfn(mfn, flags);
 18.1627 -                    paging_write_p2m_entry(d, gfn, &l1e[i1],
 18.1628 +                    paging_write_p2m_entry(p2m->domain, gfn, &l1e[i1],
 18.1629                                             l1mfn, l1e_content, 1);
 18.1630                  }
 18.1631                  unmap_domain_page(l1e);
 18.1632 @@ -2551,7 +2557,7 @@ void p2m_change_type_global(struct domai
 18.1633  
 18.1634  /* Modify the p2m type of a single gfn from ot to nt, returning the 
 18.1635   * entry's previous type */
 18.1636 -p2m_type_t p2m_change_type(struct domain *d, unsigned long gfn, 
 18.1637 +p2m_type_t p2m_change_type(struct p2m_domain *p2m, unsigned long gfn, 
 18.1638                             p2m_type_t ot, p2m_type_t nt)
 18.1639  {
 18.1640      p2m_type_t pt;
 18.1641 @@ -2559,31 +2565,31 @@ p2m_type_t p2m_change_type(struct domain
 18.1642  
 18.1643      BUG_ON(p2m_is_grant(ot) || p2m_is_grant(nt));
 18.1644  
 18.1645 -    p2m_lock(d->arch.p2m);
 18.1646 -
 18.1647 -    mfn = gfn_to_mfn_query(d, gfn, &pt);
 18.1648 +    p2m_lock(p2m);
 18.1649 +
 18.1650 +    mfn = gfn_to_mfn_query(p2m, gfn, &pt);
 18.1651      if ( pt == ot )
 18.1652 -        set_p2m_entry(d, gfn, mfn, 0, nt);
 18.1653 -
 18.1654 -    p2m_unlock(d->arch.p2m);
 18.1655 +        set_p2m_entry(p2m, gfn, mfn, 0, nt);
 18.1656 +
 18.1657 +    p2m_unlock(p2m);
 18.1658  
 18.1659      return pt;
 18.1660  }
 18.1661  
 18.1662  int
 18.1663 -set_mmio_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn)
 18.1664 +set_mmio_p2m_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn)
 18.1665  {
 18.1666      int rc = 0;
 18.1667      p2m_type_t ot;
 18.1668      mfn_t omfn;
 18.1669  
 18.1670 -    if ( !paging_mode_translate(d) )
 18.1671 +    if ( !paging_mode_translate(p2m->domain) )
 18.1672          return 0;
 18.1673  
 18.1674 -    omfn = gfn_to_mfn_query(d, gfn, &ot);
 18.1675 +    omfn = gfn_to_mfn_query(p2m, gfn, &ot);
 18.1676      if ( p2m_is_grant(ot) )
 18.1677      {
 18.1678 -        domain_crash(d);
 18.1679 +        domain_crash(p2m->domain);
 18.1680          return 0;
 18.1681      }
 18.1682      else if ( p2m_is_ram(ot) )
 18.1683 @@ -2593,51 +2599,51 @@ set_mmio_p2m_entry(struct domain *d, uns
 18.1684      }
 18.1685  
 18.1686      P2M_DEBUG("set mmio %lx %lx\n", gfn, mfn_x(mfn));
 18.1687 -    p2m_lock(d->arch.p2m);
 18.1688 -    rc = set_p2m_entry(d, gfn, mfn, 0, p2m_mmio_direct);
 18.1689 -    p2m_unlock(d->arch.p2m);
 18.1690 +    p2m_lock(p2m);
 18.1691 +    rc = set_p2m_entry(p2m, gfn, mfn, 0, p2m_mmio_direct);
 18.1692 +    p2m_unlock(p2m);
 18.1693      if ( 0 == rc )
 18.1694          gdprintk(XENLOG_ERR,
 18.1695              "set_mmio_p2m_entry: set_p2m_entry failed! mfn=%08lx\n",
 18.1696 -            gmfn_to_mfn(d, gfn));
 18.1697 +            mfn_x(gfn_to_mfn(p2m, gfn, &ot)));
 18.1698      return rc;
 18.1699  }
 18.1700  
 18.1701  int
 18.1702 -clear_mmio_p2m_entry(struct domain *d, unsigned long gfn)
 18.1703 +clear_mmio_p2m_entry(struct p2m_domain *p2m, unsigned long gfn)
 18.1704  {
 18.1705      int rc = 0;
 18.1706 -    unsigned long mfn;
 18.1707 -
 18.1708 -    if ( !paging_mode_translate(d) )
 18.1709 +    mfn_t mfn;
 18.1710 +    p2m_type_t t;
 18.1711 +
 18.1712 +    if ( !paging_mode_translate(p2m->domain) )
 18.1713          return 0;
 18.1714  
 18.1715 -    mfn = gmfn_to_mfn(d, gfn);
 18.1716 -    if ( INVALID_MFN == mfn )
 18.1717 +    mfn = gfn_to_mfn(p2m, gfn, &t);
 18.1718 +    if ( !mfn_valid(mfn) )
 18.1719      {
 18.1720          gdprintk(XENLOG_ERR,
 18.1721              "clear_mmio_p2m_entry: gfn_to_mfn failed! gfn=%08lx\n", gfn);
 18.1722          return 0;
 18.1723      }
 18.1724 -    p2m_lock(d->arch.p2m);
 18.1725 -    rc = set_p2m_entry(d, gfn, _mfn(INVALID_MFN), 0, 0);
 18.1726 -    p2m_unlock(d->arch.p2m);
 18.1727 +    p2m_lock(p2m);
 18.1728 +    rc = set_p2m_entry(p2m, gfn, _mfn(INVALID_MFN), 0, 0);
 18.1729 +    p2m_unlock(p2m);
 18.1730  
 18.1731      return rc;
 18.1732  }
 18.1733  
 18.1734 -#ifdef __x86_64__
 18.1735  int
 18.1736 -set_shared_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn)
 18.1737 +set_shared_p2m_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn)
 18.1738  {
 18.1739      int rc = 0;
 18.1740      p2m_type_t ot;
 18.1741      mfn_t omfn;
 18.1742  
 18.1743 -    if ( !paging_mode_translate(d) )
 18.1744 +    if ( !paging_mode_translate(p2m->domain) )
 18.1745          return 0;
 18.1746  
 18.1747 -    omfn = gfn_to_mfn_query(d, gfn, &ot);
 18.1748 +    omfn = gfn_to_mfn_query(p2m, gfn, &ot);
 18.1749      /* At the moment we only allow p2m change if gfn has already been made
 18.1750       * sharable first */
 18.1751      ASSERT(p2m_is_shared(ot));
 18.1752 @@ -2646,22 +2652,23 @@ set_shared_p2m_entry(struct domain *d, u
 18.1753      set_gpfn_from_mfn(mfn_x(omfn), INVALID_M2P_ENTRY);
 18.1754  
 18.1755      P2M_DEBUG("set shared %lx %lx\n", gfn, mfn_x(mfn));
 18.1756 -    rc = set_p2m_entry(d, gfn, mfn, 0, p2m_ram_shared);
 18.1757 +    rc = set_p2m_entry(p2m, gfn, mfn, 0, p2m_ram_shared);
 18.1758      if ( 0 == rc )
 18.1759          gdprintk(XENLOG_ERR,
 18.1760              "set_mmio_p2m_entry: set_p2m_entry failed! mfn=%08lx\n",
 18.1761 -            gmfn_to_mfn(d, gfn));
 18.1762 +            gmfn_to_mfn(p2m->domain, gfn));
 18.1763      return rc;
 18.1764  }
 18.1765  
 18.1766 -int p2m_mem_paging_nominate(struct domain *d, unsigned long gfn)
 18.1767 +#ifdef __x86_64__
 18.1768 +int p2m_mem_paging_nominate(struct p2m_domain *p2m, unsigned long gfn)
 18.1769  {
 18.1770      struct page_info *page;
 18.1771      p2m_type_t p2mt;
 18.1772      mfn_t mfn;
 18.1773      int ret;
 18.1774  
 18.1775 -    mfn = gfn_to_mfn(d, gfn, &p2mt);
 18.1776 +    mfn = gfn_to_mfn(p2m, gfn, &p2mt);
 18.1777  
 18.1778      /* Check if mfn is valid */
 18.1779      ret = -EINVAL;
 18.1780 @@ -2687,9 +2694,9 @@ int p2m_mem_paging_nominate(struct domai
 18.1781          goto out;
 18.1782  
 18.1783      /* Fix p2m entry */
 18.1784 -    p2m_lock(d->arch.p2m);
 18.1785 -    set_p2m_entry(d, gfn, mfn, 0, p2m_ram_paging_out);
 18.1786 -    p2m_unlock(d->arch.p2m);
 18.1787 +    p2m_lock(p2m);
 18.1788 +    set_p2m_entry(p2m, gfn, mfn, 0, p2m_ram_paging_out);
 18.1789 +    p2m_unlock(p2m);
 18.1790  
 18.1791      ret = 0;
 18.1792  
 18.1793 @@ -2697,14 +2704,15 @@ int p2m_mem_paging_nominate(struct domai
 18.1794      return ret;
 18.1795  }
 18.1796  
 18.1797 -int p2m_mem_paging_evict(struct domain *d, unsigned long gfn)
 18.1798 +int p2m_mem_paging_evict(struct p2m_domain *p2m, unsigned long gfn)
 18.1799  {
 18.1800      struct page_info *page;
 18.1801      p2m_type_t p2mt;
 18.1802      mfn_t mfn;
 18.1803 +    struct domain *d = p2m->domain;
 18.1804  
 18.1805      /* Get mfn */
 18.1806 -    mfn = gfn_to_mfn(d, gfn, &p2mt);
 18.1807 +    mfn = gfn_to_mfn(p2m, gfn, &p2mt);
 18.1808      if ( unlikely(!mfn_valid(mfn)) )
 18.1809          return -EINVAL;
 18.1810  
 18.1811 @@ -2722,9 +2730,9 @@ int p2m_mem_paging_evict(struct domain *
 18.1812          put_page(page);
 18.1813  
 18.1814      /* Remove mapping from p2m table */
 18.1815 -    p2m_lock(d->arch.p2m);
 18.1816 -    set_p2m_entry(d, gfn, _mfn(PAGING_MFN), 0, p2m_ram_paged);
 18.1817 -    p2m_unlock(d->arch.p2m);
 18.1818 +    p2m_lock(p2m);
 18.1819 +    set_p2m_entry(p2m, gfn, _mfn(PAGING_MFN), 0, p2m_ram_paged);
 18.1820 +    p2m_unlock(p2m);
 18.1821  
 18.1822      /* Put the page back so it gets freed */
 18.1823      put_page(page);
 18.1824 @@ -2732,11 +2740,12 @@ int p2m_mem_paging_evict(struct domain *
 18.1825      return 0;
 18.1826  }
 18.1827  
 18.1828 -void p2m_mem_paging_populate(struct domain *d, unsigned long gfn)
 18.1829 +void p2m_mem_paging_populate(struct p2m_domain *p2m, unsigned long gfn)
 18.1830  {
 18.1831      struct vcpu *v = current;
 18.1832      mem_event_request_t req;
 18.1833      p2m_type_t p2mt;
 18.1834 +    struct domain *d = p2m->domain;
 18.1835  
 18.1836      memset(&req, 0, sizeof(req));
 18.1837  
 18.1838 @@ -2747,12 +2756,12 @@ void p2m_mem_paging_populate(struct doma
 18.1839      /* Fix p2m mapping */
 18.1840      /* XXX: It seems inefficient to have this here, as it's only needed
  18.1841       *      in one case (an EPT guest accessing a paged-out page) */
 18.1842 -    gfn_to_mfn(d, gfn, &p2mt);
 18.1843 +    gfn_to_mfn(p2m, gfn, &p2mt);
 18.1844      if ( p2mt != p2m_ram_paging_out )
 18.1845      {
 18.1846 -        p2m_lock(d->arch.p2m);
 18.1847 -        set_p2m_entry(d, gfn, _mfn(PAGING_MFN), 0, p2m_ram_paging_in_start);
 18.1848 -        p2m_unlock(d->arch.p2m);
 18.1849 +        p2m_lock(p2m);
 18.1850 +        set_p2m_entry(p2m, gfn, _mfn(PAGING_MFN), 0, p2m_ram_paging_in_start);
 18.1851 +        p2m_unlock(p2m);
 18.1852      }
 18.1853  
 18.1854      /* Pause domain */
 18.1855 @@ -2770,25 +2779,26 @@ void p2m_mem_paging_populate(struct doma
 18.1856      mem_event_put_request(d, &req);
 18.1857  }
 18.1858  
 18.1859 -int p2m_mem_paging_prep(struct domain *d, unsigned long gfn)
 18.1860 +int p2m_mem_paging_prep(struct p2m_domain *p2m, unsigned long gfn)
 18.1861  {
 18.1862      struct page_info *page;
 18.1863  
 18.1864      /* Get a free page */
 18.1865 -    page = alloc_domheap_page(d, 0);
 18.1866 +    page = alloc_domheap_page(p2m->domain, 0);
 18.1867      if ( unlikely(page == NULL) )
 18.1868          return -EINVAL;
 18.1869  
 18.1870      /* Fix p2m mapping */
 18.1871 -    p2m_lock(d->arch.p2m);
 18.1872 -    set_p2m_entry(d, gfn, page_to_mfn(page), 0, p2m_ram_paging_in);
 18.1873 -    p2m_unlock(d->arch.p2m);
 18.1874 +    p2m_lock(p2m);
 18.1875 +    set_p2m_entry(p2m, gfn, page_to_mfn(page), 0, p2m_ram_paging_in);
 18.1876 +    p2m_unlock(p2m);
 18.1877  
 18.1878      return 0;
 18.1879  }
 18.1880  
 18.1881 -void p2m_mem_paging_resume(struct domain *d)
 18.1882 +void p2m_mem_paging_resume(struct p2m_domain *p2m)
 18.1883  {
 18.1884 +    struct domain *d = p2m->domain;
 18.1885      mem_event_response_t rsp;
 18.1886      p2m_type_t p2mt;
 18.1887      mfn_t mfn;
 18.1888 @@ -2797,10 +2807,10 @@ void p2m_mem_paging_resume(struct domain
 18.1889      mem_event_get_response(d, &rsp);
 18.1890  
 18.1891      /* Fix p2m entry */
 18.1892 -    mfn = gfn_to_mfn(d, rsp.gfn, &p2mt);
 18.1893 -    p2m_lock(d->arch.p2m);
 18.1894 -    set_p2m_entry(d, rsp.gfn, mfn, 0, p2m_ram_rw);
 18.1895 -    p2m_unlock(d->arch.p2m);
 18.1896 +    mfn = gfn_to_mfn(p2m, rsp.gfn, &p2mt);
 18.1897 +    p2m_lock(p2m);
 18.1898 +    set_p2m_entry(p2m, rsp.gfn, mfn, 0, p2m_ram_rw);
 18.1899 +    p2m_unlock(p2m);
 18.1900  
 18.1901      /* Unpause domain */
 18.1902      if ( rsp.flags & MEM_EVENT_FLAG_VCPU_PAUSED )
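The p2m.c hunks above move every internal p2m operation (set_p2m_entry(), the gfn_to_mfn() family, p2m_alloc_table(), and the PoD and paging helpers) from a struct domain * argument to a struct p2m_domain *, keeping the owning domain reachable through the new p2m->domain back-pointer. Callers that only need the host p2m obtain it with p2m_get_hostp2m(). The sketch below shows the resulting calling convention; example_gfn_lookup() is a hypothetical helper and not part of the changeset.

/* Illustrative sketch only: translate a gfn through the host p2m using
 * the per-p2m interfaces introduced by this changeset. */
static unsigned long example_gfn_lookup(struct domain *d, unsigned long gfn)
{
    struct p2m_domain *p2m = p2m_get_hostp2m(d);  /* host p2m of this domain */
    p2m_type_t t;
    mfn_t mfn;

    mfn = gfn_to_mfn(p2m, gfn, &t);               /* was: gfn_to_mfn(d, gfn, &t) */
    if ( !p2m_is_ram(t) || !mfn_valid(mfn) )
        return INVALID_MFN;

    return mfn_x(mfn);
}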
    19.1 --- a/xen/arch/x86/mm/shadow/common.c	Mon Aug 09 16:40:18 2010 +0100
    19.2 +++ b/xen/arch/x86/mm/shadow/common.c	Mon Aug 09 16:46:42 2010 +0100
    19.3 @@ -1714,8 +1714,9 @@ sh_alloc_p2m_pages(struct domain *d)
    19.4  
    19.5  // Returns 0 if no memory is available...
    19.6  static struct page_info *
    19.7 -shadow_alloc_p2m_page(struct domain *d)
    19.8 +shadow_alloc_p2m_page(struct p2m_domain *p2m)
    19.9  {
   19.10 +    struct domain *d = p2m->domain;
   19.11      struct page_info *pg;
   19.12      mfn_t mfn;
   19.13      void *p;
   19.14 @@ -1741,8 +1742,9 @@ shadow_alloc_p2m_page(struct domain *d)
   19.15  }
   19.16  
   19.17  static void
   19.18 -shadow_free_p2m_page(struct domain *d, struct page_info *pg)
   19.19 +shadow_free_p2m_page(struct p2m_domain *p2m, struct page_info *pg)
   19.20  {
   19.21 +    struct domain *d = p2m->domain;
   19.22      ASSERT(page_get_owner(pg) == d);
   19.23      /* Should have just the one ref we gave it in alloc_p2m_page() */
   19.24      if ( (pg->count_info & PGC_count_mask) != 1 )
   19.25 @@ -3100,6 +3102,7 @@ int shadow_enable(struct domain *d, u32 
   19.26      struct page_info *pg = NULL;
   19.27      uint32_t *e;
   19.28      int i, rv = 0;
   19.29 +    struct p2m_domain *p2m = p2m_get_hostp2m(d);
   19.30  
   19.31      mode |= PG_SH_enable;
   19.32  
   19.33 @@ -3135,7 +3138,8 @@ int shadow_enable(struct domain *d, u32 
   19.34       * to avoid possible deadlock. */
   19.35      if ( mode & PG_translate )
   19.36      {
   19.37 -        rv = p2m_alloc_table(d, shadow_alloc_p2m_page, shadow_free_p2m_page);
   19.38 +        rv = p2m_alloc_table(p2m,
   19.39 +            shadow_alloc_p2m_page, shadow_free_p2m_page);
   19.40          if (rv != 0)
   19.41              goto out_unlocked;
   19.42      }
   19.43 @@ -3146,7 +3150,7 @@ int shadow_enable(struct domain *d, u32 
   19.44      {
   19.45          /* Get a single page from the shadow pool.  Take it via the 
   19.46           * P2M interface to make freeing it simpler afterwards. */
   19.47 -        pg = shadow_alloc_p2m_page(d);
   19.48 +        pg = shadow_alloc_p2m_page(p2m);
   19.49          if ( pg == NULL )
   19.50          {
   19.51              rv = -ENOMEM;
   19.52 @@ -3195,10 +3199,10 @@ int shadow_enable(struct domain *d, u32 
   19.53   out_locked:
   19.54      shadow_unlock(d);
   19.55   out_unlocked:
   19.56 -    if ( rv != 0 && !pagetable_is_null(p2m_get_pagetable(p2m_get_hostp2m(d))) )
   19.57 -        p2m_teardown(d);
   19.58 +    if ( rv != 0 && !pagetable_is_null(p2m_get_pagetable(p2m)) )
   19.59 +        p2m_teardown(p2m);
   19.60      if ( rv != 0 && pg != NULL )
   19.61 -        shadow_free_p2m_page(d, pg);
   19.62 +        shadow_free_p2m_page(p2m, pg);
   19.63      domain_unpause(d);
   19.64      return rv;
   19.65  }
   19.66 @@ -3210,6 +3214,7 @@ void shadow_teardown(struct domain *d)
   19.67      struct vcpu *v;
   19.68      mfn_t mfn;
   19.69      struct page_info *pg;
   19.70 +    struct p2m_domain *p2m = p2m_get_hostp2m(d);
   19.71  
   19.72      ASSERT(d->is_dying);
   19.73      ASSERT(d != current->domain);
   19.74 @@ -3264,7 +3269,7 @@ void shadow_teardown(struct domain *d)
   19.75  #endif /* (SHADOW_OPTIMIZATIONS & (SHOPT_VIRTUAL_TLB|SHOPT_OUT_OF_SYNC)) */
   19.76  
   19.77      while ( (pg = page_list_remove_head(&d->arch.paging.shadow.p2m_freelist)) )
   19.78 -        shadow_free_p2m_page(d, pg);
   19.79 +        shadow_free_p2m_page(p2m, pg);
   19.80  
   19.81      if ( d->arch.paging.shadow.total_pages != 0 )
   19.82      {
   19.83 @@ -3298,7 +3303,7 @@ void shadow_teardown(struct domain *d)
   19.84              if ( !hvm_paging_enabled(v) )
   19.85                  v->arch.guest_table = pagetable_null();
   19.86          }
   19.87 -        shadow_free_p2m_page(d, 
   19.88 +        shadow_free_p2m_page(p2m, 
   19.89              pagetable_get_page(d->arch.paging.shadow.unpaged_pagetable));
   19.90          d->arch.paging.shadow.unpaged_pagetable = pagetable_null();
   19.91      }
   19.92 @@ -3335,7 +3340,7 @@ void shadow_final_teardown(struct domain
   19.93          shadow_teardown(d);
   19.94  
   19.95      /* It is now safe to pull down the p2m map. */
   19.96 -    p2m_teardown(d);
   19.97 +    p2m_teardown(p2m_get_hostp2m(d));
   19.98  
   19.99      SHADOW_PRINTK("dom %u final teardown done."
  19.100                     "  Shadow pages total = %u, free = %u, p2m=%u\n",
  19.101 @@ -3657,10 +3662,11 @@ int shadow_track_dirty_vram(struct domai
  19.102      unsigned long i;
  19.103      p2m_type_t t;
  19.104      struct sh_dirty_vram *dirty_vram = d->arch.hvm_domain.dirty_vram;
  19.105 +    struct p2m_domain *p2m = p2m_get_hostp2m(d);
  19.106  
  19.107      if (end_pfn < begin_pfn
  19.108 -            || begin_pfn > d->arch.p2m->max_mapped_pfn
  19.109 -            || end_pfn >= d->arch.p2m->max_mapped_pfn)
  19.110 +            || begin_pfn > p2m->max_mapped_pfn
  19.111 +            || end_pfn >= p2m->max_mapped_pfn)
  19.112          return -EINVAL;
  19.113  
  19.114      shadow_lock(d);
  19.115 @@ -3729,7 +3735,7 @@ int shadow_track_dirty_vram(struct domai
  19.116  
  19.117          /* Iterate over VRAM to track dirty bits. */
  19.118          for ( i = 0; i < nr; i++ ) {
  19.119 -            mfn_t mfn = gfn_to_mfn(d, begin_pfn + i, &t);
  19.120 +            mfn_t mfn = gfn_to_mfn(p2m, begin_pfn + i, &t);
  19.121              struct page_info *page;
  19.122              int dirty = 0;
  19.123              paddr_t sl1ma = dirty_vram->sl1ma[i];
  19.124 @@ -3814,7 +3820,7 @@ int shadow_track_dirty_vram(struct domai
  19.125                  /* was clean for more than two seconds, try to disable guest
  19.126                   * write access */
  19.127                  for ( i = begin_pfn; i < end_pfn; i++ ) {
  19.128 -                    mfn_t mfn = gfn_to_mfn(d, i, &t);
  19.129 +                    mfn_t mfn = gfn_to_mfn(p2m, i, &t);
  19.130                      if (mfn_x(mfn) != INVALID_MFN)
  19.131                          flush_tlb |= sh_remove_write_access(d->vcpu[0], mfn, 1, 0);
  19.132                  }
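shadow_alloc_p2m_page() and shadow_free_p2m_page() above now carry the callback signatures expected by the reworked p2m_alloc_table(), which receives the p2m rather than the domain. As a minimal sketch under that assumption, a bare-bones callback pair could look as follows; example_alloc_p2m_page() and example_free_p2m_page() are hypothetical and omit the pool accounting and ownership checks the shadow versions perform.

/* Illustrative sketch only: the smallest allocator pair matching the new
 * p2m_alloc_table() callback types. */
static struct page_info *example_alloc_p2m_page(struct p2m_domain *p2m)
{
    /* Take an anonymous domheap page on behalf of p2m->domain. */
    return alloc_domheap_page(p2m->domain, 0);
}

static void example_free_p2m_page(struct p2m_domain *p2m, struct page_info *pg)
{
    free_domheap_page(pg);
}

Such a pair would be handed to p2m_alloc_table() in the same way shadow_enable() passes the shadow pool callbacks above.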
    20.1 --- a/xen/arch/x86/mm/shadow/multi.c	Mon Aug 09 16:40:18 2010 +0100
    20.2 +++ b/xen/arch/x86/mm/shadow/multi.c	Mon Aug 09 16:46:42 2010 +0100
    20.3 @@ -167,7 +167,7 @@ static inline uint32_t
    20.4  sh_walk_guest_tables(struct vcpu *v, unsigned long va, walk_t *gw, 
    20.5                       uint32_t pfec)
    20.6  {
    20.7 -    return guest_walk_tables(v, va, gw, pfec, 
    20.8 +    return guest_walk_tables(v, p2m_get_hostp2m(v->domain), va, gw, pfec, 
    20.9  #if GUEST_PAGING_LEVELS == 3 /* PAE */
   20.10                               _mfn(INVALID_MFN),
   20.11                               v->arch.paging.shadow.gl3e
   20.12 @@ -2240,6 +2240,7 @@ static int validate_gl4e(struct vcpu *v,
   20.13      shadow_l4e_t *sl4p = se;
   20.14      mfn_t sl3mfn = _mfn(INVALID_MFN);
   20.15      struct domain *d = v->domain;
   20.16 +    struct p2m_domain *p2m = p2m_get_hostp2m(d);
   20.17      p2m_type_t p2mt;
   20.18      int result = 0;
   20.19  
   20.20 @@ -2248,7 +2249,7 @@ static int validate_gl4e(struct vcpu *v,
   20.21      if ( guest_l4e_get_flags(new_gl4e) & _PAGE_PRESENT )
   20.22      {
   20.23          gfn_t gl3gfn = guest_l4e_get_gfn(new_gl4e);
   20.24 -        mfn_t gl3mfn = gfn_to_mfn_query(d, gl3gfn, &p2mt);
   20.25 +        mfn_t gl3mfn = gfn_to_mfn_query(p2m, gl3gfn, &p2mt);
   20.26          if ( p2m_is_ram(p2mt) )
   20.27              sl3mfn = get_shadow_status(v, gl3mfn, SH_type_l3_shadow);
   20.28          else if ( p2mt != p2m_populate_on_demand )
   20.29 @@ -2299,13 +2300,14 @@ static int validate_gl3e(struct vcpu *v,
   20.30      mfn_t sl2mfn = _mfn(INVALID_MFN);
   20.31      p2m_type_t p2mt;
   20.32      int result = 0;
   20.33 +    struct p2m_domain *p2m = p2m_get_hostp2m(v->domain);
   20.34  
   20.35      perfc_incr(shadow_validate_gl3e_calls);
   20.36  
   20.37      if ( guest_l3e_get_flags(new_gl3e) & _PAGE_PRESENT )
   20.38      {
   20.39          gfn_t gl2gfn = guest_l3e_get_gfn(new_gl3e);
   20.40 -        mfn_t gl2mfn = gfn_to_mfn_query(v->domain, gl2gfn, &p2mt);
   20.41 +        mfn_t gl2mfn = gfn_to_mfn_query(p2m, gl2gfn, &p2mt);
   20.42          if ( p2m_is_ram(p2mt) )
   20.43              sl2mfn = get_shadow_status(v, gl2mfn, SH_type_l2_shadow);
   20.44          else if ( p2mt != p2m_populate_on_demand )
   20.45 @@ -2329,6 +2331,7 @@ static int validate_gl2e(struct vcpu *v,
   20.46      guest_l2e_t new_gl2e = *(guest_l2e_t *)new_ge;
   20.47      shadow_l2e_t *sl2p = se;
   20.48      mfn_t sl1mfn = _mfn(INVALID_MFN);
   20.49 +    struct p2m_domain *p2m = p2m_get_hostp2m(v->domain);
   20.50      p2m_type_t p2mt;
   20.51      int result = 0;
   20.52  
   20.53 @@ -2354,7 +2357,7 @@ static int validate_gl2e(struct vcpu *v,
   20.54          }
   20.55          else
   20.56          {
   20.57 -            mfn_t gl1mfn = gfn_to_mfn_query(v->domain, gl1gfn, &p2mt);
   20.58 +            mfn_t gl1mfn = gfn_to_mfn_query(p2m, gl1gfn, &p2mt);
   20.59              if ( p2m_is_ram(p2mt) )
   20.60                  sl1mfn = get_shadow_status(v, gl1mfn, SH_type_l1_shadow); 
   20.61              else if ( p2mt != p2m_populate_on_demand )
   20.62 @@ -2415,6 +2418,7 @@ static int validate_gl1e(struct vcpu *v,
   20.63      shadow_l1e_t *sl1p = se;
   20.64      gfn_t gfn;
   20.65      mfn_t gmfn;
   20.66 +    struct p2m_domain *p2m = p2m_get_hostp2m(v->domain);
   20.67      p2m_type_t p2mt;
   20.68      int result = 0;
   20.69  #if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
   20.70 @@ -2424,7 +2428,7 @@ static int validate_gl1e(struct vcpu *v,
   20.71      perfc_incr(shadow_validate_gl1e_calls);
   20.72  
   20.73      gfn = guest_l1e_get_gfn(new_gl1e);
   20.74 -    gmfn = gfn_to_mfn_query(v->domain, gfn, &p2mt);
   20.75 +    gmfn = gfn_to_mfn_query(p2m, gfn, &p2mt);
   20.76  
   20.77      l1e_propagate_from_guest(v, new_gl1e, gmfn, &new_sl1e, ft_prefetch, p2mt);
   20.78      result |= shadow_set_l1e(v, sl1p, new_sl1e, p2mt, sl1mfn);
   20.79 @@ -2484,7 +2488,7 @@ void sh_resync_l1(struct vcpu *v, mfn_t 
   20.80              shadow_l1e_t nsl1e;
   20.81  
   20.82              gfn = guest_l1e_get_gfn(gl1e);
   20.83 -            gmfn = gfn_to_mfn_query(v->domain, gfn, &p2mt);
   20.84 +            gmfn = gfn_to_mfn_query(p2m_get_hostp2m(v->domain), gfn, &p2mt);
   20.85              l1e_propagate_from_guest(v, gl1e, gmfn, &nsl1e, ft_prefetch, p2mt);
   20.86              rc |= shadow_set_l1e(v, sl1p, nsl1e, p2mt, sl1mfn);
   20.87  
   20.88 @@ -2810,7 +2814,7 @@ static void sh_prefetch(struct vcpu *v, 
   20.89  
   20.90          /* Look at the gfn that the l1e is pointing at */
   20.91          gfn = guest_l1e_get_gfn(gl1e);
   20.92 -        gmfn = gfn_to_mfn_query(v->domain, gfn, &p2mt);
   20.93 +        gmfn = gfn_to_mfn_query(p2m_get_hostp2m(v->domain), gfn, &p2mt);
   20.94  
   20.95          /* Propagate the entry.  */
   20.96          l1e_propagate_from_guest(v, gl1e, gmfn, &sl1e, ft_prefetch, p2mt);
   20.97 @@ -3166,7 +3170,7 @@ static int sh_page_fault(struct vcpu *v,
   20.98  
   20.99      /* What mfn is the guest trying to access? */
  20.100      gfn = guest_l1e_get_gfn(gw.l1e);
  20.101 -    gmfn = gfn_to_mfn_guest(d, gfn, &p2mt);
  20.102 +    gmfn = gfn_to_mfn_guest(p2m_get_hostp2m(d), gfn, &p2mt);
  20.103  
  20.104      if ( shadow_mode_refcounts(d) && 
  20.105           ((!p2m_is_valid(p2mt) && !p2m_is_grant(p2mt)) ||
  20.106 @@ -4272,7 +4276,7 @@ sh_update_cr3(struct vcpu *v, int do_loc
  20.107              if ( guest_l3e_get_flags(gl3e[i]) & _PAGE_PRESENT )
  20.108              {
  20.109                  gl2gfn = guest_l3e_get_gfn(gl3e[i]);
  20.110 -                gl2mfn = gfn_to_mfn_query(d, gl2gfn, &p2mt);
  20.111 +                gl2mfn = gfn_to_mfn_query(p2m_get_hostp2m(d), gl2gfn, &p2mt);
  20.112                  if ( p2m_is_ram(p2mt) )
  20.113                      flush |= sh_remove_write_access(v, gl2mfn, 2, 0);
  20.114              }
  20.115 @@ -4285,7 +4289,7 @@ sh_update_cr3(struct vcpu *v, int do_loc
  20.116              if ( guest_l3e_get_flags(gl3e[i]) & _PAGE_PRESENT )
  20.117              {
  20.118                  gl2gfn = guest_l3e_get_gfn(gl3e[i]);
  20.119 -                gl2mfn = gfn_to_mfn_query(d, gl2gfn, &p2mt);
  20.120 +                gl2mfn = gfn_to_mfn_query(p2m_get_hostp2m(d), gl2gfn, &p2mt);
  20.121                  if ( p2m_is_ram(p2mt) )
  20.122                      sh_set_toplevel_shadow(v, i, gl2mfn, (i == 3) 
  20.123                                             ? SH_type_l2h_shadow 
  20.124 @@ -4682,7 +4686,7 @@ static void sh_pagetable_dying(struct vc
  20.125      if ( gcr3 == gpa )
  20.126          fast_path = 1;
  20.127  
  20.128 -    gmfn = gfn_to_mfn_query(v->domain, _gfn(gpa >> PAGE_SHIFT), &p2mt);
  20.129 +    gmfn = gfn_to_mfn_query(p2m_get_hostp2m(v->domain), _gfn(gpa >> PAGE_SHIFT), &p2mt);
  20.130      if ( !mfn_valid(gmfn) || !p2m_is_ram(p2mt) )
  20.131      {
  20.132          printk(XENLOG_DEBUG "sh_pagetable_dying: gpa not valid %"PRIpaddr"\n",
  20.133 @@ -4702,7 +4706,7 @@ static void sh_pagetable_dying(struct vc
  20.134          {
  20.135              /* retrieving the l2s */
  20.136              gl2a = guest_l3e_get_paddr(gl3e[i]);
  20.137 -            gmfn = gfn_to_mfn_query(v->domain, _gfn(gl2a >> PAGE_SHIFT), &p2mt);
  20.138 +            gmfn = gfn_to_mfn_query(p2m_get_hostp2m(v->domain), _gfn(gl2a >> PAGE_SHIFT), &p2mt);
  20.139              smfn = shadow_hash_lookup(v, mfn_x(gmfn), SH_type_l2_pae_shadow);
  20.140          }
  20.141  
  20.142 @@ -4737,7 +4741,7 @@ static void sh_pagetable_dying(struct vc
  20.143  
  20.144      shadow_lock(v->domain);
  20.145  
  20.146 -    gmfn = gfn_to_mfn_query(v->domain, _gfn(gpa >> PAGE_SHIFT), &p2mt);
  20.147 +    gmfn = gfn_to_mfn_query(p2m_get_hostp2m(v->domain), _gfn(gpa >> PAGE_SHIFT), &p2mt);
  20.148  #if GUEST_PAGING_LEVELS == 2
  20.149      smfn = shadow_hash_lookup(v, mfn_x(gmfn), SH_type_l2_32_shadow);
  20.150  #else
  20.151 @@ -4777,6 +4781,7 @@ static mfn_t emulate_gva_to_mfn(struct v
  20.152      mfn_t mfn;
  20.153      p2m_type_t p2mt;
  20.154      uint32_t pfec = PFEC_page_present | PFEC_write_access;
  20.155 +    struct p2m_domain *p2m = p2m_get_hostp2m(v->domain);
  20.156  
  20.157      /* Translate the VA to a GFN */
  20.158      gfn = sh_gva_to_gfn(v, vaddr, &pfec);
  20.159 @@ -4792,9 +4797,9 @@ static mfn_t emulate_gva_to_mfn(struct v
  20.160      /* Translate the GFN to an MFN */
  20.161      /* PoD: query only if shadow lock is held (to avoid deadlock) */
  20.162      if ( shadow_locked_by_me(v->domain) )
  20.163 -        mfn = gfn_to_mfn_query(v->domain, _gfn(gfn), &p2mt);
  20.164 +        mfn = gfn_to_mfn_query(p2m, _gfn(gfn), &p2mt);
  20.165      else
  20.166 -        mfn = gfn_to_mfn(v->domain, _gfn(gfn), &p2mt);
  20.167 +        mfn = gfn_to_mfn(p2m, _gfn(gfn), &p2mt);
  20.168          
  20.169      if ( p2m_is_readonly(p2mt) )
  20.170          return _mfn(READONLY_GFN);
  20.171 @@ -5199,7 +5204,7 @@ int sh_audit_l1_table(struct vcpu *v, mf
  20.172              {
  20.173                  gfn = guest_l1e_get_gfn(*gl1e);
  20.174                  mfn = shadow_l1e_get_mfn(*sl1e);
  20.175 -                gmfn = gfn_to_mfn_query(v->domain, gfn, &p2mt);
  20.176 +                gmfn = gfn_to_mfn_query(p2m_get_hostp2m(v->domain), gfn, &p2mt);
  20.177                  if ( !p2m_is_grant(p2mt) && mfn_x(gmfn) != mfn_x(mfn) )
  20.178                      AUDIT_FAIL(1, "bad translation: gfn %" SH_PRI_gfn
  20.179                                 " --> %" PRI_mfn " != mfn %" PRI_mfn,
  20.180 @@ -5243,6 +5248,7 @@ int sh_audit_l2_table(struct vcpu *v, mf
  20.181      shadow_l2e_t *sl2e;
  20.182      mfn_t mfn, gmfn, gl2mfn;
  20.183      gfn_t gfn;
  20.184 +    struct p2m_domain *p2m = p2m_get_hostp2m(v->domain);
  20.185      p2m_type_t p2mt;
  20.186      char *s;
  20.187      int done = 0;
  20.188 @@ -5269,7 +5275,7 @@ int sh_audit_l2_table(struct vcpu *v, mf
  20.189              mfn = shadow_l2e_get_mfn(*sl2e);
  20.190              gmfn = (guest_l2e_get_flags(*gl2e) & _PAGE_PSE)  
  20.191                  ? get_fl1_shadow_status(v, gfn)
  20.192 -                : get_shadow_status(v, gfn_to_mfn_query(v->domain, gfn, &p2mt), 
  20.193 +                : get_shadow_status(v, gfn_to_mfn_query(p2m, gfn, &p2mt), 
  20.194                                      SH_type_l1_shadow);
  20.195              if ( mfn_x(gmfn) != mfn_x(mfn) )
  20.196                  AUDIT_FAIL(2, "bad translation: gfn %" SH_PRI_gfn
  20.197 @@ -5277,8 +5283,8 @@ int sh_audit_l2_table(struct vcpu *v, mf
  20.198                             " --> %" PRI_mfn " != mfn %" PRI_mfn,
  20.199                             gfn_x(gfn), 
  20.200                             (guest_l2e_get_flags(*gl2e) & _PAGE_PSE) ? 0
  20.201 -                           : mfn_x(gfn_to_mfn_query(v->domain, gfn, &p2mt)),
  20.202 -                           mfn_x(gmfn), mfn_x(mfn));
  20.203 +                           : mfn_x(gfn_to_mfn_query(p2m,
  20.204 +                                   gfn, &p2mt)), mfn_x(gmfn), mfn_x(mfn));
  20.205          }
  20.206      });
  20.207      sh_unmap_domain_page(gp);
  20.208 @@ -5316,7 +5322,7 @@ int sh_audit_l3_table(struct vcpu *v, mf
  20.209          {
  20.210              gfn = guest_l3e_get_gfn(*gl3e);
  20.211              mfn = shadow_l3e_get_mfn(*sl3e);
  20.212 -            gmfn = get_shadow_status(v, gfn_to_mfn_query(v->domain, gfn, &p2mt), 
  20.213 +            gmfn = get_shadow_status(v, gfn_to_mfn_query(p2m_get_hostp2m(v->domain), gfn, &p2mt), 
  20.214                                       ((GUEST_PAGING_LEVELS == 3 ||
  20.215                                         is_pv_32on64_vcpu(v))
  20.216                                        && !shadow_mode_external(v->domain)
  20.217 @@ -5363,7 +5369,8 @@ int sh_audit_l4_table(struct vcpu *v, mf
  20.218          {
  20.219              gfn = guest_l4e_get_gfn(*gl4e);
  20.220              mfn = shadow_l4e_get_mfn(*sl4e);
  20.221 -            gmfn = get_shadow_status(v, gfn_to_mfn_query(v->domain, gfn, &p2mt), 
  20.222 +            gmfn = get_shadow_status(v, gfn_to_mfn_query(p2m_get_hostp2m(v->domain),
  20.223 +                                     gfn, &p2mt), 
  20.224                                       SH_type_l3_shadow);
  20.225              if ( mfn_x(gmfn) != mfn_x(mfn) )
  20.226                  AUDIT_FAIL(4, "bad translation: gfn %" SH_PRI_gfn
    21.1 --- a/xen/common/grant_table.c	Mon Aug 09 16:40:18 2010 +0100
    21.2 +++ b/xen/common/grant_table.c	Mon Aug 09 16:46:42 2010 +0100
    21.3 @@ -109,7 +109,7 @@ static unsigned inline int max_nr_maptra
    21.4  #define gfn_to_mfn_private(_d, _gfn) ({                     \
    21.5      p2m_type_t __p2mt;                                      \
    21.6      unsigned long __x;                                      \
    21.7 -    __x = mfn_x(gfn_to_mfn_unshare(_d, _gfn, &__p2mt, 1));  \
    21.8 +    __x = mfn_x(gfn_to_mfn_unshare(p2m_get_hostp2m(_d), _gfn, &__p2mt, 1));  \
    21.9      if ( !p2m_is_valid(__p2mt) )                            \
   21.10          __x = INVALID_MFN;                                  \
   21.11      __x; })
   21.12 @@ -1933,12 +1933,13 @@ static void
   21.13      {
   21.14  #ifdef CONFIG_X86
   21.15          p2m_type_t p2mt;
   21.16 -        s_frame = mfn_x(gfn_to_mfn(sd, op->source.u.gmfn, &p2mt));
   21.17 +        struct p2m_domain *p2m = p2m_get_hostp2m(sd);
   21.18 +        s_frame = mfn_x(gfn_to_mfn(p2m, op->source.u.gmfn, &p2mt));
   21.19          if ( !p2m_is_valid(p2mt) )
   21.20            s_frame = INVALID_MFN;
   21.21          if ( p2m_is_paging(p2mt) )
   21.22          {
   21.23 -            p2m_mem_paging_populate(sd, op->source.u.gmfn);
   21.24 +            p2m_mem_paging_populate(p2m, op->source.u.gmfn);
   21.25              rc = -ENOENT;
   21.26              goto error_out;
   21.27          }
   21.28 @@ -1979,12 +1980,13 @@ static void
   21.29      {
   21.30  #ifdef CONFIG_X86
   21.31          p2m_type_t p2mt;
   21.32 -        d_frame = mfn_x(gfn_to_mfn_unshare(dd, op->dest.u.gmfn, &p2mt, 1));
   21.33 +        struct p2m_domain *p2m = p2m_get_hostp2m(dd);
   21.34 +        d_frame = mfn_x(gfn_to_mfn_unshare(p2m, op->dest.u.gmfn, &p2mt, 1));
   21.35          if ( !p2m_is_valid(p2mt) )
   21.36            d_frame = INVALID_MFN;
   21.37          if ( p2m_is_paging(p2mt) )
   21.38          {
   21.39 -            p2m_mem_paging_populate(dd, op->dest.u.gmfn);
   21.40 +            p2m_mem_paging_populate(p2m, op->dest.u.gmfn);
   21.41              rc = -ENOENT;
   21.42              goto error_out;
   21.43          }
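Where a function makes several p2m calls against the same domain, the grant-table hunks look the host p2m up once and reuse the local pointer rather than wrapping every call in p2m_get_hostp2m(). A minimal sketch of that hoisting pattern, with the function name chosen only for illustration:

    /* Illustrative only: fetch_frame() is not part of this patch. */
    static unsigned long fetch_frame(struct domain *d, unsigned long gfn)
    {
        struct p2m_domain *p2m = p2m_get_hostp2m(d);   /* look up once */
        p2m_type_t p2mt;
        unsigned long mfn = mfn_x(gfn_to_mfn(p2m, gfn, &p2mt));

        if ( !p2m_is_valid(p2mt) )
            return INVALID_MFN;
        if ( p2m_is_paging(p2mt) )
        {
            p2m_mem_paging_populate(p2m, gfn);          /* reuse the same p2m */
            return INVALID_MFN;
        }
        return mfn;
    }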
    22.1 --- a/xen/common/memory.c	Mon Aug 09 16:40:18 2010 +0100
    22.2 +++ b/xen/common/memory.c	Mon Aug 09 16:46:42 2010 +0100
    22.3 @@ -161,7 +161,7 @@ int guest_remove_page(struct domain *d, 
    22.4      unsigned long mfn;
    22.5  
    22.6  #ifdef CONFIG_X86
    22.7 -    mfn = mfn_x(gfn_to_mfn(d, gmfn, &p2mt)); 
    22.8 +    mfn = mfn_x(gfn_to_mfn(p2m_get_hostp2m(d), gmfn, &p2mt)); 
    22.9  #else
   22.10      mfn = gmfn_to_mfn(d, gmfn);
   22.11  #endif
   22.12 @@ -356,7 +356,7 @@ static long memory_exchange(XEN_GUEST_HA
   22.13                  p2m_type_t p2mt;
   22.14  
   22.15                  /* Shared pages cannot be exchanged */
   22.16 -                mfn = mfn_x(gfn_to_mfn_unshare(d, gmfn + k, &p2mt, 0));
   22.17 +                mfn = mfn_x(gfn_to_mfn_unshare(p2m_get_hostp2m(d), gmfn + k, &p2mt, 0));
   22.18                  if ( p2m_is_shared(p2mt) )
   22.19                  {
   22.20                      rc = -ENOMEM;
    23.1 --- a/xen/common/tmem_xen.c	Mon Aug 09 16:40:18 2010 +0100
    23.2 +++ b/xen/common/tmem_xen.c	Mon Aug 09 16:46:42 2010 +0100
    23.3 @@ -100,7 +100,7 @@ static inline void *cli_mfn_to_va(tmem_c
    23.4      unsigned long cli_mfn;
    23.5      p2m_type_t t;
    23.6  
    23.7 -    cli_mfn = mfn_x(gfn_to_mfn(current->domain, cmfn, &t));
    23.8 +    cli_mfn = mfn_x(gfn_to_mfn(p2m_get_hostp2m(current->domain), cmfn, &t));
    23.9      if (t != p2m_ram_rw || cli_mfn == INVALID_MFN)
   23.10          return NULL;
   23.11      if (pcli_mfn != NULL)
    24.1 --- a/xen/include/asm-x86/guest_pt.h	Mon Aug 09 16:40:18 2010 +0100
    24.2 +++ b/xen/include/asm-x86/guest_pt.h	Mon Aug 09 16:46:42 2010 +0100
    24.3 @@ -272,8 +272,8 @@ guest_walk_to_gpa(walk_t *gw)
    24.4  #define guest_walk_tables GPT_RENAME(guest_walk_tables, GUEST_PAGING_LEVELS)
    24.5  
    24.6  extern uint32_t 
    24.7 -guest_walk_tables(struct vcpu *v, unsigned long va, walk_t *gw, 
    24.8 -                  uint32_t pfec, mfn_t top_mfn, void *top_map);
    24.9 +guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m, unsigned long va,
   24.10 +                  walk_t *gw, uint32_t pfec, mfn_t top_mfn, void *top_map);
   24.11  
   24.12  /* Pretty-print the contents of a guest-walk */
   24.13  static inline void print_gw(walk_t *gw)
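guest_walk_tables() now takes the p2m to walk through as an explicit argument between the vcpu and the virtual address; the rest of the signature is unchanged. A hedged caller sketch, where walk_gva() and the choice of pfec are illustrative and not part of the patch:

    /* Illustrative wrapper only; existing callers pass their own pfec,
     * top_mfn and top_map exactly as before. */
    static uint32_t walk_gva(struct vcpu *v, unsigned long va, walk_t *gw,
                             mfn_t top_mfn, void *top_map)
    {
        return guest_walk_tables(v, p2m_get_hostp2m(v->domain), va, gw,
                                 PFEC_page_present, top_mfn, top_map);
    }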
    25.1 --- a/xen/include/asm-x86/mem_sharing.h	Mon Aug 09 16:40:18 2010 +0100
    25.2 +++ b/xen/include/asm-x86/mem_sharing.h	Mon Aug 09 16:46:42 2010 +0100
    25.3 @@ -30,17 +30,17 @@
    25.4  typedef uint64_t shr_handle_t; 
    25.5  
    25.6  unsigned int mem_sharing_get_nr_saved_mfns(void);
    25.7 -int mem_sharing_nominate_page(struct domain *d, 
    25.8 +int mem_sharing_nominate_page(struct p2m_domain *p2m, 
    25.9                                unsigned long gfn,
   25.10                                int expected_refcnt,
   25.11                                shr_handle_t *phandle);
   25.12  #define MEM_SHARING_MUST_SUCCEED      (1<<0)
   25.13  #define MEM_SHARING_DESTROY_GFN       (1<<1)
   25.14 -int mem_sharing_unshare_page(struct domain *d, 
   25.15 +int mem_sharing_unshare_page(struct p2m_domain *p2m, 
   25.16                               unsigned long gfn, 
   25.17                               uint16_t flags);
   25.18  int mem_sharing_sharing_resume(struct domain *d);
   25.19 -int mem_sharing_cache_resize(struct domain *d, int new_size);
   25.20 +int mem_sharing_cache_resize(struct p2m_domain *p2m, int new_size);
   25.21  int mem_sharing_domctl(struct domain *d, 
   25.22                         xen_domctl_mem_sharing_op_t *mec);
   25.23  void mem_sharing_init(void);
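The memory-sharing entry points move to the same p2m-based convention; gfn_to_mfn_unshare() in p2m.h below is the in-tree caller, passing its own p2m straight through to mem_sharing_unshare_page(). A one-line sketch of an outside caller, with the wrapper name being illustrative:

    /* Illustrative only: unshare_one() is not part of this patch. */
    static int unshare_one(struct domain *d, unsigned long gfn)
    {
        /* The first argument is now the p2m the gfn lives in, not the domain. */
        return mem_sharing_unshare_page(p2m_get_hostp2m(d), gfn, 0);
    }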
    26.1 --- a/xen/include/asm-x86/p2m.h	Mon Aug 09 16:40:18 2010 +0100
    26.2 +++ b/xen/include/asm-x86/p2m.h	Mon Aug 09 16:46:42 2010 +0100
    26.3 @@ -172,23 +172,28 @@ struct p2m_domain {
    26.4      /* Shadow translated domain: p2m mapping */
    26.5      pagetable_t        phys_table;
    26.6  
    26.7 +    struct domain     *domain;   /* back pointer to domain */
    26.8 +
    26.9      /* Pages used to construct the p2m */
   26.10      struct page_list_head pages;
   26.11  
   26.12      /* Functions to call to get or free pages for the p2m */
   26.13 -    struct page_info * (*alloc_page  )(struct domain *d);
   26.14 -    void               (*free_page   )(struct domain *d,
   26.15 +    struct page_info * (*alloc_page  )(struct p2m_domain *p2m);
   26.16 +    void               (*free_page   )(struct p2m_domain *p2m,
   26.17                                         struct page_info *pg);
   26.18 -    int                (*set_entry   )(struct domain *d, unsigned long gfn,
   26.19 +    int                (*set_entry   )(struct p2m_domain *p2m,
   26.20 +                                       unsigned long gfn,
   26.21                                         mfn_t mfn, unsigned int page_order,
   26.22                                         p2m_type_t p2mt);
   26.23 -    mfn_t              (*get_entry   )(struct domain *d, unsigned long gfn,
   26.24 +    mfn_t              (*get_entry   )(struct p2m_domain *p2m,
   26.25 +                                       unsigned long gfn,
   26.26                                         p2m_type_t *p2mt,
   26.27                                         p2m_query_t q);
   26.28 -    mfn_t              (*get_entry_current)(unsigned long gfn,
   26.29 +    mfn_t              (*get_entry_current)(struct p2m_domain *p2m,
   26.30 +                                            unsigned long gfn,
   26.31                                              p2m_type_t *p2mt,
   26.32                                              p2m_query_t q);
   26.33 -    void               (*change_entry_type_global)(struct domain *d,
   26.34 +    void               (*change_entry_type_global)(struct p2m_domain *p2m,
   26.35                                                     p2m_type_t ot,
   26.36                                                     p2m_type_t nt);
   26.37  
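struct p2m_domain gains a back pointer to its owning domain, so the table callbacks, which now receive only the p2m, can still reach per-domain state. A skeletal, hypothetical implementation of the set_entry hook to show the shape; the real implementations live elsewhere in this changeset:

    /* Hypothetical skeleton; not the in-tree implementation. */
    static int my_set_entry(struct p2m_domain *p2m, unsigned long gfn,
                            mfn_t mfn, unsigned int page_order, p2m_type_t p2mt)
    {
        struct domain *d = p2m->domain;   /* new back pointer */

        (void)d;                          /* e.g. for locking or logging */
        /* ... update the translation for gfn here ... */
        return 0;
    }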
   26.38 @@ -279,65 +284,64 @@ static inline p2m_type_t p2m_flags_to_ty
   26.39  }
   26.40  
   26.41  /* Read the current domain's p2m table.  Do not populate PoD pages. */
   26.42 -static inline mfn_t gfn_to_mfn_type_current(unsigned long gfn, p2m_type_t *t,
   26.43 +static inline mfn_t gfn_to_mfn_type_current(struct p2m_domain *p2m,
   26.44 +                                            unsigned long gfn, p2m_type_t *t,
   26.45                                              p2m_query_t q)
   26.46  {
   26.47 -    return current->domain->arch.p2m->get_entry_current(gfn, t, q);
   26.48 +    return p2m->get_entry_current(p2m, gfn, t, q);
   26.49  }
   26.50  
   26.51 -/* Read another domain's P2M table, mapping pages as we go.
   26.52 +/* Read P2M table, mapping pages as we go.
   26.53   * Do not populate PoD pages. */
   26.54 -static inline
   26.55 -mfn_t gfn_to_mfn_type_foreign(struct domain *d, unsigned long gfn, p2m_type_t *t,
   26.56 -                              p2m_query_t q)
   26.57 +static inline mfn_t
   26.58 +gfn_to_mfn_type_p2m(struct p2m_domain *p2m, unsigned long gfn,
   26.59 +                              p2m_type_t *t, p2m_query_t q)
   26.60  {
   26.61 -    return d->arch.p2m->get_entry(d, gfn, t, q);
   26.62 +    return p2m->get_entry(p2m, gfn, t, q);
   26.63  }
   26.64  
   26.65 +
   26.66  /* General conversion function from gfn to mfn */
   26.67 -static inline mfn_t _gfn_to_mfn_type(struct domain *d,
   26.68 +static inline mfn_t _gfn_to_mfn_type(struct p2m_domain *p2m,
   26.69                                       unsigned long gfn, p2m_type_t *t,
   26.70                                       p2m_query_t q)
   26.71  {
   26.72 -    if ( !paging_mode_translate(d) )
   26.73 +    if ( !p2m || !paging_mode_translate(p2m->domain) )
   26.74      {
   26.75          /* Not necessarily true, but for non-translated guests, we claim
   26.76           * it's the most generic kind of memory */
   26.77          *t = p2m_ram_rw;
   26.78          return _mfn(gfn);
   26.79      }
   26.80 -    if ( likely(current->domain == d) )
   26.81 -        return gfn_to_mfn_type_current(gfn, t, q);
   26.82 +    if ( likely(current->domain == p2m->domain) )
   26.83 +        return gfn_to_mfn_type_current(p2m, gfn, t, q);
   26.84      else
   26.85 -        return gfn_to_mfn_type_foreign(d, gfn, t, q);
   26.86 +        return gfn_to_mfn_type_p2m(p2m, gfn, t, q);
   26.87  }
   26.88  
   26.89 -#define gfn_to_mfn(d, g, t) _gfn_to_mfn_type((d), (g), (t), p2m_alloc)
   26.90 -#define gfn_to_mfn_query(d, g, t) _gfn_to_mfn_type((d), (g), (t), p2m_query)
   26.91 -#define gfn_to_mfn_guest(d, g, t) _gfn_to_mfn_type((d), (g), (t), p2m_guest)
   26.92 +#define gfn_to_mfn(p2m, g, t) _gfn_to_mfn_type((p2m), (g), (t), p2m_alloc)
   26.93 +#define gfn_to_mfn_query(p2m, g, t) _gfn_to_mfn_type((p2m), (g), (t), p2m_query)
   26.94 +#define gfn_to_mfn_guest(p2m, g, t) _gfn_to_mfn_type((p2m), (g), (t), p2m_guest)
   26.95  
   26.96 -#define gfn_to_mfn_current(g, t) gfn_to_mfn_type_current((g), (t), p2m_alloc)
   26.97 -#define gfn_to_mfn_foreign(d, g, t) gfn_to_mfn_type_foreign((d), (g), (t), p2m_alloc)
   26.98 -
   26.99 -static inline mfn_t gfn_to_mfn_unshare(struct domain *d,
  26.100 +static inline mfn_t gfn_to_mfn_unshare(struct p2m_domain *p2m,
  26.101                                         unsigned long gfn,
  26.102                                         p2m_type_t *p2mt,
  26.103                                         int must_succeed)
  26.104  {
  26.105      mfn_t mfn;
  26.106  
  26.107 -    mfn = gfn_to_mfn(d, gfn, p2mt);
  26.108 +    mfn = gfn_to_mfn(p2m, gfn, p2mt);
  26.109  #ifdef __x86_64__
  26.110      if ( p2m_is_shared(*p2mt) )
  26.111      {
  26.112 -        if ( mem_sharing_unshare_page(d, gfn,
  26.113 +        if ( mem_sharing_unshare_page(p2m, gfn,
  26.114                                        must_succeed 
  26.115                                        ? MEM_SHARING_MUST_SUCCEED : 0) )
  26.116          {
  26.117              BUG_ON(must_succeed);
  26.118              return mfn;
  26.119          }
  26.120 -        mfn = gfn_to_mfn(d, gfn, p2mt);
  26.121 +        mfn = gfn_to_mfn(p2m, gfn, p2mt);
  26.122      }
  26.123  #endif
  26.124  
  26.125 @@ -350,7 +354,7 @@ static inline unsigned long gmfn_to_mfn(
  26.126  {
  26.127      mfn_t mfn;
  26.128      p2m_type_t t;
  26.129 -    mfn = gfn_to_mfn(d, gpfn, &t);
  26.130 +    mfn = gfn_to_mfn(d->arch.p2m, gpfn, &t);
  26.131      if ( p2m_is_valid(t) )
  26.132          return mfn_x(mfn);
  26.133      return INVALID_MFN;
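The conversion macros now key off a p2m rather than a domain, and _gfn_to_mfn_type() also tolerates a NULL p2m by treating it like a non-translated domain. A typical post-patch caller therefore mirrors the gmfn_to_mfn() helper just below; spelled out with an illustrative name:

    /* Illustrative only; mirrors gmfn_to_mfn() below. */
    static unsigned long translate_gfn(struct domain *d, unsigned long gfn)
    {
        p2m_type_t t;
        mfn_t mfn = gfn_to_mfn(p2m_get_hostp2m(d), gfn, &t);

        return p2m_is_valid(t) ? mfn_x(mfn) : INVALID_MFN;
    }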
  26.134 @@ -374,16 +378,16 @@ int p2m_init(struct domain *d);
  26.135   * build the p2m, and to release it again at the end of day. 
  26.136   *
  26.137   * Returns 0 for success or -errno. */
  26.138 -int p2m_alloc_table(struct domain *d,
  26.139 -                    struct page_info * (*alloc_page)(struct domain *d),
  26.140 -                    void (*free_page)(struct domain *d, struct page_info *pg));
  26.141 +int p2m_alloc_table(struct p2m_domain *p2m,
  26.142 +               struct page_info * (*alloc_page)(struct p2m_domain *p2m),
  26.143 +               void (*free_page)(struct p2m_domain *p2m, struct page_info *pg));
  26.144  
  26.145  /* Return all the p2m resources to Xen. */
  26.146 -void p2m_teardown(struct domain *d);
  26.147 +void p2m_teardown(struct p2m_domain *p2m);
  26.148  void p2m_final_teardown(struct domain *d);
  26.149  
  26.150  /* Dump PoD information about the domain */
  26.151 -void p2m_pod_dump_data(struct domain *d);
  26.152 +void p2m_pod_dump_data(struct p2m_domain *p2m);
  26.153  
  26.154  /* Move all pages from the populate-on-demand cache to the domain page_list
  26.155   * (usually in preparation for domain destruction) */
  26.156 @@ -402,15 +406,19 @@ p2m_pod_decrease_reservation(struct doma
  26.157  
  26.158  /* Called by p2m code when demand-populating a PoD page */
  26.159  int
  26.160 -p2m_pod_demand_populate(struct domain *d, unsigned long gfn,
  26.161 +p2m_pod_demand_populate(struct p2m_domain *p2m, unsigned long gfn,
  26.162                          unsigned int order,
  26.163                          p2m_query_t q);
  26.164  
  26.165  /* Add a page to a domain's p2m table */
  26.166 -int guest_physmap_add_entry(struct domain *d, unsigned long gfn,
  26.167 +int guest_physmap_add_entry(struct p2m_domain *p2m, unsigned long gfn,
  26.168                              unsigned long mfn, unsigned int page_order, 
  26.169                              p2m_type_t t);
  26.170  
  26.171 +/* Remove a page from a domain's p2m table */
  26.172 +void guest_physmap_remove_entry(struct p2m_domain *p2m, unsigned long gfn,
  26.173 +                            unsigned long mfn, unsigned int page_order);
  26.174 +
  26.175  /* Set a p2m range as populate-on-demand */
  26.176  int guest_physmap_mark_populate_on_demand(struct domain *d, unsigned long gfn,
  26.177                                            unsigned int order);
  26.178 @@ -419,49 +427,55 @@ int guest_physmap_mark_populate_on_deman
  26.179   *
  26.180   * Return 0 for success
  26.181   */
  26.182 -static inline int guest_physmap_add_page(struct domain *d, unsigned long gfn,
  26.183 +static inline int guest_physmap_add_page(struct domain *d,
  26.184 +                                         unsigned long gfn,
  26.185                                           unsigned long mfn,
  26.186                                           unsigned int page_order)
  26.187  {
  26.188 -    return guest_physmap_add_entry(d, gfn, mfn, page_order, p2m_ram_rw);
  26.189 +    return guest_physmap_add_entry(d->arch.p2m, gfn, mfn, page_order, p2m_ram_rw);
  26.190  }
  26.191  
  26.192  /* Remove a page from a domain's p2m table */
  26.193 -void guest_physmap_remove_page(struct domain *d, unsigned long gfn,
  26.194 -                               unsigned long mfn, unsigned int page_order);
  26.195 +static inline void guest_physmap_remove_page(struct domain *d,
  26.196 +                               unsigned long gfn,
  26.197 +                               unsigned long mfn, unsigned int page_order)
  26.198 +{
  26.199 +    guest_physmap_remove_entry(d->arch.p2m, gfn, mfn, page_order);
  26.200 +}
  26.201  
  26.202  /* Change types across all p2m entries in a domain */
  26.203 -void p2m_change_type_global(struct domain *d, p2m_type_t ot, p2m_type_t nt);
  26.204 -void p2m_change_entry_type_global(struct domain *d, p2m_type_t ot, p2m_type_t nt);
  26.205 +void p2m_change_type_global(struct p2m_domain *p2m, p2m_type_t ot, p2m_type_t nt);
  26.206 +void p2m_change_entry_type_global(struct p2m_domain *p2m, p2m_type_t ot, p2m_type_t nt);
  26.207  
  26.208  /* Compare-exchange the type of a single p2m entry */
  26.209 -p2m_type_t p2m_change_type(struct domain *d, unsigned long gfn,
  26.210 +p2m_type_t p2m_change_type(struct p2m_domain *p2m, unsigned long gfn,
  26.211                             p2m_type_t ot, p2m_type_t nt);
  26.212  
  26.213  /* Set mmio addresses in the p2m table (for pass-through) */
  26.214 -int set_mmio_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn);
  26.215 -int clear_mmio_p2m_entry(struct domain *d, unsigned long gfn);
  26.216 +int set_mmio_p2m_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn);
  26.217 +int clear_mmio_p2m_entry(struct p2m_domain *p2m, unsigned long gfn);
  26.218  
  26.219  
  26.220  #ifdef __x86_64__
  26.221  /* Modify p2m table for shared gfn */
  26.222 -int set_shared_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn);
  26.223 +int set_shared_p2m_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn);
  26.224 +
  26.225  /* Check if a nominated gfn is valid to be paged out */
  26.226 -int p2m_mem_paging_nominate(struct domain *d, unsigned long gfn);
  26.227 +int p2m_mem_paging_nominate(struct p2m_domain *p2m, unsigned long gfn);
  26.228  /* Evict a frame */
  26.229 -int p2m_mem_paging_evict(struct domain *d, unsigned long gfn);
  26.230 +int p2m_mem_paging_evict(struct p2m_domain *p2m, unsigned long gfn);
  26.231  /* Start populating a paged out frame */
  26.232 -void p2m_mem_paging_populate(struct domain *d, unsigned long gfn);
  26.233 +void p2m_mem_paging_populate(struct p2m_domain *p2m, unsigned long gfn);
  26.234  /* Prepare the p2m for paging a frame in */
  26.235 -int p2m_mem_paging_prep(struct domain *d, unsigned long gfn);
  26.236 +int p2m_mem_paging_prep(struct p2m_domain *p2m, unsigned long gfn);
  26.237  /* Resume normal operation (in case a domain was paused) */
  26.238 -void p2m_mem_paging_resume(struct domain *d);
  26.239 +void p2m_mem_paging_resume(struct p2m_domain *p2m);
  26.240  #else
  26.241 -static inline void p2m_mem_paging_populate(struct domain *d, unsigned long gfn)
  26.242 +static inline void p2m_mem_paging_populate(struct p2m_domain *p2m, unsigned long gfn)
  26.243  { }
  26.244  #endif
  26.245  
  26.246 -struct page_info *p2m_alloc_ptp(struct domain *d, unsigned long type);
  26.247 +struct page_info *p2m_alloc_ptp(struct p2m_domain *p2m, unsigned long type);
  26.248  
  26.249  #endif /* _XEN_P2M_H */
  26.250
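Note that guest_physmap_add_page() and the new guest_physmap_remove_page() wrapper keep the old domain-based interface; only the underlying *_entry functions take a p2m, reached via d->arch.p2m. Callers outside the p2m code are therefore unchanged by this patch, as in this minimal sketch with an illustrative name:

    /* Illustrative only: map_one_page() is not part of this patch. */
    static int map_one_page(struct domain *d, unsigned long gfn,
                            unsigned long mfn)
    {
        return guest_physmap_add_page(d, gfn, mfn, 0 /* order-0, i.e. 4k */);
    }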