
changeset 22713:f14b296d263f

mem_access: introduce P2M mem_access types

* Introduces an access type for each page, giving independent read, write,
and execute permissions per page. An access type can only further restrict
the permissions implied by the page type: for example, a p2m_type_ro page
with an access of p2m_access_rw is read-only overall, since p2m_type_ro
removes write access and p2m_access_rw removes execute access.
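
A minimal sketch of how the two restrictions compose, mirroring the two-pass
logic of ept_p2m_type_to_flags() in the diff below (the rwx struct and the
effective_perms() helper are illustrative only, not part of the patch):

    /* Illustrative only: the effective permissions are the intersection
     * of the type-implied and access-implied rights (special conversion
     * types such as p2m_access_rx2rw aside). */
    struct rwx { unsigned int r:1, w:1, x:1; };

    static struct rwx effective_perms(struct rwx type, struct rwx access)
    {
        struct rwx e;
        e.r = type.r & access.r;
        e.w = type.w & access.w;
        e.x = type.x & access.x;
        return e;  /* p2m_type_ro (r-x) with p2m_access_rw (rw-) => r-- */
    }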

* Implements the access flag storage for EPT, narrowing the P2M type field
from its ten software-available bits down to six and using four of the freed
bits for the access type.
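
The resulting software-available bit layout of each EPT entry, as defined in
the xen/include/asm-x86/hvm/vmx/vmx.h hunk below:

    mfn     : 40,  /* bits 51:12 - machine physical frame number */
    sa_p2mt :  6,  /* bits 57:52 - p2m_type_t (was 10 bits)      */
    access  :  4,  /* bits 61:58 - p2m_access_t (new)            */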

* Access flags are stored under a loose consistency contract: a page's
access permissions may be reset to the default at any time. Right now that
happens on page type changes, which are exactly the points where one would
want to reevaluate whether the permissions still make sense for the page.
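
For example, p2m_change_type() now deliberately resets the access field to
the per-domain default when it retypes a page (condensed from the
xen/arch/x86/mm/p2m.c hunk below):

    mfn = gfn_to_mfn_query(p2m, gfn, &pt);
    if ( pt == ot )
        set_p2m_entry(p2m, gfn, mfn, 0, nt, p2m->default_access);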

Signed-off-by: Joe Epstein <jepstein98@gmail.com>
Acked-by: Tim Deegan <Tim.Deegan@citrix.com>
Acked-by: Keir Fraser <keir@xen.org>
author Joe Epstein <jepstein98@gmail.com>
date Fri Jan 07 11:54:36 2011 +0000 (2011-01-07)
parents cb756381087c
children 02efc054da7b
files xen/arch/x86/mm/hap/p2m-ept.c xen/arch/x86/mm/p2m.c xen/include/asm-x86/hvm/vmx/vmx.h xen/include/asm-x86/p2m.h
line diff
     1.1 --- a/xen/arch/x86/mm/hap/p2m-ept.c	Thu Jan 06 19:02:36 2011 +0000
     1.2 +++ b/xen/arch/x86/mm/hap/p2m-ept.c	Fri Jan 07 11:54:36 2011 +0000
     1.3 @@ -62,8 +62,9 @@ static int ept_pod_check_and_populate(st
     1.4      return r;
     1.5  }
     1.6  
     1.7 -static void ept_p2m_type_to_flags(ept_entry_t *entry, p2m_type_t type)
     1.8 +static void ept_p2m_type_to_flags(ept_entry_t *entry, p2m_type_t type, p2m_access_t access)
     1.9  {
    1.10 +    /* First apply type permissions */
    1.11      switch(type)
    1.12      {
    1.13          case p2m_invalid:
    1.14 @@ -75,30 +76,61 @@ static void ept_p2m_type_to_flags(ept_en
    1.15          case p2m_ram_paging_in_start:
    1.16          default:
    1.17              entry->r = entry->w = entry->x = 0;
    1.18 -            return;
    1.19 +            break;
    1.20          case p2m_ram_rw:
    1.21              entry->r = entry->w = entry->x = 1;
    1.22 -            return;
    1.23 +            break;
    1.24          case p2m_mmio_direct:
    1.25              entry->r = entry->x = 1;
    1.26              entry->w = !rangeset_contains_singleton(mmio_ro_ranges,
    1.27                                                      entry->mfn);
    1.28 -            return;
    1.29 +            break;
    1.30          case p2m_ram_logdirty:
    1.31          case p2m_ram_ro:
    1.32          case p2m_ram_shared:
    1.33              entry->r = entry->x = 1;
    1.34              entry->w = 0;
    1.35 -            return;
    1.36 +            break;
    1.37          case p2m_grant_map_rw:
    1.38              entry->r = entry->w = 1;
    1.39              entry->x = 0;
    1.40 -            return;
    1.41 +            break;
    1.42          case p2m_grant_map_ro:
    1.43              entry->r = 1;
    1.44              entry->w = entry->x = 0;
    1.45 -            return;
    1.46 +            break;
    1.47      }
    1.48 +
    1.49 +
    1.50 +    /* Then restrict with access permissions */
    1.51 +    switch (access) 
    1.52 +    {
    1.53 +        case p2m_access_n:
    1.54 +            entry->r = entry->w = entry->x = 0;
    1.55 +            break;
    1.56 +        case p2m_access_r:
    1.57 +            entry->w = entry->x = 0;
    1.58 +            break;
    1.59 +        case p2m_access_w:
    1.60 +            entry->r = entry->x = 0;
    1.61 +            break;
    1.62 +        case p2m_access_x:
    1.63 +            entry->r = entry->w = 0;
    1.64 +            break;
    1.65 +        case p2m_access_rx:
    1.66 +        case p2m_access_rx2rw:
    1.67 +            entry->w = 0;
    1.68 +            break;
    1.69 +        case p2m_access_wx:
    1.70 +            entry->r = 0;
    1.71 +            break;
    1.72 +        case p2m_access_rw:
    1.73 +            entry->x = 0;
    1.74 +            break;           
    1.75 +        case p2m_access_rwx:
    1.76 +            break;
    1.77 +    }
    1.78 +    
    1.79  }
    1.80  
    1.81  #define GUEST_TABLE_MAP_FAILED  0
    1.82 @@ -117,6 +149,8 @@ static int ept_set_middle_entry(struct p
    1.83  
    1.84      ept_entry->epte = 0;
    1.85      ept_entry->mfn = page_to_mfn(pg);
    1.86 +    ept_entry->access = p2m->default_access;
    1.87 +
    1.88      ept_entry->r = ept_entry->w = ept_entry->x = 1;
    1.89  
    1.90      return 1;
    1.91 @@ -170,11 +204,12 @@ static int ept_split_super_page(struct p
    1.92          epte->emt = ept_entry->emt;
    1.93          epte->ipat = ept_entry->ipat;
    1.94          epte->sp = (level > 1) ? 1 : 0;
    1.95 +        epte->access = ept_entry->access;
    1.96          epte->sa_p2mt = ept_entry->sa_p2mt;
    1.97          epte->mfn = ept_entry->mfn + i * trunk;
    1.98          epte->rsvd2_snp = ( iommu_enabled && iommu_snoop ) ? 1 : 0;
    1.99  
   1.100 -        ept_p2m_type_to_flags(epte, epte->sa_p2mt);
   1.101 +        ept_p2m_type_to_flags(epte, epte->sa_p2mt, epte->access);
   1.102  
   1.103          if ( (level - 1) == target )
   1.104              continue;
   1.105 @@ -260,7 +295,7 @@ static int ept_next_level(struct p2m_dom
   1.106   */
   1.107  static int
   1.108  ept_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn, 
   1.109 -              unsigned int order, p2m_type_t p2mt)
   1.110 +              unsigned int order, p2m_type_t p2mt, p2m_access_t p2ma)
   1.111  {
   1.112      ept_entry_t *table, *ept_entry = NULL;
   1.113      unsigned long gfn_remainder = gfn;
   1.114 @@ -334,9 +369,11 @@ ept_set_entry(struct p2m_domain *p2m, un
   1.115              /* Construct the new entry, and then write it once */
   1.116              new_entry.emt = epte_get_entry_emt(p2m->domain, gfn, mfn, &ipat,
   1.117                                                  direct_mmio);
   1.118 +
   1.119              new_entry.ipat = ipat;
   1.120              new_entry.sp = order ? 1 : 0;
   1.121              new_entry.sa_p2mt = p2mt;
   1.122 +            new_entry.access = p2ma;
   1.123              new_entry.rsvd2_snp = (iommu_enabled && iommu_snoop);
   1.124  
   1.125              if ( new_entry.mfn == mfn_x(mfn) )
   1.126 @@ -344,7 +381,7 @@ ept_set_entry(struct p2m_domain *p2m, un
   1.127              else
   1.128                  new_entry.mfn = mfn_x(mfn);
   1.129  
   1.130 -            ept_p2m_type_to_flags(&new_entry, p2mt);
   1.131 +            ept_p2m_type_to_flags(&new_entry, p2mt, p2ma);
   1.132          }
   1.133  
   1.134          atomic_write_ept_entry(ept_entry, new_entry);
   1.135 @@ -384,6 +421,7 @@ ept_set_entry(struct p2m_domain *p2m, un
   1.136          new_entry.ipat = ipat;
   1.137          new_entry.sp = i ? 1 : 0;
   1.138          new_entry.sa_p2mt = p2mt;
   1.139 +        new_entry.access = p2ma;
   1.140          new_entry.rsvd2_snp = (iommu_enabled && iommu_snoop);
   1.141  
   1.142          if ( new_entry.mfn == mfn_x(mfn) )
   1.143 @@ -391,7 +429,7 @@ ept_set_entry(struct p2m_domain *p2m, un
   1.144          else /* the caller should take care of the previous page */
   1.145              new_entry.mfn = mfn_x(mfn);
   1.146  
   1.147 -        ept_p2m_type_to_flags(&new_entry, p2mt);
   1.148 +        ept_p2m_type_to_flags(&new_entry, p2mt, p2ma);
   1.149  
   1.150          atomic_write_ept_entry(ept_entry, new_entry);
   1.151      }
   1.152 @@ -447,7 +485,7 @@ out:
   1.153  
   1.154  /* Read ept p2m entries */
   1.155  static mfn_t ept_get_entry(struct p2m_domain *p2m,
   1.156 -                           unsigned long gfn, p2m_type_t *t,
   1.157 +                           unsigned long gfn, p2m_type_t *t, p2m_access_t* a,
   1.158                             p2m_query_t q)
   1.159  {
   1.160      struct domain *d = p2m->domain;
   1.161 @@ -460,6 +498,7 @@ static mfn_t ept_get_entry(struct p2m_do
   1.162      mfn_t mfn = _mfn(INVALID_MFN);
   1.163  
   1.164      *t = p2m_mmio_dm;
   1.165 +    *a = p2m_access_n;
   1.166  
   1.167      /* This pfn is higher than the highest the p2m map currently holds */
   1.168      if ( gfn > p2m->max_mapped_pfn )
   1.169 @@ -519,6 +558,8 @@ static mfn_t ept_get_entry(struct p2m_do
   1.170      if ( ept_entry->sa_p2mt != p2m_invalid )
   1.171      {
   1.172          *t = ept_entry->sa_p2mt;
   1.173 +        *a = ept_entry->access;
   1.174 +
   1.175          mfn = _mfn(ept_entry->mfn);
   1.176          if ( i )
   1.177          {
   1.178 @@ -626,10 +667,10 @@ out:
   1.179  }
   1.180  
   1.181  static mfn_t ept_get_entry_current(struct p2m_domain *p2m,
   1.182 -                                   unsigned long gfn, p2m_type_t *t,
   1.183 +                                   unsigned long gfn, p2m_type_t *t, p2m_access_t *a,
   1.184                                     p2m_query_t q)
   1.185  {
   1.186 -    return ept_get_entry(p2m, gfn, t, q);
   1.187 +    return ept_get_entry(p2m, gfn, t, a, q);
   1.188  }
   1.189  
   1.190  /*
   1.191 @@ -689,7 +730,7 @@ void ept_change_entry_emt_with_range(str
   1.192                      order = level * EPT_TABLE_ORDER;
   1.193                      if ( need_modify_ept_entry(p2m, gfn, mfn, 
   1.194                            e.ipat, e.emt, e.sa_p2mt) )
   1.195 -                        ept_set_entry(p2m, gfn, mfn, order, e.sa_p2mt);
   1.196 +                        ept_set_entry(p2m, gfn, mfn, order, e.sa_p2mt, e.access);
   1.197                      gfn += trunk;
   1.198                      break;
   1.199                  }
   1.200 @@ -699,7 +740,7 @@ void ept_change_entry_emt_with_range(str
   1.201          else /* gfn assigned with 4k */
   1.202          {
   1.203              if ( need_modify_ept_entry(p2m, gfn, mfn, e.ipat, e.emt, e.sa_p2mt) )
   1.204 -                ept_set_entry(p2m, gfn, mfn, order, e.sa_p2mt);
   1.205 +                ept_set_entry(p2m, gfn, mfn, order, e.sa_p2mt, e.access);
   1.206          }
   1.207      }
   1.208      p2m_unlock(p2m);
   1.209 @@ -730,7 +771,7 @@ static void ept_change_entry_type_page(m
   1.210                  continue;
   1.211  
   1.212              e.sa_p2mt = nt;
   1.213 -            ept_p2m_type_to_flags(&e, nt);
   1.214 +            ept_p2m_type_to_flags(&e, nt, e.access);
   1.215              atomic_write_ept_entry(&epte[i], e);
   1.216          }
   1.217      }
     2.1 --- a/xen/arch/x86/mm/p2m.c	Thu Jan 06 19:02:36 2011 +0000
     2.2 +++ b/xen/arch/x86/mm/p2m.c	Fri Jan 07 11:54:36 2011 +0000
     2.3 @@ -285,7 +285,7 @@ p2m_next_level(struct p2m_domain *p2m, m
     2.4   */
     2.5  static
     2.6  int set_p2m_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn, 
     2.7 -                  unsigned int page_order, p2m_type_t p2mt);
     2.8 +                  unsigned int page_order, p2m_type_t p2mt, p2m_access_t p2ma);
     2.9  
    2.10  static int
    2.11  p2m_pod_cache_add(struct p2m_domain *p2m,
    2.12 @@ -693,7 +693,7 @@ p2m_pod_decrease_reservation(struct doma
    2.13      {
    2.14          /* All PoD: Mark the whole region invalid and tell caller
    2.15           * we're done. */
    2.16 -        set_p2m_entry(p2m, gpfn, _mfn(INVALID_MFN), order, p2m_invalid);
    2.17 +        set_p2m_entry(p2m, gpfn, _mfn(INVALID_MFN), order, p2m_invalid, p2m->default_access);
    2.18          p2m->pod.entry_count-=(1<<order); /* Lock: p2m */
    2.19          BUG_ON(p2m->pod.entry_count < 0);
    2.20          ret = 1;
    2.21 @@ -716,7 +716,7 @@ p2m_pod_decrease_reservation(struct doma
    2.22          mfn = gfn_to_mfn_query(p2m, gpfn + i, &t);
    2.23          if ( t == p2m_populate_on_demand )
    2.24          {
    2.25 -            set_p2m_entry(p2m, gpfn + i, _mfn(INVALID_MFN), 0, p2m_invalid);
    2.26 +            set_p2m_entry(p2m, gpfn + i, _mfn(INVALID_MFN), 0, p2m_invalid, p2m->default_access);
    2.27              p2m->pod.entry_count--; /* Lock: p2m */
    2.28              BUG_ON(p2m->pod.entry_count < 0);
    2.29              pod--;
    2.30 @@ -729,7 +729,7 @@ p2m_pod_decrease_reservation(struct doma
    2.31  
    2.32              page = mfn_to_page(mfn);
    2.33  
    2.34 -            set_p2m_entry(p2m, gpfn + i, _mfn(INVALID_MFN), 0, p2m_invalid);
    2.35 +            set_p2m_entry(p2m, gpfn + i, _mfn(INVALID_MFN), 0, p2m_invalid, p2m->default_access);
    2.36              set_gpfn_from_mfn(mfn_x(mfn), INVALID_M2P_ENTRY);
    2.37  
    2.38              p2m_pod_cache_add(p2m, page, 0);
    2.39 @@ -844,7 +844,7 @@ p2m_pod_zero_check_superpage(struct p2m_
    2.40      /* Try to remove the page, restoring old mapping if it fails. */
    2.41      set_p2m_entry(p2m, gfn,
    2.42                    _mfn(POPULATE_ON_DEMAND_MFN), 9,
    2.43 -                  p2m_populate_on_demand);
    2.44 +                  p2m_populate_on_demand, p2m->default_access);
    2.45  
     2.46  /* Make sure none of the MFNs are used elsewhere... for example, mapped
    2.47       * via the grant table interface, or by qemu.  Allow one refcount for
    2.48 @@ -899,7 +899,7 @@ p2m_pod_zero_check_superpage(struct p2m_
    2.49  
    2.50  out_reset:
    2.51      if ( reset )
    2.52 -        set_p2m_entry(p2m, gfn, mfn0, 9, type0);
    2.53 +        set_p2m_entry(p2m, gfn, mfn0, 9, type0, p2m->default_access);
    2.54      
    2.55  out:
    2.56      return ret;
    2.57 @@ -957,7 +957,7 @@ p2m_pod_zero_check(struct p2m_domain *p2
    2.58          /* Try to remove the page, restoring old mapping if it fails. */
    2.59          set_p2m_entry(p2m, gfns[i],
    2.60                        _mfn(POPULATE_ON_DEMAND_MFN), 0,
    2.61 -                      p2m_populate_on_demand);
    2.62 +                      p2m_populate_on_demand, p2m->default_access);
    2.63  
    2.64          /* See if the page was successfully unmapped.  (Allow one refcount
    2.65           * for being allocated to a domain.) */
    2.66 @@ -966,7 +966,7 @@ p2m_pod_zero_check(struct p2m_domain *p2
    2.67              unmap_domain_page(map[i]);
    2.68              map[i] = NULL;
    2.69  
    2.70 -            set_p2m_entry(p2m, gfns[i], mfns[i], 0, types[i]);
    2.71 +            set_p2m_entry(p2m, gfns[i], mfns[i], 0, types[i], p2m->default_access);
    2.72  
    2.73              continue;
    2.74          }
    2.75 @@ -988,7 +988,7 @@ p2m_pod_zero_check(struct p2m_domain *p2
    2.76           * check timing.  */
    2.77          if ( j < PAGE_SIZE/sizeof(*map[i]) )
    2.78          {
    2.79 -            set_p2m_entry(p2m, gfns[i], mfns[i], 0, types[i]);
    2.80 +            set_p2m_entry(p2m, gfns[i], mfns[i], 0, types[i], p2m->default_access);
    2.81          }
    2.82          else
    2.83          {
    2.84 @@ -1121,7 +1121,7 @@ p2m_pod_demand_populate(struct p2m_domai
    2.85           * 512 2MB pages. The remaining 511 calls are unnecessary.
    2.86           */
    2.87          set_p2m_entry(p2m, gfn_aligned, _mfn(POPULATE_ON_DEMAND_MFN), 9,
    2.88 -                      p2m_populate_on_demand);
    2.89 +                      p2m_populate_on_demand, p2m->default_access);
    2.90          audit_p2m(p2m, 1);
    2.91          p2m_unlock(p2m);
    2.92          return 0;
    2.93 @@ -1158,7 +1158,7 @@ p2m_pod_demand_populate(struct p2m_domai
    2.94  
    2.95      gfn_aligned = (gfn >> order) << order;
    2.96  
    2.97 -    set_p2m_entry(p2m, gfn_aligned, mfn, order, p2m_ram_rw);
    2.98 +    set_p2m_entry(p2m, gfn_aligned, mfn, order, p2m_ram_rw, p2m->default_access);
    2.99  
   2.100      for( i = 0; i < (1UL << order); i++ )
   2.101          set_gpfn_from_mfn(mfn_x(mfn) + i, gfn_aligned + i);
   2.102 @@ -1198,7 +1198,7 @@ remap_and_retry:
   2.103      gfn_aligned = (gfn>>order)<<order;
   2.104      for(i=0; i<(1<<order); i++)
   2.105          set_p2m_entry(p2m, gfn_aligned+i, _mfn(POPULATE_ON_DEMAND_MFN), 0,
   2.106 -                      p2m_populate_on_demand);
   2.107 +                      p2m_populate_on_demand, p2m->default_access);
   2.108      if ( tb_init_done )
   2.109      {
   2.110          struct {
   2.111 @@ -1250,7 +1250,7 @@ static int p2m_pod_check_and_populate(st
   2.112  // Returns 0 on error (out of memory)
   2.113  static int
   2.114  p2m_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn, 
   2.115 -              unsigned int page_order, p2m_type_t p2mt)
   2.116 +              unsigned int page_order, p2m_type_t p2mt, p2m_access_t p2ma)
   2.117  {
   2.118      // XXX -- this might be able to be faster iff current->domain == d
   2.119      mfn_t table_mfn = pagetable_get_mfn(p2m_get_pagetable(p2m));
   2.120 @@ -1401,7 +1401,7 @@ out:
   2.121  }
   2.122  
   2.123  static mfn_t
   2.124 -p2m_gfn_to_mfn(struct p2m_domain *p2m, unsigned long gfn, p2m_type_t *t,
   2.125 +p2m_gfn_to_mfn(struct p2m_domain *p2m, unsigned long gfn, p2m_type_t *t, p2m_access_t *a,
   2.126                 p2m_query_t q)
   2.127  {
   2.128      mfn_t mfn;
   2.129 @@ -1416,6 +1416,8 @@ p2m_gfn_to_mfn(struct p2m_domain *p2m, u
   2.130       * XXX Once we start explicitly registering MMIO regions in the p2m 
   2.131       * XXX we will return p2m_invalid for unmapped gfns */
   2.132      *t = p2m_mmio_dm;
   2.133 +    /* Not implemented except with EPT */
   2.134 +    *a = p2m_access_rwx; 
   2.135  
   2.136      mfn = pagetable_get_mfn(p2m_get_pagetable(p2m));
   2.137  
   2.138 @@ -1542,7 +1544,7 @@ pod_retry_l1:
   2.139  
   2.140  /* Read the current domain's p2m table (through the linear mapping). */
   2.141  static mfn_t p2m_gfn_to_mfn_current(struct p2m_domain *p2m,
   2.142 -                                    unsigned long gfn, p2m_type_t *t,
   2.143 +                                    unsigned long gfn, p2m_type_t *t, p2m_access_t *a,
   2.144                                      p2m_query_t q)
   2.145  {
   2.146      mfn_t mfn = _mfn(INVALID_MFN);
   2.147 @@ -1553,6 +1555,9 @@ static mfn_t p2m_gfn_to_mfn_current(stru
   2.148       * XXX Once we start explicitly registering MMIO regions in the p2m 
   2.149       * XXX we will return p2m_invalid for unmapped gfns */
   2.150  
   2.151 +    /* Not currently implemented except for EPT */
   2.152 +    *a = p2m_access_rwx;
   2.153 +
   2.154      if ( gfn <= p2m->max_mapped_pfn )
   2.155      {
   2.156          l1_pgentry_t l1e = l1e_empty(), *p2m_entry;
   2.157 @@ -1726,6 +1731,8 @@ static void p2m_initialise(struct domain
   2.158      INIT_PAGE_LIST_HEAD(&p2m->pod.single);
   2.159  
   2.160      p2m->domain = d;
   2.161 +    p2m->default_access = p2m_access_rwx;
   2.162 +
   2.163      p2m->set_entry = p2m_set_entry;
   2.164      p2m->get_entry = p2m_gfn_to_mfn;
   2.165      p2m->get_entry_current = p2m_gfn_to_mfn_current;
   2.166 @@ -1745,7 +1752,7 @@ int p2m_init(struct domain *d)
   2.167      if ( p2m == NULL )
   2.168          return -ENOMEM;
   2.169      p2m_initialise(d, p2m);
   2.170 -
   2.171 +    
   2.172      return 0;
   2.173  }
   2.174  
   2.175 @@ -1759,7 +1766,7 @@ void p2m_change_entry_type_global(struct
   2.176  
   2.177  static
   2.178  int set_p2m_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn, 
   2.179 -                    unsigned int page_order, p2m_type_t p2mt)
   2.180 +                  unsigned int page_order, p2m_type_t p2mt, p2m_access_t p2ma)
   2.181  {
   2.182      struct domain *d = p2m->domain;
   2.183      unsigned long todo = 1ul << page_order;
   2.184 @@ -1776,7 +1783,7 @@ int set_p2m_entry(struct p2m_domain *p2m
   2.185          else
   2.186              order = 0;
   2.187  
   2.188 -        if ( !p2m->set_entry(p2m, gfn, mfn, order, p2mt) )
   2.189 +        if ( !p2m->set_entry(p2m, gfn, mfn, order, p2mt, p2ma) )
   2.190              rc = 0;
   2.191          gfn += 1ul << order;
   2.192          if ( mfn_x(mfn) != INVALID_MFN )
   2.193 @@ -1837,7 +1844,7 @@ int p2m_alloc_table(struct p2m_domain *p
   2.194  
   2.195      /* Initialise physmap tables for slot zero. Other code assumes this. */
   2.196      if ( !set_p2m_entry(p2m, 0, _mfn(INVALID_MFN), 0,
   2.197 -                        p2m_invalid) )
   2.198 +                        p2m_invalid, p2m->default_access) )
   2.199          goto error;
   2.200  
   2.201      /* Copy all existing mappings from the page list and m2p */
   2.202 @@ -1856,7 +1863,7 @@ int p2m_alloc_table(struct p2m_domain *p
   2.203              (gfn != 0x55555555L)
   2.204  #endif
   2.205               && gfn != INVALID_M2P_ENTRY
   2.206 -            && !set_p2m_entry(p2m, gfn, mfn, 0, p2m_ram_rw) )
   2.207 +            && !set_p2m_entry(p2m, gfn, mfn, 0, p2m_ram_rw, p2m->default_access) )
   2.208              goto error_unlock;
   2.209      }
   2.210      spin_unlock(&p2m->domain->page_alloc_lock);
   2.211 @@ -1883,6 +1890,7 @@ void p2m_teardown(struct p2m_domain *p2m
   2.212  #ifdef __x86_64__
   2.213      unsigned long gfn;
   2.214      p2m_type_t t;
   2.215 +    p2m_access_t a;
   2.216      mfn_t mfn;
   2.217  #endif
   2.218  
   2.219 @@ -1891,7 +1899,7 @@ void p2m_teardown(struct p2m_domain *p2m
   2.220  #ifdef __x86_64__
   2.221      for ( gfn=0; gfn < p2m->max_mapped_pfn; gfn++ )
   2.222      {
   2.223 -        mfn = p2m->get_entry(p2m, gfn, &t, p2m_query);
   2.224 +        mfn = p2m->get_entry(p2m, gfn, &t, &a, p2m_query);
   2.225          if ( mfn_valid(mfn) && (t == p2m_ram_shared) )
   2.226              BUG_ON(mem_sharing_unshare_page(p2m, gfn, MEM_SHARING_DESTROY_GFN));
   2.227      }
   2.228 @@ -2188,6 +2196,7 @@ p2m_remove_page(struct p2m_domain *p2m, 
   2.229      unsigned long i;
   2.230      mfn_t mfn_return;
   2.231      p2m_type_t t;
   2.232 +    p2m_access_t a;
   2.233  
   2.234      if ( !paging_mode_translate(p2m->domain) )
   2.235      {
   2.236 @@ -2201,12 +2210,12 @@ p2m_remove_page(struct p2m_domain *p2m, 
   2.237  
   2.238      for ( i = 0; i < (1UL << page_order); i++ )
   2.239      {
   2.240 -        mfn_return = p2m->get_entry(p2m, gfn + i, &t, p2m_query);
   2.241 +        mfn_return = p2m->get_entry(p2m, gfn + i, &t, &a, p2m_query);
   2.242          if ( !p2m_is_grant(t) )
   2.243              set_gpfn_from_mfn(mfn+i, INVALID_M2P_ENTRY);
   2.244          ASSERT( !p2m_is_valid(t) || mfn + i == mfn_x(mfn_return) );
   2.245      }
   2.246 -    set_p2m_entry(p2m, gfn, _mfn(INVALID_MFN), page_order, p2m_invalid);
   2.247 +    set_p2m_entry(p2m, gfn, _mfn(INVALID_MFN), page_order, p2m_invalid, p2m->default_access);
   2.248  }
   2.249  
   2.250  void
   2.251 @@ -2286,7 +2295,7 @@ guest_physmap_mark_populate_on_demand(st
   2.252  
   2.253      /* Now, actually do the two-way mapping */
   2.254      if ( !set_p2m_entry(p2m, gfn, _mfn(POPULATE_ON_DEMAND_MFN), order,
   2.255 -                        p2m_populate_on_demand) )
   2.256 +                        p2m_populate_on_demand, p2m->default_access) )
   2.257          rc = -EINVAL;
   2.258      else
   2.259      {
   2.260 @@ -2399,7 +2408,7 @@ guest_physmap_add_entry(struct p2m_domai
   2.261      /* Now, actually do the two-way mapping */
   2.262      if ( mfn_valid(_mfn(mfn)) ) 
   2.263      {
   2.264 -        if ( !set_p2m_entry(p2m, gfn, _mfn(mfn), page_order, t) )
   2.265 +        if ( !set_p2m_entry(p2m, gfn, _mfn(mfn), page_order, t, p2m->default_access) )
   2.266              rc = -EINVAL;
   2.267          if ( !p2m_is_grant(t) )
   2.268          {
   2.269 @@ -2412,7 +2421,7 @@ guest_physmap_add_entry(struct p2m_domai
   2.270          gdprintk(XENLOG_WARNING, "Adding bad mfn to p2m map (%#lx -> %#lx)\n",
   2.271                   gfn, mfn);
   2.272          if ( !set_p2m_entry(p2m, gfn, _mfn(INVALID_MFN), page_order, 
   2.273 -                            p2m_invalid) )
   2.274 +                            p2m_invalid, p2m->default_access) )
   2.275              rc = -EINVAL;
   2.276          else
   2.277          {
   2.278 @@ -2565,7 +2574,7 @@ void p2m_change_type_global(struct p2m_d
   2.279  }
   2.280  
   2.281  /* Modify the p2m type of a single gfn from ot to nt, returning the 
   2.282 - * entry's previous type */
   2.283 + * entry's previous type.  Resets the access permissions. */
   2.284  p2m_type_t p2m_change_type(struct p2m_domain *p2m, unsigned long gfn, 
   2.285                             p2m_type_t ot, p2m_type_t nt)
   2.286  {
   2.287 @@ -2578,7 +2587,7 @@ p2m_type_t p2m_change_type(struct p2m_do
   2.288  
   2.289      mfn = gfn_to_mfn_query(p2m, gfn, &pt);
   2.290      if ( pt == ot )
   2.291 -        set_p2m_entry(p2m, gfn, mfn, 0, nt);
   2.292 +        set_p2m_entry(p2m, gfn, mfn, 0, nt, p2m->default_access);
   2.293  
   2.294      p2m_unlock(p2m);
   2.295  
   2.296 @@ -2609,7 +2618,7 @@ set_mmio_p2m_entry(struct p2m_domain *p2
   2.297  
   2.298      P2M_DEBUG("set mmio %lx %lx\n", gfn, mfn_x(mfn));
   2.299      p2m_lock(p2m);
   2.300 -    rc = set_p2m_entry(p2m, gfn, mfn, 0, p2m_mmio_direct);
   2.301 +    rc = set_p2m_entry(p2m, gfn, mfn, 0, p2m_mmio_direct, p2m->default_access);
   2.302      audit_p2m(p2m, 1);
   2.303      p2m_unlock(p2m);
   2.304      if ( 0 == rc )
   2.305 @@ -2639,7 +2648,7 @@ clear_mmio_p2m_entry(struct p2m_domain *
   2.306          return 0;
   2.307      }
   2.308      p2m_lock(p2m);
   2.309 -    rc = set_p2m_entry(p2m, gfn, _mfn(INVALID_MFN), 0, 0);
   2.310 +    rc = set_p2m_entry(p2m, gfn, _mfn(INVALID_MFN), 0, 0, p2m->default_access);
   2.311      audit_p2m(p2m, 1);
   2.312      p2m_unlock(p2m);
   2.313  
   2.314 @@ -2668,7 +2677,7 @@ set_shared_p2m_entry(struct p2m_domain *
   2.315      P2M_DEBUG("set shared %lx %lx\n", gfn, mfn_x(mfn));
   2.316      if ( need_lock ) 
   2.317          p2m_lock(p2m);
   2.318 -    rc = set_p2m_entry(p2m, gfn, mfn, 0, p2m_ram_shared);
   2.319 +    rc = set_p2m_entry(p2m, gfn, mfn, 0, p2m_ram_shared, p2m->default_access);
   2.320      if ( need_lock ) 
   2.321          p2m_unlock(p2m);
   2.322      if ( 0 == rc )
   2.323 @@ -2713,7 +2722,7 @@ int p2m_mem_paging_nominate(struct p2m_d
   2.324  
   2.325      /* Fix p2m entry */
   2.326      p2m_lock(p2m);
   2.327 -    set_p2m_entry(p2m, gfn, mfn, 0, p2m_ram_paging_out);
   2.328 +    set_p2m_entry(p2m, gfn, mfn, 0, p2m_ram_paging_out, p2m->default_access);
   2.329      audit_p2m(p2m, 1);
   2.330      p2m_unlock(p2m);
   2.331  
   2.332 @@ -2750,7 +2759,7 @@ int p2m_mem_paging_evict(struct p2m_doma
   2.333  
   2.334      /* Remove mapping from p2m table */
   2.335      p2m_lock(p2m);
   2.336 -    set_p2m_entry(p2m, gfn, _mfn(PAGING_MFN), 0, p2m_ram_paged);
   2.337 +    set_p2m_entry(p2m, gfn, _mfn(PAGING_MFN), 0, p2m_ram_paged, p2m->default_access);
   2.338      audit_p2m(p2m, 1);
   2.339      p2m_unlock(p2m);
   2.340  
   2.341 @@ -2780,7 +2789,7 @@ void p2m_mem_paging_populate(struct p2m_
   2.342      if ( p2mt == p2m_ram_paged )
   2.343      {
   2.344          p2m_lock(p2m);
   2.345 -        set_p2m_entry(p2m, gfn, _mfn(PAGING_MFN), 0, p2m_ram_paging_in_start);
   2.346 +        set_p2m_entry(p2m, gfn, _mfn(PAGING_MFN), 0, p2m_ram_paging_in_start, p2m->default_access);
   2.347          audit_p2m(p2m, 1);
   2.348          p2m_unlock(p2m);
   2.349      }
   2.350 @@ -2816,7 +2825,7 @@ int p2m_mem_paging_prep(struct p2m_domai
   2.351  
   2.352      /* Fix p2m mapping */
   2.353      p2m_lock(p2m);
   2.354 -    set_p2m_entry(p2m, gfn, page_to_mfn(page), 0, p2m_ram_paging_in);
   2.355 +    set_p2m_entry(p2m, gfn, page_to_mfn(page), 0, p2m_ram_paging_in, p2m->default_access);
   2.356      audit_p2m(p2m, 1);
   2.357      p2m_unlock(p2m);
   2.358  
   2.359 @@ -2836,7 +2845,7 @@ void p2m_mem_paging_resume(struct p2m_do
   2.360      /* Fix p2m entry */
   2.361      mfn = gfn_to_mfn(p2m, rsp.gfn, &p2mt);
   2.362      p2m_lock(p2m);
   2.363 -    set_p2m_entry(p2m, rsp.gfn, mfn, 0, p2m_ram_rw);
   2.364 +    set_p2m_entry(p2m, rsp.gfn, mfn, 0, p2m_ram_rw, p2m->default_access);
   2.365      audit_p2m(p2m, 1);
   2.366      p2m_unlock(p2m);
   2.367  
     3.1 --- a/xen/include/asm-x86/hvm/vmx/vmx.h	Thu Jan 06 19:02:36 2011 +0000
     3.2 +++ b/xen/include/asm-x86/hvm/vmx/vmx.h	Fri Jan 07 11:54:36 2011 +0000
     3.3 @@ -42,7 +42,8 @@ typedef union {
     3.4          rsvd2_snp   :   1,  /* bit 11 - Used for VT-d snoop control
     3.5                                 in shared EPT/VT-d usage */
     3.6          mfn         :   40, /* bits 51:12 - Machine physical frame number */
     3.7 -        sa_p2mt     :   10, /* bits 61:52 - Software available 2 */
     3.8 +        sa_p2mt     :   6,  /* bits 57:52 - Software available 2 */
     3.9 +        access      :   4,  /* bits 61:58 - p2m_access_t */
    3.10          rsvd3_tm    :   1,  /* bit 62 - Used for VT-d transient-mapping
    3.11                                 hint in shared EPT/VT-d usage */
    3.12          avail3      :   1;  /* bit 63 - Software available 3 */
     4.1 --- a/xen/include/asm-x86/p2m.h	Thu Jan 06 19:02:36 2011 +0000
     4.2 +++ b/xen/include/asm-x86/p2m.h	Fri Jan 07 11:54:36 2011 +0000
     4.3 @@ -88,6 +88,31 @@ typedef enum {
     4.4      p2m_ram_broken  =14,          /* Broken page, access cause domain crash */
     4.5  } p2m_type_t;
     4.6  
     4.7 +/*
     4.8 + * Additional access types, which are used to further restrict
     4.9 + * the permissions given by the p2m_type_t memory type.  Violations
    4.10 + * caused by p2m_access_t restrictions are sent to the mem_event
    4.11 + * interface.
    4.12 + *
    4.13 + * The access permissions are soft state: when any ambiguous change of page
    4.14 + * type or use occurs, or when pages are flushed, swapped, or at any other
    4.15 + * convenient time, the access permissions can get reset to the p2m_domain
    4.16 + * default.
    4.17 + */
    4.18 +typedef enum {
    4.19 +    p2m_access_n     = 0, /* No access permissions allowed */
    4.20 +    p2m_access_r     = 1,
    4.21 +    p2m_access_w     = 2, 
    4.22 +    p2m_access_rw    = 3,
    4.23 +    p2m_access_x     = 4, 
    4.24 +    p2m_access_rx    = 5,
    4.25 +    p2m_access_wx    = 6, 
    4.26 +    p2m_access_rwx   = 7,
    4.27 +    p2m_access_rx2rw = 8, /* Special: page goes from RX to RW on write */
    4.28 +
    4.29 +    /* NOTE: Assumed to be only 4 bits right now */
    4.30 +} p2m_access_t;
    4.31 +
    4.32  typedef enum {
    4.33      p2m_query = 0,              /* Do not populate PoD entries        */
    4.34      p2m_alloc = 1,              /* Automatically populate PoD entries */
    4.35 @@ -182,18 +207,30 @@ struct p2m_domain {
    4.36      int                (*set_entry   )(struct p2m_domain *p2m,
    4.37                                         unsigned long gfn,
    4.38                                         mfn_t mfn, unsigned int page_order,
    4.39 -                                       p2m_type_t p2mt);
    4.40 +                                       p2m_type_t p2mt,
    4.41 +                                       p2m_access_t p2ma);
    4.42      mfn_t              (*get_entry   )(struct p2m_domain *p2m,
    4.43                                         unsigned long gfn,
    4.44                                         p2m_type_t *p2mt,
    4.45 +                                       p2m_access_t *p2ma,
    4.46                                         p2m_query_t q);
    4.47      mfn_t              (*get_entry_current)(struct p2m_domain *p2m,
    4.48                                              unsigned long gfn,
    4.49                                              p2m_type_t *p2mt,
    4.50 +                                            p2m_access_t *p2ma,
    4.51                                              p2m_query_t q);
    4.52      void               (*change_entry_type_global)(struct p2m_domain *p2m,
    4.53                                                     p2m_type_t ot,
    4.54                                                     p2m_type_t nt);
    4.55 +    
    4.56 +    /* Default P2M access type for each page in the domain: new pages,
    4.57 +     * swapped-in pages, cleared pages, and pages that are ambiguously
    4.58 +     * retyped get this access type.  See the definition of p2m_access_t. */
    4.59 +    p2m_access_t default_access;
    4.60 +
    4.61 +    /* If true, and an access fault comes in and there is no mem_event listener,
    4.62 +     * pause the domain.  Otherwise, remove the access restrictions. */
    4.63 +    bool_t       access_required;
    4.64  
    4.65      /* Highest guest frame that's ever been mapped in the p2m */
    4.66      unsigned long max_mapped_pfn;
    4.67 @@ -284,9 +321,10 @@ static inline p2m_type_t p2m_flags_to_ty
    4.68  /* Read the current domain's p2m table.  Do not populate PoD pages. */
    4.69  static inline mfn_t gfn_to_mfn_type_current(struct p2m_domain *p2m,
    4.70                                              unsigned long gfn, p2m_type_t *t,
    4.71 +                                            p2m_access_t *a,
    4.72                                              p2m_query_t q)
    4.73  {
    4.74 -    return p2m->get_entry_current(p2m, gfn, t, q);
    4.75 +    return p2m->get_entry_current(p2m, gfn, t, a, q);
    4.76  }
    4.77  
    4.78  /* Read P2M table, mapping pages as we go.
    4.79 @@ -295,7 +333,8 @@ static inline mfn_t
    4.80  gfn_to_mfn_type_p2m(struct p2m_domain *p2m, unsigned long gfn,
    4.81                                p2m_type_t *t, p2m_query_t q)
    4.82  {
    4.83 -    return p2m->get_entry(p2m, gfn, t, q);
    4.84 +    p2m_access_t a = 0;
    4.85 +    return p2m->get_entry(p2m, gfn, t, &a, q);
    4.86  }
    4.87  
    4.88  
    4.89 @@ -305,6 +344,7 @@ static inline mfn_t _gfn_to_mfn_type(str
    4.90                                       p2m_query_t q)
    4.91  {
    4.92      mfn_t mfn;
    4.93 +    p2m_access_t a;
    4.94  
    4.95      if ( !p2m || !paging_mode_translate(p2m->domain) )
    4.96      {
    4.97 @@ -314,7 +354,7 @@ static inline mfn_t _gfn_to_mfn_type(str
    4.98          mfn = _mfn(gfn);
    4.99      }
   4.100      else if ( likely(current->domain == p2m->domain) )
   4.101 -        mfn = gfn_to_mfn_type_current(p2m, gfn, t, q);
   4.102 +        mfn = gfn_to_mfn_type_current(p2m, gfn, t, &a, q);
   4.103      else
   4.104          mfn = gfn_to_mfn_type_p2m(p2m, gfn, t, q);
   4.105