xen-vtx-unstable

diff xen/include/asm-x86/shadow_64.h @ 6586:291e816acbf4

merge?
author cl349@firebug.cl.cam.ac.uk
date Fri Sep 02 14:17:08 2005 +0000 (2005-09-02)
parents dd668f7527cb 8b87d43412bf
children 3feb7fa331ed 4d899a738d59 e7c7196fa329
line diff
--- a/xen/include/asm-x86/shadow_64.h	Fri Sep 02 14:15:49 2005 +0000
+++ b/xen/include/asm-x86/shadow_64.h	Fri Sep 02 14:17:08 2005 +0000
@@ -27,6 +27,7 @@
 #ifndef _XEN_SHADOW_64_H
 #define _XEN_SHADOW_64_H
 #include <asm/shadow.h>
+#include <asm/shadow_ops.h>
 
 #define READ_FAULT  0
 #define WRITE_FAULT 1
@@ -42,14 +43,14 @@
 #define ESH_LOG(_f, _a...) ((void)0)
 #endif
 
-#define L4      4UL
-#define L3      3UL
-#define L2      2UL
-#define L1      1UL
+#define PAGING_L4      4UL
+#define PAGING_L3      3UL
+#define PAGING_L2      2UL
+#define PAGING_L1      1UL
 #define L_MASK  0xff
 
-#define ROOT_LEVEL_64   L4
-#define ROOT_LEVEL_32   L2
+#define ROOT_LEVEL_64   PAGING_L4
+#define ROOT_LEVEL_32   PAGING_L2
 
 #define SHADOW_ENTRY    (2UL << 16)
 #define GUEST_ENTRY     (1UL << 16)
@@ -59,6 +60,10 @@
 
 #define PAGETABLE_ENTRIES    (1<<PAGETABLE_ORDER)
 
+/* For a 32-bit VMX guest to allocate shadow L1 & L2 tables. */
+#define SL1_ORDER   1
+#define SL2_ORDER   2
+
 typedef struct { intpte_t lo; } pgentry_64_t;
 #define shadow_level_to_type(l)    (l << 29)
 #define shadow_type_to_level(t)    (t >> 29)
@@ -76,6 +81,10 @@ typedef struct { intpte_t lo; } pgentry_
 #define entry_remove_flags(x, flags) ((x).lo &= ~put_pte_flags(flags))
 #define entry_has_changed(x,y,flags) \
         ( !!(((x).lo ^ (y).lo) & ((PADDR_MASK&PAGE_MASK)|put_pte_flags(flags))) )
+
+#define PAE_SHADOW_SELF_ENTRY   259
+#define PDP_ENTRIES   4
+
 static inline int  table_offset_64(unsigned long va, int level)
 {
     switch(level) {
@@ -86,8 +95,13 @@ static inline int  table_offset_64(unsig
         case 3:
             return  (((va) >> L3_PAGETABLE_SHIFT) & (L3_PAGETABLE_ENTRIES - 1));
 #if CONFIG_PAGING_LEVELS >= 4
+#ifndef GUEST_PGENTRY_32
         case 4:
             return  (((va) >> L4_PAGETABLE_SHIFT) & (L4_PAGETABLE_ENTRIES - 1));
+#else
+        case 4:
+            return PAE_SHADOW_SELF_ENTRY;
+#endif
 #endif
         default:
             //printk("<table_offset_64> level %d is too big\n", level);
@@ -165,30 +179,30 @@ static inline pgentry_64_t *__rw_entry(
     return le_e;
 }
 #define __shadow_set_l4e(v, va, value) \
-  __rw_entry(v, va, value, SHADOW_ENTRY | SET_ENTRY | L4)
+  __rw_entry(v, va, value, SHADOW_ENTRY | SET_ENTRY | PAGING_L4)
 #define __shadow_get_l4e(v, va, sl4e) \
-  __rw_entry(v, va, sl4e, SHADOW_ENTRY | GET_ENTRY | L4)
+  __rw_entry(v, va, sl4e, SHADOW_ENTRY | GET_ENTRY | PAGING_L4)
 #define __shadow_set_l3e(v, va, value) \
-  __rw_entry(v, va, value, SHADOW_ENTRY | SET_ENTRY | L3)
+  __rw_entry(v, va, value, SHADOW_ENTRY | SET_ENTRY | PAGING_L3)
 #define __shadow_get_l3e(v, va, sl3e) \
-  __rw_entry(v, va, sl3e, SHADOW_ENTRY | GET_ENTRY | L3)
+  __rw_entry(v, va, sl3e, SHADOW_ENTRY | GET_ENTRY | PAGING_L3)
 #define __shadow_set_l2e(v, va, value) \
-  __rw_entry(v, va, value, SHADOW_ENTRY | SET_ENTRY | L2)
+  __rw_entry(v, va, value, SHADOW_ENTRY | SET_ENTRY | PAGING_L2)
 #define __shadow_get_l2e(v, va, sl2e) \
-  __rw_entry(v, va, sl2e, SHADOW_ENTRY | GET_ENTRY | L2)
+  __rw_entry(v, va, sl2e, SHADOW_ENTRY | GET_ENTRY | PAGING_L2)
 #define __shadow_set_l1e(v, va, value) \
-  __rw_entry(v, va, value, SHADOW_ENTRY | SET_ENTRY | L1)
+  __rw_entry(v, va, value, SHADOW_ENTRY | SET_ENTRY | PAGING_L1)
 #define __shadow_get_l1e(v, va, sl1e) \
-  __rw_entry(v, va, sl1e, SHADOW_ENTRY | GET_ENTRY | L1)
+  __rw_entry(v, va, sl1e, SHADOW_ENTRY | GET_ENTRY | PAGING_L1)
 
 #define __guest_set_l4e(v, va, value) \
-  __rw_entry(v, va, value, GUEST_ENTRY | SET_ENTRY | L4)
+  __rw_entry(v, va, value, GUEST_ENTRY | SET_ENTRY | PAGING_L4)
 #define __guest_get_l4e(v, va, gl4e) \
-  __rw_entry(v, va, gl4e, GUEST_ENTRY | GET_ENTRY | L4)
+  __rw_entry(v, va, gl4e, GUEST_ENTRY | GET_ENTRY | PAGING_L4)
 #define __guest_set_l3e(v, va, value) \
-  __rw_entry(v, va, value, GUEST_ENTRY | SET_ENTRY | L3)
+  __rw_entry(v, va, value, GUEST_ENTRY | SET_ENTRY | PAGING_L3)
 #define __guest_get_l3e(v, va, gl3e) \
-  __rw_entry(v, va, gl3e, GUEST_ENTRY | GET_ENTRY | L3)
+  __rw_entry(v, va, gl3e, GUEST_ENTRY | GET_ENTRY | PAGING_L3)
 
 static inline void *  __guest_set_l2e(
     struct vcpu *v, u64 va, void *value, int size)
@@ -205,7 +219,7 @@ static inline void *  __guest_set_l2e(
                 return &l2va[l2_table_offset_32(va)];
             }
         case 8:
-            return __rw_entry(v, va, value, GUEST_ENTRY | SET_ENTRY | L2);
+            return __rw_entry(v, va, value, GUEST_ENTRY | SET_ENTRY | PAGING_L2);
         default:
             BUG();
             return NULL;
@@ -230,7 +244,7 @@ static inline void * __guest_get_l2e(
                 return &l2va[l2_table_offset_32(va)];
             }
         case 8:
-            return __rw_entry(v, va, gl2e, GUEST_ENTRY | GET_ENTRY | L2);
+            return __rw_entry(v, va, gl2e, GUEST_ENTRY | GET_ENTRY | PAGING_L2);
         default:
             BUG();
             return NULL;
@@ -269,7 +283,7 @@ static inline void *  __guest_set_l1e(
             }
 
         case 8:
-            return __rw_entry(v, va, value, GUEST_ENTRY | SET_ENTRY | L1);
+            return __rw_entry(v, va, value, GUEST_ENTRY | SET_ENTRY | PAGING_L1);
         default:
             BUG();
             return NULL;
@@ -310,7 +324,7 @@ static inline void *  __guest_get_l1e(
             }
         case 8:
             // 64-bit guest
-            return __rw_entry(v, va, gl1e, GUEST_ENTRY | GET_ENTRY | L1);
+            return __rw_entry(v, va, gl1e, GUEST_ENTRY | GET_ENTRY | PAGING_L1);
         default:
             BUG();
             return NULL;
@@ -334,7 +348,7 @@ static inline void entry_general(
     sle = entry_empty();
     if ( (entry_get_flags(gle) & _PAGE_PRESENT) && (smfn != 0) )
     {
-        if ((entry_get_flags(gle) & _PAGE_PSE) && level == L2) {
+        if ((entry_get_flags(gle) & _PAGE_PSE) && level == PAGING_L2) {
             sle = entry_from_pfn(smfn, entry_get_flags(gle));
             entry_remove_flags(sle, _PAGE_PSE);
 
@@ -376,7 +390,7 @@ static inline void entry_propagate_from_
     unsigned long smfn = 0;
 
     if ( entry_get_flags(gle) & _PAGE_PRESENT ) {
-        if ((entry_get_flags(gle) & _PAGE_PSE) && level == L2) {
+        if ((entry_get_flags(gle) & _PAGE_PSE) && level == PAGING_L2) {
             smfn =  __shadow_status(d, entry_get_value(gle) >> PAGE_SHIFT, PGT_fl1_shadow);
         } else {
             smfn =  __shadow_status(d, entry_get_pfn(gle),
@@ -421,88 +435,6 @@ validate_entry_change(
     return 1;
 }
 
-/*
- * Check P, R/W, U/S bits in the guest page table.
- * If the fault belongs to guest return 1,
- * else return 0.
- */
-static inline int guest_page_fault(struct vcpu *v,
-  unsigned long va, unsigned int error_code, pgentry_64_t *gpl2e, pgentry_64_t *gpl1e)
-{
-    struct domain *d = v->domain;
-    pgentry_64_t gle, *lva;
-    unsigned long mfn;
-    int i;
-
-    __rw_entry(v, va, &gle, GUEST_ENTRY | GET_ENTRY | L4);
-    if (unlikely(!(entry_get_flags(gle) & _PAGE_PRESENT)))
-        return 1;
-
-    if (error_code & ERROR_W) {
-        if (unlikely(!(entry_get_flags(gle) & _PAGE_RW)))
-            return 1;
-    }
-    if (error_code & ERROR_U) {
-        if (unlikely(!(entry_get_flags(gle) & _PAGE_USER)))
-            return 1;
-    }
-    for (i = L3; i >= L1; i--) {
-        /*
-         * If it's not external mode, then mfn should be machine physical.
-         */
-        mfn = __gpfn_to_mfn(d, (entry_get_paddr(gle) >> PAGE_SHIFT));
-        if (mfn == -1)
-            return 1;
-
-        lva = (pgentry_64_t *) phys_to_virt(
-            mfn << PAGE_SHIFT);
-        gle = lva[table_offset_64(va, i)];
-
-        if (unlikely(!(entry_get_flags(gle) & _PAGE_PRESENT)))
-            return 1;
-
-        if (error_code & ERROR_W) {
-            if (unlikely(!(entry_get_flags(gle) & _PAGE_RW)))
-                return 1;
-        }
-        if (error_code & ERROR_U) {
-            if (unlikely(!(entry_get_flags(gle) & _PAGE_USER)))
-                return 1;
-        }
-
-        if (i == L2) {
-            if (gpl2e)
-                *gpl2e = gle;
-
-            if (likely(entry_get_flags(gle) & _PAGE_PSE))
-                return 0;
-
-        }
-
-        if (i == L1)
-            if (gpl1e)
-                *gpl1e = gle;
-    }
-    return 0;
-}
-
-static inline unsigned long gva_to_gpa(unsigned long gva)
-{
-    struct vcpu *v = current;
-    pgentry_64_t gl1e = {0};
-    pgentry_64_t gl2e = {0};
-    unsigned long gpa;
-
-    if (guest_page_fault(v, gva, 0, &gl2e, &gl1e))
-        return 0;
-    if (entry_get_flags(gl2e) & _PAGE_PSE)
-        gpa = entry_get_paddr(gl2e) + (gva & ((1 << L2_PAGETABLE_SHIFT) - 1));
-    else
-        gpa = entry_get_paddr(gl1e) + (gva & ~PAGE_MASK);
-
-    return gpa;
-
-}
 #endif
 
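The sketch below is a minimal, self-contained illustration of what this change does to table_offset_64(): page-table indexing is driven by the renamed PAGING_L1..PAGING_L4 constants, and a build with GUEST_PGENTRY_32 gets the fixed slot PAE_SHADOW_SELF_ENTRY at level 4 instead of an index decoded from the virtual address. It compiles stand-alone rather than against Xen's headers, so the paging shifts (12/21/30/39), the 512-entry table size, and the guest_pgentry_32 run-time parameter (standing in for the compile-time #ifdef) are assumptions made for illustration only.

#include <stdio.h>

/* Local assumptions for illustration (standard x86-64 4-level paging:
 * 512 entries per level, shifts 12/21/30/39). Only the PAGING_L* values
 * and PAE_SHADOW_SELF_ENTRY come from the patch above. */
#define PAGING_L4      4UL
#define PAGING_L3      3UL
#define PAGING_L2      2UL
#define PAGING_L1      1UL

#define L1_PAGETABLE_SHIFT   12
#define L2_PAGETABLE_SHIFT   21
#define L3_PAGETABLE_SHIFT   30
#define L4_PAGETABLE_SHIFT   39
#define PAGETABLE_ENTRIES    512

#define PAE_SHADOW_SELF_ENTRY   259

/* Mirrors table_offset_64(): the index into the page table at 'level'
 * for virtual address 'va'. 'guest_pgentry_32' stands in for the
 * GUEST_PGENTRY_32 compile-time switch: when set, the level-4 lookup
 * returns the fixed PAE self-mapping slot instead of decoding the VA. */
static int table_offset_64(unsigned long va, int level, int guest_pgentry_32)
{
    switch (level) {
    case PAGING_L1:
        return (va >> L1_PAGETABLE_SHIFT) & (PAGETABLE_ENTRIES - 1);
    case PAGING_L2:
        return (va >> L2_PAGETABLE_SHIFT) & (PAGETABLE_ENTRIES - 1);
    case PAGING_L3:
        return (va >> L3_PAGETABLE_SHIFT) & (PAGETABLE_ENTRIES - 1);
    case PAGING_L4:
        if (guest_pgentry_32)
            return PAE_SHADOW_SELF_ENTRY;
        return (va >> L4_PAGETABLE_SHIFT) & (PAGETABLE_ENTRIES - 1);
    default:
        return -1;
    }
}

int main(void)
{
    unsigned long va = 0xffff880000123456UL;
    int level;

    for (level = PAGING_L4; level >= (int)PAGING_L1; level--)
        printf("level %d -> offset %d\n", level,
               table_offset_64(va, level, 0 /* 64-bit guest */));

    printf("level 4 (GUEST_PGENTRY_32) -> offset %d\n",
           table_offset_64(va, PAGING_L4, 1));
    return 0;
}

Run stand-alone, the program prints the four per-level indices for the sample address; with guest_pgentry_32 set, the level-4 result is always 259, the self-mapping slot this patch introduces for 32-bit VMX guests.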