xen-vtx-unstable
annotate xen/include/asm-x86/shadow_64.h @ 5726:a29b4174d39c
Remaining files for shadow 64 mode checkin.
Signed-off-by: Keir Fraser <keir@xensource.com>
author:   kaf24@firebug.cl.cam.ac.uk
date:     Mon Jul 11 10:23:19 2005 +0000 (2005-07-11)
children: b3cfebba3b30

/******************************************************************************
 * include/asm-x86/shadow_64.h
 *
 * Copyright (c) 2005 Michael A Fetterman
 * Based on an earlier implementation by Ian Pratt et al
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
/*
 * Jun Nakajima <jun.nakajima@intel.com>
 * Chengyuan Li <chengyuan.li@intel.com>
 *
 * Extended to support 64-bit guests.
 */
#ifndef _XEN_SHADOW_64_H
#define _XEN_SHADOW_64_H
#include <asm/shadow.h>

#define READ_FAULT  0
#define WRITE_FAULT 1

#define ERROR_W 2
#define ERROR_U 4
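
/* Added note: ERROR_W and ERROR_U mirror bits 1 (write access) and 2
 * (user-mode access) of the hardware x86 page-fault error code. */
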
#define X86_64_SHADOW_DEBUG 0

#if X86_64_SHADOW_DEBUG
#define ESH_LOG(_f, _a...)              \
        printk(_f, ##_a)
#else
#define ESH_LOG(_f, _a...) ((void)0)
#endif

#define L4      4UL
#define L3      3UL
#define L2      2UL
#define L1      1UL
#define L_MASK  0xff

#define ROOT_LEVEL_64   L4
#define ROOT_LEVEL_32   L2

#define SHADOW_ENTRY    (2UL << 16)
#define GUEST_ENTRY     (1UL << 16)

#define GET_ENTRY   (2UL << 8)
#define SET_ENTRY   (1UL << 8)
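
/*
 * Illustrative sketch (added): a flag word for __entry()/__rw_entry()
 * packs three fields -- table selector (bits 16+), access direction
 * (bits 8+) and page-table level (low byte):
 *
 *     u32 flag  = SHADOW_ENTRY | SET_ENTRY | L4;  // (2UL<<16)|(1UL<<8)|4
 *     u32 level = flag & L_MASK;                  // == 4
 */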

#define PAGETABLE_ENTRIES (1 << PAGETABLE_ORDER)

typedef struct { intpte_t lo; } pgentry_64_t;
#define shadow_level_to_type(l)    ((l) << 29)
#define shadow_type_to_level(t)    ((t) >> 29)
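
/*
 * Illustrative (added): the level lives in the top bits of a PGT type
 * word, so shadow_type_to_level(shadow_level_to_type(L2)) == L2.
 */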

#define entry_get_value(_x)          ((_x).lo)
#define entry_get_pfn(_x)            \
    (((_x).lo & (PADDR_MASK&PAGE_MASK)) >> PAGE_SHIFT)
#define entry_get_paddr(_x)          (((_x).lo & (PADDR_MASK&PAGE_MASK)))
#define entry_get_flags(_x)          (get_pte_flags((_x).lo))

#define entry_empty()                ((pgentry_64_t) { 0 })
#define entry_from_pfn(pfn, flags)   \
    ((pgentry_64_t) { ((intpte_t)(pfn) << PAGE_SHIFT) | put_pte_flags(flags) })
#define entry_add_flags(x, flags)    ((x).lo |= put_pte_flags(flags))
#define entry_remove_flags(x, flags) ((x).lo &= ~put_pte_flags(flags))
#define entry_has_changed(x,y,flags) \
    ( !!(((x).lo ^ (y).lo) & ((PADDR_MASK&PAGE_MASK)|put_pte_flags(flags))) )
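
/*
 * Example (illustrative only): build an entry for machine frame `mfn'
 * and test a flag:
 *
 *     pgentry_64_t e = entry_from_pfn(mfn, _PAGE_PRESENT | _PAGE_RW);
 *     if ( entry_get_flags(e) & _PAGE_PRESENT )
 *         ...
 */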

static inline int table_offset_64(unsigned long va, int level)
{
    switch(level) {
        case 1:
            return (((va) >> L1_PAGETABLE_SHIFT) & (L1_PAGETABLE_ENTRIES - 1));
        case 2:
            return (((va) >> L2_PAGETABLE_SHIFT) & (L2_PAGETABLE_ENTRIES - 1));
        case 3:
            return (((va) >> L3_PAGETABLE_SHIFT) & (L3_PAGETABLE_ENTRIES - 1));
        case 4:
            return (((va) >> L4_PAGETABLE_SHIFT) & (L4_PAGETABLE_ENTRIES - 1));
        default:
            //printk("<table_offset_64> level %d is too big\n", level);
            return -1;
    }
}
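
/*
 * Worked example (illustrative): for va == 0xffff800000001000UL,
 *     table_offset_64(va, 4) == (va >> 39) & 511 == 256
 *     table_offset_64(va, 1) == (va >> 12) & 511 == 1
 */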

static inline void free_out_of_sync_state(struct domain *d)
{
    struct out_of_sync_entry *entry;

    // NB: Be careful not to call something that manipulates this list
    //     while walking it. Remove one item at a time, and always
    //     restart from start of list.
    //
    while ( (entry = d->arch.out_of_sync) )
    {
        d->arch.out_of_sync = entry->next;
        release_out_of_sync_entry(d, entry);

        entry->next = d->arch.out_of_sync_free;
        d->arch.out_of_sync_free = entry;
    }
}

static inline pgentry_64_t *__entry(
    struct vcpu *v, u64 va, u32 flag)
{
    int i;
    pgentry_64_t *le_e;
    pgentry_64_t *le_p;
    unsigned long mfn;
    int index;
    u32 level = flag & L_MASK;
    struct domain *d = v->domain;

    index = table_offset_64(va, ROOT_LEVEL_64);
    if (flag & SHADOW_ENTRY)
        le_e = (pgentry_64_t *)&v->arch.shadow_vtable[index];
    else
        le_e = (pgentry_64_t *)&v->arch.guest_vtable[index];

    /*
     * If it's not external mode, the frame number in the entry is already
     * a machine frame number; in translate mode a guest-table walk must
     * first map the guest pfn to its mfn.
     */
    for (i = ROOT_LEVEL_64 - level; i > 0; i--) {
        if (unlikely(!(entry_get_flags(*le_e) & _PAGE_PRESENT)))
            return NULL;
        mfn = entry_get_value(*le_e) >> PAGE_SHIFT;
        if ((flag & GUEST_ENTRY) && shadow_mode_translate(d))
            mfn = phys_to_machine_mapping(mfn);
        le_p = (pgentry_64_t *)phys_to_virt(mfn << PAGE_SHIFT);
        index = table_offset_64(va, (level + i - 1));
        le_e = &le_p[index];
    }

    return le_e;
}

static inline pgentry_64_t *__rw_entry(
    struct vcpu *ed, u64 va, void *e_p, u32 flag)
{
    pgentry_64_t *le_e = __entry(ed, va, flag);
    pgentry_64_t *e = (pgentry_64_t *)e_p;
    if (le_e == NULL)
        return NULL;

    if (e) {
        if (flag & SET_ENTRY)
            *le_e = *e;
        else
            *e = *le_e;
    }
    return le_e;
}
#define __shadow_set_l4e(v, va, value) \
  __rw_entry(v, va, value, SHADOW_ENTRY | SET_ENTRY | L4)
#define __shadow_get_l4e(v, va, sl4e) \
  __rw_entry(v, va, sl4e, SHADOW_ENTRY | GET_ENTRY | L4)
#define __shadow_set_l3e(v, va, value) \
  __rw_entry(v, va, value, SHADOW_ENTRY | SET_ENTRY | L3)
#define __shadow_get_l3e(v, va, sl3e) \
  __rw_entry(v, va, sl3e, SHADOW_ENTRY | GET_ENTRY | L3)
#define __shadow_set_l2e(v, va, value) \
  __rw_entry(v, va, value, SHADOW_ENTRY | SET_ENTRY | L2)
#define __shadow_get_l2e(v, va, sl2e) \
  __rw_entry(v, va, sl2e, SHADOW_ENTRY | GET_ENTRY | L2)
#define __shadow_set_l1e(v, va, value) \
  __rw_entry(v, va, value, SHADOW_ENTRY | SET_ENTRY | L1)
#define __shadow_get_l1e(v, va, sl1e) \
  __rw_entry(v, va, sl1e, SHADOW_ENTRY | GET_ENTRY | L1)
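
/*
 * Usage sketch (illustrative only): read a shadow L2 entry, set the
 * accessed bit, and write it back:
 *
 *     pgentry_64_t sl2e;
 *     if ( __shadow_get_l2e(v, va, &sl2e) &&
 *          (entry_get_flags(sl2e) & _PAGE_PRESENT) )
 *     {
 *         entry_add_flags(sl2e, _PAGE_ACCESSED);
 *         __shadow_set_l2e(v, va, &sl2e);
 *     }
 */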

#define __guest_set_l4e(v, va, value) \
  __rw_entry(v, va, value, GUEST_ENTRY | SET_ENTRY | L4)
#define __guest_get_l4e(v, va, gl4e) \
  __rw_entry(v, va, gl4e, GUEST_ENTRY | GET_ENTRY | L4)
#define __guest_set_l3e(v, va, value) \
  __rw_entry(v, va, value, GUEST_ENTRY | SET_ENTRY | L3)
#define __guest_get_l3e(v, va, gl3e) \
  __rw_entry(v, va, gl3e, GUEST_ENTRY | GET_ENTRY | L3)

static inline void * __guest_set_l2e(
    struct vcpu *v, u64 va, void *value, int size)
{
    switch(size) {
        case 4:
            // 32-bit guest
            {
                l2_pgentry_32_t *l2va;

                l2va = (l2_pgentry_32_t *)v->arch.guest_vtable;
                if (value)
                    l2va[l2_table_offset_32(va)] = *(l2_pgentry_32_t *)value;
                return &l2va[l2_table_offset_32(va)];
            }
        case 8:
            return __rw_entry(v, va, value, GUEST_ENTRY | SET_ENTRY | L2);
        default:
            BUG();
            return NULL;
    }
}

#define __guest_set_l2e(v, va, value) \
    ( __typeof__(value) )__guest_set_l2e(v, (u64)va, value, sizeof(*value))

static inline void * __guest_get_l2e(
    struct vcpu *v, u64 va, void *gl2e, int size)
{
    switch(size) {
        case 4:
            // 32-bit guest
            {
                l2_pgentry_32_t *l2va;
                l2va = (l2_pgentry_32_t *)v->arch.guest_vtable;
                if (gl2e)
                    *(l2_pgentry_32_t *)gl2e = l2va[l2_table_offset_32(va)];
                return &l2va[l2_table_offset_32(va)];
            }
        case 8:
            return __rw_entry(v, va, gl2e, GUEST_ENTRY | GET_ENTRY | L2);
        default:
            BUG();
            return NULL;
    }
}

#define __guest_get_l2e(v, va, gl2e) \
    (__typeof__ (gl2e))__guest_get_l2e(v, (u64)va, gl2e, sizeof(*gl2e))
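
/*
 * Note (added): sizeof(*gl2e) dispatches on the caller's pointer type, so
 * the 32-bit guest path is selected by passing an l2_pgentry_32_t * and
 * the 64-bit path by passing a pgentry_64_t *.
 */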

static inline void * __guest_set_l1e(
    struct vcpu *v, u64 va, void *value, int size)
{
    switch(size) {
        case 4:
            // 32-bit guest
            {
                l2_pgentry_32_t gl2e;
                l1_pgentry_32_t *l1va;
                unsigned long l1mfn;

                if (!__guest_get_l2e(v, va, &gl2e))
                    return NULL;
                if (unlikely(!(l2e_get_flags_32(gl2e) & _PAGE_PRESENT)))
                    return NULL;

                l1mfn = phys_to_machine_mapping(l2e_get_pfn(gl2e));

                /* mfn-to-paddr conversion: PAGE_SHIFT (== L1_PAGETABLE_SHIFT). */
                l1va = (l1_pgentry_32_t *)phys_to_virt(l1mfn << PAGE_SHIFT);
                if (value)
                    l1va[l1_table_offset_32(va)] = *(l1_pgentry_32_t *)value;

                return &l1va[l1_table_offset_32(va)];
            }

        case 8:
            return __rw_entry(v, va, value, GUEST_ENTRY | SET_ENTRY | L1);
        default:
            BUG();
            return NULL;
    }
}

#define __guest_set_l1e(v, va, value) \
    ( __typeof__(value) )__guest_set_l1e(v, (u64)va, value, sizeof(*value))

static inline void * __guest_get_l1e(
    struct vcpu *v, u64 va, void *gl1e, int size)
{
    switch(size) {
        case 4:
            // 32-bit guest
            {
                l2_pgentry_32_t gl2e;
                l1_pgentry_32_t *l1va;
                unsigned long l1mfn;

                if (!(__guest_get_l2e(v, va, &gl2e)))
                    return NULL;

                if (unlikely(!(l2e_get_flags_32(gl2e) & _PAGE_PRESENT)))
                    return NULL;

                l1mfn = phys_to_machine_mapping(l2e_get_pfn(gl2e));
                l1va = (l1_pgentry_32_t *)phys_to_virt(l1mfn << PAGE_SHIFT);
                if (gl1e)
                    *(l1_pgentry_32_t *)gl1e = l1va[l1_table_offset_32(va)];

                return &l1va[l1_table_offset_32(va)];
            }
        case 8:
            // 64-bit guest
            return __rw_entry(v, va, gl1e, GUEST_ENTRY | GET_ENTRY | L1);
        default:
            BUG();
            return NULL;
    }
}

#define __guest_get_l1e(v, va, gl1e) \
    ( __typeof__(gl1e) )__guest_get_l1e(v, (u64)va, gl1e, sizeof(*gl1e))

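/*
 * Added commentary: derive the shadow entry for a guest entry. A present
 * guest entry with a known shadow frame (smfn != 0) becomes a present
 * shadow entry; a PSE L2 guest superpage maps onto an fl1 shadow whose L1
 * entries are write-protected when dirty logging is on or the guest entry
 * is not yet dirty.
 */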
static inline void entry_general(
    struct domain *d,
    pgentry_64_t *gle_p,
    pgentry_64_t *sle_p,
    unsigned long smfn, u32 level)
{
    pgentry_64_t gle = *gle_p;
    pgentry_64_t sle;

    sle = entry_empty();
    if ( (entry_get_flags(gle) & _PAGE_PRESENT) && (smfn != 0) )
    {
        if ((entry_get_flags(gle) & _PAGE_PSE) && level == L2) {
            sle = entry_from_pfn(smfn, entry_get_flags(gle));
            entry_remove_flags(sle, _PAGE_PSE);

            if ( shadow_mode_log_dirty(d) ||
                 !(entry_get_flags(gle) & _PAGE_DIRTY) )
            {
                pgentry_64_t *l1_p;
                int i;

                l1_p = (pgentry_64_t *)map_domain_page(smfn);
                for (i = 0; i < L1_PAGETABLE_ENTRIES; i++)
                    entry_remove_flags(l1_p[i], _PAGE_RW);

                unmap_domain_page(l1_p);
            }
        } else {
            sle = entry_from_pfn(
                smfn,
                (entry_get_flags(gle) | _PAGE_RW | _PAGE_ACCESSED) & ~_PAGE_AVAIL);
            entry_add_flags(gle, _PAGE_ACCESSED);
        }
        // XXX mafetter: Hmm...
        //     Shouldn't the dirty log be checked/updated here?
        //     Actually, it needs to be done in this function's callers.
        //
        *gle_p = gle;
    }

    if ( entry_get_value(sle) || entry_get_value(gle) )
        SH_VVLOG("%s: gpde=%lx, new spde=%lx", __func__,
                 entry_get_value(gle), entry_get_value(sle));

    *sle_p = sle;
}

static inline void entry_propagate_from_guest(
    struct domain *d, pgentry_64_t *gle_p, pgentry_64_t *sle_p, u32 level)
{
    pgentry_64_t gle = *gle_p;
    unsigned long smfn = 0;

    if ( entry_get_flags(gle) & _PAGE_PRESENT ) {
        if ((entry_get_flags(gle) & _PAGE_PSE) && level == L2) {
            smfn = __shadow_status(
                d, entry_get_value(gle) >> PAGE_SHIFT, PGT_fl1_shadow);
        } else {
            smfn = __shadow_status(
                d, entry_get_pfn(gle), shadow_level_to_type(level - 1));
        }
    }
    entry_general(d, gle_p, sle_p, smfn, level);
}

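/*
 * Added commentary: recompute the shadow entry for a changed guest entry
 * and adjust shadow refcounts when the old and new shadow entries differ
 * in their _PAGE_PRESENT state.
 */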
static inline int
validate_entry_change(
    struct domain *d,
    pgentry_64_t *new_gle_p,
    pgentry_64_t *shadow_le_p,
    u32 level)
{
    pgentry_64_t old_sle, new_sle;
    pgentry_64_t new_gle = *new_gle_p;

    old_sle = *shadow_le_p;
    entry_propagate_from_guest(d, &new_gle, &new_sle, level);

    ESH_LOG("old_sle: %lx, new_gle: %lx, new_sle: %lx\n",
            entry_get_value(old_sle), entry_get_value(new_gle),
            entry_get_value(new_sle));

    if ( ((entry_get_value(old_sle) | entry_get_value(new_sle)) & _PAGE_PRESENT) &&
         entry_has_changed(old_sle, new_sle, _PAGE_PRESENT) )
    {
        perfc_incrc(validate_entry_changes);

        if ( (entry_get_flags(new_sle) & _PAGE_PRESENT) &&
             !get_shadow_ref(entry_get_pfn(new_sle)) )
            BUG();
        if ( entry_get_flags(old_sle) & _PAGE_PRESENT )
            put_shadow_ref(entry_get_pfn(old_sle));
    }

    *shadow_le_p = new_sle;

    return 1;
}

/*
 * Check the P, R/W and U/S bits in the guest page table.
 * Return 1 if the fault belongs to the guest (and should be reflected
 * to it), else 0.
 */
static inline int guest_page_fault(
    struct vcpu *v,
    unsigned long va, unsigned int error_code,
    pgentry_64_t *gpl2e, pgentry_64_t *gpl1e)
{
    struct domain *d = v->domain;
    pgentry_64_t gle, *lva;
    unsigned long mfn;
    int i;

    __rw_entry(v, va, &gle, GUEST_ENTRY | GET_ENTRY | L4);
    if (unlikely(!(entry_get_flags(gle) & _PAGE_PRESENT)))
        return 1;

    if (error_code & ERROR_W) {
        if (unlikely(!(entry_get_flags(gle) & _PAGE_RW)))
            return 1;
    }
    if (error_code & ERROR_U) {
        if (unlikely(!(entry_get_flags(gle) & _PAGE_USER)))
            return 1;
    }
    for (i = L3; i >= L1; i--) {
        /*
         * If it's not external mode, the mfn here is already machine
         * physical.
         */
        mfn = __gpfn_to_mfn(d, (entry_get_value(gle) >> PAGE_SHIFT));

        lva = (pgentry_64_t *)phys_to_virt(mfn << PAGE_SHIFT);
        gle = lva[table_offset_64(va, i)];

        if (unlikely(!(entry_get_flags(gle) & _PAGE_PRESENT)))
            return 1;

        if (error_code & ERROR_W) {
            if (unlikely(!(entry_get_flags(gle) & _PAGE_RW)))
                return 1;
        }
        if (error_code & ERROR_U) {
            if (unlikely(!(entry_get_flags(gle) & _PAGE_USER)))
                return 1;
        }

        if (i == L2) {
            if (gpl2e)
                *gpl2e = gle;

            if (likely(entry_get_flags(gle) & _PAGE_PSE))
                return 0;
        }

        if (i == L1)
            if (gpl1e)
                *gpl1e = gle;
    }
    return 0;
}
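
/*
 * Usage sketch (illustrative; inject_guest_fault is a hypothetical
 * caller-side handler, not part of this header):
 *
 *     if ( guest_page_fault(v, va, error_code & (ERROR_W | ERROR_U),
 *                           &gpl2e, &gpl1e) )
 *         inject_guest_fault(v, va, error_code);
 */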

static inline unsigned long gva_to_gpa(unsigned long gva)
{
    struct vcpu *v = current;
    pgentry_64_t gl1e;
    pgentry_64_t gl2e;
    unsigned long gpa;

    if (guest_page_fault(v, gva, 0, &gl2e, &gl1e))
        return 0;

    if (entry_get_flags(gl2e) & _PAGE_PSE)
        gpa = entry_get_paddr(gl2e) + (gva & ((1 << L2_PAGETABLE_SHIFT) - 1));
    else
        gpa = entry_get_paddr(gl1e) + (gva & ~PAGE_MASK);

    return gpa;
}
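
/*
 * Example (illustrative): for a 2MB PSE guest mapping whose frame starts
 * at gpa 0x40000000, gva_to_gpa(gva) == 0x40000000 + (gva & 0x1fffff);
 * for a 4KB mapping it is the L1 frame address plus (gva & 0xfff).
 * Note that 0 doubles as the failure return value.
 */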

#endif /* _XEN_SHADOW_64_H */