xen-vtx-unstable

annotate xen/include/asm-x86/shadow_64.h @ 5909:691cd6f65739

Really just basic preparation: switch over PAE builds to the new
shadow code, drop old dummy functions, add (fewer) new ones.
author kaf24@firebug.cl.cam.ac.uk
date Fri Jul 29 10:23:07 2005 +0000 (2005-07-29)
parents b3cfebba3b30
children b53a65034532 1efe6f4163ee e173a853dc46 d4fd332df775 04dfb5158f3a f294acb25858

/******************************************************************************
 * include/asm-x86/shadow_64.h
 *
 * Copyright (c) 2005 Michael A Fetterman
 * Based on an earlier implementation by Ian Pratt et al
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
/*
 * Jun Nakajima <jun.nakajima@intel.com>
 * Chengyuan Li <chengyuan.li@intel.com>
 *
 * Extended to support 64-bit guests.
 */
#ifndef _XEN_SHADOW_64_H
#define _XEN_SHADOW_64_H
#include <asm/shadow.h>

#define READ_FAULT  0
#define WRITE_FAULT 1

#define ERROR_W 2
#define ERROR_U 4
#define X86_64_SHADOW_DEBUG 0

#if X86_64_SHADOW_DEBUG
#define ESH_LOG(_f, _a...) \
    printk(_f, ##_a)
#else
#define ESH_LOG(_f, _a...) ((void)0)
#endif

#define L4      4UL
#define L3      3UL
#define L2      2UL
#define L1      1UL
#define L_MASK  0xff

#define ROOT_LEVEL_64   L4
#define ROOT_LEVEL_32   L2

#define SHADOW_ENTRY    (2UL << 16)
#define GUEST_ENTRY     (1UL << 16)

#define GET_ENTRY   (2UL << 8)
#define SET_ENTRY   (1UL << 8)

#define PAGETABLE_ENTRIES   (1<<PAGETABLE_ORDER)

typedef struct { intpte_t lo; } pgentry_64_t;
#define shadow_level_to_type(l)    ((l) << 29)
#define shadow_type_to_level(t)    ((t) >> 29)

#define entry_get_value(_x)   ((_x).lo)
#define entry_get_pfn(_x)     \
    (((_x).lo & (PADDR_MASK&PAGE_MASK)) >> PAGE_SHIFT)
#define entry_get_paddr(_x)   (((_x).lo & (PADDR_MASK&PAGE_MASK)))
#define entry_get_flags(_x)   (get_pte_flags((_x).lo))

#define entry_empty()              ((pgentry_64_t) { 0 })
#define entry_from_pfn(pfn, flags) \
    ((pgentry_64_t) { ((intpte_t)(pfn) << PAGE_SHIFT) | put_pte_flags(flags) })
#define entry_add_flags(x, flags)    ((x).lo |= put_pte_flags(flags))
#define entry_remove_flags(x, flags) ((x).lo &= ~put_pte_flags(flags))
#define entry_has_changed(x,y,flags) \
    ( !!(((x).lo ^ (y).lo) & ((PADDR_MASK&PAGE_MASK)|put_pte_flags(flags))) )
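
/*
 * Illustrative sketch (not part of the original interface): how the
 * entry_*() accessors above compose.  An entry is built from a frame number
 * plus flags, and its fields are read back with the matching getters.  The
 * names 'entry_accessor_example' and 'example_mfn' are made up for the
 * example only.
 */
#if 0
static inline void entry_accessor_example(unsigned long example_mfn)
{
    /* Build a present, writable entry pointing at example_mfn. */
    pgentry_64_t e = entry_from_pfn(example_mfn, _PAGE_PRESENT | _PAGE_RW);

    /* Read the pieces back. */
    ASSERT(entry_get_pfn(e) == example_mfn);
    ASSERT(entry_get_flags(e) & _PAGE_PRESENT);

    /* Flags can be added or removed in place. */
    entry_add_flags(e, _PAGE_ACCESSED);
    entry_remove_flags(e, _PAGE_RW);
}
#endif
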
static inline int table_offset_64(unsigned long va, int level)
{
    switch(level) {
        case 1:
            return (((va) >> L1_PAGETABLE_SHIFT) & (L1_PAGETABLE_ENTRIES - 1));
        case 2:
            return (((va) >> L2_PAGETABLE_SHIFT) & (L2_PAGETABLE_ENTRIES - 1));
        case 3:
            return (((va) >> L3_PAGETABLE_SHIFT) & (L3_PAGETABLE_ENTRIES - 1));
#if CONFIG_PAGING_LEVELS >= 4
        case 4:
            return (((va) >> L4_PAGETABLE_SHIFT) & (L4_PAGETABLE_ENTRIES - 1));
#endif
        default:
            //printk("<table_offset_64> level %d is too big\n", level);
            return -1;
    }
}
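
/*
 * Illustrative sketch (not part of the original header): table_offset_64()
 * just extracts the page-table index for 'va' at the requested level, so a
 * full walk issues one call per level.  The function name below is made up
 * for the example only.
 */
#if 0
static inline void dump_va_indices(unsigned long va)
{
    int level;

    for ( level = L4; level >= L1; level-- )
        printk("va %lx: level %d index %d\n",
               va, level, table_offset_64(va, level));
}
#endif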

static inline void free_out_of_sync_state(struct domain *d)
{
    struct out_of_sync_entry *entry;

    // NB: Be careful not to call something that manipulates this list
    //     while walking it. Remove one item at a time, and always
    //     restart from start of list.
    //
    while ( (entry = d->arch.out_of_sync) )
    {
        d->arch.out_of_sync = entry->next;
        release_out_of_sync_entry(d, entry);

        entry->next = d->arch.out_of_sync_free;
        d->arch.out_of_sync_free = entry;
    }
}

static inline pgentry_64_t *__entry(
    struct vcpu *v, u64 va, u32 flag)
{
    int i;
    pgentry_64_t *le_e;
    pgentry_64_t *le_p;
    unsigned long mfn;
    int index;
    u32 level = flag & L_MASK;
    struct domain *d = v->domain;

    index = table_offset_64(va, ROOT_LEVEL_64);
    if (flag & SHADOW_ENTRY)
        le_e = (pgentry_64_t *)&v->arch.shadow_vtable[index];
    else
        le_e = (pgentry_64_t *)&v->arch.guest_vtable[index];

    /*
     * If it's not external mode, then mfn should be machine physical.
     */
    for (i = ROOT_LEVEL_64 - level; i > 0; i--) {
        if (unlikely(!(entry_get_flags(*le_e) & _PAGE_PRESENT)))
            return NULL;
        mfn = entry_get_value(*le_e) >> PAGE_SHIFT;
        if ((flag & GUEST_ENTRY) && shadow_mode_translate(d))
            mfn = phys_to_machine_mapping(mfn);
        le_p = (pgentry_64_t *)phys_to_virt(mfn << PAGE_SHIFT);
        index = table_offset_64(va, (level + i - 1));
        le_e = &le_p[index];
    }
    return le_e;
}

static inline pgentry_64_t *__rw_entry(
    struct vcpu *ed, u64 va, void *e_p, u32 flag)
{
    pgentry_64_t *le_e = __entry(ed, va, flag);
    pgentry_64_t *e = (pgentry_64_t *)e_p;
    if (le_e == NULL)
        return NULL;

    if (e) {
        if (flag & SET_ENTRY)
            *le_e = *e;
        else
            *e = *le_e;
    }
    return le_e;
}
#define __shadow_set_l4e(v, va, value) \
  __rw_entry(v, va, value, SHADOW_ENTRY | SET_ENTRY | L4)
#define __shadow_get_l4e(v, va, sl4e) \
  __rw_entry(v, va, sl4e, SHADOW_ENTRY | GET_ENTRY | L4)
#define __shadow_set_l3e(v, va, value) \
  __rw_entry(v, va, value, SHADOW_ENTRY | SET_ENTRY | L3)
#define __shadow_get_l3e(v, va, sl3e) \
  __rw_entry(v, va, sl3e, SHADOW_ENTRY | GET_ENTRY | L3)
#define __shadow_set_l2e(v, va, value) \
  __rw_entry(v, va, value, SHADOW_ENTRY | SET_ENTRY | L2)
#define __shadow_get_l2e(v, va, sl2e) \
  __rw_entry(v, va, sl2e, SHADOW_ENTRY | GET_ENTRY | L2)
#define __shadow_set_l1e(v, va, value) \
  __rw_entry(v, va, value, SHADOW_ENTRY | SET_ENTRY | L1)
#define __shadow_get_l1e(v, va, sl1e) \
  __rw_entry(v, va, sl1e, SHADOW_ENTRY | GET_ENTRY | L1)
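
/*
 * Illustrative sketch (not part of the original interface): the accessor
 * macros above all funnel into __rw_entry(), whose 'flag' word is the OR of
 * a table selector (SHADOW_ENTRY or GUEST_ENTRY), an operation (GET_ENTRY or
 * SET_ENTRY) and a level (L1..L4).  The helper name below is made up for the
 * example only.
 */
#if 0
static inline int example_read_shadow_l1e(struct vcpu *v, u64 va,
                                          pgentry_64_t *sl1e)
{
    /* Equivalent to __rw_entry(v, va, sl1e, SHADOW_ENTRY | GET_ENTRY | L1). */
    if ( __shadow_get_l1e(v, va, sl1e) == NULL )
        return 0;   /* a higher-level shadow entry was not present */
    return 1;
}
#endif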

#define __guest_set_l4e(v, va, value) \
  __rw_entry(v, va, value, GUEST_ENTRY | SET_ENTRY | L4)
#define __guest_get_l4e(v, va, gl4e) \
  __rw_entry(v, va, gl4e, GUEST_ENTRY | GET_ENTRY | L4)
#define __guest_set_l3e(v, va, value) \
  __rw_entry(v, va, value, GUEST_ENTRY | SET_ENTRY | L3)
#define __guest_get_l3e(v, va, gl3e) \
  __rw_entry(v, va, gl3e, GUEST_ENTRY | GET_ENTRY | L3)

static inline void * __guest_set_l2e(
    struct vcpu *v, u64 va, void *value, int size)
{
    switch(size) {
        case 4:
            // 32-bit guest
            {
                l2_pgentry_32_t *l2va;

                l2va = (l2_pgentry_32_t *)v->arch.guest_vtable;
                if (value)
                    l2va[l2_table_offset_32(va)] = *(l2_pgentry_32_t *)value;
                return &l2va[l2_table_offset_32(va)];
            }
        case 8:
            return __rw_entry(v, va, value, GUEST_ENTRY | SET_ENTRY | L2);
        default:
            BUG();
            return NULL;
    }
    return NULL;
}

#define __guest_set_l2e(v, va, value) \
  ( __typeof__(value) )__guest_set_l2e(v, (u64)va, value, sizeof(*value))

static inline void * __guest_get_l2e(
    struct vcpu *v, u64 va, void *gl2e, int size)
{
    switch(size) {
        case 4:
            // 32-bit guest
            {
                l2_pgentry_32_t *l2va;
                l2va = (l2_pgentry_32_t *)v->arch.guest_vtable;
                if (gl2e)
                    *(l2_pgentry_32_t *)gl2e = l2va[l2_table_offset_32(va)];
                return &l2va[l2_table_offset_32(va)];
            }
        case 8:
            return __rw_entry(v, va, gl2e, GUEST_ENTRY | GET_ENTRY | L2);
        default:
            BUG();
            return NULL;
    }
    return NULL;
}

#define __guest_get_l2e(v, va, gl2e) \
  (__typeof__ (gl2e))__guest_get_l2e(v, (u64)va, gl2e, sizeof(*gl2e))
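
/*
 * Illustrative sketch (not part of the original interface): the wrapper
 * macros above dispatch on sizeof(*pointer), so the same call site handles
 * 4-byte (32-bit guest) and 8-byte (64-bit guest) L2 entries.  The caller
 * simply picks the entry type; the names below are made up for the example.
 */
#if 0
static inline void guest_l2e_size_dispatch_example(struct vcpu *v, u64 va)
{
    l2_pgentry_32_t gl2e_32;    /* sizeof == 4: takes the 32-bit path   */
    pgentry_64_t    gl2e_64;    /* sizeof == 8: goes through __rw_entry */

    (void)__guest_get_l2e(v, va, &gl2e_32);
    (void)__guest_get_l2e(v, va, &gl2e_64);
}
#endif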

static inline void * __guest_set_l1e(
    struct vcpu *v, u64 va, void *value, int size)
{
    switch(size) {
        case 4:
            // 32-bit guest
            {
                l2_pgentry_32_t gl2e;
                l1_pgentry_32_t *l1va;
                unsigned long l1mfn;

                if (!__guest_get_l2e(v, va, &gl2e))
                    return NULL;
                if (unlikely(!(l2e_get_flags_32(gl2e) & _PAGE_PRESENT)))
                    return NULL;

                l1mfn = phys_to_machine_mapping(
                    l2e_get_pfn(gl2e));

                l1va = (l1_pgentry_32_t *)
                    phys_to_virt(l1mfn << L1_PAGETABLE_SHIFT);
                if (value)
                    l1va[l1_table_offset_32(va)] = *(l1_pgentry_32_t *)value;

                return &l1va[l1_table_offset_32(va)];
            }

        case 8:
            return __rw_entry(v, va, value, GUEST_ENTRY | SET_ENTRY | L1);
        default:
            BUG();
            return NULL;
    }
    return NULL;
}

#define __guest_set_l1e(v, va, value) \
  ( __typeof__(value) )__guest_set_l1e(v, (u64)va, value, sizeof(*value))

static inline void * __guest_get_l1e(
    struct vcpu *v, u64 va, void *gl1e, int size)
{
    switch(size) {
        case 4:
            // 32-bit guest
            {
                l2_pgentry_32_t gl2e;
                l1_pgentry_32_t *l1va;
                unsigned long l1mfn;

                if (!(__guest_get_l2e(v, va, &gl2e)))
                    return NULL;

                if (unlikely(!(l2e_get_flags_32(gl2e) & _PAGE_PRESENT)))
                    return NULL;

                l1mfn = phys_to_machine_mapping(
                    l2e_get_pfn(gl2e));
                l1va = (l1_pgentry_32_t *) phys_to_virt(
                    l1mfn << L1_PAGETABLE_SHIFT);
                if (gl1e)
                    *(l1_pgentry_32_t *)gl1e = l1va[l1_table_offset_32(va)];

                return &l1va[l1_table_offset_32(va)];
            }
        case 8:
            // 64-bit guest
            return __rw_entry(v, va, gl1e, GUEST_ENTRY | GET_ENTRY | L1);
        default:
            BUG();
            return NULL;
    }
    return NULL;
}

#define __guest_get_l1e(v, va, gl1e) \
  ( __typeof__(gl1e) )__guest_get_l1e(v, (u64)va, gl1e, sizeof(*gl1e))

static inline void entry_general(
    struct domain *d,
    pgentry_64_t *gle_p,
    pgentry_64_t *sle_p,
    unsigned long smfn, u32 level)
{
    pgentry_64_t gle = *gle_p;
    pgentry_64_t sle;

    sle = entry_empty();
    if ( (entry_get_flags(gle) & _PAGE_PRESENT) && (smfn != 0) )
    {
        if ((entry_get_flags(gle) & _PAGE_PSE) && level == L2) {
            sle = entry_from_pfn(smfn, entry_get_flags(gle));
            entry_remove_flags(sle, _PAGE_PSE);

            if ( shadow_mode_log_dirty(d) ||
                 !(entry_get_flags(gle) & _PAGE_DIRTY) )
            {
                pgentry_64_t *l1_p;
                int i;

                l1_p = (pgentry_64_t *)map_domain_page(smfn);
                for (i = 0; i < L1_PAGETABLE_ENTRIES; i++)
                    entry_remove_flags(l1_p[i], _PAGE_RW);

                unmap_domain_page(l1_p);
            }
        } else {
            sle = entry_from_pfn(smfn,
                    (entry_get_flags(gle) | _PAGE_RW | _PAGE_ACCESSED) & ~_PAGE_AVAIL);
            entry_add_flags(gle, _PAGE_ACCESSED);
        }
        // XXX mafetter: Hmm...
        //     Shouldn't the dirty log be checked/updated here?
        //     Actually, it needs to be done in this function's callers.
        //
        *gle_p = gle;
    }

    if ( entry_get_value(sle) || entry_get_value(gle) )
        SH_VVLOG("%s: gpde=%lx, new spde=%lx", __func__,
                 entry_get_value(gle), entry_get_value(sle));

    *sle_p = sle;
}

static inline void entry_propagate_from_guest(
    struct domain *d, pgentry_64_t *gle_p, pgentry_64_t *sle_p, u32 level)
{
    pgentry_64_t gle = *gle_p;
    unsigned long smfn = 0;

    if ( entry_get_flags(gle) & _PAGE_PRESENT ) {
        if ((entry_get_flags(gle) & _PAGE_PSE) && level == L2) {
            smfn = __shadow_status(d, entry_get_value(gle) >> PAGE_SHIFT,
                                   PGT_fl1_shadow);
        } else {
            smfn = __shadow_status(d, entry_get_pfn(gle),
                                   shadow_level_to_type(level - 1));
        }
    }
    entry_general(d, gle_p, sle_p, smfn, level);
}

static inline int
validate_entry_change(
    struct domain *d,
    pgentry_64_t *new_gle_p,
    pgentry_64_t *shadow_le_p,
    u32 level)
{
    pgentry_64_t old_sle, new_sle;
    pgentry_64_t new_gle = *new_gle_p;

    old_sle = *shadow_le_p;
    entry_propagate_from_guest(d, &new_gle, &new_sle, level);

    ESH_LOG("old_sle: %lx, new_gle: %lx, new_sle: %lx\n",
            entry_get_value(old_sle), entry_get_value(new_gle),
            entry_get_value(new_sle));

    if ( ((entry_get_value(old_sle) | entry_get_value(new_sle)) & _PAGE_PRESENT) &&
         entry_has_changed(old_sle, new_sle, _PAGE_PRESENT) )
    {
        perfc_incrc(validate_entry_changes);

        if ( (entry_get_flags(new_sle) & _PAGE_PRESENT) &&
             !get_shadow_ref(entry_get_pfn(new_sle)) )
            BUG();
        if ( entry_get_flags(old_sle) & _PAGE_PRESENT )
            put_shadow_ref(entry_get_pfn(old_sle));
    }

    *shadow_le_p = new_sle;

    return 1;
}

/*
 * Check the P, R/W and U/S bits in the guest page table.
 * Return 1 if the fault must be handled by (reflected back to) the guest,
 * 0 otherwise.
 */
static inline int guest_page_fault(
    struct vcpu *v, unsigned long va, unsigned int error_code,
    pgentry_64_t *gpl2e, pgentry_64_t *gpl1e)
{
    struct domain *d = v->domain;
    pgentry_64_t gle, *lva;
    unsigned long mfn;
    int i;

    __rw_entry(v, va, &gle, GUEST_ENTRY | GET_ENTRY | L4);
    if (unlikely(!(entry_get_flags(gle) & _PAGE_PRESENT)))
        return 1;

    if (error_code & ERROR_W) {
        if (unlikely(!(entry_get_flags(gle) & _PAGE_RW)))
            return 1;
    }
    if (error_code & ERROR_U) {
        if (unlikely(!(entry_get_flags(gle) & _PAGE_USER)))
            return 1;
    }
    for (i = L3; i >= L1; i--) {
        /*
         * If it's not external mode, then mfn should be machine physical.
         */
        mfn = __gpfn_to_mfn(d, (entry_get_value(gle) >> PAGE_SHIFT));

        lva = (pgentry_64_t *) phys_to_virt(
            mfn << PAGE_SHIFT);
        gle = lva[table_offset_64(va, i)];

        if (unlikely(!(entry_get_flags(gle) & _PAGE_PRESENT)))
            return 1;

        if (error_code & ERROR_W) {
            if (unlikely(!(entry_get_flags(gle) & _PAGE_RW)))
                return 1;
        }
        if (error_code & ERROR_U) {
            if (unlikely(!(entry_get_flags(gle) & _PAGE_USER)))
                return 1;
        }

        if (i == L2) {
            if (gpl2e)
                *gpl2e = gle;

            if (likely(entry_get_flags(gle) & _PAGE_PSE))
                return 0;
        }

        if (i == L1)
            if (gpl1e)
                *gpl1e = gle;
    }
    return 0;
}
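
/*
 * Illustrative sketch (not part of the original interface): how a shadow
 * fault handler might consult guest_page_fault().  'error_code' is built
 * from the ERROR_W / ERROR_U bits defined at the top of this header; the
 * wrapper name below is made up for the example only.
 */
#if 0
static inline int example_fault_belongs_to_guest(struct vcpu *v,
                                                 unsigned long va,
                                                 int is_write, int is_user)
{
    unsigned int error_code = 0;
    pgentry_64_t gpl2e, gpl1e;

    if ( is_write )
        error_code |= ERROR_W;
    if ( is_user )
        error_code |= ERROR_U;

    /* Returns 1 if the guest's own tables would have faulted. */
    return guest_page_fault(v, va, error_code, &gpl2e, &gpl1e);
}
#endif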

static inline unsigned long gva_to_gpa(unsigned long gva)
{
    struct vcpu *v = current;
    pgentry_64_t gl1e = {0};
    pgentry_64_t gl2e = {0};
    unsigned long gpa;

    if (guest_page_fault(v, gva, 0, &gl2e, &gl1e))
        return 0;
    if (entry_get_flags(gl2e) & _PAGE_PSE)
        gpa = entry_get_paddr(gl2e) + (gva & ((1 << L2_PAGETABLE_SHIFT) - 1));
    else
        gpa = entry_get_paddr(gl1e) + (gva & ~PAGE_MASK);

    return gpa;
}
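
/*
 * Illustrative sketch (not part of the original interface): gva_to_gpa()
 * walks the guest's own page tables for the current vcpu and returns 0 when
 * the guest mapping is not present, so callers treat 0 as "no translation".
 * The function name below is made up for the example only.
 */
#if 0
static inline int example_guest_mapping_present(unsigned long gva)
{
    return gva_to_gpa(gva) != 0;
}
#endif
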
#endif /* _XEN_SHADOW_64_H */