xen-vtx-unstable
annotate xen/include/asm-x86/shadow_64.h @ 6730:3feb7fa331ed
Re-indent vmx code.
Signed-off-by: Keir Fraser <keir@xensource.com>
author:   kaf24@firebug.cl.cam.ac.uk
date:     Sun Sep 11 16:44:23 2005 +0000 (2005-09-11)
parents:  291e816acbf4
children: 4d899a738d59 8ca0f98ba8e2

/******************************************************************************
 * include/asm-x86/shadow_64.h
 *
 * Copyright (c) 2005 Michael A Fetterman
 * Based on an earlier implementation by Ian Pratt et al
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
/*
 * Jun Nakajima <jun.nakajima@intel.com>
 * Chengyuan Li <chengyuan.li@intel.com>
 *
 * Extended to support 64-bit guests.
 */
#ifndef _XEN_SHADOW_64_H
#define _XEN_SHADOW_64_H
#include <asm/shadow.h>
#include <asm/shadow_ops.h>

#define READ_FAULT  0
#define WRITE_FAULT 1

#define ERROR_W 2
#define ERROR_U 4
#define X86_64_SHADOW_DEBUG 0

#if X86_64_SHADOW_DEBUG
#define ESH_LOG(_f, _a...)              \
    printk(_f, ##_a)
#else
#define ESH_LOG(_f, _a...) ((void)0)
#endif

#define PAGING_L4   4UL
#define PAGING_L3   3UL
#define PAGING_L2   2UL
#define PAGING_L1   1UL
#define L_MASK      0xff

#define ROOT_LEVEL_64   PAGING_L4
#define ROOT_LEVEL_32   PAGING_L2

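/*
 * The 'flag' word passed to __entry()/__rw_entry() below packs three
 * selectors: which root table to walk (SHADOW_ENTRY or GUEST_ENTRY, in
 * bits 16 and up), whether to copy into or out of the table (SET_ENTRY or
 * GET_ENTRY, in bits 8 and up), and the target paging level
 * (PAGING_L1..PAGING_L4, extracted with L_MASK).
 */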
#define SHADOW_ENTRY    (2UL << 16)
#define GUEST_ENTRY     (1UL << 16)

#define GET_ENTRY   (2UL << 8)
#define SET_ENTRY   (1UL << 8)

#define PAGETABLE_ENTRIES   (1 << PAGETABLE_ORDER)

/* For 32-bit VMX guest to allocate shadow L1 & L2 */
#define SL1_ORDER   1
#define SL2_ORDER   2

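/*
 * pgentry_64_t is a level-independent page-table entry: the same 64-bit
 * representation is used for L1..L4 entries, and the entry_*() macros
 * below extract or rebuild the frame number and flag bits.
 * shadow_level_to_type()/shadow_type_to_level() keep the shadowed level
 * in the top three bits of a shadow page type; entry_propagate_from_guest()
 * uses this to ask __shadow_status() for the shadow of the next-lower-level
 * table.
 */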
typedef struct { intpte_t lo; } pgentry_64_t;
#define shadow_level_to_type(l)    (l << 29)
#define shadow_type_to_level(t)    (t >> 29)

#define entry_get_value(_x)   ((_x).lo)
#define entry_get_pfn(_x) \
    (((_x).lo & (PADDR_MASK&PAGE_MASK)) >> PAGE_SHIFT)
#define entry_get_paddr(_x)   (((_x).lo & (PADDR_MASK&PAGE_MASK)))
#define entry_get_flags(_x)   (get_pte_flags((_x).lo))

#define entry_empty()           ((pgentry_64_t) { 0 })
#define entry_from_pfn(pfn, flags) \
    ((pgentry_64_t) { ((intpte_t)(pfn) << PAGE_SHIFT) | put_pte_flags(flags) })
#define entry_add_flags(x, flags)    ((x).lo |= put_pte_flags(flags))
#define entry_remove_flags(x, flags) ((x).lo &= ~put_pte_flags(flags))
#define entry_has_changed(x,y,flags) \
    ( !!(((x).lo ^ (y).lo) & ((PADDR_MASK&PAGE_MASK)|put_pte_flags(flags))) )

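/*
 * Apparently used when a 32-bit (GUEST_PGENTRY_32) guest runs on 4-level
 * shadow page tables: PAE_SHADOW_SELF_ENTRY names the fixed L4 slot that
 * table_offset_64() returns for level 4 in that configuration, and
 * PDP_ENTRIES is the number of PAE page-directory-pointer entries.
 */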
#define PAE_SHADOW_SELF_ENTRY   259
#define PDP_ENTRIES   4

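/*
 * Return the index of 'va' within the page table at the given level
 * (1 = L1 ... 4 = L4), or -1 if the level is not handled in this build.
 */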
static inline int table_offset_64(unsigned long va, int level)
{
    switch(level) {
        case 1:
            return  (((va) >> L1_PAGETABLE_SHIFT) & (L1_PAGETABLE_ENTRIES - 1));
        case 2:
            return  (((va) >> L2_PAGETABLE_SHIFT) & (L2_PAGETABLE_ENTRIES - 1));
        case 3:
            return  (((va) >> L3_PAGETABLE_SHIFT) & (L3_PAGETABLE_ENTRIES - 1));
#if CONFIG_PAGING_LEVELS >= 4
#ifndef GUEST_PGENTRY_32
        case 4:
            return  (((va) >> L4_PAGETABLE_SHIFT) & (L4_PAGETABLE_ENTRIES - 1));
#else
        case 4:
            return PAE_SHADOW_SELF_ENTRY;
#endif
#endif
        default:
            //printk("<table_offset_64> level %d is too big\n", level);
            return -1;
    }
}

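/*
 * Drain the domain's out-of-sync list, releasing every entry and putting
 * it back on the free list.
 */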
static inline void free_out_of_sync_state(struct domain *d)
{
    struct out_of_sync_entry *entry;

    // NB: Be careful not to call something that manipulates this list
    //     while walking it.  Remove one item at a time, and always
    //     restart from start of list.
    //
    while ( (entry = d->arch.out_of_sync) )
    {
        d->arch.out_of_sync = entry->next;
        release_out_of_sync_entry(d, entry);

        entry->next = d->arch.out_of_sync_free;
        d->arch.out_of_sync_free = entry;
    }
}

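/*
 * Walk from the root (L4) table down to the entry covering 'va' at the
 * level encoded in 'flag', starting from either the shadow or the guest
 * root depending on SHADOW_ENTRY/GUEST_ENTRY.  Returns a pointer to that
 * entry, or NULL if a non-present entry is hit on the way down.  For guest
 * walks in translate mode, frame numbers are converted with
 * get_mfn_from_pfn(); intermediate tables are reached through the direct
 * mapping (phys_to_virt).
 */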
static inline pgentry_64_t *__entry(
    struct vcpu *v, u64 va, u32 flag)
{
    int i;
    pgentry_64_t *le_e;
    pgentry_64_t *le_p;
    unsigned long mfn;
    int index;
    u32 level = flag & L_MASK;
    struct domain *d = v->domain;

    index = table_offset_64(va, ROOT_LEVEL_64);
    if (flag & SHADOW_ENTRY)
        le_e = (pgentry_64_t *)&v->arch.shadow_vtable[index];
    else
        le_e = (pgentry_64_t *)&v->arch.guest_vtable[index];

    /*
     * If it's not external mode, then mfn should be machine physical.
     */
    for (i = ROOT_LEVEL_64 - level; i > 0; i--) {
        if (unlikely(!(entry_get_flags(*le_e) & _PAGE_PRESENT)))
            return NULL;
        mfn = entry_get_value(*le_e) >> PAGE_SHIFT;
        if ((flag & GUEST_ENTRY) && shadow_mode_translate(d))
            mfn = get_mfn_from_pfn(mfn);
        le_p = (pgentry_64_t *)phys_to_virt(mfn << PAGE_SHIFT);
        index = table_offset_64(va, (level + i - 1));
        le_e = &le_p[index];
    }

    return le_e;
}

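/*
 * Locate the entry for 'va' with __entry() and then either install *e_p
 * into the table (SET_ENTRY) or copy the table entry out into *e_p
 * (GET_ENTRY).  Returns the in-table pointer, or NULL if the walk failed;
 * callers may pass e_p == NULL to merely locate the entry.
 */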
static inline pgentry_64_t *__rw_entry(
    struct vcpu *ed, u64 va, void *e_p, u32 flag)
{
    pgentry_64_t *le_e = __entry(ed, va, flag);
    pgentry_64_t *e = (pgentry_64_t *)e_p;
    if (le_e == NULL)
        return NULL;

    if (e) {
        if (flag & SET_ENTRY)
            *le_e = *e;
        else
            *e = *le_e;
    }
    return le_e;
}
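
/*
 * Convenience wrappers around __rw_entry() for each paging level.  An
 * illustrative sketch (not from this file) of how a caller might read and
 * test a shadow L2 entry:
 *
 *     pgentry_64_t sl2e;
 *     if ( !__shadow_get_l2e(v, va, &sl2e) ||
 *          !(entry_get_flags(sl2e) & _PAGE_PRESENT) )
 *         return 0;   // no shadow mapping for this address yet
 */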
#define __shadow_set_l4e(v, va, value) \
    __rw_entry(v, va, value, SHADOW_ENTRY | SET_ENTRY | PAGING_L4)
#define __shadow_get_l4e(v, va, sl4e) \
    __rw_entry(v, va, sl4e, SHADOW_ENTRY | GET_ENTRY | PAGING_L4)
#define __shadow_set_l3e(v, va, value) \
    __rw_entry(v, va, value, SHADOW_ENTRY | SET_ENTRY | PAGING_L3)
#define __shadow_get_l3e(v, va, sl3e) \
    __rw_entry(v, va, sl3e, SHADOW_ENTRY | GET_ENTRY | PAGING_L3)
#define __shadow_set_l2e(v, va, value) \
    __rw_entry(v, va, value, SHADOW_ENTRY | SET_ENTRY | PAGING_L2)
#define __shadow_get_l2e(v, va, sl2e) \
    __rw_entry(v, va, sl2e, SHADOW_ENTRY | GET_ENTRY | PAGING_L2)
#define __shadow_set_l1e(v, va, value) \
    __rw_entry(v, va, value, SHADOW_ENTRY | SET_ENTRY | PAGING_L1)
#define __shadow_get_l1e(v, va, sl1e) \
    __rw_entry(v, va, sl1e, SHADOW_ENTRY | GET_ENTRY | PAGING_L1)

#define __guest_set_l4e(v, va, value) \
    __rw_entry(v, va, value, GUEST_ENTRY | SET_ENTRY | PAGING_L4)
#define __guest_get_l4e(v, va, gl4e) \
    __rw_entry(v, va, gl4e, GUEST_ENTRY | GET_ENTRY | PAGING_L4)
#define __guest_set_l3e(v, va, value) \
    __rw_entry(v, va, value, GUEST_ENTRY | SET_ENTRY | PAGING_L3)
#define __guest_get_l3e(v, va, gl3e) \
    __rw_entry(v, va, gl3e, GUEST_ENTRY | GET_ENTRY | PAGING_L3)

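/*
 * For guest L2 and L1 entries the entry size depends on the guest: 4-byte
 * entries mean a 32-bit (2-level) guest whose page directory is mapped at
 * guest_vtable, while 8-byte entries go through the generic __rw_entry()
 * walker.  Each function is wrapped by a same-named macro below that picks
 * the case from sizeof(*value) and casts the result back to the caller's
 * pointer type.
 */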
static inline void * __guest_set_l2e(
    struct vcpu *v, u64 va, void *value, int size)
{
    switch(size) {
        case 4:
            // 32-bit guest
            {
                l2_pgentry_32_t *l2va;

                l2va = (l2_pgentry_32_t *)v->arch.guest_vtable;
                if (value)
                    l2va[l2_table_offset_32(va)] = *(l2_pgentry_32_t *)value;
                return &l2va[l2_table_offset_32(va)];
            }
        case 8:
            return __rw_entry(v, va, value, GUEST_ENTRY | SET_ENTRY | PAGING_L2);
        default:
            BUG();
            return NULL;
    }
    return NULL;
}

#define __guest_set_l2e(v, va, value) \
    ( __typeof__(value) )__guest_set_l2e(v, (u64)va, value, sizeof(*value))

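/* Read counterpart of __guest_set_l2e(): fetch the guest L2 entry for 'va'. */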
static inline void * __guest_get_l2e(
    struct vcpu *v, u64 va, void *gl2e, int size)
{
    switch(size) {
        case 4:
            // 32-bit guest
            {
                l2_pgentry_32_t *l2va;
                l2va = (l2_pgentry_32_t *)v->arch.guest_vtable;
                if (gl2e)
                    *(l2_pgentry_32_t *)gl2e = l2va[l2_table_offset_32(va)];
                return &l2va[l2_table_offset_32(va)];
            }
        case 8:
            return __rw_entry(v, va, gl2e, GUEST_ENTRY | GET_ENTRY | PAGING_L2);
        default:
            BUG();
            return NULL;
    }
    return NULL;
}

#define __guest_get_l2e(v, va, gl2e) \
    (__typeof__ (gl2e))__guest_get_l2e(v, (u64)va, gl2e, sizeof(*gl2e))

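/*
 * For a 32-bit guest, reach the guest L1 entry by reading the guest L2
 * entry for 'va', translating the L1 table's pfn to an mfn, and writing
 * through the direct mapping; 64-bit guests again use __rw_entry().
 */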
static inline void * __guest_set_l1e(
    struct vcpu *v, u64 va, void *value, int size)
{
    switch(size) {
        case 4:
            // 32-bit guest
            {
                l2_pgentry_32_t gl2e;
                l1_pgentry_32_t *l1va;
                unsigned long l1mfn;

                if (!__guest_get_l2e(v, va, &gl2e))
                    return NULL;
                if (unlikely(!(l2e_get_flags_32(gl2e) & _PAGE_PRESENT)))
                    return NULL;

                l1mfn = get_mfn_from_pfn(l2e_get_pfn(gl2e));

                l1va = (l1_pgentry_32_t *)
                    phys_to_virt(l1mfn << L1_PAGETABLE_SHIFT);
                if (value)
                    l1va[l1_table_offset_32(va)] = *(l1_pgentry_32_t *)value;

                return &l1va[l1_table_offset_32(va)];
            }

        case 8:
            return __rw_entry(v, va, value, GUEST_ENTRY | SET_ENTRY | PAGING_L1);
        default:
            BUG();
            return NULL;
    }
    return NULL;
}

#define __guest_set_l1e(v, va, value) \
    ( __typeof__(value) )__guest_set_l1e(v, (u64)va, value, sizeof(*value))

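/* Read counterpart of __guest_set_l1e(): fetch the guest L1 entry for 'va'. */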
static inline void * __guest_get_l1e(
    struct vcpu *v, u64 va, void *gl1e, int size)
{
    switch(size) {
        case 4:
            // 32-bit guest
            {
                l2_pgentry_32_t gl2e;
                l1_pgentry_32_t *l1va;
                unsigned long l1mfn;

                if (!(__guest_get_l2e(v, va, &gl2e)))
                    return NULL;

                if (unlikely(!(l2e_get_flags_32(gl2e) & _PAGE_PRESENT)))
                    return NULL;

                l1mfn = get_mfn_from_pfn(l2e_get_pfn(gl2e));
                l1va = (l1_pgentry_32_t *) phys_to_virt(
                    l1mfn << L1_PAGETABLE_SHIFT);
                if (gl1e)
                    *(l1_pgentry_32_t *)gl1e = l1va[l1_table_offset_32(va)];

                return &l1va[l1_table_offset_32(va)];
            }
        case 8:
            // 64-bit guest
            return __rw_entry(v, va, gl1e, GUEST_ENTRY | GET_ENTRY | PAGING_L1);
        default:
            BUG();
            return NULL;
    }
    return NULL;
}

#define __guest_get_l1e(v, va, gl1e) \
    ( __typeof__(gl1e) )__guest_get_l1e(v, (u64)va, gl1e, sizeof(*gl1e))

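/*
 * Build the shadow entry 'sle' corresponding to guest entry 'gle', given
 * the mfn 'smfn' of the shadow table (or fl1 shadow) it should point at.
 * A shadow entry is only created when the guest entry is present and a
 * shadow page exists.  A PSE (superpage) guest L2 entry is shadowed by a
 * splintered L1 table, so _PAGE_PSE is dropped and the shadow L1 entries
 * are write-protected when the domain is in log-dirty mode or the guest
 * entry is not yet dirty.  Other entries get _PAGE_RW and _PAGE_ACCESSED
 * added and _PAGE_AVAIL cleared, and the guest entry is marked accessed.
 */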
static inline void entry_general(
    struct domain *d,
    pgentry_64_t *gle_p,
    pgentry_64_t *sle_p,
    unsigned long smfn, u32 level)
{
    pgentry_64_t gle = *gle_p;
    pgentry_64_t sle;

    sle = entry_empty();
    if ( (entry_get_flags(gle) & _PAGE_PRESENT) && (smfn != 0) )
    {
        if ((entry_get_flags(gle) & _PAGE_PSE) && level == PAGING_L2) {
            sle = entry_from_pfn(smfn, entry_get_flags(gle));
            entry_remove_flags(sle, _PAGE_PSE);

            if ( shadow_mode_log_dirty(d) ||
                 !(entry_get_flags(gle) & _PAGE_DIRTY) )
            {
                pgentry_64_t *l1_p;
                int i;

                l1_p = (pgentry_64_t *)map_domain_page(smfn);
                for (i = 0; i < L1_PAGETABLE_ENTRIES; i++)
                    entry_remove_flags(l1_p[i], _PAGE_RW);

                unmap_domain_page(l1_p);
            }
        } else {
            sle = entry_from_pfn(
                smfn,
                (entry_get_flags(gle) | _PAGE_RW | _PAGE_ACCESSED) & ~_PAGE_AVAIL);
            entry_add_flags(gle, _PAGE_ACCESSED);
        }
        // XXX mafetter: Hmm...
        //     Shouldn't the dirty log be checked/updated here?
        //     Actually, it needs to be done in this function's callers.
        //
        *gle_p = gle;
    }

    if ( entry_get_value(sle) || entry_get_value(gle) )
        SH_VVLOG("%s: gpde=%lx, new spde=%lx", __func__,
                 entry_get_value(gle), entry_get_value(sle));

    *sle_p = sle;
}

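/*
 * Look up the shadow page for whatever the guest entry points at -- an
 * fl1 shadow for a PSE L2 entry, otherwise the shadow of the
 * next-lower-level table -- and let entry_general() construct the shadow
 * entry from it.  An smfn of zero (no shadow yet) yields an empty shadow
 * entry.
 */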
static inline void entry_propagate_from_guest(
    struct domain *d, pgentry_64_t *gle_p, pgentry_64_t *sle_p, u32 level)
{
    pgentry_64_t gle = *gle_p;
    unsigned long smfn = 0;

    if ( entry_get_flags(gle) & _PAGE_PRESENT ) {
        if ((entry_get_flags(gle) & _PAGE_PSE) && level == PAGING_L2) {
            smfn = __shadow_status(d, entry_get_value(gle) >> PAGE_SHIFT,
                                   PGT_fl1_shadow);
        } else {
            smfn = __shadow_status(d, entry_get_pfn(gle),
                                   shadow_level_to_type((level - 1)));
        }
    }
    entry_general(d, gle_p, sle_p, smfn, level);
}

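/*
 * Recompute the shadow entry after the guest has modified its own entry,
 * and fix up shadow refcounts whenever the mapping (frame or present bit)
 * changes: take a reference on the shadow page referenced by the new entry
 * and drop the one held by the old entry.  The new shadow entry is then
 * installed in place.
 */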
static inline int
validate_entry_change(
    struct domain *d,
    pgentry_64_t *new_gle_p,
    pgentry_64_t *shadow_le_p,
    u32 level)
{
    pgentry_64_t old_sle, new_sle;
    pgentry_64_t new_gle = *new_gle_p;

    old_sle = *shadow_le_p;
    entry_propagate_from_guest(d, &new_gle, &new_sle, level);

    ESH_LOG("old_sle: %lx, new_gle: %lx, new_sle: %lx\n",
            entry_get_value(old_sle), entry_get_value(new_gle),
            entry_get_value(new_sle));

    if ( ((entry_get_value(old_sle) | entry_get_value(new_sle)) & _PAGE_PRESENT) &&
         entry_has_changed(old_sle, new_sle, _PAGE_PRESENT) )
    {
        perfc_incrc(validate_entry_changes);

        if ( (entry_get_flags(new_sle) & _PAGE_PRESENT) &&
             !get_shadow_ref(entry_get_pfn(new_sle)) )
            BUG();
        if ( entry_get_flags(old_sle) & _PAGE_PRESENT )
            put_shadow_ref(entry_get_pfn(old_sle));
    }

    *shadow_le_p = new_sle;

    return 1;
}

#endif