debuggers.hg

annotate xen/arch/x86/shadow.c @ 3664:8472fafee3cf

bitkeeper revision 1.1159.212.74 (42015ef3sPQp8pjeJAck1wBtTAYL9g)

Interface to typed allocator is now just xmalloc/xmalloc_array/xfree.
_xmalloc/_xmalloc_array are dead (or, at least, non-API).
Signed-off-by: keir.fraser@cl.cam.ac.uk
author kaf24@scramble.cl.cam.ac.uk
date Wed Feb 02 23:14:59 2005 +0000 (2005-02-02)
parents 0ef6e8e6e85d
children 677cb76cff18
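
The typed-allocator interface named in this changeset is used directly in the listing below; a minimal sketch of the pattern, based on the calls in shadow_mode_enable() and __shadow_mode_disable() in this file (error handling trimmed):

    /* Allocate and zero the shadow-status hash table, one entry per bucket. */
    m->shadow_ht = xmalloc_array(struct shadow_status, shadow_ht_buckets);
    if ( m->shadow_ht == NULL )
        return -ENOMEM;
    memset(m->shadow_ht, 0, shadow_ht_buckets * sizeof(struct shadow_status));

    /* ... later, on teardown: xfree() pairs with xmalloc()/xmalloc_array(). */
    xfree(m->shadow_ht);
    m->shadow_ht = NULL;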
rev   line source
djm@1686 1 /* -*- Mode:C++; c-file-style:BSD; c-basic-offset:4; tab-width:4 -*- */
djm@1686 2
djm@1686 3 #include <xen/config.h>
djm@1686 4 #include <xen/types.h>
djm@1686 5 #include <xen/mm.h>
kaf24@1787 6 #include <asm/shadow.h>
djm@1686 7 #include <asm/domain_page.h>
djm@1686 8 #include <asm/page.h>
djm@1686 9 #include <xen/event.h>
djm@1686 10 #include <xen/trace.h>
djm@1686 11
djm@1686 12 /********
djm@1686 13
djm@1686 14 To use these shadow page tables, guests must not rely on the ACCESSED
djm@1686 15 and DIRTY bits on L2 pte's being accurate -- they will typically all be set.
djm@1686 16
djm@1686 17 I doubt this will break anything. (If guests want to use the va_update
djm@1686 18 mechanism they've signed up for this anyhow...)
djm@1686 19
djm@1686 20 There's a per-domain shadow table spin lock which works fine for SMP
djm@1686 21 hosts. We don't have to worry about interrupts as no shadow operations
djm@1686 22 happen in an interrupt context. It's probably not quite ready for SMP
djm@1686 23 guest operation as we have to worry about synchronisation between gpte
djm@1686 24 and spte updates. It's possible that this might only happen in a
djm@1686 25 hypercall context, in which case we'll probably have a per-domain
djm@1686 26 hypercall lock anyhow (at least initially).
djm@1686 27
djm@1686 28 ********/
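
A minimal sketch of the locking discipline described above, as shadow_fault() later in this file uses it (m is the faulting domain's mm_struct; the lock serialises gpte/spte updates):

    shadow_lock(m);
    /* Re-check the guest PTE under the lock, then update guest and shadow PTEs. */
    l1pte_write_fault(m, &gpte, &spte);    /* or l1pte_read_fault() */
    shadow_unlock(m);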
djm@1686 29
kaf24@2673 30 static inline void free_shadow_page(
kaf24@2673 31 struct mm_struct *m, struct pfn_info *page)
djm@1686 32 {
djm@1686 33 m->shadow_page_count--;
djm@1686 34
kaf24@2673 35 switch ( page->u.inuse.type_info & PGT_type_mask )
kaf24@2673 36 {
kaf24@2673 37 case PGT_l1_page_table:
djm@1686 38 perfc_decr(shadow_l1_pages);
kaf24@2673 39 break;
kaf24@2673 40
kaf24@2673 41 case PGT_l2_page_table:
djm@1686 42 perfc_decr(shadow_l2_pages);
kaf24@2673 43 break;
kaf24@2673 44
kaf24@2673 45 default:
kaf24@2673 46 printk("Free shadow weird page type pfn=%08x type=%08x\n",
kaf24@2673 47 page - frame_table, page->u.inuse.type_info);
kaf24@2673 48 break;
kaf24@2673 49 }
kaf24@2673 50
kaf24@1974 51 free_domheap_page(page);
djm@1686 52 }
djm@1686 53
kaf24@2680 54 static void free_shadow_state(struct mm_struct *m)
djm@1686 55 {
kaf24@2673 56 int i, free = 0;
kaf24@2673 57 struct shadow_status *x, *n;
djm@1686 58
kaf24@2673 59 /*
kaf24@2673 60 * WARNING! The shadow page table must not currently be in use!
kaf24@2673 61 * e.g., You are expected to have paused the domain and synchronized CR3.
kaf24@2673 62 */
kaf24@2673 63
kaf24@2673 64 shadow_audit(m, 1);
djm@1686 65
kaf24@2673 66 /* Free each hash chain in turn. */
kaf24@2673 67 for ( i = 0; i < shadow_ht_buckets; i++ )
kaf24@2673 68 {
kaf24@2673 69 /* Skip empty buckets. */
kaf24@2673 70 x = &m->shadow_ht[i];
kaf24@2673 71 if ( x->pfn == 0 )
kaf24@2673 72 continue;
kaf24@2673 73
kaf24@2673 74 /* Free the head page. */
kaf24@2673 75 free_shadow_page(
kaf24@2673 76 m, &frame_table[x->spfn_and_flags & PSH_pfn_mask]);
djm@1686 77
kaf24@2673 78 /* Reinitialise the head node. */
kaf24@2673 79 x->pfn = 0;
kaf24@2673 80 x->spfn_and_flags = 0;
kaf24@2673 81 n = x->next;
kaf24@2673 82 x->next = NULL;
kaf24@2673 83
kaf24@2673 84 free++;
djm@1686 85
kaf24@2673 86 /* Iterate over non-head nodes. */
kaf24@2673 87 for ( x = n; x != NULL; x = n )
kaf24@2673 88 {
kaf24@2673 89 /* Free the shadow page. */
kaf24@2673 90 free_shadow_page(
kaf24@2673 91 m, &frame_table[x->spfn_and_flags & PSH_pfn_mask]);
kaf24@2673 92
kaf24@2673 93 /* Re-initialise the chain node. */
kaf24@2673 94 x->pfn = 0;
kaf24@2673 95 x->spfn_and_flags = 0;
kaf24@2673 96
kaf24@2673 97 /* Add to the free list. */
kaf24@2673 98 n = x->next;
kaf24@2673 99 x->next = m->shadow_ht_free;
kaf24@2673 100 m->shadow_ht_free = x;
kaf24@2673 101
djm@1686 102 free++;
djm@1686 103 }
djm@1686 104
kaf24@2673 105 shadow_audit(m, 0);
djm@1686 106 }
kaf24@2673 107
kaf24@2673 108 SH_LOG("Free shadow table. Freed=%d.", free);
djm@1686 109 }
djm@1686 110
kaf24@2680 111 static inline int clear_shadow_page(
kaf24@2673 112 struct mm_struct *m, struct shadow_status *x)
kaf24@2673 113 {
kaf24@2673 114 unsigned long *p;
kaf24@2673 115 int restart = 0;
kaf24@2673 116 struct pfn_info *spage = &frame_table[x->spfn_and_flags & PSH_pfn_mask];
djm@1686 117
kaf24@2673 118 switch ( spage->u.inuse.type_info & PGT_type_mask )
djm@1686 119 {
kaf24@2673 120 /* We clear L2 pages by zeroing the guest entries. */
kaf24@2673 121 case PGT_l2_page_table:
kaf24@2673 122 p = map_domain_mem((spage - frame_table) << PAGE_SHIFT);
iap10@3328 123 if (m->shadow_mode == SHM_full_32)
iap10@3328 124 memset(p, 0, ENTRIES_PER_L2_PAGETABLE * sizeof(*p));
iap10@3328 125 else
iap10@3328 126 memset(p, 0, DOMAIN_ENTRIES_PER_L2_PAGETABLE * sizeof(*p));
kaf24@2673 127 unmap_domain_mem(p);
kaf24@2673 128 break;
djm@1686 129
kaf24@2673 130 /* We clear L1 pages by freeing them: no benefit from zeroing them. */
kaf24@2673 131 case PGT_l1_page_table:
kaf24@2673 132 delete_shadow_status(m, x->pfn);
kaf24@2673 133 free_shadow_page(m, spage);
kaf24@2673 134 restart = 1; /* We need to go to start of list again. */
kaf24@2673 135 break;
djm@1686 136 }
djm@1686 137
djm@1686 138 return restart;
djm@1686 139 }
djm@1686 140
kaf24@2680 141 static void clear_shadow_state(struct mm_struct *m)
djm@1686 142 {
kaf24@2673 143 int i;
kaf24@2673 144 struct shadow_status *x;
djm@1686 145
kaf24@2673 146 shadow_audit(m, 1);
djm@1686 147
kaf24@2673 148 for ( i = 0; i < shadow_ht_buckets; i++ )
djm@1686 149 {
kaf24@2673 150 retry:
kaf24@2673 151 /* Skip empty buckets. */
kaf24@2673 152 x = &m->shadow_ht[i];
kaf24@2673 153 if ( x->pfn == 0 )
kaf24@2673 154 continue;
kaf24@2673 155
kaf24@2680 156 if ( clear_shadow_page(m, x) )
kaf24@2673 157 goto retry;
kaf24@2673 158
kaf24@2673 159 for ( x = x->next; x != NULL; x = x->next )
kaf24@2680 160 if ( clear_shadow_page(m, x) )
kaf24@2673 161 goto retry;
kaf24@2673 162
kaf24@2673 163 shadow_audit(m, 0);
djm@1686 164 }
kaf24@2673 165
kaf24@2673 166 SH_VLOG("Scan shadow table. l1=%d l2=%d",
kaf24@2673 167 perfc_value(shadow_l1_pages), perfc_value(shadow_l2_pages));
djm@1686 168 }
djm@1686 169
djm@1686 170
djm@1686 171 void shadow_mode_init(void)
djm@1686 172 {
djm@1686 173 }
djm@1686 174
kaf24@2673 175 int shadow_mode_enable(struct domain *p, unsigned int mode)
djm@1686 176 {
cl349@2957 177 struct mm_struct *m = &p->exec_domain[0]->mm;
djm@1686 178
iap10@3650 179 m->shadow_ht = xmalloc_array(struct shadow_status, shadow_ht_buckets);
kaf24@2673 180 if ( m->shadow_ht == NULL )
djm@1686 181 goto nomem;
kaf24@2673 182 memset(m->shadow_ht, 0, shadow_ht_buckets * sizeof(struct shadow_status));
djm@1686 183
djm@1686 184 if ( mode == SHM_logdirty )
djm@1686 185 {
kaf24@2673 186 m->shadow_dirty_bitmap_size = (p->max_pages + 63) & ~63;
djm@1686 187 m->shadow_dirty_bitmap =
kaf24@3664 188 xmalloc_array(unsigned long, m->shadow_dirty_bitmap_size /
kaf24@3664 189 (8 * sizeof(unsigned long)));
kaf24@2673 190 if ( m->shadow_dirty_bitmap == NULL )
djm@1686 191 {
djm@1686 192 m->shadow_dirty_bitmap_size = 0;
djm@1686 193 goto nomem;
djm@1686 194 }
kaf24@2673 195 memset(m->shadow_dirty_bitmap, 0, m->shadow_dirty_bitmap_size/8);
djm@1686 196 }
djm@1686 197
iap10@2569 198 m->shadow_mode = mode;
iap10@2569 199
kaf24@2673 200 __shadow_mk_pagetable(m);
djm@1686 201 return 0;
djm@1686 202
kaf24@2673 203 nomem:
kaf24@2673 204 if ( m->shadow_ht != NULL )
kaf24@2673 205 xfree(m->shadow_ht);
kaf24@2673 206 m->shadow_ht = NULL;
djm@1686 207 return -ENOMEM;
djm@1686 208 }
djm@1686 209
kaf24@1787 210 void __shadow_mode_disable(struct domain *d)
djm@1686 211 {
cl349@2957 212 struct mm_struct *m = &d->exec_domain[0]->mm;
kaf24@2673 213 struct shadow_status *x, *n;
djm@1686 214
kaf24@2680 215 free_shadow_state(m);
djm@1686 216 m->shadow_mode = 0;
djm@1686 217
iap10@2460 218 SH_VLOG("freed tables count=%d l1=%d l2=%d",
kaf24@2673 219 m->shadow_page_count, perfc_value(shadow_l1_pages),
kaf24@2673 220 perfc_value(shadow_l2_pages));
djm@1686 221
kaf24@2673 222 n = m->shadow_ht_extras;
kaf24@2673 223 while ( (x = n) != NULL )
djm@1686 224 {
djm@1686 225 m->shadow_extras_count--;
kaf24@2673 226 n = *((struct shadow_status **)(&x[shadow_ht_extra_size]));
kaf24@2673 227 xfree(x);
djm@1686 228 }
djm@1686 229
kaf24@2673 230 m->shadow_ht_extras = NULL;
kaf24@2673 231 ASSERT(m->shadow_extras_count == 0);
djm@1686 232 SH_LOG("freed extras, now %d", m->shadow_extras_count);
djm@1686 233
kaf24@2673 234 if ( m->shadow_dirty_bitmap != NULL )
djm@1686 235 {
kaf24@2673 236 xfree(m->shadow_dirty_bitmap);
djm@1686 237 m->shadow_dirty_bitmap = 0;
djm@1686 238 m->shadow_dirty_bitmap_size = 0;
djm@1686 239 }
djm@1686 240
kaf24@2673 241 xfree(m->shadow_ht);
kaf24@2673 242 m->shadow_ht = NULL;
djm@1686 243 }
djm@1686 244
kaf24@2673 245 static int shadow_mode_table_op(
kaf24@2673 246 struct domain *d, dom0_shadow_control_t *sc)
djm@1686 247 {
kaf24@2673 248 unsigned int op = sc->op;
cl349@2957 249 struct mm_struct *m = &d->exec_domain[0]->mm;
kaf24@2673 250 int i, rc = 0;
djm@1686 251
kaf24@2680 252 ASSERT(spin_is_locked(&m->shadow_lock));
djm@1686 253
kaf24@2673 254 SH_VLOG("shadow mode table op %08lx %08lx count %d",
kaf24@2673 255 pagetable_val(m->pagetable), pagetable_val(m->shadow_table),
kaf24@2673 256 m->shadow_page_count);
djm@1686 257
kaf24@2673 258 shadow_audit(m, 1);
djm@1686 259
kaf24@2673 260 switch ( op )
djm@1686 261 {
djm@1686 262 case DOM0_SHADOW_CONTROL_OP_FLUSH:
kaf24@2680 263 free_shadow_state(m);
iap10@2597 264
kaf24@2680 265 m->shadow_fault_count = 0;
kaf24@2680 266 m->shadow_dirty_count = 0;
kaf24@2680 267 m->shadow_dirty_net_count = 0;
kaf24@2680 268 m->shadow_dirty_block_count = 0;
iap10@2597 269
djm@1686 270 break;
djm@1686 271
kaf24@2673 272 case DOM0_SHADOW_CONTROL_OP_CLEAN:
kaf24@2680 273 clear_shadow_state(m);
djm@1686 274
kaf24@2680 275 sc->stats.fault_count = m->shadow_fault_count;
kaf24@2680 276 sc->stats.dirty_count = m->shadow_dirty_count;
kaf24@2680 277 sc->stats.dirty_net_count = m->shadow_dirty_net_count;
kaf24@2680 278 sc->stats.dirty_block_count = m->shadow_dirty_block_count;
djm@1686 279
kaf24@2680 280 m->shadow_fault_count = 0;
kaf24@2680 281 m->shadow_dirty_count = 0;
kaf24@2680 282 m->shadow_dirty_net_count = 0;
kaf24@2680 283 m->shadow_dirty_block_count = 0;
kaf24@2673 284
kaf24@2673 285 if ( (d->max_pages > sc->pages) ||
kaf24@2673 286 (sc->dirty_bitmap == NULL) ||
kaf24@2680 287 (m->shadow_dirty_bitmap == NULL) )
kaf24@2673 288 {
kaf24@2673 289 rc = -EINVAL;
kaf24@2680 290 break;
kaf24@2673 291 }
kaf24@2673 292
kaf24@2673 293 sc->pages = d->max_pages;
djm@1686 294
kaf24@2673 295 #define chunk (8*1024) /* Transfer and clear in 1kB chunks for L1 cache. */
kaf24@2673 296 for ( i = 0; i < d->max_pages; i += chunk )
kaf24@2673 297 {
kaf24@2673 298 int bytes = ((((d->max_pages - i) > chunk) ?
kaf24@2673 299 chunk : (d->max_pages - i)) + 7) / 8;
kaf24@2673 300
cwc22@2692 301 if (copy_to_user(
cwc22@2692 302 sc->dirty_bitmap + (i/(8*sizeof(unsigned long))),
cwc22@2692 303 m->shadow_dirty_bitmap +(i/(8*sizeof(unsigned long))),
cwc22@2692 304 bytes))
cwc22@2692 305 {
cwc22@2692 306 /* copy_to_user can fail when copying to guest app memory. */
cwc22@2692 307 /* The app should zero the buffer after mallocing, and pin it. */
cwc22@2692 308 rc = -EINVAL;
cwc22@2692 309 memset(
cwc22@2692 310 m->shadow_dirty_bitmap + (i/(8*sizeof(unsigned long))),
cwc22@2692 311 0, (d->max_pages/8) - (i/(8*sizeof(unsigned long))));
cwc22@2692 312 break;
cwc22@2692 313 }
cwc22@2692 314
kaf24@2673 315 memset(
kaf24@2680 316 m->shadow_dirty_bitmap + (i/(8*sizeof(unsigned long))),
kaf24@2673 317 0, bytes);
kaf24@2673 318 }
kaf24@2673 319
kaf24@2673 320 break;
djm@1686 321
kaf24@2673 322 case DOM0_SHADOW_CONTROL_OP_PEEK:
kaf24@2680 323 sc->stats.fault_count = m->shadow_fault_count;
kaf24@2680 324 sc->stats.dirty_count = m->shadow_dirty_count;
kaf24@2680 325 sc->stats.dirty_net_count = m->shadow_dirty_net_count;
kaf24@2680 326 sc->stats.dirty_block_count = m->shadow_dirty_block_count;
kaf24@2673 327
kaf24@2673 328 if ( (d->max_pages > sc->pages) ||
kaf24@2673 329 (sc->dirty_bitmap == NULL) ||
kaf24@2680 330 (m->shadow_dirty_bitmap == NULL) )
kaf24@2673 331 {
kaf24@2673 332 rc = -EINVAL;
kaf24@2680 333 break;
kaf24@2673 334 }
kaf24@2673 335
kaf24@2673 336 sc->pages = d->max_pages;
cwc22@2692 337 if (copy_to_user(
cwc22@2692 338 sc->dirty_bitmap, m->shadow_dirty_bitmap, (d->max_pages+7)/8))
cwc22@2692 339 {
cwc22@2692 340 rc = -EINVAL;
cwc22@2692 341 break;
cwc22@2692 342 }
djm@1686 343
kaf24@2673 344 break;
djm@1686 345
kaf24@2673 346 default:
kaf24@2680 347 rc = -EINVAL;
kaf24@2680 348 break;
djm@1686 349 }
djm@1686 350
djm@1686 351 SH_VLOG("shadow mode table op : page count %d", m->shadow_page_count);
kaf24@2673 352 shadow_audit(m, 1);
kaf24@2673 353 __shadow_mk_pagetable(m);
djm@1686 354 return rc;
djm@1686 355 }
djm@1686 356
kaf24@1787 357 int shadow_mode_control(struct domain *d, dom0_shadow_control_t *sc)
djm@1686 358 {
kaf24@2680 359 unsigned int op = sc->op;
kaf24@2680 360 int rc = 0;
kaf24@2673 361
cl349@2957 362 if ( unlikely(d == current->domain) )
kaf24@2673 363 {
kaf24@2673 364 DPRINTK("Don't try to do a shadow op on yourself!\n");
kaf24@2673 365 return -EINVAL;
kaf24@2673 366 }
djm@1686 367
iap10@2349 368 domain_pause(d);
iap10@2349 369 synchronise_pagetables(~0UL);
iap10@2331 370
cl349@2957 371 shadow_lock(&d->exec_domain[0]->mm);
djm@1686 372
kaf24@2680 373 switch ( op )
djm@1686 374 {
kaf24@2673 375 case DOM0_SHADOW_CONTROL_OP_OFF:
kaf24@1787 376 shadow_mode_disable(d);
kaf24@2673 377 break;
kaf24@2673 378
kaf24@2673 379 case DOM0_SHADOW_CONTROL_OP_ENABLE_TEST:
kaf24@1787 380 shadow_mode_disable(d);
iap10@2569 381 rc = shadow_mode_enable(d, SHM_test);
kaf24@2673 382 break;
kaf24@2673 383
kaf24@2673 384 case DOM0_SHADOW_CONTROL_OP_ENABLE_LOGDIRTY:
kaf24@1787 385 shadow_mode_disable(d);
iap10@2569 386 rc = shadow_mode_enable(d, SHM_logdirty);
kaf24@2673 387 break;
kaf24@2673 388
kaf24@2673 389 default:
cl349@2957 390 rc = shadow_mode(d->exec_domain[0]) ? shadow_mode_table_op(d, sc) : -EINVAL;
kaf24@2673 391 break;
djm@1686 392 }
djm@1686 393
cl349@2957 394 shadow_unlock(&d->exec_domain[0]->mm);
djm@1686 395
iap10@2349 396 domain_unpause(d);
iap10@2331 397
djm@1686 398 return rc;
djm@1686 399 }
djm@1686 400
kaf24@1787 401 static inline struct pfn_info *alloc_shadow_page(struct mm_struct *m)
djm@1686 402 {
kaf24@2673 403 struct pfn_info *page = alloc_domheap_page(NULL);
kaf24@2673 404
djm@1686 405 m->shadow_page_count++;
iap10@2595 406
kaf24@2673 407 if ( unlikely(page == NULL) )
kaf24@2673 408 {
kaf24@2673 409 printk("Couldn't alloc shadow page! count=%d\n",
kaf24@2673 410 m->shadow_page_count);
kaf24@2673 411 SH_VLOG("Shadow tables l1=%d l2=%d",
kaf24@2673 412 perfc_value(shadow_l1_pages),
kaf24@2673 413 perfc_value(shadow_l2_pages));
kaf24@2673 414 BUG(); /* XXX FIXME: try a shadow flush to free up some memory. */
kaf24@2673 415 }
iap10@2595 416
kaf24@2673 417 return page;
djm@1686 418 }
djm@1686 419
kaf24@2673 420 void unshadow_table(unsigned long gpfn, unsigned int type)
djm@1686 421 {
kaf24@2673 422 unsigned long spfn;
kaf24@2673 423 struct domain *d = frame_table[gpfn].u.inuse.domain;
djm@1686 424
kaf24@2673 425 SH_VLOG("unshadow_table type=%08x gpfn=%08lx", type, gpfn);
djm@1686 426
djm@1686 427 perfc_incrc(unshadow_table_count);
djm@1686 428
kaf24@2673 429 /*
kaf24@2673 430 * This function is the same for all p.t. pages. Even for multi-processor
kaf24@2673 431 * guests there won't be a race here as this CPU was the one that
kaf24@2673 432 * cmpxchg'ed the page to invalid.
kaf24@2673 433 */
cl349@2957 434 spfn = __shadow_status(&d->exec_domain[0]->mm, gpfn) & PSH_pfn_mask;
cl349@2957 435 delete_shadow_status(&d->exec_domain[0]->mm, gpfn);
cl349@2957 436 free_shadow_page(&d->exec_domain[0]->mm, &frame_table[spfn]);
djm@1686 437 }
djm@1686 438
iap10@3328 439 #ifdef CONFIG_VMX
iap10@3328 440 void vmx_shadow_clear_state(struct mm_struct *m)
iap10@3328 441 {
iap10@3328 442 SH_VVLOG("vmx_shadow_clear_state:");
iap10@3328 443 clear_shadow_state(m);
iap10@3328 444 }
iap10@3328 445 #endif
iap10@3328 446
iap10@3328 447
djm@1686 448 unsigned long shadow_l2_table(
kaf24@2673 449 struct mm_struct *m, unsigned long gpfn)
djm@1686 450 {
djm@1686 451 struct pfn_info *spfn_info;
kaf24@2673 452 unsigned long spfn;
kaf24@3333 453 l2_pgentry_t *spl2e = NULL;
iap10@3328 454 unsigned long guest_gpfn;
iap10@3328 455
iap10@3328 456 __get_machine_to_phys(m, guest_gpfn, gpfn);
djm@1686 457
kaf24@2673 458 SH_VVLOG("shadow_l2_table( %08lx )", gpfn);
djm@1686 459
djm@1686 460 perfc_incrc(shadow_l2_table_count);
djm@1686 461
kaf24@2691 462 if ( (spfn_info = alloc_shadow_page(m)) == NULL )
kaf24@2673 463 BUG(); /* XXX Deal gracefully with failure. */
djm@1686 464
kaf24@1970 465 spfn_info->u.inuse.type_info = PGT_l2_page_table;
djm@1686 466 perfc_incr(shadow_l2_pages);
djm@1686 467
kaf24@2673 468 spfn = spfn_info - frame_table;
iap10@3328 469 /* Mark pfn as being shadowed; update field to point at shadow. */
iap10@3328 470 set_shadow_status(m, guest_gpfn, spfn | PSH_shadowed);
djm@1686 471
djm@1686 472 #ifdef __i386__
kaf24@2673 473 /* Install hypervisor and 2x linear p.t. mappings. */
kaf24@3333 474 if ( m->shadow_mode == SHM_full_32 )
kaf24@3333 475 {
iap10@3328 476 vmx_update_shadow_state(m, gpfn, spfn);
kaf24@3333 477 }
kaf24@3333 478 else
kaf24@3333 479 {
iap10@3328 480 spl2e = (l2_pgentry_t *)map_domain_mem(spfn << PAGE_SHIFT);
iap10@3328 481 /*
kaf24@3333 482 * We could proactively fill in PDEs for pages that are already
kaf24@3333 483 * shadowed. However, we tried it and it didn't help performance.
kaf24@3333 484 * This is simpler.
iap10@3328 485 */
kaf24@3333 486 memset(spl2e, 0, DOMAIN_ENTRIES_PER_L2_PAGETABLE*sizeof(l2_pgentry_t));
iap10@3328 487
iap10@3328 488 /* Install hypervisor and 2x linear p.t. mappings. */
iap10@3328 489 memcpy(&spl2e[DOMAIN_ENTRIES_PER_L2_PAGETABLE],
iap10@3328 490 &idle_pg_table[DOMAIN_ENTRIES_PER_L2_PAGETABLE],
iap10@3328 491 HYPERVISOR_ENTRIES_PER_L2_PAGETABLE * sizeof(l2_pgentry_t));
iap10@3328 492 spl2e[LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT] =
iap10@3328 493 mk_l2_pgentry((gpfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
iap10@3328 494 spl2e[SH_LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT] =
iap10@3328 495 mk_l2_pgentry((spfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
iap10@3328 496 spl2e[PERDOMAIN_VIRT_START >> L2_PAGETABLE_SHIFT] =
iap10@3328 497 mk_l2_pgentry(__pa(frame_table[gpfn].u.inuse.domain->mm_perdomain_pt) |
iap10@3328 498 __PAGE_HYPERVISOR);
iap10@3328 499 }
djm@1686 500 #endif
djm@1686 501
kaf24@3333 502 if ( m->shadow_mode != SHM_full_32 )
iap10@3328 503 unmap_domain_mem(spl2e);
djm@1686 504
kaf24@2673 505 SH_VLOG("shadow_l2_table( %08lx -> %08lx)", gpfn, spfn);
djm@1686 506 return spfn;
djm@1686 507 }
djm@1686 508
kaf24@2673 509 static void shadow_map_l1_into_current_l2(unsigned long va)
kaf24@2673 510 {
kaf24@2673 511 struct mm_struct *m = &current->mm;
iap10@3328 512 unsigned long *gpl1e, *spl1e, gpl2e, spl2e, gl1pfn, sl1pfn=0, sl1ss;
kaf24@2673 513 struct pfn_info *sl1pfn_info;
kaf24@2673 514 int i;
djm@1686 515
iap10@3328 516 __guest_get_pl2e(m, va, &gpl2e);
kaf24@2673 517
iap10@3328 518 gl1pfn = gpl2e >> PAGE_SHIFT;
kaf24@2673 519
kaf24@2673 520 sl1ss = __shadow_status(m, gl1pfn);
kaf24@2673 521 if ( !(sl1ss & PSH_shadowed) )
kaf24@2673 522 {
kaf24@2673 523 /* This L1 is NOT already shadowed so we need to shadow it. */
kaf24@2673 524 SH_VVLOG("4a: l1 not shadowed ( %08lx )", gl1pfn);
kaf24@2673 525
kaf24@2673 526 sl1pfn_info = alloc_shadow_page(m);
kaf24@2673 527 sl1pfn_info->u.inuse.type_info = PGT_l1_page_table;
kaf24@2673 528
kaf24@2673 529 sl1pfn = sl1pfn_info - frame_table;
kaf24@2673 530
kaf24@2673 531 perfc_incrc(shadow_l1_table_count);
kaf24@2673 532 perfc_incr(shadow_l1_pages);
kaf24@2673 533
kaf24@2673 534 set_shadow_status(m, gl1pfn, PSH_shadowed | sl1pfn);
kaf24@2673 535
iap10@3328 536 l2pde_general(m, &gpl2e, &spl2e, sl1pfn);
kaf24@2673 537
iap10@3328 538 __guest_set_pl2e(m, va, gpl2e);
iap10@3328 539 __shadow_set_pl2e(m, va, spl2e);
kaf24@2673 540
kaf24@2673 541 gpl1e = (unsigned long *) &(linear_pg_table[
iap10@3432 542 (va>>L1_PAGETABLE_SHIFT) & ~(ENTRIES_PER_L1_PAGETABLE-1)]);
kaf24@2673 543
kaf24@3343 544 spl1e = (unsigned long *) &(shadow_linear_pg_table[
iap10@3432 545 (va>>L1_PAGETABLE_SHIFT) & ~(ENTRIES_PER_L1_PAGETABLE-1)]);
kaf24@2673 546
kaf24@2673 547 for ( i = 0; i < ENTRIES_PER_L1_PAGETABLE; i++ )
kaf24@2673 548 l1pte_propagate_from_guest(m, &gpl1e[i], &spl1e[i]);
kaf24@2673 549 }
kaf24@2673 550 else
kaf24@2673 551 {
kaf24@2673 552 /* This L1 is shadowed already, but the L2 entry is missing. */
kaf24@2673 553 SH_VVLOG("4b: was shadowed, l2 missing ( %08lx )", gl1pfn);
kaf24@2673 554
kaf24@2673 555 sl1pfn = sl1ss & PSH_pfn_mask;
iap10@3328 556 l2pde_general(m, &gpl2e, &spl2e, sl1pfn);
iap10@3328 557 __guest_set_pl2e(m, va, gpl2e);
iap10@3328 558 __shadow_set_pl2e(m, va, spl2e);
kaf24@2673 559 }
kaf24@2673 560 }
kaf24@2673 561
iap10@3328 562 #ifdef CONFIG_VMX
iap10@3328 563 void vmx_shadow_invlpg(struct mm_struct *m, unsigned long va)
iap10@3328 564 {
iap10@3328 565 unsigned long gpte, spte, host_pfn;
iap10@3328 566
iap10@3328 567 if (__put_user(0L, (unsigned long *)
iap10@3328 568 &shadow_linear_pg_table[va >> PAGE_SHIFT])) {
iap10@3328 569 vmx_shadow_clear_state(m);
iap10@3328 570 return;
iap10@3328 571 }
iap10@3328 572
iap10@3328 573 if (__get_user(gpte, (unsigned long *)
iap10@3328 574 &linear_pg_table[va >> PAGE_SHIFT])) {
iap10@3328 575 return;
iap10@3328 576 }
iap10@3328 577
iap10@3328 578 host_pfn = phys_to_machine_mapping[gpte >> PAGE_SHIFT];
iap10@3328 579 spte = (host_pfn << PAGE_SHIFT) | (gpte & ~PAGE_MASK);
iap10@3328 580
iap10@3328 581 if (__put_user(spte, (unsigned long *)
iap10@3328 582 &shadow_linear_pg_table[va >> PAGE_SHIFT])) {
iap10@3328 583 return;
iap10@3328 584 }
iap10@3328 585 }
iap10@3328 586 #endif
iap10@3328 587
kaf24@2673 588 int shadow_fault(unsigned long va, long error_code)
djm@1686 589 {
djm@1686 590 unsigned long gpte, spte;
djm@1686 591 struct mm_struct *m = &current->mm;
djm@1686 592
djm@1686 593 SH_VVLOG("shadow_fault( va=%08lx, code=%ld )", va, error_code );
djm@1686 594
kaf24@2673 595 check_pagetable(m, current->mm.pagetable, "pre-sf");
djm@1686 596
kaf24@2673 597 /*
kaf24@2673 598 * STEP 1. A fast-reject set of checks with no locking.
kaf24@2673 599 */
kaf24@2673 600
kaf24@2673 601 if ( unlikely(__get_user(gpte, (unsigned long *)
kaf24@2673 602 &linear_pg_table[va >> PAGE_SHIFT])) )
djm@1686 603 {
djm@1686 604 SH_VVLOG("shadow_fault - EXIT: read gpte faulted" );
kaf24@2673 605 return 0;
djm@1686 606 }
djm@1686 607
kaf24@2673 608 if ( !(gpte & _PAGE_PRESENT) )
djm@1686 609 {
djm@1686 610 SH_VVLOG("shadow_fault - EXIT: gpte not present (%lx)",gpte );
djm@1686 611 return 0;
djm@1686 612 }
djm@1686 613
kaf24@2673 614 if ( (error_code & 2) && !(gpte & _PAGE_RW) )
kaf24@2673 615 {
kaf24@2673 616 /* Write fault on a read-only mapping. */
kaf24@2673 617 return 0;
kaf24@2673 618 }
kaf24@2673 619
kaf24@2673 620 /*
kaf24@2673 621 * STEP 2. Take the shadow lock and re-check the guest PTE.
kaf24@2673 622 */
djm@1686 623
kaf24@2416 624 shadow_lock(m);
kaf24@2673 625
kaf24@2673 626 if ( unlikely(__get_user(gpte, (unsigned long *)
kaf24@2673 627 &linear_pg_table[va >> PAGE_SHIFT])) )
djm@1686 628 {
djm@1686 629 SH_VVLOG("shadow_fault - EXIT: read gpte faulted" );
kaf24@2416 630 shadow_unlock(m);
kaf24@2673 631 return 0;
djm@1686 632 }
djm@1686 633
djm@1686 634 if ( unlikely(!(gpte & _PAGE_PRESENT)) )
djm@1686 635 {
djm@1686 636 SH_VVLOG("shadow_fault - EXIT: gpte not present (%lx)",gpte );
kaf24@2416 637 shadow_unlock(m);
kaf24@2673 638 return 0;
djm@1686 639 }
djm@1686 640
kaf24@2673 641 /* Write fault? */
kaf24@2673 642 if ( error_code & 2 )
kaf24@2673 643 {
kaf24@2673 644 if ( unlikely(!(gpte & _PAGE_RW)) )
djm@1686 645 {
kaf24@2673 646 /* Write fault on a read-only mapping. */
kaf24@2673 647 SH_VVLOG("shadow_fault - EXIT: wr fault on RO page (%lx)", gpte);
kaf24@2673 648 shadow_unlock(m);
kaf24@2673 649 return 0;
djm@1686 650 }
kaf24@2673 651
kaf24@2673 652 l1pte_write_fault(m, &gpte, &spte);
djm@1686 653 }
djm@1686 654 else
djm@1686 655 {
kaf24@2673 656 l1pte_read_fault(m, &gpte, &spte);
djm@1686 657 }
djm@1686 658
kaf24@2673 659 /*
kaf24@2673 660 * STEP 3. Write the modified shadow PTE and guest PTE back to the tables.
kaf24@2673 661 */
djm@1686 662
kaf24@2673 663 /* XXX Watch out for read-only L2 entries! (not used in Linux). */
kaf24@2673 664 if ( unlikely(__put_user(gpte, (unsigned long *)
kaf24@2673 665 &linear_pg_table[va >> PAGE_SHIFT])) )
kaf24@2673 666 domain_crash();
djm@1686 667
kaf24@2673 668 /*
kaf24@2673 669 * Update of shadow PTE can fail because the L1 p.t. is not shadowed,
kaf24@2673 670 * or because the shadow isn't linked into this shadow L2 p.t.
kaf24@2673 671 */
kaf24@2673 672 if ( unlikely(__put_user(spte, (unsigned long *)
kaf24@2673 673 &shadow_linear_pg_table[va >> PAGE_SHIFT])) )
kaf24@2673 674 {
kaf24@2673 675 SH_VVLOG("3: not shadowed/mapped gpte=%08lx spte=%08lx", gpte, spte);
kaf24@2673 676 shadow_map_l1_into_current_l2(va);
kaf24@2673 677 shadow_linear_pg_table[va >> PAGE_SHIFT] = mk_l1_pgentry(spte);
kaf24@2673 678 }
djm@1686 679
djm@1686 680 perfc_incrc(shadow_fixup_count);
kaf24@2673 681 m->shadow_fault_count++;
djm@1686 682
kaf24@2416 683 shadow_unlock(m);
djm@1686 684
kaf24@2673 685 check_pagetable(m, current->mm.pagetable, "post-sf");
kaf24@3090 686 return EXCRET_fault_fixed;
djm@1686 687 }
djm@1686 688
djm@1686 689
kaf24@2673 690 void shadow_l1_normal_pt_update(
kaf24@2673 691 unsigned long pa, unsigned long gpte,
kaf24@2673 692 unsigned long *prev_spfn_ptr,
kaf24@2673 693 l1_pgentry_t **prev_spl1e_ptr)
djm@1686 694 {
kaf24@2673 695 unsigned long spfn, spte, prev_spfn = *prev_spfn_ptr;
kaf24@2673 696 l1_pgentry_t *spl1e, *prev_spl1e = *prev_spl1e_ptr;
djm@1686 697
kaf24@2673 698 /* N.B. To get here, we know the l1 page *must* be shadowed. */
kaf24@2673 699 SH_VVLOG("shadow_l1_normal_pt_update pa=%08lx, gpte=%08lx, "
kaf24@2673 700 "prev_spfn=%08lx, prev_spl1e=%p\n",
kaf24@2673 701 pa, gpte, prev_spfn, prev_spl1e);
djm@1686 702
kaf24@2673 703 spfn = __shadow_status(&current->mm, pa >> PAGE_SHIFT) & PSH_pfn_mask;
djm@1686 704
djm@1686 705 if ( spfn == prev_spfn )
djm@1686 706 {
djm@1686 707 spl1e = prev_spl1e;
djm@1686 708 }
djm@1686 709 else
djm@1686 710 {
kaf24@2673 711 if ( prev_spl1e != NULL )
kaf24@2673 712 unmap_domain_mem( prev_spl1e );
kaf24@2673 713 spl1e = (l1_pgentry_t *)map_domain_mem(spfn << PAGE_SHIFT);
djm@1686 714 *prev_spfn_ptr = spfn;
djm@1686 715 *prev_spl1e_ptr = spl1e;
djm@1686 716 }
djm@1686 717
kaf24@2673 718 l1pte_propagate_from_guest(&current->mm, &gpte, &spte);
kaf24@2673 719 spl1e[(pa & ~PAGE_MASK) / sizeof(l1_pgentry_t)] = mk_l1_pgentry(spte);
djm@1686 720 }
djm@1686 721
kaf24@2673 722 void shadow_l2_normal_pt_update(unsigned long pa, unsigned long gpte)
djm@1686 723 {
kaf24@2673 724 unsigned long spfn, spte;
kaf24@2673 725 l2_pgentry_t *spl2e;
kaf24@2673 726 unsigned long s_sh;
djm@1686 727
kaf24@2673 728 /* N.B. To get here, we know the l2 page *must* be shadowed. */
djm@1686 729 SH_VVLOG("shadow_l2_normal_pt_update pa=%08lx, gpte=%08lx",pa,gpte);
djm@1686 730
kaf24@2673 731 spfn = __shadow_status(&current->mm, pa >> PAGE_SHIFT) & PSH_pfn_mask;
kaf24@2673 732
kaf24@2673 733 s_sh = (gpte & _PAGE_PRESENT) ?
kaf24@2673 734 __shadow_status(&current->mm, gpte >> PAGE_SHIFT) : 0;
djm@1686 735
kaf24@2673 736 /* XXXX Should mark guest pte as DIRTY and ACCESSED too! */
kaf24@2673 737 l2pde_general(&current->mm, &gpte, &spte, s_sh);
kaf24@2673 738 spl2e = (l2_pgentry_t *)map_domain_mem(spfn << PAGE_SHIFT);
kaf24@2673 739 spl2e[(pa & ~PAGE_MASK) / sizeof(l2_pgentry_t)] = mk_l2_pgentry(spte);
kaf24@2673 740 unmap_domain_mem(spl2e);
kaf24@2673 741 }
djm@1686 742
djm@1686 743
djm@1686 744
djm@1686 745
kaf24@2673 746 /************************************************************************/
kaf24@2673 747 /************************************************************************/
kaf24@2673 748 /************************************************************************/
djm@1686 749
djm@1686 750 #if SHADOW_DEBUG
djm@1686 751
djm@1686 752 static int sh_l2_present;
djm@1686 753 static int sh_l1_present;
djm@1686 754 char * sh_check_name;
djm@1686 755
kaf24@2673 756 #define FAIL(_f, _a...) \
kaf24@2673 757 do { \
kaf24@2673 758 printk("XXX %s-FAIL (%d,%d)" _f " g=%08lx s=%08lx\n", \
kaf24@2673 759 sh_check_name, level, i, ## _a , gpte, spte); \
kaf24@2673 760 BUG(); \
kaf24@2673 761 } while ( 0 )
djm@1686 762
kaf24@2673 763 static int check_pte(
kaf24@2673 764 struct mm_struct *m, unsigned long gpte, unsigned long spte,
kaf24@2673 765 int level, int i)
djm@1686 766 {
djm@1686 767 unsigned long mask, gpfn, spfn;
iap10@3328 768 #ifdef CONFIG_VMX
iap10@3328 769 unsigned long guest_gpfn;
iap10@3328 770 #endif
djm@1686 771
kaf24@2673 772 if ( (spte == 0) || (spte == 0xdeadface) || (spte == 0x00000E00) )
kaf24@2673 773 return 1; /* always safe */
djm@1686 774
djm@1686 775 if ( !(spte & _PAGE_PRESENT) )
djm@1686 776 FAIL("Non zero not present spte");
djm@1686 777
kaf24@2673 778 if ( level == 2 ) sh_l2_present++;
kaf24@2673 779 if ( level == 1 ) sh_l1_present++;
djm@1686 780
djm@1686 781 if ( !(gpte & _PAGE_PRESENT) )
djm@1686 782 FAIL("Guest not present yet shadow is");
djm@1686 783
djm@1686 784 mask = ~(_PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_RW|0xFFFFF000);
djm@1686 785
kaf24@2673 786 if ( (spte & mask) != (gpte & mask) )
djm@1686 787 FAIL("Corrupt?");
djm@1686 788
djm@1686 789 if ( (spte & _PAGE_DIRTY ) && !(gpte & _PAGE_DIRTY) )
djm@1686 790 FAIL("Dirty coherence");
djm@1686 791
djm@1686 792 if ( (spte & _PAGE_ACCESSED ) && !(gpte & _PAGE_ACCESSED) )
djm@1686 793 FAIL("Accessed coherence");
djm@1686 794
djm@1686 795 if ( (spte & _PAGE_RW ) && !(gpte & _PAGE_RW) )
djm@1686 796 FAIL("RW coherence");
djm@1686 797
kaf24@2673 798 if ( (spte & _PAGE_RW ) && !((gpte & _PAGE_RW) && (gpte & _PAGE_DIRTY)) )
djm@1686 799 FAIL("RW2 coherence");
djm@1686 800
kaf24@2673 801 spfn = spte >> PAGE_SHIFT;
kaf24@2673 802 gpfn = gpte >> PAGE_SHIFT;
djm@1686 803
djm@1686 804 if ( gpfn == spfn )
djm@1686 805 {
djm@1686 806 if ( level > 1 )
kaf24@2673 807 FAIL("Linear map ???"); /* XXX this will fail on BSD */
djm@1686 808 }
djm@1686 809 else
djm@1686 810 {
djm@1686 811 if ( level < 2 )
djm@1686 812 FAIL("Shadow in L1 entry?");
djm@1686 813
iap10@3328 814 if (m->shadow_mode == SHM_full_32) {
iap10@3328 815
iap10@3328 816 guest_gpfn = phys_to_machine_mapping[gpfn];
iap10@3328 817
iap10@3328 818 if ( __shadow_status(m, guest_gpfn) != (PSH_shadowed | spfn) )
iap10@3328 819 FAIL("spfn problem g.sf=%08lx",
iap10@3328 820 __shadow_status(m, guest_gpfn) );
iap10@3328 821
iap10@3328 822 } else {
iap10@3328 823 if ( __shadow_status(m, gpfn) != (PSH_shadowed | spfn) )
iap10@3328 824 FAIL("spfn problem g.sf=%08lx",
iap10@3328 825 __shadow_status(m, gpfn) );
iap10@3328 826 }
iap10@3328 827
djm@1686 828 }
djm@1686 829
djm@1686 830 return 1;
djm@1686 831 }
djm@1686 832
djm@1686 833
kaf24@2673 834 static int check_l1_table(
kaf24@2673 835 struct mm_struct *m, unsigned long va,
kaf24@2673 836 unsigned long g2, unsigned long s2)
djm@1686 837 {
kaf24@2673 838 int i;
djm@1686 839 unsigned long *gpl1e, *spl1e;
djm@1686 840
kaf24@2673 841 gpl1e = map_domain_mem(g2 << PAGE_SHIFT);
kaf24@2673 842 spl1e = map_domain_mem(s2 << PAGE_SHIFT);
djm@1686 843
kaf24@2673 844 for ( i = 0; i < ENTRIES_PER_L1_PAGETABLE; i++ )
kaf24@2673 845 check_pte(m, gpl1e[i], spl1e[i], 1, i);
djm@1686 846
kaf24@2673 847 unmap_domain_mem(spl1e);
kaf24@2673 848 unmap_domain_mem(gpl1e);
djm@1686 849
djm@1686 850 return 1;
djm@1686 851 }
djm@1686 852
kaf24@2673 853 #define FAILPT(_f, _a...) \
kaf24@2673 854 do { \
kaf24@2673 855 printk("XXX FAIL %s-PT" _f "\n", s, ## _a ); \
kaf24@2673 856 BUG(); \
kaf24@2673 857 } while ( 0 )
djm@1686 858
kaf24@2673 859 int check_pagetable(struct mm_struct *m, pagetable_t pt, char *s)
djm@1686 860 {
djm@1686 861 unsigned long gptbase = pagetable_val(pt);
djm@1686 862 unsigned long gpfn, spfn;
kaf24@2673 863 int i;
djm@1686 864 l2_pgentry_t *gpl2e, *spl2e;
iap10@3328 865 unsigned long host_gpfn = 0;
djm@1686 866
djm@1686 867 sh_check_name = s;
djm@1686 868
kaf24@2673 869 SH_VVLOG("%s-PT Audit", s);
djm@1686 870
djm@1686 871 sh_l2_present = sh_l1_present = 0;
djm@1686 872
kaf24@2673 873 gpfn = gptbase >> PAGE_SHIFT;
djm@1686 874
iap10@3328 875 __get_phys_to_machine(m, host_gpfn, gpfn);
iap10@3328 876
iap10@3328 877 if ( ! (__shadow_status(m, gpfn) & PSH_shadowed) )
djm@1686 878 {
djm@1686 879 printk("%s-PT %08lx not shadowed\n", s, gptbase);
iap10@3328 880
iap10@3328 881 if( __shadow_status(m, gpfn) != 0 ) BUG();
iap10@3328 882 return 0;
iap10@3328 883 }
djm@1686 884
kaf24@2673 885 spfn = __shadow_status(m, gpfn) & PSH_pfn_mask;
djm@1686 886
iap10@3328 887 if ( __shadow_status(m, gpfn) != (PSH_shadowed | spfn) )
iap10@3328 888 FAILPT("ptbase shadow inconsistent1");
djm@1686 889
iap10@3328 890 if (m->shadow_mode == SHM_full_32)
iap10@3328 891 {
iap10@3328 892 host_gpfn = phys_to_machine_mapping[gpfn];
iap10@3328 893 gpl2e = (l2_pgentry_t *) map_domain_mem( host_gpfn << PAGE_SHIFT );
iap10@3328 894
iap10@3328 895 } else
iap10@3328 896 gpl2e = (l2_pgentry_t *) map_domain_mem( gpfn << PAGE_SHIFT );
iap10@3328 897
djm@1686 898 spl2e = (l2_pgentry_t *) map_domain_mem( spfn << PAGE_SHIFT );
djm@1686 899
kaf24@2673 900 if ( memcmp(&spl2e[DOMAIN_ENTRIES_PER_L2_PAGETABLE],
kaf24@2673 901 &gpl2e[DOMAIN_ENTRIES_PER_L2_PAGETABLE],
kaf24@2673 902 ((SH_LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT) -
kaf24@2673 903 DOMAIN_ENTRIES_PER_L2_PAGETABLE) * sizeof(l2_pgentry_t)) )
djm@1686 904 {
kaf24@2673 905 for ( i = DOMAIN_ENTRIES_PER_L2_PAGETABLE;
kaf24@2673 906 i < (SH_LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT);
kaf24@2673 907 i++ )
djm@1686 908 printk("+++ (%d) %08lx %08lx\n",i,
kaf24@2673 909 l2_pgentry_val(gpl2e[i]), l2_pgentry_val(spl2e[i]));
djm@1686 910 FAILPT("hypervisor entries inconsistent");
djm@1686 911 }
djm@1686 912
djm@1686 913 if ( (l2_pgentry_val(spl2e[LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT]) !=
djm@1686 914 l2_pgentry_val(gpl2e[LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT])) )
djm@1686 915 FAILPT("hypervisor linear map inconsistent");
djm@1686 916
kaf24@2673 917 if ( (l2_pgentry_val(spl2e[SH_LINEAR_PT_VIRT_START >>
kaf24@2673 918 L2_PAGETABLE_SHIFT]) !=
djm@1686 919 ((spfn << PAGE_SHIFT) | __PAGE_HYPERVISOR)) )
djm@1686 920 FAILPT("hypervisor shadow linear map inconsistent %08lx %08lx",
kaf24@2673 921 l2_pgentry_val(spl2e[SH_LINEAR_PT_VIRT_START >>
kaf24@2673 922 L2_PAGETABLE_SHIFT]),
kaf24@2673 923 (spfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
djm@1686 924
iap10@3328 925 if (m->shadow_mode != SHM_full_32) {
iap10@3328 926 if ( (l2_pgentry_val(spl2e[PERDOMAIN_VIRT_START >> L2_PAGETABLE_SHIFT]) !=
iap10@3328 927 ((__pa(frame_table[gpfn].u.inuse.domain->mm_perdomain_pt) |
kaf24@2673 928 __PAGE_HYPERVISOR))) )
iap10@3328 929 FAILPT("hypervisor per-domain map inconsistent");
iap10@3328 930 }
djm@1686 931
kaf24@2673 932 /* Check the whole L2. */
djm@1686 933 for ( i = 0; i < DOMAIN_ENTRIES_PER_L2_PAGETABLE; i++ )
kaf24@2673 934 check_pte(m, l2_pgentry_val(gpl2e[i]), l2_pgentry_val(spl2e[i]), 2, i);
djm@1686 935
kaf24@2673 936 /* Go back and recurse. */
djm@1686 937 for ( i = 0; i < DOMAIN_ENTRIES_PER_L2_PAGETABLE; i++ )
djm@1686 938 {
kaf24@2673 939 if ( l2_pgentry_val(spl2e[i]) != 0 )
kaf24@2673 940 check_l1_table(
kaf24@2673 941 m, i << L2_PAGETABLE_SHIFT,
kaf24@2673 942 l2_pgentry_val(gpl2e[i]) >> PAGE_SHIFT,
kaf24@2673 943 l2_pgentry_val(spl2e[i]) >> PAGE_SHIFT);
djm@1686 944 }
djm@1686 945
kaf24@2673 946 unmap_domain_mem(spl2e);
kaf24@2673 947 unmap_domain_mem(gpl2e);
djm@1686 948
djm@1686 949 SH_VVLOG("PT verified : l2_present = %d, l1_present = %d\n",
kaf24@2673 950 sh_l2_present, sh_l1_present);
djm@1686 951
djm@1686 952 return 1;
djm@1686 953 }
djm@1686 954
djm@1686 955 #endif