
annotate xen/arch/x86/shadow.c @ 3658:0ef6e8e6e85d

bitkeeper revision 1.1159.212.71 (4200f0afX_JumfbEHQex6TdFENULMQ)

Merge labyrinth.cl.cam.ac.uk:/auto/groups/xeno-xenod/BK/xen-unstable.bk
into labyrinth.cl.cam.ac.uk:/auto/groups/xeno/users/iap10/xeno-clone/xen-unstable.bk
author iap10@labyrinth.cl.cam.ac.uk
date Wed Feb 02 15:24:31 2005 +0000 (2005-02-02)
parents a4621fab44b4 10a0f6b0a996
children 8472fafee3cf
rev   line source
djm@1686 1 /* -*- Mode:C++; c-file-style:BSD; c-basic-offset:4; tab-width:4 -*- */
djm@1686 2
djm@1686 3 #include <xen/config.h>
djm@1686 4 #include <xen/types.h>
djm@1686 5 #include <xen/mm.h>
kaf24@1787 6 #include <asm/shadow.h>
djm@1686 7 #include <asm/domain_page.h>
djm@1686 8 #include <asm/page.h>
djm@1686 9 #include <xen/event.h>
djm@1686 10 #include <xen/trace.h>
djm@1686 11
djm@1686 12 /********
djm@1686 13
djm@1686 14 To use these shadow page tables, guests must not rely on the ACCESSED
djm@1686 15 and DIRTY bits on L2 PTEs being accurate -- they will typically all be set.
djm@1686 16
djm@1686 17 I doubt this will break anything. (If guests want to use the va_update
djm@1686 18 mechanism they've signed up for this anyhow...)
djm@1686 19
djm@1686 20 There's a per-domain shadow table spin lock which works fine for SMP
djm@1686 21 hosts. We don't have to worry about interrupts as no shadow operations
djm@1686 22 happen in an interrupt context. It's probably not quite ready for SMP
djm@1686 23 guest operation as we have to worry about synchronisation between gpte
djm@1686 24 and spte updates. It's possible that this might only happen in a
djm@1686 25 hypercall context, in which case we'll probably have a per-domain
djm@1686 26 hypercall lock anyhow (at least initially).
djm@1686 27
djm@1686 28 ********/
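
The locking rule described above can be made concrete with a short sketch. This is an editorial illustration, not part of the original file; it assumes only the shadow_lock()/shadow_unlock() helpers that the code below already uses, and the function name example_shadow_update is hypothetical.

/* Illustrative only: every shadow-table update is bracketed by the
 * per-domain shadow lock, and the lock is never taken from interrupt
 * context, so plain spinlock semantics are sufficient. */
static void example_shadow_update(struct mm_struct *m)
{
    shadow_lock(m);
    /* ... inspect or modify m->shadow_ht / shadow page tables here ... */
    shadow_unlock(m);
}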
djm@1686 29
kaf24@2673 30 static inline void free_shadow_page(
kaf24@2673 31 struct mm_struct *m, struct pfn_info *page)
djm@1686 32 {
djm@1686 33 m->shadow_page_count--;
djm@1686 34
kaf24@2673 35 switch ( page->u.inuse.type_info & PGT_type_mask )
kaf24@2673 36 {
kaf24@2673 37 case PGT_l1_page_table:
djm@1686 38 perfc_decr(shadow_l1_pages);
kaf24@2673 39 break;
kaf24@2673 40
kaf24@2673 41 case PGT_l2_page_table:
djm@1686 42 perfc_decr(shadow_l2_pages);
kaf24@2673 43 break;
kaf24@2673 44
kaf24@2673 45 default:
kaf24@2673 46 printk("Free shadow weird page type pfn=%08x type=%08x\n",
kaf24@2673 47 frame_table-page, page->u.inuse.type_info);
kaf24@2673 48 break;
kaf24@2673 49 }
kaf24@2673 50
kaf24@1974 51 free_domheap_page(page);
djm@1686 52 }
djm@1686 53
kaf24@2680 54 static void free_shadow_state(struct mm_struct *m)
djm@1686 55 {
kaf24@2673 56 int i, free = 0;
kaf24@2673 57 struct shadow_status *x, *n;
djm@1686 58
kaf24@2673 59 /*
kaf24@2673 60 * WARNING! The shadow page table must not currently be in use!
kaf24@2673 61 * e.g., you are expected to have paused the domain and synchronised CR3.
kaf24@2673 62 */
kaf24@2673 63
kaf24@2673 64 shadow_audit(m, 1);
djm@1686 65
kaf24@2673 66 /* Free each hash chain in turn. */
kaf24@2673 67 for ( i = 0; i < shadow_ht_buckets; i++ )
kaf24@2673 68 {
kaf24@2673 69 /* Skip empty buckets. */
kaf24@2673 70 x = &m->shadow_ht[i];
kaf24@2673 71 if ( x->pfn == 0 )
kaf24@2673 72 continue;
kaf24@2673 73
kaf24@2673 74 /* Free the head page. */
kaf24@2673 75 free_shadow_page(
kaf24@2673 76 m, &frame_table[x->spfn_and_flags & PSH_pfn_mask]);
djm@1686 77
kaf24@2673 78 /* Reinitialise the head node. */
kaf24@2673 79 x->pfn = 0;
kaf24@2673 80 x->spfn_and_flags = 0;
kaf24@2673 81 n = x->next;
kaf24@2673 82 x->next = NULL;
kaf24@2673 83
kaf24@2673 84 free++;
djm@1686 85
kaf24@2673 86 /* Iterate over non-head nodes. */
kaf24@2673 87 for ( x = n; x != NULL; x = n )
kaf24@2673 88 {
kaf24@2673 89 /* Free the shadow page. */
kaf24@2673 90 free_shadow_page(
kaf24@2673 91 m, &frame_table[x->spfn_and_flags & PSH_pfn_mask]);
kaf24@2673 92
kaf24@2673 93 /* Re-initialise the chain node. */
kaf24@2673 94 x->pfn = 0;
kaf24@2673 95 x->spfn_and_flags = 0;
kaf24@2673 96
kaf24@2673 97 /* Add to the free list. */
kaf24@2673 98 n = x->next;
kaf24@2673 99 x->next = m->shadow_ht_free;
kaf24@2673 100 m->shadow_ht_free = x;
kaf24@2673 101
djm@1686 102 free++;
djm@1686 103 }
djm@1686 104
kaf24@2673 105 shadow_audit(m, 0);
djm@1686 106 }
kaf24@2673 107
kaf24@2673 108 SH_LOG("Free shadow table. Freed=%d.", free);
djm@1686 109 }
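
The WARNING inside free_shadow_state() asks callers to quiesce the domain first. Below is a minimal sketch of that calling discipline, mirroring what shadow_mode_control() does further down in this file (editorial illustration; example_flush_shadow is a hypothetical name, not part of the original file).

/* Illustrative only: pause and synchronise before freeing shadow state. */
static void example_flush_shadow(struct domain *d)
{
    domain_pause(d);                 /* domain must not be running */
    synchronise_pagetables(~0UL);    /* no CPU may still be using a stale CR3 */
    shadow_lock(&d->exec_domain[0]->mm);
    free_shadow_state(&d->exec_domain[0]->mm);
    shadow_unlock(&d->exec_domain[0]->mm);
    domain_unpause(d);
}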
djm@1686 110
kaf24@2680 111 static inline int clear_shadow_page(
kaf24@2673 112 struct mm_struct *m, struct shadow_status *x)
kaf24@2673 113 {
kaf24@2673 114 unsigned long *p;
kaf24@2673 115 int restart = 0;
kaf24@2673 116 struct pfn_info *spage = &frame_table[x->spfn_and_flags & PSH_pfn_mask];
djm@1686 117
kaf24@2673 118 switch ( spage->u.inuse.type_info & PGT_type_mask )
djm@1686 119 {
kaf24@2673 120 /* We clear L2 pages by zeroing the guest entries. */
kaf24@2673 121 case PGT_l2_page_table:
kaf24@2673 122 p = map_domain_mem((spage - frame_table) << PAGE_SHIFT);
iap10@3328 123 if (m->shadow_mode == SHM_full_32)
iap10@3328 124 memset(p, 0, ENTRIES_PER_L2_PAGETABLE * sizeof(*p));
iap10@3328 125 else
iap10@3328 126 memset(p, 0, DOMAIN_ENTRIES_PER_L2_PAGETABLE * sizeof(*p));
kaf24@2673 127 unmap_domain_mem(p);
kaf24@2673 128 break;
djm@1686 129
kaf24@2673 130 /* We clear L1 pages by freeing them: no benefit from zeroing them. */
kaf24@2673 131 case PGT_l1_page_table:
kaf24@2673 132 delete_shadow_status(m, x->pfn);
kaf24@2673 133 free_shadow_page(m, spage);
kaf24@2673 134 restart = 1; /* We need to go to start of list again. */
kaf24@2673 135 break;
djm@1686 136 }
djm@1686 137
djm@1686 138 return restart;
djm@1686 139 }
djm@1686 140
kaf24@2680 141 static void clear_shadow_state(struct mm_struct *m)
djm@1686 142 {
kaf24@2673 143 int i;
kaf24@2673 144 struct shadow_status *x;
djm@1686 145
kaf24@2673 146 shadow_audit(m, 1);
djm@1686 147
kaf24@2673 148 for ( i = 0; i < shadow_ht_buckets; i++ )
djm@1686 149 {
kaf24@2673 150 retry:
kaf24@2673 151 /* Skip empty buckets. */
kaf24@2673 152 x = &m->shadow_ht[i];
kaf24@2673 153 if ( x->pfn == 0 )
kaf24@2673 154 continue;
kaf24@2673 155
kaf24@2680 156 if ( clear_shadow_page(m, x) )
kaf24@2673 157 goto retry;
kaf24@2673 158
kaf24@2673 159 for ( x = x->next; x != NULL; x = x->next )
kaf24@2680 160 if ( clear_shadow_page(m, x) )
kaf24@2673 161 goto retry;
kaf24@2673 162
kaf24@2673 163 shadow_audit(m, 0);
djm@1686 164 }
kaf24@2673 165
kaf24@2673 166 SH_VLOG("Scan shadow table. l1=%d l2=%d",
kaf24@2673 167 perfc_value(shadow_l1_pages), perfc_value(shadow_l2_pages));
djm@1686 168 }
djm@1686 169
djm@1686 170
djm@1686 171 void shadow_mode_init(void)
djm@1686 172 {
djm@1686 173 }
djm@1686 174
kaf24@2673 175 int shadow_mode_enable(struct domain *p, unsigned int mode)
djm@1686 176 {
cl349@2957 177 struct mm_struct *m = &p->exec_domain[0]->mm;
djm@1686 178
iap10@3650 179 m->shadow_ht = xmalloc_array(struct shadow_status, shadow_ht_buckets);
kaf24@2673 180 if ( m->shadow_ht == NULL )
djm@1686 181 goto nomem;
kaf24@2673 182 memset(m->shadow_ht, 0, shadow_ht_buckets * sizeof(struct shadow_status));
djm@1686 183
djm@1686 184 if ( mode == SHM_logdirty )
djm@1686 185 {
kaf24@2673 186 m->shadow_dirty_bitmap_size = (p->max_pages + 63) & ~63;
djm@1686 187 m->shadow_dirty_bitmap =
iap10@3652 188 _xmalloc(m->shadow_dirty_bitmap_size/8);
kaf24@2673 189 if ( m->shadow_dirty_bitmap == NULL )
djm@1686 190 {
djm@1686 191 m->shadow_dirty_bitmap_size = 0;
djm@1686 192 goto nomem;
djm@1686 193 }
kaf24@2673 194 memset(m->shadow_dirty_bitmap, 0, m->shadow_dirty_bitmap_size/8);
djm@1686 195 }
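/*
 * Editorial worked example (illustrative, not in the original file): for
 * p->max_pages = 1000 the sizing in the log-dirty branch above gives
 * shadow_dirty_bitmap_size = (1000 + 63) & ~63 = 1024 bits, so _xmalloc()
 * allocates 1024/8 = 128 bytes -- one dirty bit per guest page, rounded up
 * to a whole multiple of 64 bits.
 */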
djm@1686 196
iap10@2569 197 m->shadow_mode = mode;
iap10@2569 198
kaf24@2673 199 __shadow_mk_pagetable(m);
djm@1686 200 return 0;
djm@1686 201
kaf24@2673 202 nomem:
kaf24@2673 203 if ( m->shadow_ht != NULL )
kaf24@2673 204 xfree( m->shadow_ht );
kaf24@2673 205 m->shadow_ht = NULL;
djm@1686 206 return -ENOMEM;
djm@1686 207 }
djm@1686 208
kaf24@1787 209 void __shadow_mode_disable(struct domain *d)
djm@1686 210 {
cl349@2957 211 struct mm_struct *m = &d->exec_domain[0]->mm;
kaf24@2673 212 struct shadow_status *x, *n;
djm@1686 213
kaf24@2680 214 free_shadow_state(m);
djm@1686 215 m->shadow_mode = 0;
djm@1686 216
iap10@2460 217 SH_VLOG("freed tables count=%d l1=%d l2=%d",
kaf24@2673 218 m->shadow_page_count, perfc_value(shadow_l1_pages),
kaf24@2673 219 perfc_value(shadow_l2_pages));
djm@1686 220
kaf24@2673 221 n = m->shadow_ht_extras;
kaf24@2673 222 while ( (x = n) != NULL )
djm@1686 223 {
djm@1686 224 m->shadow_extras_count--;
kaf24@2673 225 n = *((struct shadow_status **)(&x[shadow_ht_extra_size]));
kaf24@2673 226 xfree(x);
djm@1686 227 }
djm@1686 228
kaf24@2673 229 m->shadow_ht_extras = NULL;
kaf24@2673 230 ASSERT(m->shadow_extras_count == 0);
djm@1686 231 SH_LOG("freed extras, now %d", m->shadow_extras_count);
djm@1686 232
kaf24@2673 233 if ( m->shadow_dirty_bitmap != NULL )
djm@1686 234 {
kaf24@2673 235 xfree(m->shadow_dirty_bitmap);
djm@1686 236 m->shadow_dirty_bitmap = 0;
djm@1686 237 m->shadow_dirty_bitmap_size = 0;
djm@1686 238 }
djm@1686 239
kaf24@2673 240 xfree(m->shadow_ht);
kaf24@2673 241 m->shadow_ht = NULL;
djm@1686 242 }
djm@1686 243
kaf24@2673 244 static int shadow_mode_table_op(
kaf24@2673 245 struct domain *d, dom0_shadow_control_t *sc)
djm@1686 246 {
kaf24@2673 247 unsigned int op = sc->op;
cl349@2957 248 struct mm_struct *m = &d->exec_domain[0]->mm;
kaf24@2673 249 int i, rc = 0;
djm@1686 250
kaf24@2680 251 ASSERT(spin_is_locked(&m->shadow_lock));
djm@1686 252
kaf24@2673 253 SH_VLOG("shadow mode table op %08lx %08lx count %d",
kaf24@2673 254 pagetable_val(m->pagetable), pagetable_val(m->shadow_table),
kaf24@2673 255 m->shadow_page_count);
djm@1686 256
kaf24@2673 257 shadow_audit(m, 1);
djm@1686 258
kaf24@2673 259 switch ( op )
djm@1686 260 {
djm@1686 261 case DOM0_SHADOW_CONTROL_OP_FLUSH:
kaf24@2680 262 free_shadow_state(m);
iap10@2597 263
kaf24@2680 264 m->shadow_fault_count = 0;
kaf24@2680 265 m->shadow_dirty_count = 0;
kaf24@2680 266 m->shadow_dirty_net_count = 0;
kaf24@2680 267 m->shadow_dirty_block_count = 0;
iap10@2597 268
djm@1686 269 break;
djm@1686 270
kaf24@2673 271 case DOM0_SHADOW_CONTROL_OP_CLEAN:
kaf24@2680 272 clear_shadow_state(m);
djm@1686 273
kaf24@2680 274 sc->stats.fault_count = m->shadow_fault_count;
kaf24@2680 275 sc->stats.dirty_count = m->shadow_dirty_count;
kaf24@2680 276 sc->stats.dirty_net_count = m->shadow_dirty_net_count;
kaf24@2680 277 sc->stats.dirty_block_count = m->shadow_dirty_block_count;
djm@1686 278
kaf24@2680 279 m->shadow_fault_count = 0;
kaf24@2680 280 m->shadow_dirty_count = 0;
kaf24@2680 281 m->shadow_dirty_net_count = 0;
kaf24@2680 282 m->shadow_dirty_block_count = 0;
kaf24@2673 283
kaf24@2673 284 if ( (d->max_pages > sc->pages) ||
kaf24@2673 285 (sc->dirty_bitmap == NULL) ||
kaf24@2680 286 (m->shadow_dirty_bitmap == NULL) )
kaf24@2673 287 {
kaf24@2673 288 rc = -EINVAL;
kaf24@2680 289 break;
kaf24@2673 290 }
kaf24@2673 291
kaf24@2673 292 sc->pages = d->max_pages;
djm@1686 293
kaf24@2673 294 #define chunk (8*1024) /* Transfer and clear in 1kB chunks for L1 cache. */
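/*
 * Editorial worked example (illustrative, not in the original file): 'chunk'
 * counts pages, i.e. dirty bits, so each pass of the loop below copies and
 * clears at most 8*1024/8 = 1024 bytes (1kB) of bitmap. For a hypothetical
 * d->max_pages = 20000 the iterations run at i = 0, 8192 and 16384 and copy
 * 1024, 1024 and (20000 - 16384 + 7)/8 = 452 bytes respectively.
 */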
kaf24@2673 295 for ( i = 0; i < d->max_pages; i += chunk )
kaf24@2673 296 {
kaf24@2673 297 int bytes = ((((d->max_pages - i) > chunk) ?
kaf24@2673 298 chunk : (d->max_pages - i)) + 7) / 8;
kaf24@2673 299
cwc22@2692 300 if (copy_to_user(
cwc22@2692 301 sc->dirty_bitmap + (i/(8*sizeof(unsigned long))),
cwc22@2692 302 m->shadow_dirty_bitmap +(i/(8*sizeof(unsigned long))),
cwc22@2692 303 bytes))
cwc22@2692 304 {
cwc22@2692 305 /* copy_to_user() can fail when copying to guest app memory: */
cwc22@2692 306 /* the app should zero the buffer after mallocing, and pin it. */
cwc22@2692 307 rc = -EINVAL;
cwc22@2692 308 memset(
cwc22@2692 309 m->shadow_dirty_bitmap + (i/(8*sizeof(unsigned long))),
cwc22@2692 310 0, (d->max_pages/8) - (i/8));
cwc22@2692 311 break;
cwc22@2692 312 }
cwc22@2692 313
kaf24@2673 314 memset(
kaf24@2680 315 m->shadow_dirty_bitmap + (i/(8*sizeof(unsigned long))),
kaf24@2673 316 0, bytes);
kaf24@2673 317 }
kaf24@2673 318
kaf24@2673 319 break;
djm@1686 320
kaf24@2673 321 case DOM0_SHADOW_CONTROL_OP_PEEK:
kaf24@2680 322 sc->stats.fault_count = m->shadow_fault_count;
kaf24@2680 323 sc->stats.dirty_count = m->shadow_dirty_count;
kaf24@2680 324 sc->stats.dirty_net_count = m->shadow_dirty_net_count;
kaf24@2680 325 sc->stats.dirty_block_count = m->shadow_dirty_block_count;
kaf24@2673 326
kaf24@2673 327 if ( (d->max_pages > sc->pages) ||
kaf24@2673 328 (sc->dirty_bitmap == NULL) ||
kaf24@2680 329 (m->shadow_dirty_bitmap == NULL) )
kaf24@2673 330 {
kaf24@2673 331 rc = -EINVAL;
kaf24@2680 332 break;
kaf24@2673 333 }
kaf24@2673 334
kaf24@2673 335 sc->pages = d->max_pages;
cwc22@2692 336 if (copy_to_user(
cwc22@2692 337 sc->dirty_bitmap, m->shadow_dirty_bitmap, (d->max_pages+7)/8))
cwc22@2692 338 {
cwc22@2692 339 rc = -EINVAL;
cwc22@2692 340 break;
cwc22@2692 341 }
djm@1686 342
kaf24@2673 343 break;
djm@1686 344
kaf24@2673 345 default:
kaf24@2680 346 rc = -EINVAL;
kaf24@2680 347 break;
djm@1686 348 }
djm@1686 349
djm@1686 350 SH_VLOG("shadow mode table op : page count %d", m->shadow_page_count);
kaf24@2673 351 shadow_audit(m, 1);
kaf24@2673 352 __shadow_mk_pagetable(m);
djm@1686 353 return rc;
djm@1686 354 }
djm@1686 355
kaf24@1787 356 int shadow_mode_control(struct domain *d, dom0_shadow_control_t *sc)
djm@1686 357 {
kaf24@2680 358 unsigned int op = sc->op;
kaf24@2680 359 int rc = 0;
kaf24@2673 360
cl349@2957 361 if ( unlikely(d == current->domain) )
kaf24@2673 362 {
kaf24@2673 363 DPRINTK("Don't try to do a shadow op on yourself!\n");
kaf24@2673 364 return -EINVAL;
kaf24@2673 365 }
djm@1686 366
iap10@2349 367 domain_pause(d);
iap10@2349 368 synchronise_pagetables(~0UL);
iap10@2331 369
cl349@2957 370 shadow_lock(&d->exec_domain[0]->mm);
djm@1686 371
kaf24@2680 372 switch ( op )
djm@1686 373 {
kaf24@2673 374 case DOM0_SHADOW_CONTROL_OP_OFF:
kaf24@1787 375 shadow_mode_disable(d);
kaf24@2673 376 break;
kaf24@2673 377
kaf24@2673 378 case DOM0_SHADOW_CONTROL_OP_ENABLE_TEST:
kaf24@1787 379 shadow_mode_disable(d);
iap10@2569 380 rc = shadow_mode_enable(d, SHM_test);
kaf24@2673 381 break;
kaf24@2673 382
kaf24@2673 383 case DOM0_SHADOW_CONTROL_OP_ENABLE_LOGDIRTY:
kaf24@1787 384 shadow_mode_disable(d);
iap10@2569 385 rc = shadow_mode_enable(d, SHM_logdirty);
kaf24@2673 386 break;
kaf24@2673 387
kaf24@2673 388 default:
cl349@2957 389 rc = shadow_mode(d->exec_domain[0]) ? shadow_mode_table_op(d, sc) : -EINVAL;
kaf24@2673 390 break;
djm@1686 391 }
djm@1686 392
cl349@2957 393 shadow_unlock(&d->exec_domain[0]->mm);
djm@1686 394
iap10@2349 395 domain_unpause(d);
iap10@2331 396
djm@1686 397 return rc;
djm@1686 398 }
djm@1686 399
kaf24@1787 400 static inline struct pfn_info *alloc_shadow_page(struct mm_struct *m)
djm@1686 401 {
kaf24@2673 402 struct pfn_info *page = alloc_domheap_page(NULL);
kaf24@2673 403
djm@1686 404 m->shadow_page_count++;
iap10@2595 405
kaf24@2673 406 if ( unlikely(page == NULL) )
kaf24@2673 407 {
kaf24@2673 408 printk("Couldn't alloc shadow page! count=%d\n",
kaf24@2673 409 m->shadow_page_count);
kaf24@2673 410 SH_VLOG("Shadow tables l1=%d l2=%d",
kaf24@2673 411 perfc_value(shadow_l1_pages),
kaf24@2673 412 perfc_value(shadow_l2_pages));
kaf24@2673 413 BUG(); /* XXX FIXME: try a shadow flush to free up some memory. */
kaf24@2673 414 }
iap10@2595 415
kaf24@2673 416 return page;
djm@1686 417 }
djm@1686 418
kaf24@2673 419 void unshadow_table(unsigned long gpfn, unsigned int type)
djm@1686 420 {
kaf24@2673 421 unsigned long spfn;
kaf24@2673 422 struct domain *d = frame_table[gpfn].u.inuse.domain;
djm@1686 423
kaf24@2673 424 SH_VLOG("unshadow_table type=%08x gpfn=%08lx", type, gpfn);
djm@1686 425
djm@1686 426 perfc_incrc(unshadow_table_count);
djm@1686 427
kaf24@2673 428 /*
kaf24@2673 429 * This function is the same for all p.t. pages. Even for multi-processor
kaf24@2673 430 * guests there won't be a race here as this CPU was the one that
kaf24@2673 431 * cmpxchg'ed the page to invalid.
kaf24@2673 432 */
cl349@2957 433 spfn = __shadow_status(&d->exec_domain[0]->mm, gpfn) & PSH_pfn_mask;
cl349@2957 434 delete_shadow_status(&d->exec_domain[0]->mm, gpfn);
cl349@2957 435 free_shadow_page(&d->exec_domain[0]->mm, &frame_table[spfn]);
djm@1686 436 }
djm@1686 437
iap10@3328 438 #ifdef CONFIG_VMX
iap10@3328 439 void vmx_shadow_clear_state(struct mm_struct *m)
iap10@3328 440 {
iap10@3328 441 SH_VVLOG("vmx_clear_shadow_state: \n");
iap10@3328 442 clear_shadow_state(m);
iap10@3328 443 }
iap10@3328 444 #endif
iap10@3328 445
iap10@3328 446
djm@1686 447 unsigned long shadow_l2_table(
kaf24@2673 448 struct mm_struct *m, unsigned long gpfn)
djm@1686 449 {
djm@1686 450 struct pfn_info *spfn_info;
kaf24@2673 451 unsigned long spfn;
kaf24@3333 452 l2_pgentry_t *spl2e = 0;
iap10@3328 453 unsigned long guest_gpfn;
iap10@3328 454
iap10@3328 455 __get_machine_to_phys(m, guest_gpfn, gpfn);
djm@1686 456
kaf24@2673 457 SH_VVLOG("shadow_l2_table( %08lx )", gpfn);
djm@1686 458
djm@1686 459 perfc_incrc(shadow_l2_table_count);
djm@1686 460
kaf24@2691 461 if ( (spfn_info = alloc_shadow_page(m)) == NULL )
kaf24@2673 462 BUG(); /* XXX Deal gracefully with failure. */
djm@1686 463
kaf24@1970 464 spfn_info->u.inuse.type_info = PGT_l2_page_table;
djm@1686 465 perfc_incr(shadow_l2_pages);
djm@1686 466
kaf24@2673 467 spfn = spfn_info - frame_table;
iap10@3328 468 /* Mark pfn as being shadowed; update field to point at shadow. */
iap10@3328 469 set_shadow_status(m, guest_gpfn, spfn | PSH_shadowed);
djm@1686 470
djm@1686 471 #ifdef __i386__
kaf24@2673 472 /* Install hypervisor and 2x linear p.t. mapings. */
kaf24@3333 473 if ( m->shadow_mode == SHM_full_32 )
kaf24@3333 474 {
iap10@3328 475 vmx_update_shadow_state(m, gpfn, spfn);
kaf24@3333 476 }
kaf24@3333 477 else
kaf24@3333 478 {
iap10@3328 479 spl2e = (l2_pgentry_t *)map_domain_mem(spfn << PAGE_SHIFT);
iap10@3328 480 /*
kaf24@3333 481 * We could proactively fill in PDEs for pages that are already
kaf24@3333 482 * shadowed. However, we tried it and it didn't help performance.
kaf24@3333 483 * This is simpler.
iap10@3328 484 */
kaf24@3333 485 memset(spl2e, 0, DOMAIN_ENTRIES_PER_L2_PAGETABLE*sizeof(l2_pgentry_t));
iap10@3328 486
iap10@3328 487 /* Install hypervisor and 2x linear p.t. mappings. */
iap10@3328 488 memcpy(&spl2e[DOMAIN_ENTRIES_PER_L2_PAGETABLE],
iap10@3328 489 &idle_pg_table[DOMAIN_ENTRIES_PER_L2_PAGETABLE],
iap10@3328 490 HYPERVISOR_ENTRIES_PER_L2_PAGETABLE * sizeof(l2_pgentry_t));
iap10@3328 491 spl2e[LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT] =
iap10@3328 492 mk_l2_pgentry((gpfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
iap10@3328 493 spl2e[SH_LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT] =
iap10@3328 494 mk_l2_pgentry((spfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
iap10@3328 495 spl2e[PERDOMAIN_VIRT_START >> L2_PAGETABLE_SHIFT] =
iap10@3328 496 mk_l2_pgentry(__pa(frame_table[gpfn].u.inuse.domain->mm_perdomain_pt) |
iap10@3328 497 __PAGE_HYPERVISOR);
iap10@3328 498 }
djm@1686 499 #endif
djm@1686 500
kaf24@3333 501 if ( m->shadow_mode != SHM_full_32 )
iap10@3328 502 unmap_domain_mem(spl2e);
djm@1686 503
kaf24@2673 504 SH_VLOG("shadow_l2_table( %08lx -> %08lx)", gpfn, spfn);
djm@1686 505 return spfn;
djm@1686 506 }
djm@1686 507
kaf24@2673 508 static void shadow_map_l1_into_current_l2(unsigned long va)
kaf24@2673 509 {
kaf24@2673 510 struct mm_struct *m = &current->mm;
iap10@3328 511 unsigned long *gpl1e, *spl1e, gpl2e, spl2e, gl1pfn, sl1pfn=0, sl1ss;
kaf24@2673 512 struct pfn_info *sl1pfn_info;
kaf24@2673 513 int i;
djm@1686 514
iap10@3328 515 __guest_get_pl2e(m, va, &gpl2e);
kaf24@2673 516
iap10@3328 517 gl1pfn = gpl2e >> PAGE_SHIFT;
kaf24@2673 518
kaf24@2673 519 sl1ss = __shadow_status(m, gl1pfn);
kaf24@2673 520 if ( !(sl1ss & PSH_shadowed) )
kaf24@2673 521 {
kaf24@2673 522 /* This L1 is NOT already shadowed so we need to shadow it. */
kaf24@2673 523 SH_VVLOG("4a: l1 not shadowed ( %08lx )", sl1pfn);
kaf24@2673 524
kaf24@2673 525 sl1pfn_info = alloc_shadow_page(m);
kaf24@2673 526 sl1pfn_info->u.inuse.type_info = PGT_l1_page_table;
kaf24@2673 527
kaf24@2673 528 sl1pfn = sl1pfn_info - frame_table;
kaf24@2673 529
kaf24@2673 530 perfc_incrc(shadow_l1_table_count);
kaf24@2673 531 perfc_incr(shadow_l1_pages);
kaf24@2673 532
kaf24@2673 533 set_shadow_status(m, gl1pfn, PSH_shadowed | sl1pfn);
kaf24@2673 534
iap10@3328 535 l2pde_general(m, &gpl2e, &spl2e, sl1pfn);
kaf24@2673 536
iap10@3328 537 __guest_set_pl2e(m, va, gpl2e);
iap10@3328 538 __shadow_set_pl2e(m, va, spl2e);
kaf24@2673 539
kaf24@2673 540 gpl1e = (unsigned long *) &(linear_pg_table[
iap10@3432 541 (va>>L1_PAGETABLE_SHIFT) & ~(ENTRIES_PER_L1_PAGETABLE-1)]);
kaf24@2673 542
kaf24@3343 543 spl1e = (unsigned long *) &(shadow_linear_pg_table[
iap10@3432 544 (va>>L1_PAGETABLE_SHIFT) & ~(ENTRIES_PER_L1_PAGETABLE-1)]);
kaf24@2673 545
kaf24@2673 546 for ( i = 0; i < ENTRIES_PER_L1_PAGETABLE; i++ )
kaf24@2673 547 l1pte_propagate_from_guest(m, &gpl1e[i], &spl1e[i]);
kaf24@2673 548 }
kaf24@2673 549 else
kaf24@2673 550 {
kaf24@2673 551 /* This L1 is shadowed already, but the L2 entry is missing. */
kaf24@2673 552 SH_VVLOG("4b: was shadowed, l2 missing ( %08lx )", sl1pfn);
kaf24@2673 553
kaf24@2673 554 sl1pfn = sl1ss & PSH_pfn_mask;
iap10@3328 555 l2pde_general(m, &gpl2e, &spl2e, sl1pfn);
iap10@3328 556 __guest_set_pl2e(m, va, gpl2e);
iap10@3328 557 __shadow_set_pl2e(m, va, spl2e);
kaf24@2673 558 }
kaf24@2673 559 }
kaf24@2673 560
iap10@3328 561 #ifdef CONFIG_VMX
iap10@3328 562 void vmx_shadow_invlpg(struct mm_struct *m, unsigned long va)
iap10@3328 563 {
iap10@3328 564 unsigned long gpte, spte, host_pfn;
iap10@3328 565
iap10@3328 566 if (__put_user(0L, (unsigned long *)
iap10@3328 567 &shadow_linear_pg_table[va >> PAGE_SHIFT])) {
iap10@3328 568 vmx_shadow_clear_state(m);
iap10@3328 569 return;
iap10@3328 570 }
iap10@3328 571
iap10@3328 572 if (__get_user(gpte, (unsigned long *)
iap10@3328 573 &linear_pg_table[va >> PAGE_SHIFT])) {
iap10@3328 574 return;
iap10@3328 575 }
iap10@3328 576
iap10@3328 577 host_pfn = phys_to_machine_mapping[gpte >> PAGE_SHIFT];
iap10@3328 578 spte = (host_pfn << PAGE_SHIFT) | (gpte & ~PAGE_MASK);
iap10@3328 579
iap10@3328 580 if (__put_user(spte, (unsigned long *)
iap10@3328 581 &shadow_linear_pg_table[va >> PAGE_SHIFT])) {
iap10@3328 582 return;
iap10@3328 583 }
iap10@3328 584 }
iap10@3328 585 #endif
iap10@3328 586
kaf24@2673 587 int shadow_fault(unsigned long va, long error_code)
djm@1686 588 {
djm@1686 589 unsigned long gpte, spte;
djm@1686 590 struct mm_struct *m = &current->mm;
djm@1686 591
djm@1686 592 SH_VVLOG("shadow_fault( va=%08lx, code=%ld )", va, error_code );
djm@1686 593
kaf24@2673 594 check_pagetable(m, current->mm.pagetable, "pre-sf");
djm@1686 595
kaf24@2673 596 /*
kaf24@2673 597 * STEP 1. A fast-reject set of checks with no locking.
kaf24@2673 598 */
kaf24@2673 599
kaf24@2673 600 if ( unlikely(__get_user(gpte, (unsigned long *)
kaf24@2673 601 &linear_pg_table[va >> PAGE_SHIFT])) )
djm@1686 602 {
djm@1686 603 SH_VVLOG("shadow_fault - EXIT: read gpte faulted" );
kaf24@2673 604 return 0;
djm@1686 605 }
djm@1686 606
kaf24@2673 607 if ( !(gpte & _PAGE_PRESENT) )
djm@1686 608 {
djm@1686 609 SH_VVLOG("shadow_fault - EXIT: gpte not present (%lx)",gpte );
djm@1686 610 return 0;
djm@1686 611 }
djm@1686 612
kaf24@2673 613 if ( (error_code & 2) && !(gpte & _PAGE_RW) )
kaf24@2673 614 {
kaf24@2673 615 /* Write fault on a read-only mapping. */
kaf24@2673 616 return 0;
kaf24@2673 617 }
kaf24@2673 618
kaf24@2673 619 /*
kaf24@2673 620 * STEP 2. Take the shadow lock and re-check the guest PTE.
kaf24@2673 621 */
djm@1686 622
kaf24@2416 623 shadow_lock(m);
kaf24@2673 624
kaf24@2673 625 if ( unlikely(__get_user(gpte, (unsigned long *)
kaf24@2673 626 &linear_pg_table[va >> PAGE_SHIFT])) )
djm@1686 627 {
djm@1686 628 SH_VVLOG("shadow_fault - EXIT: read gpte faulted" );
kaf24@2416 629 shadow_unlock(m);
kaf24@2673 630 return 0;
djm@1686 631 }
djm@1686 632
djm@1686 633 if ( unlikely(!(gpte & _PAGE_PRESENT)) )
djm@1686 634 {
djm@1686 635 SH_VVLOG("shadow_fault - EXIT: gpte not present (%lx)",gpte );
kaf24@2416 636 shadow_unlock(m);
kaf24@2673 637 return 0;
djm@1686 638 }
djm@1686 639
kaf24@2673 640 /* Write fault? */
kaf24@2673 641 if ( error_code & 2 )
kaf24@2673 642 {
kaf24@2673 643 if ( unlikely(!(gpte & _PAGE_RW)) )
djm@1686 644 {
kaf24@2673 645 /* Write fault on a read-only mapping. */
kaf24@2673 646 SH_VVLOG("shadow_fault - EXIT: wr fault on RO page (%lx)", gpte);
kaf24@2673 647 shadow_unlock(m);
kaf24@2673 648 return 0;
djm@1686 649 }
kaf24@2673 650
kaf24@2673 651 l1pte_write_fault(m, &gpte, &spte);
djm@1686 652 }
djm@1686 653 else
djm@1686 654 {
kaf24@2673 655 l1pte_read_fault(m, &gpte, &spte);
djm@1686 656 }
djm@1686 657
kaf24@2673 658 /*
kaf24@2673 659 * STEP 3. Write the modified shadow PTE and guest PTE back to the tables.
kaf24@2673 660 */
djm@1686 661
kaf24@2673 662 /* XXX Watch out for read-only L2 entries! (not used in Linux). */
kaf24@2673 663 if ( unlikely(__put_user(gpte, (unsigned long *)
kaf24@2673 664 &linear_pg_table[va >> PAGE_SHIFT])) )
kaf24@2673 665 domain_crash();
djm@1686 666
kaf24@2673 667 /*
kaf24@2673 668 * Update of shadow PTE can fail because the L1 p.t. is not shadowed,
kaf24@2673 669 * or because the shadow isn't linked into this shadow L2 p.t.
kaf24@2673 670 */
kaf24@2673 671 if ( unlikely(__put_user(spte, (unsigned long *)
kaf24@2673 672 &shadow_linear_pg_table[va >> PAGE_SHIFT])) )
kaf24@2673 673 {
kaf24@2673 674 SH_VVLOG("3: not shadowed/mapped gpte=%08lx spte=%08lx", gpte, spte);
kaf24@2673 675 shadow_map_l1_into_current_l2(va);
kaf24@2673 676 shadow_linear_pg_table[va >> PAGE_SHIFT] = mk_l1_pgentry(spte);
kaf24@2673 677 }
djm@1686 678
djm@1686 679 perfc_incrc(shadow_fixup_count);
kaf24@2673 680 m->shadow_fault_count++;
djm@1686 681
kaf24@2416 682 shadow_unlock(m);
djm@1686 683
kaf24@2673 684 check_pagetable(m, current->mm.pagetable, "post-sf");
kaf24@3090 685 return EXCRET_fault_fixed;
djm@1686 686 }
djm@1686 687
djm@1686 688
kaf24@2673 689 void shadow_l1_normal_pt_update(
kaf24@2673 690 unsigned long pa, unsigned long gpte,
kaf24@2673 691 unsigned long *prev_spfn_ptr,
kaf24@2673 692 l1_pgentry_t **prev_spl1e_ptr)
djm@1686 693 {
kaf24@2673 694 unsigned long spfn, spte, prev_spfn = *prev_spfn_ptr;
kaf24@2673 695 l1_pgentry_t *spl1e, *prev_spl1e = *prev_spl1e_ptr;
djm@1686 696
kaf24@2673 697 /* N.B. To get here, we know the l1 page *must* be shadowed. */
kaf24@2673 698 SH_VVLOG("shadow_l1_normal_pt_update pa=%08lx, gpte=%08lx, "
kaf24@2673 699 "prev_spfn=%08lx, prev_spl1e=%p\n",
kaf24@2673 700 pa, gpte, prev_spfn, prev_spl1e);
djm@1686 701
kaf24@2673 702 spfn = __shadow_status(&current->mm, pa >> PAGE_SHIFT) & PSH_pfn_mask;
djm@1686 703
djm@1686 704 if ( spfn == prev_spfn )
djm@1686 705 {
djm@1686 706 spl1e = prev_spl1e;
djm@1686 707 }
djm@1686 708 else
djm@1686 709 {
kaf24@2673 710 if ( prev_spl1e != NULL )
kaf24@2673 711 unmap_domain_mem( prev_spl1e );
kaf24@2673 712 spl1e = (l1_pgentry_t *)map_domain_mem(spfn << PAGE_SHIFT);
djm@1686 713 *prev_spfn_ptr = spfn;
djm@1686 714 *prev_spl1e_ptr = spl1e;
djm@1686 715 }
djm@1686 716
kaf24@2673 717 l1pte_propagate_from_guest(&current->mm, &gpte, &spte);
kaf24@2673 718 spl1e[(pa & ~PAGE_MASK) / sizeof(l1_pgentry_t)] = mk_l1_pgentry(spte);
djm@1686 719 }
djm@1686 720
kaf24@2673 721 void shadow_l2_normal_pt_update(unsigned long pa, unsigned long gpte)
djm@1686 722 {
kaf24@2673 723 unsigned long spfn, spte;
kaf24@2673 724 l2_pgentry_t *spl2e;
kaf24@2673 725 unsigned long s_sh;
djm@1686 726
kaf24@2673 727 /* N.B. To get here, we know the l2 page *must* be shadowed. */
djm@1686 728 SH_VVLOG("shadow_l2_normal_pt_update pa=%08lx, gpte=%08lx",pa,gpte);
djm@1686 729
kaf24@2673 730 spfn = __shadow_status(&current->mm, pa >> PAGE_SHIFT) & PSH_pfn_mask;
kaf24@2673 731
kaf24@2673 732 s_sh = (gpte & _PAGE_PRESENT) ?
kaf24@2673 733 __shadow_status(&current->mm, gpte >> PAGE_SHIFT) : 0;
djm@1686 734
kaf24@2673 735 /* XXXX Should mark guest pte as DIRTY and ACCESSED too! */
kaf24@2673 736 l2pde_general(&current->mm, &gpte, &spte, s_sh);
kaf24@2673 737 spl2e = (l2_pgentry_t *)map_domain_mem(spfn << PAGE_SHIFT);
kaf24@2673 738 spl2e[(pa & ~PAGE_MASK) / sizeof(l2_pgentry_t)] = mk_l2_pgentry(spte);
kaf24@2673 739 unmap_domain_mem(spl2e);
kaf24@2673 740 }
djm@1686 741
djm@1686 742
djm@1686 743
djm@1686 744
kaf24@2673 745 /************************************************************************/
kaf24@2673 746 /************************************************************************/
kaf24@2673 747 /************************************************************************/
djm@1686 748
djm@1686 749 #if SHADOW_DEBUG
djm@1686 750
djm@1686 751 static int sh_l2_present;
djm@1686 752 static int sh_l1_present;
djm@1686 753 char * sh_check_name;
djm@1686 754
kaf24@2673 755 #define FAIL(_f, _a...) \
kaf24@2673 756 do { \
kaf24@2673 757 printk("XXX %s-FAIL (%d,%d)" _f " g=%08lx s=%08lx\n", \
kaf24@2673 758 sh_check_name, level, i, ## _a , gpte, spte); \
kaf24@2673 759 BUG(); \
kaf24@2673 760 } while ( 0 )
djm@1686 761
kaf24@2673 762 static int check_pte(
kaf24@2673 763 struct mm_struct *m, unsigned long gpte, unsigned long spte,
kaf24@2673 764 int level, int i)
djm@1686 765 {
djm@1686 766 unsigned long mask, gpfn, spfn;
iap10@3328 767 #ifdef CONFIG_VMX
iap10@3328 768 unsigned long guest_gpfn;
iap10@3328 769 #endif
djm@1686 770
kaf24@2673 771 if ( (spte == 0) || (spte == 0xdeadface) || (spte == 0x00000E00) )
kaf24@2673 772 return 1; /* always safe */
djm@1686 773
djm@1686 774 if ( !(spte & _PAGE_PRESENT) )
djm@1686 775 FAIL("Non zero not present spte");
djm@1686 776
kaf24@2673 777 if ( level == 2 ) sh_l2_present++;
kaf24@2673 778 if ( level == 1 ) sh_l1_present++;
djm@1686 779
djm@1686 780 if ( !(gpte & _PAGE_PRESENT) )
djm@1686 781 FAIL("Guest not present yet shadow is");
djm@1686 782
djm@1686 783 mask = ~(_PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_RW|0xFFFFF000);
djm@1686 784
kaf24@2673 785 if ( (spte & mask) != (gpte & mask) )
djm@1686 786 FAIL("Corrupt?");
djm@1686 787
djm@1686 788 if ( (spte & _PAGE_DIRTY ) && !(gpte & _PAGE_DIRTY) )
djm@1686 789 FAIL("Dirty coherence");
djm@1686 790
djm@1686 791 if ( (spte & _PAGE_ACCESSED ) && !(gpte & _PAGE_ACCESSED) )
djm@1686 792 FAIL("Accessed coherence");
djm@1686 793
djm@1686 794 if ( (spte & _PAGE_RW ) && !(gpte & _PAGE_RW) )
djm@1686 795 FAIL("RW coherence");
djm@1686 796
kaf24@2673 797 if ( (spte & _PAGE_RW ) && !((gpte & _PAGE_RW) && (gpte & _PAGE_DIRTY)) )
djm@1686 798 FAIL("RW2 coherence");
djm@1686 799
kaf24@2673 800 spfn = spte >> PAGE_SHIFT;
kaf24@2673 801 gpfn = gpte >> PAGE_SHIFT;
djm@1686 802
djm@1686 803 if ( gpfn == spfn )
djm@1686 804 {
djm@1686 805 if ( level > 1 )
kaf24@2673 806 FAIL("Linear map ???"); /* XXX this will fail on BSD */
djm@1686 807 }
djm@1686 808 else
djm@1686 809 {
djm@1686 810 if ( level < 2 )
djm@1686 811 FAIL("Shadow in L1 entry?");
djm@1686 812
iap10@3328 813 if (m->shadow_mode == SHM_full_32) {
iap10@3328 814
iap10@3328 815 guest_gpfn = phys_to_machine_mapping[gpfn];
iap10@3328 816
iap10@3328 817 if ( __shadow_status(m, guest_gpfn) != (PSH_shadowed | spfn) )
iap10@3328 818 FAIL("spfn problem g.sf=%08lx",
iap10@3328 819 __shadow_status(m, guest_gpfn) );
iap10@3328 820
iap10@3328 821 } else {
iap10@3328 822 if ( __shadow_status(m, gpfn) != (PSH_shadowed | spfn) )
iap10@3328 823 FAIL("spfn problem g.sf=%08lx",
iap10@3328 824 __shadow_status(m, gpfn) );
iap10@3328 825 }
iap10@3328 826
djm@1686 827 }
djm@1686 828
djm@1686 829 return 1;
djm@1686 830 }
djm@1686 831
djm@1686 832
kaf24@2673 833 static int check_l1_table(
kaf24@2673 834 struct mm_struct *m, unsigned long va,
kaf24@2673 835 unsigned long g2, unsigned long s2)
djm@1686 836 {
kaf24@2673 837 int i;
djm@1686 838 unsigned long *gpl1e, *spl1e;
djm@1686 839
kaf24@2673 840 gpl1e = map_domain_mem(g2 << PAGE_SHIFT);
kaf24@2673 841 spl1e = map_domain_mem(s2 << PAGE_SHIFT);
djm@1686 842
kaf24@2673 843 for ( i = 0; i < ENTRIES_PER_L1_PAGETABLE; i++ )
kaf24@2673 844 check_pte(m, gpl1e[i], spl1e[i], 1, i);
djm@1686 845
kaf24@2673 846 unmap_domain_mem(spl1e);
kaf24@2673 847 unmap_domain_mem(gpl1e);
djm@1686 848
djm@1686 849 return 1;
djm@1686 850 }
djm@1686 851
kaf24@2673 852 #define FAILPT(_f, _a...) \
kaf24@2673 853 do { \
kaf24@2673 854 printk("XXX FAIL %s-PT" _f "\n", s, ## _a ); \
kaf24@2673 855 BUG(); \
kaf24@2673 856 } while ( 0 )
djm@1686 857
kaf24@2673 858 int check_pagetable(struct mm_struct *m, pagetable_t pt, char *s)
djm@1686 859 {
djm@1686 860 unsigned long gptbase = pagetable_val(pt);
djm@1686 861 unsigned long gpfn, spfn;
kaf24@2673 862 int i;
djm@1686 863 l2_pgentry_t *gpl2e, *spl2e;
iap10@3328 864 unsigned long host_gpfn = 0;
djm@1686 865
djm@1686 866 sh_check_name = s;
djm@1686 867
kaf24@2673 868 SH_VVLOG("%s-PT Audit", s);
djm@1686 869
djm@1686 870 sh_l2_present = sh_l1_present = 0;
djm@1686 871
kaf24@2673 872 gpfn = gptbase >> PAGE_SHIFT;
djm@1686 873
iap10@3328 874 __get_phys_to_machine(m, host_gpfn, gpfn);
iap10@3328 875
iap10@3328 876 if ( ! (__shadow_status(m, gpfn) & PSH_shadowed) )
djm@1686 877 {
djm@1686 878 printk("%s-PT %08lx not shadowed\n", s, gptbase);
iap10@3328 879
iap10@3328 880 if( __shadow_status(m, gpfn) != 0 ) BUG();
iap10@3328 881 return 0;
iap10@3328 882 }
djm@1686 883
kaf24@2673 884 spfn = __shadow_status(m, gpfn) & PSH_pfn_mask;
djm@1686 885
iap10@3328 886 if ( __shadow_status(m, gpfn) != (PSH_shadowed | spfn) )
iap10@3328 887 FAILPT("ptbase shadow inconsistent1");
djm@1686 888
iap10@3328 889 if (m->shadow_mode == SHM_full_32)
iap10@3328 890 {
iap10@3328 891 host_gpfn = phys_to_machine_mapping[gpfn];
iap10@3328 892 gpl2e = (l2_pgentry_t *) map_domain_mem( host_gpfn << PAGE_SHIFT );
iap10@3328 893
iap10@3328 894 } else
iap10@3328 895 gpl2e = (l2_pgentry_t *) map_domain_mem( gpfn << PAGE_SHIFT );
iap10@3328 896
djm@1686 897 spl2e = (l2_pgentry_t *) map_domain_mem( spfn << PAGE_SHIFT );
djm@1686 898
kaf24@2673 899 if ( memcmp(&spl2e[DOMAIN_ENTRIES_PER_L2_PAGETABLE],
kaf24@2673 900 &gpl2e[DOMAIN_ENTRIES_PER_L2_PAGETABLE],
kaf24@2673 901 ((SH_LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT) -
kaf24@2673 902 DOMAIN_ENTRIES_PER_L2_PAGETABLE) * sizeof(l2_pgentry_t)) )
djm@1686 903 {
kaf24@2673 904 for ( i = DOMAIN_ENTRIES_PER_L2_PAGETABLE;
kaf24@2673 905 i < (SH_LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT);
kaf24@2673 906 i++ )
djm@1686 907 printk("+++ (%d) %08lx %08lx\n",i,
kaf24@2673 908 l2_pgentry_val(gpl2e[i]), l2_pgentry_val(spl2e[i]));
djm@1686 909 FAILPT("hypervisor entries inconsistent");
djm@1686 910 }
djm@1686 911
djm@1686 912 if ( (l2_pgentry_val(spl2e[LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT]) !=
djm@1686 913 l2_pgentry_val(gpl2e[LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT])) )
djm@1686 914 FAILPT("hypervisor linear map inconsistent");
djm@1686 915
kaf24@2673 916 if ( (l2_pgentry_val(spl2e[SH_LINEAR_PT_VIRT_START >>
kaf24@2673 917 L2_PAGETABLE_SHIFT]) !=
djm@1686 918 ((spfn << PAGE_SHIFT) | __PAGE_HYPERVISOR)) )
djm@1686 919 FAILPT("hypervisor shadow linear map inconsistent %08lx %08lx",
kaf24@2673 920 l2_pgentry_val(spl2e[SH_LINEAR_PT_VIRT_START >>
kaf24@2673 921 L2_PAGETABLE_SHIFT]),
kaf24@2673 922 (spfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
djm@1686 923
iap10@3328 924 if (m->shadow_mode != SHM_full_32) {
iap10@3328 925 if ( (l2_pgentry_val(spl2e[PERDOMAIN_VIRT_START >> L2_PAGETABLE_SHIFT]) !=
iap10@3328 926 ((__pa(frame_table[gpfn].u.inuse.domain->mm_perdomain_pt) |
kaf24@2673 927 __PAGE_HYPERVISOR))) )
iap10@3328 928 FAILPT("hypervisor per-domain map inconsistent");
iap10@3328 929 }
djm@1686 930
kaf24@2673 931 /* Check the whole L2. */
djm@1686 932 for ( i = 0; i < DOMAIN_ENTRIES_PER_L2_PAGETABLE; i++ )
kaf24@2673 933 check_pte(m, l2_pgentry_val(gpl2e[i]), l2_pgentry_val(spl2e[i]), 2, i);
djm@1686 934
kaf24@2673 935 /* Go back and recurse. */
djm@1686 936 for ( i = 0; i < DOMAIN_ENTRIES_PER_L2_PAGETABLE; i++ )
djm@1686 937 {
kaf24@2673 938 if ( l2_pgentry_val(spl2e[i]) != 0 )
kaf24@2673 939 check_l1_table(
kaf24@2673 940 m, i << L2_PAGETABLE_SHIFT,
kaf24@2673 941 l2_pgentry_val(gpl2e[i]) >> PAGE_SHIFT,
kaf24@2673 942 l2_pgentry_val(spl2e[i]) >> PAGE_SHIFT);
djm@1686 943 }
djm@1686 944
kaf24@2673 945 unmap_domain_mem(spl2e);
kaf24@2673 946 unmap_domain_mem(gpl2e);
djm@1686 947
djm@1686 948 SH_VVLOG("PT verified : l2_present = %d, l1_present = %d\n",
kaf24@2673 949 sh_l2_present, sh_l1_present);
djm@1686 950
djm@1686 951 return 1;
djm@1686 952 }
djm@1686 953
djm@1686 954 #endif