debuggers.hg

xen/include/asm-x86/shadow.h @ 3770:d21fbb46b9d8

bitkeeper revision 1.1159.253.1 (4208f8a54Zaz-XgC11YTHeLxPHPoZg)

Merge scramble.cl.cam.ac.uk:/auto/groups/xeno/BK/xeno.bk
into scramble.cl.cam.ac.uk:/local/scratch/kaf24/xen-unstable.bk

author    kaf24@scramble.cl.cam.ac.uk
date      Tue Feb 08 17:36:37 2005 +0000 (2005-02-08)
parents   f5f2757b3aa2 cb87fd290eb0
children  12104922e743

/* -*-  Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */

#ifndef _XEN_SHADOW_H
#define _XEN_SHADOW_H

#include <xen/config.h>
#include <xen/types.h>
#include <xen/perfc.h>
#include <asm/processor.h>

#ifdef CONFIG_VMX
#include <asm/domain_page.h>
#endif

/* Shadow PT flag bits in pfn_info */
#define PSH_shadowed    (1<<31) /* page has a shadow. PFN points to shadow */
#define PSH_pfn_mask    ((1<<21)-1)

/* Shadow PT operation mode : shadow-mode variable in arch_domain. */
#define SHM_test        (1) /* just run domain on shadow PTs */
#define SHM_logdirty    (2) /* log pages that are dirtied */
#define SHM_translate   (3) /* lookup machine pages in translation table */
#define SHM_cow         (4) /* copy on write all dirtied pages */
#define SHM_full_32     (8) /* full virtualization for 32-bit */

#define shadow_linear_pg_table ((l1_pgentry_t *)SH_LINEAR_PT_VIRT_START)
#define shadow_linear_l2_table ((l2_pgentry_t *)(SH_LINEAR_PT_VIRT_START + \
     (SH_LINEAR_PT_VIRT_START >> (L2_PAGETABLE_SHIFT - L1_PAGETABLE_SHIFT))))
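
/*
 * Note on the two "linear" aliases above: they rely on the active shadow L2
 * being installed at the SH_LINEAR_PT_VIRT_START slot of the address space
 * Xen is running on (see, e.g., vmx_update_shadow_state() below, which plugs
 * it into the monitor table). With that slot in place, shadow_linear_pg_table
 * exposes every shadow L1 entry as a flat virtual array indexed by
 * (va >> PAGE_SHIFT), and shadow_linear_l2_table points at the slice of that
 * array holding the L2 entries themselves.
 */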

#define shadow_mode(_d)       ((_d)->arch.shadow_mode)
#define shadow_lock_init(_d)  spin_lock_init(&(_d)->arch.shadow_lock)
#define shadow_lock(_d)       spin_lock(&(_d)->arch.shadow_lock)
#define shadow_unlock(_d)     spin_unlock(&(_d)->arch.shadow_lock)
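
/*
 * Locking discipline: shadow_lock protects the per-domain shadow status hash
 * table and the log-dirty bitmap. Helpers that ASSERT the lock is held
 * (__mark_dirty(), set_shadow_status(), delete_shadow_status()) expect the
 * caller to have taken it; mark_dirty() takes and drops it itself, and
 * get_shadow_status()/put_shadow_status() bracket a successful lookup (see
 * the note near their definitions below).
 */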

extern void shadow_mode_init(void);
extern int shadow_mode_control(struct domain *p, dom0_shadow_control_t *sc);
extern int shadow_fault(unsigned long va, long error_code);
extern void shadow_l1_normal_pt_update(
    unsigned long pa, unsigned long gpte,
    unsigned long *prev_spfn_ptr, l1_pgentry_t **prev_spl1e_ptr);
extern void shadow_l2_normal_pt_update(unsigned long pa, unsigned long gpde);
extern void unshadow_table(unsigned long gpfn, unsigned int type);
extern int shadow_mode_enable(struct domain *p, unsigned int mode);

#ifdef CONFIG_VMX
extern void vmx_shadow_clear_state(struct domain *);
extern void vmx_shadow_invlpg(struct domain *, unsigned long);
#endif

#define __mfn_to_gpfn(_d, mfn)                  \
    ( (shadow_mode(_d) == SHM_full_32)          \
      ? machine_to_phys_mapping[(mfn)]          \
      : (mfn) )

#define __gpfn_to_mfn(_d, gpfn)                 \
    ( (shadow_mode(_d) == SHM_full_32)          \
      ? phys_to_machine_mapping(gpfn)           \
      : (gpfn) )
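
/*
 * In the paravirtual shadow modes a guest "pfn" is already a machine frame
 * number, so the two macros above are identity maps; only SHM_full_32 (VMX)
 * keeps a separate pseudo-physical namespace and goes through the
 * machine_to_phys / phys_to_machine tables. A minimal sketch of the intended
 * round trip, assuming mfn really belongs to d's pseudo-physical map:
 *
 *     unsigned long gpfn = __mfn_to_gpfn(d, mfn);
 *     ASSERT(__gpfn_to_mfn(d, gpfn) == mfn);
 */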

extern void __shadow_mode_disable(struct domain *d);
static inline void shadow_mode_disable(struct domain *d)
{
    if ( shadow_mode(d) )
        __shadow_mode_disable(d);
}

extern unsigned long shadow_l2_table(
    struct domain *d, unsigned long gpfn);

static inline void shadow_invalidate(struct exec_domain *ed) {
    if ( shadow_mode(ed->domain) != SHM_full_32 )
        BUG();
    memset(ed->arch.shadow_vtable, 0, PAGE_SIZE);
}

#define SHADOW_DEBUG 1
#define SHADOW_VERBOSE_DEBUG 0
#define SHADOW_HASH_DEBUG 1

struct shadow_status {
    unsigned long pfn;            /* Guest pfn.             */
    unsigned long smfn_and_flags; /* Shadow mfn plus flags. */
    struct shadow_status *next;   /* Pull-to-front list.    */
};

#define shadow_ht_extra_size 128
#define shadow_ht_buckets    256
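
/*
 * The shadow status hash table is an array of shadow_ht_buckets head nodes
 * embedded in arch_domain (d->arch.shadow_ht). Colliding entries are chained
 * through 'next' using nodes taken from d->arch.shadow_ht_free, which
 * set_shadow_status() refills in blocks of shadow_ht_extra_size when it runs
 * dry.
 */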

#ifdef VERBOSE
#define SH_LOG(_f, _a...)                                               \
    printk("DOM%uP%u: (file=shadow.c, line=%d) " _f "\n",               \
           current->domain->id, current->processor, __LINE__, ## _a )
#else
#define SH_LOG(_f, _a...)
#endif

#if SHADOW_DEBUG
#define SH_VLOG(_f, _a...)                                              \
    printk("DOM%uP%u: (file=shadow.c, line=%d) " _f "\n",               \
           current->domain->id, current->processor, __LINE__, ## _a )
#else
#define SH_VLOG(_f, _a...)
#endif

#if SHADOW_VERBOSE_DEBUG
#define SH_VVLOG(_f, _a...)                                             \
    printk("DOM%uP%u: (file=shadow.c, line=%d) " _f "\n",               \
           current->domain->id, current->processor, __LINE__, ## _a )
#else
#define SH_VVLOG(_f, _a...)
#endif

// BUG: mafetter: this assumes ed == current, so why pass ed?
static inline void __shadow_get_l2e(
    struct exec_domain *ed, unsigned long va, unsigned long *sl2e)
{
    if ( shadow_mode(ed->domain) == SHM_full_32 ) {
        *sl2e = l2_pgentry_val(ed->arch.shadow_vtable[l2_table_offset(va)]);
    }
    else if ( shadow_mode(ed->domain) ) {
        *sl2e = l2_pgentry_val(shadow_linear_l2_table[l2_table_offset(va)]);
    }
    else
        *sl2e = l2_pgentry_val(linear_l2_table[l2_table_offset(va)]);
}

static inline void __shadow_set_l2e(
    struct exec_domain *ed, unsigned long va, unsigned long value)
{
    if ( shadow_mode(ed->domain) == SHM_full_32 ) {
        ed->arch.shadow_vtable[l2_table_offset(va)] = mk_l2_pgentry(value);
    }
    else if ( shadow_mode(ed->domain) ) {
        shadow_linear_l2_table[l2_table_offset(va)] = mk_l2_pgentry(value);
    }
    else
        linear_l2_table[l2_table_offset(va)] = mk_l2_pgentry(value);
}

static inline void __guest_get_l2e(
    struct exec_domain *ed, unsigned long va, unsigned long *l2e)
{
    *l2e = ( shadow_mode(ed->domain) == SHM_full_32 ) ?
        l2_pgentry_val(ed->arch.vpagetable[l2_table_offset(va)]) :
        l2_pgentry_val(linear_l2_table[l2_table_offset(va)]);
}

static inline void __guest_set_l2e(
    struct exec_domain *ed, unsigned long va, unsigned long value)
{
    if ( shadow_mode(ed->domain) == SHM_full_32 )
    {
        unsigned long pfn;

        pfn = phys_to_machine_mapping(value >> PAGE_SHIFT);
        ed->arch.guest_pl2e_cache[l2_table_offset(va)] =
            mk_l2_pgentry((pfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);

        ed->arch.vpagetable[l2_table_offset(va)] = mk_l2_pgentry(value);
    }
    else
    {
        linear_l2_table[l2_table_offset(va)] = mk_l2_pgentry(value);
    }
}
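
/*
 * The four accessors above give a uniform way to read and write L2 entries
 * in the guest's view and in the shadow, whatever the shadow mode. A rough
 * usage sketch (illustrative only; the real handling lives in shadow_fault()):
 *
 *     unsigned long gpde, spde;
 *     __guest_get_l2e(ed, va, &gpde);    // the guest's idea of the PDE
 *     __shadow_get_l2e(ed, va, &spde);   // what the hardware actually sees
 *     if ( !(spde & _PAGE_PRESENT) && (gpde & _PAGE_PRESENT) )
 *         ... build a shadow L1 and publish it with __shadow_set_l2e() ...
 */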

/************************************************************************/

static inline int __mark_dirty(struct domain *d, unsigned int mfn)
{
    unsigned long pfn;
    int           rc = 0;

    ASSERT(spin_is_locked(&d->arch.shadow_lock));
    ASSERT(d->arch.shadow_dirty_bitmap != NULL);

    pfn = machine_to_phys_mapping[mfn];

    /*
     * Values with the MSB set denote MFNs that aren't really part of the
     * domain's pseudo-physical memory map (e.g., the shared info frame).
     * Nothing to do here...
     */
    if ( unlikely(pfn & 0x80000000UL) )
        return rc;

    if ( likely(pfn < d->arch.shadow_dirty_bitmap_size) )
    {
        /* N.B. Can use non-atomic TAS because protected by shadow_lock. */
        if ( !__test_and_set_bit(pfn, d->arch.shadow_dirty_bitmap) )
        {
            d->arch.shadow_dirty_count++;
            rc = 1;
        }
    }
#ifndef NDEBUG
    else if ( mfn < max_page )
    {
        SH_LOG("mark_dirty OOR! mfn=%x pfn=%lx max=%x (dom %p)",
               mfn, pfn, d->arch.shadow_dirty_bitmap_size, d);
        SH_LOG("dom=%p caf=%08x taf=%08x\n",
               page_get_owner(&frame_table[mfn]),
               frame_table[mfn].count_info,
               frame_table[mfn].u.inuse.type_info );
    }
#endif

    return rc;
}


static inline int mark_dirty(struct domain *d, unsigned int mfn)
{
    int rc;
    shadow_lock(d);
    rc = __mark_dirty(d, mfn);
    shadow_unlock(d);
    return rc;
}
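
/*
 * Log-dirty bookkeeping: __mark_dirty() converts the mfn to a guest pfn and
 * sets the corresponding bit in d->arch.shadow_dirty_bitmap, returning 1 only
 * when the bit was newly set. mark_dirty() is the locked wrapper for callers
 * that do not already hold shadow_lock, e.g. (illustrative):
 *
 *     if ( shadow_mode(d) == SHM_logdirty )
 *         mark_dirty(d, mfn);
 */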

/************************************************************************/

static inline void l1pte_write_fault(
    struct domain *d, unsigned long *gpte_p, unsigned long *spte_p)
{
    unsigned long gpte = *gpte_p;
    unsigned long spte = *spte_p;
    unsigned long pfn = gpte >> PAGE_SHIFT;
    unsigned long mfn = __gpfn_to_mfn(d, pfn);

    ASSERT(gpte & _PAGE_RW);
    gpte |= _PAGE_DIRTY | _PAGE_ACCESSED;

    if ( shadow_mode(d) == SHM_logdirty )
        __mark_dirty(d, pfn);

    spte = (mfn << PAGE_SHIFT) | (gpte & ~PAGE_MASK);

    SH_VVLOG("l1pte_write_fault: updating spte=0x%p gpte=0x%p", spte, gpte);
    *gpte_p = gpte;
    *spte_p = spte;
}

static inline void l1pte_read_fault(
    struct domain *d, unsigned long *gpte_p, unsigned long *spte_p)
{
    unsigned long gpte = *gpte_p;
    unsigned long spte = *spte_p;
    unsigned long pfn = gpte >> PAGE_SHIFT;
    unsigned long mfn = __gpfn_to_mfn(d, pfn);

    gpte |= _PAGE_ACCESSED;
    spte = (mfn << PAGE_SHIFT) | (gpte & ~PAGE_MASK);

    if ( (shadow_mode(d) == SHM_logdirty) || !(gpte & _PAGE_DIRTY) )
        spte &= ~_PAGE_RW;

    SH_VVLOG("l1pte_read_fault: updating spte=0x%p gpte=0x%p", spte, gpte);
    *gpte_p = gpte;
    *spte_p = spte;
}
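
/*
 * Both fault helpers build the shadow PTE by splicing the machine frame into
 * the guest PTE's low flag bits:
 *
 *     spte = (mfn << PAGE_SHIFT) | (gpte & ~PAGE_MASK);
 *
 * On a write fault the guest entry is marked dirty/accessed and the shadow
 * keeps _PAGE_RW; on a read fault the shadow is made read-only unless the
 * guest entry is already dirty (and always read-only in log-dirty mode), so
 * that the first real write still faults and can be tracked.
 */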

static inline void l1pte_propagate_from_guest(
    struct domain *d, unsigned long *gpte_p, unsigned long *spte_p)
{
    unsigned long gpte = *gpte_p;
    unsigned long spte = *spte_p;
    unsigned long host_pfn, host_gpte;
#if SHADOW_VERBOSE_DEBUG
    unsigned long old_spte = spte;
#endif

    switch ( shadow_mode(d) )
    {
    case SHM_test:
        spte = 0;
        if ( (gpte & (_PAGE_PRESENT|_PAGE_ACCESSED)) ==
             (_PAGE_PRESENT|_PAGE_ACCESSED) )
            spte = (gpte & _PAGE_DIRTY) ? gpte : (gpte & ~_PAGE_RW);
        break;

    case SHM_logdirty:
        spte = 0;
        if ( (gpte & (_PAGE_PRESENT|_PAGE_ACCESSED)) ==
             (_PAGE_PRESENT|_PAGE_ACCESSED) )
            spte = gpte & ~_PAGE_RW;
        break;

    case SHM_full_32:
        spte = 0;

        if ( mmio_space(gpte & 0xFFFFF000) )
        {
            *spte_p = spte;
            return;
        }

        host_pfn = phys_to_machine_mapping(gpte >> PAGE_SHIFT);
        host_gpte = (host_pfn << PAGE_SHIFT) | (gpte & ~PAGE_MASK);

        if ( (host_gpte & (_PAGE_PRESENT|_PAGE_ACCESSED)) ==
             (_PAGE_PRESENT|_PAGE_ACCESSED) )
            spte = (host_gpte & _PAGE_DIRTY) ?
                host_gpte : (host_gpte & ~_PAGE_RW);

        break;
    }

#if SHADOW_VERBOSE_DEBUG
    if ( old_spte || spte || gpte )
        SH_VVLOG("l1pte_propagate_from_guest: gpte=0x%p, old spte=0x%p, "
                 "new spte=0x%p", gpte, old_spte, spte);
#endif

    *gpte_p = gpte;
    *spte_p = spte;
}
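
/*
 * Propagation rules, per mode: a guest PTE only yields a shadow PTE once it
 * is both present and accessed. SHM_test then mirrors it (write-protected
 * until the guest marks it dirty); SHM_logdirty always write-protects so the
 * first write is trapped and logged; SHM_full_32 first translates the
 * guest's pseudo-physical frame to a machine frame (MMIO ranges get no
 * shadow mapping here) and applies the SHM_test rule to the translated entry.
 */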

static inline void l2pde_general(
    struct domain *d,
    unsigned long *gpde_p,
    unsigned long *spde_p,
    unsigned long sl1mfn)
{
    unsigned long gpde = *gpde_p;
    unsigned long spde = *spde_p;

    spde = 0;

    if ( sl1mfn != 0 )
    {
        spde = (gpde & ~PAGE_MASK) | (sl1mfn << PAGE_SHIFT) |
            _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY;
        gpde |= _PAGE_ACCESSED | _PAGE_DIRTY;

        /* Detect linear p.t. mappings and write-protect them. */
        if ( (frame_table[sl1mfn].u.inuse.type_info & PGT_type_mask) ==
             PGT_l2_page_table )
        {
            if ( shadow_mode(d) != SHM_full_32 )
                spde = gpde & ~_PAGE_RW;
        }
    }

    *gpde_p = gpde;
    *spde_p = spde;
}

/*********************************************************************/

#if SHADOW_HASH_DEBUG
static void shadow_audit(struct domain *d, int print)
{
    int live = 0, free = 0, j = 0, abs;
    struct shadow_status *a;

    for ( j = 0; j < shadow_ht_buckets; j++ )
    {
        a = &d->arch.shadow_ht[j];
        if ( a->pfn ) { live++; ASSERT(a->smfn_and_flags & PSH_pfn_mask); }
        ASSERT(a->pfn < 0x00100000UL);
        a = a->next;
        while ( a && (live < 9999) )
        {
            live++;
            if ( (a->pfn == 0) || (a->smfn_and_flags == 0) )
            {
                printk("XXX live=%d pfn=%p sp=%p next=%p\n",
                       live, a->pfn, a->smfn_and_flags, a->next);
                BUG();
            }
            ASSERT(a->pfn < 0x00100000UL);
            ASSERT(a->smfn_and_flags & PSH_pfn_mask);
            a = a->next;
        }
        ASSERT(live < 9999);
    }

    for ( a = d->arch.shadow_ht_free; a != NULL; a = a->next )
        free++;

    if ( print )
        printk("Xlive=%d free=%d\n", live, free);

    abs = (perfc_value(shadow_l1_pages) + perfc_value(shadow_l2_pages)) - live;
    if ( (abs < -1) || (abs > 1) )
    {
        printk("live=%d free=%d l1=%d l2=%d\n", live, free,
               perfc_value(shadow_l1_pages), perfc_value(shadow_l2_pages));
        BUG();
    }
}
#else
#define shadow_audit(p, print) ((void)0)
#endif

static inline struct shadow_status *hash_bucket(
    struct domain *d, unsigned int gpfn)
{
    return &d->arch.shadow_ht[gpfn % shadow_ht_buckets];
}

/*
 * N.B. This takes a guest pfn (i.e. a pfn in the guest's namespace,
 *      which, depending on full shadow mode, may or may not equal
 *      its mfn).
 *      The shadow status it returns is a mfn.
 */
static inline unsigned long __shadow_status(
    struct domain *d, unsigned int gpfn)
{
    struct shadow_status *p, *x, *head;

    x = head = hash_bucket(d, gpfn);
    p = NULL;

    //SH_VVLOG("lookup gpfn=%08x bucket=%p", gpfn, x);
    shadow_audit(d, 0);

    do
    {
        ASSERT(x->pfn || ((x == head) && (x->next == NULL)));

        if ( x->pfn == gpfn )
        {
            /* Pull-to-front if 'x' isn't already the head item. */
            if ( unlikely(x != head) )
            {
                /* Delete 'x' from list and reinsert immediately after head. */
                p->next = x->next;
                x->next = head->next;
                head->next = x;

                /* Swap 'x' contents with head contents. */
                SWAP(head->pfn, x->pfn);
                SWAP(head->smfn_and_flags, x->smfn_and_flags);
            }

            SH_VVLOG("lookup gpfn=%p => status=%p",
                     gpfn, head->smfn_and_flags);
            return head->smfn_and_flags;
        }

        p = x;
        x = x->next;
    }
    while ( x != NULL );

    SH_VVLOG("lookup gpfn=%p => status=0", gpfn);
    return 0;
}
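
/*
 * Note the pull-to-front heuristic above: a successful lookup that wasn't
 * already at the bucket head has its contents swapped into the head node, so
 * repeated queries for the same gpfn (the common case while a single page
 * table is being updated) hit on the first comparison.
 */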

/*
 * N.B. We can make this locking more fine grained (e.g., per shadow page) if
 * it ever becomes a problem, but since we need a spin lock on the hash table
 * anyway it's probably not worth being too clever.
 */
static inline unsigned long get_shadow_status(
    struct domain *d, unsigned int gpfn )
{
    unsigned long res;

    ASSERT(shadow_mode(d));

    /*
     * If we get here we know that some sort of update has happened to the
     * underlying page table page: either a PTE has been updated, or the page
     * has changed type. If we're in log dirty mode, we should set the
     * appropriate bit in the dirty bitmap.
     * N.B. The VA update path doesn't use this and is handled independently.
     */

    shadow_lock(d);

    if ( shadow_mode(d) == SHM_logdirty )
        __mark_dirty(d, gpfn);

    if ( !(res = __shadow_status(d, gpfn)) )
        shadow_unlock(d);

    return res;
}


static inline void put_shadow_status(
    struct domain *d)
{
    shadow_unlock(d);
}
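
/*
 * Lock pairing: get_shadow_status() returns with shadow_lock still held
 * whenever it finds a shadow (non-zero result), and the caller must then
 * drop it with put_shadow_status(). On a miss the lock is released
 * internally and nothing further is required. A minimal sketch of the
 * expected pattern:
 *
 *     unsigned long s;
 *     if ( (s = get_shadow_status(d, gpfn)) != 0 )
 *     {
 *         ... update the shadow page named by (s & PSH_pfn_mask) ...
 *         put_shadow_status(d);
 *     }
 */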

static inline void delete_shadow_status(
    struct domain *d, unsigned int gpfn)
{
    struct shadow_status *p, *x, *n, *head;

    ASSERT(spin_is_locked(&d->arch.shadow_lock));
    ASSERT(gpfn != 0);

    head = hash_bucket(d, gpfn);

    SH_VVLOG("delete gpfn=%08x bucket=%p", gpfn, head);
    shadow_audit(d, 0);

    /* Match on head item? */
    if ( head->pfn == gpfn )
    {
        if ( (n = head->next) != NULL )
        {
            /* Overwrite head with contents of following node. */
            head->pfn            = n->pfn;
            head->smfn_and_flags = n->smfn_and_flags;

            /* Delete following node. */
            head->next           = n->next;

            /* Add deleted node to the free list. */
            n->pfn            = 0;
            n->smfn_and_flags = 0;
            n->next           = d->arch.shadow_ht_free;
            d->arch.shadow_ht_free = n;
        }
        else
        {
            /* This bucket is now empty. Initialise the head node. */
            head->pfn            = 0;
            head->smfn_and_flags = 0;
        }

        goto found;
    }

    p = head;
    x = head->next;

    do
    {
        if ( x->pfn == gpfn )
        {
            /* Delete matching node. */
            p->next = x->next;

            /* Add deleted node to the free list. */
            x->pfn            = 0;
            x->smfn_and_flags = 0;
            x->next           = d->arch.shadow_ht_free;
            d->arch.shadow_ht_free = x;

            goto found;
        }

        p = x;
        x = x->next;
    }
    while ( x != NULL );

    /* If we got here, it wasn't in the list! */
    BUG();

 found:
    shadow_audit(d, 0);
}

static inline void set_shadow_status(
    struct domain *d, unsigned int gpfn, unsigned long s)
{
    struct shadow_status *x, *head, *extra;
    int i;

    ASSERT(spin_is_locked(&d->arch.shadow_lock));
    ASSERT(gpfn != 0);
    ASSERT(s & PSH_shadowed);

    x = head = hash_bucket(d, gpfn);

    SH_VVLOG("set gpfn=%08x s=%p bucket=%p(%p)", gpfn, s, x, x->next);
    shadow_audit(d, 0);

    /*
     * STEP 1. If page is already in the table, update it in place.
     */

    do
    {
        if ( x->pfn == gpfn )
        {
            x->smfn_and_flags = s;
            goto done;
        }

        x = x->next;
    }
    while ( x != NULL );

    /*
     * STEP 2. The page must be inserted into the table.
     */

    /* If the bucket is empty then insert the new page as the head item. */
    if ( head->pfn == 0 )
    {
        head->pfn            = gpfn;
        head->smfn_and_flags = s;
        ASSERT(head->next == NULL);
        goto done;
    }

    /* We need to allocate a new node. Ensure the quicklist is non-empty. */
    if ( unlikely(d->arch.shadow_ht_free == NULL) )
    {
        SH_LOG("Allocate more shadow hashtable blocks.");

        extra = xmalloc_bytes(
            sizeof(void *) + (shadow_ht_extra_size * sizeof(*x)));

        /* XXX Should be more graceful here. */
        if ( extra == NULL )
            BUG();

        memset(extra, 0, sizeof(void *) + (shadow_ht_extra_size * sizeof(*x)));

        /* Record the allocation block so it can be correctly freed later. */
        d->arch.shadow_extras_count++;
        *((struct shadow_status **)&extra[shadow_ht_extra_size]) =
            d->arch.shadow_ht_extras;
        d->arch.shadow_ht_extras = &extra[0];

        /* Thread a free chain through the newly-allocated nodes. */
        for ( i = 0; i < (shadow_ht_extra_size - 1); i++ )
            extra[i].next = &extra[i+1];
        extra[i].next = NULL;

        /* Add the new nodes to the free list. */
        d->arch.shadow_ht_free = &extra[0];
    }

    /* Allocate a new node from the quicklist. */
    x                      = d->arch.shadow_ht_free;
    d->arch.shadow_ht_free = x->next;

    /* Initialise the new node and insert directly after the head item. */
    x->pfn            = gpfn;
    x->smfn_and_flags = s;
    x->next           = head->next;
    head->next        = x;

 done:
    shadow_audit(d, 0);
}

#ifdef CONFIG_VMX

static inline void vmx_update_shadow_state(
    struct exec_domain *ed, unsigned long gpfn, unsigned long smfn)
{
    l2_pgentry_t *mpl2e = 0;
    l2_pgentry_t *gpl2e, *spl2e;

    /* unmap the old mappings */
    if ( ed->arch.shadow_vtable )
        unmap_domain_mem(ed->arch.shadow_vtable);
    if ( ed->arch.vpagetable )
        unmap_domain_mem(ed->arch.vpagetable);

    /* new mapping */
    mpl2e = (l2_pgentry_t *)
        map_domain_mem(pagetable_val(ed->arch.monitor_table));

    mpl2e[l2_table_offset(SH_LINEAR_PT_VIRT_START)] =
        mk_l2_pgentry((smfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
    __flush_tlb_one(SH_LINEAR_PT_VIRT_START);

    spl2e = (l2_pgentry_t *)map_domain_mem(smfn << PAGE_SHIFT);
    gpl2e = (l2_pgentry_t *)map_domain_mem(gpfn << PAGE_SHIFT);
    memset(spl2e, 0, ENTRIES_PER_L2_PAGETABLE * sizeof(l2_pgentry_t));

    ed->arch.shadow_vtable = spl2e;
    ed->arch.vpagetable    = gpl2e; /* expect the guest did clean this up */
    unmap_domain_mem(mpl2e);
}

#endif /* CONFIG_VMX */

static inline void __shadow_mk_pagetable(struct exec_domain *ed)
{
    struct domain *d = ed->domain;
    unsigned long gpfn = pagetable_val(ed->arch.pagetable) >> PAGE_SHIFT;
    unsigned long smfn = __shadow_status(d, gpfn) & PSH_pfn_mask;

    SH_VVLOG("0: __shadow_mk_pagetable(gpfn=%p, smfn=%p)", gpfn, smfn);

    if ( unlikely(smfn == 0) )
        smfn = shadow_l2_table(d, gpfn);
#ifdef CONFIG_VMX
    else
        if ( d->arch.shadow_mode == SHM_full_32 )
            vmx_update_shadow_state(ed, gpfn, smfn);
#endif

    ed->arch.shadow_table = mk_pagetable(smfn << PAGE_SHIFT);
}

static inline void shadow_mk_pagetable(struct exec_domain *ed)
{
    if ( unlikely(shadow_mode(ed->domain)) )
    {
        SH_VVLOG("shadow_mk_pagetable( gptbase=%p, mode=%d )",
                 pagetable_val(ed->arch.pagetable),
                 shadow_mode(ed->domain));

        shadow_lock(ed->domain);
        __shadow_mk_pagetable(ed);
        shadow_unlock(ed->domain);

        SH_VVLOG("leaving shadow_mk_pagetable:\n"
                 "( gptbase=%p, mode=%d ) sh=%p",
                 pagetable_val(ed->arch.pagetable),
                 shadow_mode(ed->domain),
                 pagetable_val(ed->arch.shadow_table));
    }
}
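
/*
 * shadow_mk_pagetable() is the glue between a guest pagetable switch and the
 * shadow machinery: whenever ed->arch.pagetable is (re)loaded it looks up --
 * or, on a miss, builds via shadow_l2_table() -- the shadow L2 for the new
 * base and stashes it in ed->arch.shadow_table, which is presumably what the
 * context-switch path actually installs in CR3 while shadowing is enabled.
 */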

#if SHADOW_DEBUG
extern void check_pagetable(struct domain *d, pagetable_t pt, char *s);
#else
#define check_pagetable(d, pt, s) ((void)0)
#endif

#endif /* XEN_SHADOW_H */