view xen/include/asm-x86/shadow.h @ 3715:d93748c50893

bitkeeper revision 1.1159.212.100 (42050e5fWLAKCQAvoZ3CPmyAaL-51g)

Reorganise 'struct domain' and 'struct exec_domain' to each have an
architecture-specific portion. Removed 'mm_struct'.
Signed-off-by: keir.fraser@cl.cam.ac.uk
author kaf24@viper.(none)
date Sat Feb 05 18:20:15 2005 +0000 (2005-02-05)
parents 677cb76cff18
children 88957a238191 ef5e5cd10778
/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */

#ifndef _XEN_SHADOW_H
#define _XEN_SHADOW_H

#include <xen/config.h>
#include <xen/types.h>
#include <xen/perfc.h>
#include <asm/processor.h>

/* Shadow PT flag bits in pfn_info */
#define PSH_shadowed    (1<<31) /* page has a shadow. PFN points to shadow */
#define PSH_pfn_mask    ((1<<21)-1)
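
/*
 * A shadow-status value (see 'spfn_and_flags' below) packs the shadow pfn
 * into the low bits covered by PSH_pfn_mask, with PSH_shadowed set in bit
 * 31 to indicate that a shadow exists.
 */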

/* Shadow PT operation mode : shadow-mode variable in arch_domain. */
#define SHM_test        (1) /* just run domain on shadow PTs */
#define SHM_logdirty    (2) /* log pages that are dirtied */
#define SHM_translate   (3) /* lookup machine pages in translation table */
#define SHM_cow         (4) /* copy on write all dirtied pages */
#define SHM_full_32     (8) /* full virtualization for 32-bit */

#define shadow_linear_pg_table ((l1_pgentry_t *)SH_LINEAR_PT_VIRT_START)
#define shadow_linear_l2_table ((l2_pgentry_t *)(SH_LINEAR_PT_VIRT_START + \
    (SH_LINEAR_PT_VIRT_START >> (L2_PAGETABLE_SHIFT - L1_PAGETABLE_SHIFT))))

#define shadow_mode(_d)      ((_d)->arch.shadow_mode)
#define shadow_lock_init(_d) spin_lock_init(&(_d)->arch.shadow_lock)
#define shadow_lock(_d)      spin_lock(&(_d)->arch.shadow_lock)
#define shadow_unlock(_d)    spin_unlock(&(_d)->arch.shadow_lock)

extern void shadow_mode_init(void);
extern int shadow_mode_control(struct domain *p, dom0_shadow_control_t *sc);
extern int shadow_fault(unsigned long va, long error_code);
extern void shadow_l1_normal_pt_update(
    unsigned long pa, unsigned long gpte,
    unsigned long *prev_spfn_ptr, l1_pgentry_t **prev_spl1e_ptr);
extern void shadow_l2_normal_pt_update(unsigned long pa, unsigned long gpte);
extern void unshadow_table(unsigned long gpfn, unsigned int type);
extern int shadow_mode_enable(struct domain *p, unsigned int mode);

#ifdef CONFIG_VMX
extern void vmx_shadow_clear_state(struct domain *);
extern void vmx_shadow_invlpg(struct domain *, unsigned long);
#endif

#define __get_machine_to_phys(_d, guest_gpfn, gpfn)         \
    if ((_d)->arch.shadow_mode == SHM_full_32)              \
        (guest_gpfn) = machine_to_phys_mapping[(gpfn)];     \
    else                                                    \
        (guest_gpfn) = (gpfn);

#define __get_phys_to_machine(_d, host_gpfn, gpfn)          \
    if ((_d)->arch.shadow_mode == SHM_full_32)              \
        (host_gpfn) = phys_to_machine_mapping[(gpfn)];      \
    else                                                    \
        (host_gpfn) = (gpfn);
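
/*
 * Note: the two helpers above only perform a real translation for a
 * fully-virtualised (SHM_full_32) domain, where guest-physical frame
 * numbers differ from machine frame numbers; for paravirtualised domains
 * they are the identity.  Illustrative use (hypothetical local variable
 * names):
 *
 *     unsigned long guest_gpfn;
 *     __get_machine_to_phys(d, guest_gpfn, mfn);    (guest view of 'mfn')
 */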

extern void __shadow_mode_disable(struct domain *d);
static inline void shadow_mode_disable(struct domain *d)
{
    if ( shadow_mode(d) )
        __shadow_mode_disable(d);
}

extern unsigned long shadow_l2_table(
    struct domain *d, unsigned long gpfn);

static inline void shadow_invalidate(struct exec_domain *ed)
{
    if ( ed->domain->arch.shadow_mode != SHM_full_32 )
        BUG();
    memset(ed->arch.shadow_vtable, 0, PAGE_SIZE);
}

#define SHADOW_DEBUG 1
#define SHADOW_HASH_DEBUG 1

struct shadow_status {
    unsigned long pfn;            /* Guest pfn.             */
    unsigned long spfn_and_flags; /* Shadow pfn plus flags. */
    struct shadow_status *next;   /* Pull-to-front list.    */
};
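
/*
 * The shadow hash table maps a guest pfn to its shadow-status value.  It is
 * an array of 'shadow_ht_buckets' head nodes (d->arch.shadow_ht), each the
 * start of a singly-linked, pull-to-front chain; overflow nodes come from
 * the per-domain free list (d->arch.shadow_ht_free), which is replenished
 * in blocks of 'shadow_ht_extra_size' entries by set_shadow_status().
 */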

#define shadow_ht_extra_size 128
#define shadow_ht_buckets    256

#ifdef VERBOSE
#define SH_LOG(_f, _a...)                               \
    printk("DOM%u: (file=shadow.c, line=%d) " _f "\n",  \
           current->domain->id , __LINE__ , ## _a )
#else
#define SH_LOG(_f, _a...)
#endif

#if SHADOW_DEBUG
#define SH_VLOG(_f, _a...)                              \
    printk("DOM%u: (file=shadow.c, line=%d) " _f "\n",  \
           current->domain->id , __LINE__ , ## _a )
#else
#define SH_VLOG(_f, _a...)
#endif

#if 0
#define SH_VVLOG(_f, _a...)                             \
    printk("DOM%u: (file=shadow.c, line=%d) " _f "\n",  \
           current->domain->id , __LINE__ , ## _a )
#else
#define SH_VVLOG(_f, _a...)
#endif

static inline void __shadow_get_pl2e(
    struct exec_domain *ed, unsigned long va, unsigned long *sl2e)
{
    *sl2e = (ed->domain->arch.shadow_mode == SHM_full_32) ?
        l2_pgentry_val(ed->arch.shadow_vtable[l2_table_offset(va)]) :
        l2_pgentry_val(linear_l2_table[l2_table_offset(va)]);
}

static inline void __shadow_set_pl2e(
    struct exec_domain *ed, unsigned long va, unsigned long value)
{
    if ( ed->domain->arch.shadow_mode == SHM_full_32 )
        ed->arch.shadow_vtable[l2_table_offset(va)] = mk_l2_pgentry(value);
    else
        linear_l2_table[l2_table_offset(va)] = mk_l2_pgentry(value);
}

static inline void __guest_get_pl2e(
    struct exec_domain *ed, unsigned long va, unsigned long *l2e)
{
    *l2e = (ed->domain->arch.shadow_mode == SHM_full_32) ?
        l2_pgentry_val(ed->arch.vpagetable[l2_table_offset(va)]) :
        l2_pgentry_val(linear_l2_table[l2_table_offset(va)]);
}

static inline void __guest_set_pl2e(
    struct exec_domain *ed, unsigned long va, unsigned long value)
{
    if ( ed->domain->arch.shadow_mode == SHM_full_32 )
    {
        unsigned long pfn;

        pfn = phys_to_machine_mapping[value >> PAGE_SHIFT];
        ed->arch.guest_pl2e_cache[l2_table_offset(va)] =
            mk_l2_pgentry((pfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);

        ed->arch.vpagetable[l2_table_offset(va)] = mk_l2_pgentry(value);
    }
    else
    {
        linear_l2_table[l2_table_offset(va)] = mk_l2_pgentry(value);
    }
}
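
/*
 * For SHM_full_32 the guest L2 entry holds a guest-physical frame, so
 * __guest_set_pl2e() records the guest's raw value in 'vpagetable' and also
 * caches a translated copy of the entry (machine frame, __PAGE_HYPERVISOR
 * permissions) in 'guest_pl2e_cache', presumably so that Xen can later walk
 * the guest's L1 tables without re-translating the frame number.
 */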

/************************************************************************/

static inline int __mark_dirty(struct domain *d, unsigned int mfn)
{
    unsigned long pfn;
    int rc = 0;

    ASSERT(spin_is_locked(&d->arch.shadow_lock));
    ASSERT(d->arch.shadow_dirty_bitmap != NULL);

    pfn = machine_to_phys_mapping[mfn];

    /*
     * Values with the MSB set denote MFNs that aren't really part of the
     * domain's pseudo-physical memory map (e.g., the shared info frame).
     * Nothing to do here...
     */
    if ( unlikely(pfn & 0x80000000UL) )
        return rc;

    if ( likely(pfn < d->arch.shadow_dirty_bitmap_size) )
    {
        /* N.B. Can use non-atomic TAS because protected by shadow_lock. */
        if ( !__test_and_set_bit(pfn, d->arch.shadow_dirty_bitmap) )
        {
            d->arch.shadow_dirty_count++;
            rc = 1;
        }
    }
#ifndef NDEBUG
    else if ( mfn < max_page )
    {
        SH_LOG("mark_dirty OOR! mfn=%x pfn=%lx max=%x (dom %p)",
               mfn, pfn, d->arch.shadow_dirty_bitmap_size, d);
        SH_LOG("dom=%p caf=%08x taf=%08x\n",
               page_get_owner(&frame_table[mfn]),
               frame_table[mfn].count_info,
               frame_table[mfn].u.inuse.type_info );
    }
#endif

    return rc;
}


static inline int mark_dirty(struct domain *d, unsigned int mfn)
{
    int rc;
    shadow_lock(d);
    rc = __mark_dirty(d, mfn);
    shadow_unlock(d);
    return rc;
}
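
/*
 * Locking convention: __mark_dirty() must be called with the shadow lock
 * already held (it asserts this); mark_dirty() is the self-locking wrapper
 * for callers outside a shadow_lock()/shadow_unlock() region.  A sketch of
 * a typical caller (illustrative only):
 *
 *     shadow_lock(d);
 *     __mark_dirty(d, mfn);     (e.g. after updating a guest PTE)
 *     shadow_unlock(d);
 */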

/************************************************************************/

static inline void l1pte_write_fault(
    struct domain *d, unsigned long *gpte_p, unsigned long *spte_p)
{
    unsigned long gpte = *gpte_p;
    unsigned long spte = *spte_p;

    ASSERT(gpte & _PAGE_RW);
    gpte |= _PAGE_DIRTY | _PAGE_ACCESSED;

    switch ( d->arch.shadow_mode )
    {
    case SHM_test:
        spte = gpte | _PAGE_RW;
        break;

    case SHM_logdirty:
        spte = gpte | _PAGE_RW;
        __mark_dirty(d, gpte >> PAGE_SHIFT);
        break;

    case SHM_full_32:
    {
        unsigned long host_pfn, host_gpte;

        host_pfn = phys_to_machine_mapping[gpte >> PAGE_SHIFT];
        host_gpte = (host_pfn << PAGE_SHIFT) | (gpte & ~PAGE_MASK);
        spte = host_gpte | _PAGE_RW;
    }
        break;
    }

    SH_VVLOG("updating spte=%lx gpte=%lx", spte, gpte);
    *gpte_p = gpte;
    *spte_p = spte;
}

static inline void l1pte_read_fault(
    struct domain *d, unsigned long *gpte_p, unsigned long *spte_p)
{
    unsigned long gpte = *gpte_p;
    unsigned long spte = *spte_p;

    gpte |= _PAGE_ACCESSED;

    switch ( d->arch.shadow_mode )
    {
    case SHM_test:
        spte = (gpte & _PAGE_DIRTY) ? gpte : (gpte & ~_PAGE_RW);
        break;

    case SHM_logdirty:
        spte = gpte & ~_PAGE_RW;
        break;

    case SHM_full_32:
    {
        unsigned long host_pfn, host_gpte;

        host_pfn = phys_to_machine_mapping[gpte >> PAGE_SHIFT];
        host_gpte = (host_pfn << PAGE_SHIFT) | (gpte & ~PAGE_MASK);
        spte = (host_gpte & _PAGE_DIRTY) ? host_gpte : (host_gpte & ~_PAGE_RW);
    }
        break;
    }

    *gpte_p = gpte;
    *spte_p = spte;
}

static inline void l1pte_propagate_from_guest(
    struct domain *d, unsigned long *gpte_p, unsigned long *spte_p)
{
    unsigned long gpte = *gpte_p;
    unsigned long spte = *spte_p;
    unsigned long host_pfn, host_gpte;

    switch ( d->arch.shadow_mode )
    {
    case SHM_test:
        spte = 0;
        if ( (gpte & (_PAGE_PRESENT|_PAGE_ACCESSED) ) ==
             (_PAGE_PRESENT|_PAGE_ACCESSED) )
            spte = (gpte & _PAGE_DIRTY) ? gpte : (gpte & ~_PAGE_RW);
        break;

    case SHM_logdirty:
        spte = 0;
        if ( (gpte & (_PAGE_PRESENT|_PAGE_ACCESSED) ) ==
             (_PAGE_PRESENT|_PAGE_ACCESSED) )
            spte = gpte & ~_PAGE_RW;
        break;

    case SHM_full_32:
        spte = 0;

        if ( mmio_space(gpte & 0xFFFFF000) )
        {
            *spte_p = spte;
            return;
        }

        host_pfn = phys_to_machine_mapping[gpte >> PAGE_SHIFT];
        host_gpte = (host_pfn << PAGE_SHIFT) | (gpte & ~PAGE_MASK);

        if ( (host_gpte & (_PAGE_PRESENT|_PAGE_ACCESSED) ) ==
             (_PAGE_PRESENT|_PAGE_ACCESSED) )
            spte = (host_gpte & _PAGE_DIRTY) ?
                host_gpte : (host_gpte & ~_PAGE_RW);

        break;
    }

    *gpte_p = gpte;
    *spte_p = spte;
}
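
/*
 * Summary of the propagation rules above: a shadow L1 entry is only
 * installed once the guest PTE is both present and accessed, and it is
 * made writable only if the guest PTE is already dirty (SHM_test,
 * SHM_full_32) or, for SHM_logdirty, only after a write fault has gone
 * through l1pte_write_fault() and marked the page in the dirty bitmap.
 */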

static inline void l2pde_general(
    struct domain *d,
    unsigned long *gpde_p,
    unsigned long *spde_p,
    unsigned long sl1pfn)
{
    unsigned long gpde = *gpde_p;
    unsigned long spde = *spde_p;

    spde = 0;

    if ( sl1pfn != 0 )
    {
        spde = (gpde & ~PAGE_MASK) | (sl1pfn << PAGE_SHIFT) |
            _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY;
        gpde |= _PAGE_ACCESSED | _PAGE_DIRTY;

        /* Detect linear p.t. mappings and write-protect them. */
        if ( (frame_table[sl1pfn].u.inuse.type_info & PGT_type_mask) ==
             PGT_l2_page_table )
        {
            if ( d->arch.shadow_mode != SHM_full_32 )
                spde = gpde & ~_PAGE_RW;
        }
    }

    *gpde_p = gpde;
    *spde_p = spde;
}
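
/*
 * The write-protection above covers the guest's linear page-table trick:
 * if the frame being installed as an "L1" is in fact an L2 page table
 * (i.e. a self-referencing L2 slot), the shadow entry is made read-only so
 * that page-table writes through the linear window still trap into the
 * shadow fault handler.  The SHM_full_32 case is left writable, presumably
 * because fully-virtualised guests are handled by a different path.
 */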

/*********************************************************************/

#if SHADOW_HASH_DEBUG
static void shadow_audit(struct domain *d, int print)
{
    int live = 0, free = 0, j = 0, abs;
    struct shadow_status *a;

    for ( j = 0; j < shadow_ht_buckets; j++ )
    {
        a = &d->arch.shadow_ht[j];
        if ( a->pfn ) { live++; ASSERT(a->spfn_and_flags & PSH_pfn_mask); }
        ASSERT(a->pfn < 0x00100000UL);
        a = a->next;
        while ( a && (live < 9999) )
        {
            live++;
            if ( (a->pfn == 0) || (a->spfn_and_flags == 0) )
            {
                printk("XXX live=%d pfn=%08lx sp=%08lx next=%p\n",
                       live, a->pfn, a->spfn_and_flags, a->next);
                BUG();
            }
            ASSERT(a->pfn < 0x00100000UL);
            ASSERT(a->spfn_and_flags & PSH_pfn_mask);
            a = a->next;
        }
        ASSERT(live < 9999);
    }

    for ( a = d->arch.shadow_ht_free; a != NULL; a = a->next )
        free++;

    if ( print )
        printk("Xlive=%d free=%d\n", live, free);

    abs = (perfc_value(shadow_l1_pages) + perfc_value(shadow_l2_pages)) - live;
    if ( (abs < -1) || (abs > 1) )
    {
        printk("live=%d free=%d l1=%d l2=%d\n", live, free,
               perfc_value(shadow_l1_pages), perfc_value(shadow_l2_pages) );
        BUG();
    }
}
#else
#define shadow_audit(p, print) ((void)0)
#endif


static inline struct shadow_status *hash_bucket(
    struct domain *d, unsigned int gpfn)
{
    return &d->arch.shadow_ht[gpfn % shadow_ht_buckets];
}

static inline unsigned long __shadow_status(
    struct domain *d, unsigned int gpfn)
{
    struct shadow_status *p, *x, *head;

    x = head = hash_bucket(d, gpfn);
    p = NULL;

    SH_VVLOG("lookup gpfn=%08x bucket=%p", gpfn, x);
    shadow_audit(d, 0);

    do
    {
        ASSERT(x->pfn || ((x == head) && (x->next == NULL)));

        if ( x->pfn == gpfn )
        {
            /* Pull-to-front if 'x' isn't already the head item. */
            if ( unlikely(x != head) )
            {
                /* Delete 'x' from list and reinsert immediately after head. */
                p->next = x->next;
                x->next = head->next;
                head->next = x;

                /* Swap 'x' contents with head contents. */
                SWAP(head->pfn, x->pfn);
                SWAP(head->spfn_and_flags, x->spfn_and_flags);
            }

            return head->spfn_and_flags;
        }

        p = x;
        x = x->next;
    }
    while ( x != NULL );

    return 0;
}

/*
 * N.B. We can make this locking more fine grained (e.g., per shadow page) if
 * it ever becomes a problem, but since we need a spin lock on the hash table
 * anyway it's probably not worth being too clever.
 */
static inline unsigned long get_shadow_status(
    struct domain *d, unsigned int gpfn )
{
    unsigned long res;

    ASSERT(d->arch.shadow_mode);

    /*
     * If we get here we know that some sort of update has happened to the
     * underlying page table page: either a PTE has been updated, or the page
     * has changed type. If we're in log dirty mode, we should set the
     * appropriate bit in the dirty bitmap.
     * N.B. The VA update path doesn't use this and is handled independently.
     */

    shadow_lock(d);

    if ( d->arch.shadow_mode == SHM_logdirty )
        __mark_dirty(d, gpfn);

    if ( !(res = __shadow_status(d, gpfn)) )
        shadow_unlock(d);

    return res;
}


static inline void put_shadow_status(
    struct domain *d)
{
    shadow_unlock(d);
}
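
/*
 * Usage note: get_shadow_status() returns with the shadow lock still held
 * whenever it finds a shadow (non-zero result), and the caller must then
 * drop the lock with put_shadow_status(); on a zero result the lock has
 * already been released.  A sketch of the expected pattern (illustrative
 * only):
 *
 *     unsigned long s;
 *     if ( (s = get_shadow_status(d, gpfn)) != 0 )
 *     {
 *         ... update the shadow page identified by s & PSH_pfn_mask ...
 *         put_shadow_status(d);
 *     }
 */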

static inline void delete_shadow_status(
    struct domain *d, unsigned int gpfn)
{
    struct shadow_status *p, *x, *n, *head;

    ASSERT(spin_is_locked(&d->arch.shadow_lock));
    ASSERT(gpfn != 0);

    head = hash_bucket(d, gpfn);

    SH_VVLOG("delete gpfn=%08x bucket=%p", gpfn, head);
    shadow_audit(d, 0);

    /* Match on head item? */
    if ( head->pfn == gpfn )
    {
        if ( (n = head->next) != NULL )
        {
            /* Overwrite head with contents of following node. */
            head->pfn            = n->pfn;
            head->spfn_and_flags = n->spfn_and_flags;

            /* Delete following node. */
            head->next           = n->next;

            /* Add deleted node to the free list. */
            n->pfn            = 0;
            n->spfn_and_flags = 0;
            n->next           = d->arch.shadow_ht_free;
            d->arch.shadow_ht_free = n;
        }
        else
        {
            /* This bucket is now empty. Initialise the head node. */
            head->pfn            = 0;
            head->spfn_and_flags = 0;
        }

        goto found;
    }

    p = head;
    x = head->next;

    do
    {
        if ( x->pfn == gpfn )
        {
            /* Delete matching node. */
            p->next = x->next;

            /* Add deleted node to the free list. */
            x->pfn            = 0;
            x->spfn_and_flags = 0;
            x->next           = d->arch.shadow_ht_free;
            d->arch.shadow_ht_free = x;

            goto found;
        }

        p = x;
        x = x->next;
    }
    while ( x != NULL );

    /* If we got here, it wasn't in the list! */
    BUG();

 found:
    shadow_audit(d, 0);
}

static inline void set_shadow_status(
    struct domain *d, unsigned int gpfn, unsigned long s)
{
    struct shadow_status *x, *head, *extra;
    int i;

    ASSERT(spin_is_locked(&d->arch.shadow_lock));
    ASSERT(gpfn != 0);
    ASSERT(s & PSH_shadowed);

    x = head = hash_bucket(d, gpfn);

    SH_VVLOG("set gpfn=%08x s=%08lx bucket=%p(%p)", gpfn, s, x, x->next);
    shadow_audit(d, 0);

    /*
     * STEP 1. If page is already in the table, update it in place.
     */

    do
    {
        if ( x->pfn == gpfn )
        {
            x->spfn_and_flags = s;
            goto done;
        }

        x = x->next;
    }
    while ( x != NULL );

    /*
     * STEP 2. The page must be inserted into the table.
     */

    /* If the bucket is empty then insert the new page as the head item. */
    if ( head->pfn == 0 )
    {
        head->pfn = gpfn;
        head->spfn_and_flags = s;
        ASSERT(head->next == NULL);
        goto done;
    }

    /* We need to allocate a new node. Ensure the quicklist is non-empty. */
    if ( unlikely(d->arch.shadow_ht_free == NULL) )
    {
        SH_LOG("Allocate more shadow hashtable blocks.");

        extra = xmalloc_bytes(
            sizeof(void *) + (shadow_ht_extra_size * sizeof(*x)));

        /* XXX Should be more graceful here. */
        if ( extra == NULL )
            BUG();

        memset(extra, 0, sizeof(void *) + (shadow_ht_extra_size * sizeof(*x)));

        /* Record the allocation block so it can be correctly freed later. */
        d->arch.shadow_extras_count++;
        *((struct shadow_status **)&extra[shadow_ht_extra_size]) =
            d->arch.shadow_ht_extras;
        d->arch.shadow_ht_extras = &extra[0];

        /* Thread a free chain through the newly-allocated nodes. */
        for ( i = 0; i < (shadow_ht_extra_size - 1); i++ )
            extra[i].next = &extra[i+1];
        extra[i].next = NULL;

        /* Add the new nodes to the free list. */
        d->arch.shadow_ht_free = &extra[0];
    }

    /* Allocate a new node from the quicklist. */
    x = d->arch.shadow_ht_free;
    d->arch.shadow_ht_free = x->next;

    /* Initialise the new node and insert directly after the head item. */
    x->pfn = gpfn;
    x->spfn_and_flags = s;
    x->next = head->next;
    head->next = x;

 done:
    shadow_audit(d, 0);
}
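
/*
 * Layout of an allocation block created above: 'shadow_ht_extra_size'
 * shadow_status nodes followed by a single struct shadow_status * stored
 * just past the last node, which links the block onto the per-domain
 * 'shadow_ht_extras' chain so the blocks can be found and freed later.
 */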

#ifdef CONFIG_VMX
#include <asm/domain_page.h>

static inline void vmx_update_shadow_state(
    struct exec_domain *ed, unsigned long gpfn, unsigned long spfn)
{
    l2_pgentry_t *mpl2e = 0;
    l2_pgentry_t *gpl2e, *spl2e;

    /* unmap the old mappings */
    if ( ed->arch.shadow_vtable )
        unmap_domain_mem(ed->arch.shadow_vtable);
    if ( ed->arch.vpagetable )
        unmap_domain_mem(ed->arch.vpagetable);

    /* new mapping */
    mpl2e = (l2_pgentry_t *)
        map_domain_mem(pagetable_val(ed->arch.monitor_table));

    mpl2e[l2_table_offset(SH_LINEAR_PT_VIRT_START)] =
        mk_l2_pgentry((spfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
    __flush_tlb_one(SH_LINEAR_PT_VIRT_START);

    spl2e = (l2_pgentry_t *)map_domain_mem(spfn << PAGE_SHIFT);
    gpl2e = (l2_pgentry_t *)map_domain_mem(gpfn << PAGE_SHIFT);
    memset(spl2e, 0, ENTRIES_PER_L2_PAGETABLE * sizeof(l2_pgentry_t));

    ed->arch.shadow_table = mk_pagetable(spfn<<PAGE_SHIFT);
    ed->arch.shadow_vtable = spl2e;
    ed->arch.vpagetable = gpl2e; /* expect the guest did clean this up */
    unmap_domain_mem(mpl2e);
}

static inline void __shadow_mk_pagetable(struct exec_domain *ed)
{
    struct domain *d = ed->domain;
    unsigned long gpfn = pagetable_val(ed->arch.pagetable) >> PAGE_SHIFT;
    unsigned long spfn;
    SH_VLOG("0: __shadow_mk_pagetable(gpfn=%08lx)", gpfn);

    if ( d->arch.shadow_mode == SHM_full_32 )
    {
        unsigned long guest_gpfn;
        guest_gpfn = machine_to_phys_mapping[gpfn];

        SH_VVLOG("__shadow_mk_pagetable(guest_gpfn=%08lx, gpfn=%08lx)",
                 guest_gpfn, gpfn);

        spfn = __shadow_status(d, guest_gpfn) & PSH_pfn_mask;
        if ( unlikely(spfn == 0) ) {
            spfn = shadow_l2_table(d, gpfn);
            ed->arch.shadow_table = mk_pagetable(spfn<<PAGE_SHIFT);
        } else {
            vmx_update_shadow_state(ed, gpfn, spfn);
        }
    } else {
        spfn = __shadow_status(d, gpfn) & PSH_pfn_mask;

        if ( unlikely(spfn == 0) ) {
            spfn = shadow_l2_table(d, gpfn);
        }
        ed->arch.shadow_table = mk_pagetable(spfn<<PAGE_SHIFT);
    }
}
#else
static inline void __shadow_mk_pagetable(struct exec_domain *ed)
{
    unsigned long gpfn = pagetable_val(ed->arch.pagetable) >> PAGE_SHIFT;
    unsigned long spfn = __shadow_status(ed->domain, gpfn);

    if ( unlikely(spfn == 0) )
        spfn = shadow_l2_table(ed->domain, gpfn);

    ed->arch.shadow_table = mk_pagetable(spfn << PAGE_SHIFT);
}
#endif /* CONFIG_VMX */

static inline void shadow_mk_pagetable(struct exec_domain *ed)
{
    if ( unlikely(ed->domain->arch.shadow_mode) )
    {
        SH_VVLOG("shadow_mk_pagetable( gptbase=%08lx, mode=%d )",
                 pagetable_val(ed->arch.pagetable),
                 ed->domain->arch.shadow_mode);

        shadow_lock(ed->domain);
        __shadow_mk_pagetable(ed);
        shadow_unlock(ed->domain);

        SH_VVLOG("leaving shadow_mk_pagetable:\n"
                 "( gptbase=%08lx, mode=%d ) sh=%08lx",
                 pagetable_val(ed->arch.pagetable),
                 ed->domain->arch.shadow_mode,
                 pagetable_val(ed->arch.shadow_table) );
    }
}
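
/*
 * shadow_mk_pagetable() is the cheap entry point the rest of Xen can call
 * unconditionally: it does nothing unless the domain is running in some
 * shadow mode, and otherwise (re)derives ed->arch.shadow_table from the
 * guest's current page table under the shadow lock.  It is presumably
 * invoked whenever a new guest page-table base is installed; the actual
 * call sites live outside this header.
 */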

#if SHADOW_DEBUG
extern int check_pagetable(struct domain *d, pagetable_t pt, char *s);
#else
#define check_pagetable(d, pt, s) ((void)0)
#endif

#endif /* _XEN_SHADOW_H */