debuggers.hg: view xen/include/asm-x86/shadow.h @ 3349:c754bd0be650

bitkeeper revision 1.1159.1.496 (41c85faeMBUejFtICiJueb_Xdh8yJA)

Priv-op emulation in Xen, for RDMSR/WRMSR/WBINVD. Cleaned up Linux a bit as a result.

author   kaf24@scramble.cl.cam.ac.uk
date     Tue Dec 21 17:38:54 2004 +0000 (2004-12-21)
parents  b9ab4345fd1b
children cd26f113b1b1 bc0fbb38cb25
/* -*- Mode:C; c-basic-offset:4; tab-width:4 -*- */

#ifndef _XEN_SHADOW_H
#define _XEN_SHADOW_H

#include <xen/config.h>
#include <xen/types.h>
#include <xen/perfc.h>
#include <asm/processor.h>

/* Shadow PT flag bits in pfn_info */
#define PSH_shadowed    (1<<31) /* page has a shadow. PFN points to shadow */
#define PSH_pfn_mask    ((1<<21)-1)

/* Shadow PT operation mode : shadowmode variable in mm_struct */
#define SHM_test        (1) /* just run domain on shadow PTs */
#define SHM_logdirty    (2) /* log pages that are dirtied */
#define SHM_translate   (3) /* lookup machine pages in translation table */
#define SHM_cow         (4) /* copy on write all dirtied pages */
#define SHM_full_32     (8) /* full virtualization for 32-bit */
#define shadow_linear_pg_table ((l1_pgentry_t *)SH_LINEAR_PT_VIRT_START)
#define shadow_linear_l2_table ((l2_pgentry_t *)(SH_LINEAR_PT_VIRT_START + \
     (SH_LINEAR_PT_VIRT_START >> (L2_PAGETABLE_SHIFT - L1_PAGETABLE_SHIFT))))

#define shadow_mode(_d)      ((_d)->mm.shadow_mode)
#define shadow_lock_init(_d) spin_lock_init(&(_d)->mm.shadow_lock)
#define shadow_lock(_m)      spin_lock(&(_m)->shadow_lock)
#define shadow_unlock(_m)    spin_unlock(&(_m)->shadow_lock)
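
/*
 * Locking convention: shadow_lock protects the shadow hash table and the
 * dirty bitmap.  Helpers prefixed with "__" (__mark_dirty, __shadow_status,
 * set_shadow_status, delete_shadow_status) expect the caller to already hold
 * the lock -- note the ASSERT(spin_is_locked(...)) checks below -- while the
 * unprefixed wrappers (mark_dirty, get_shadow_status/put_shadow_status,
 * shadow_mk_pagetable) acquire and release it themselves.
 */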
extern void shadow_mode_init(void);
extern int shadow_mode_control(struct domain *p, dom0_shadow_control_t *sc);
extern int shadow_fault(unsigned long va, long error_code);
extern void shadow_l1_normal_pt_update(
    unsigned long pa, unsigned long gpte,
    unsigned long *prev_spfn_ptr, l1_pgentry_t **prev_spl1e_ptr);
extern void shadow_l2_normal_pt_update(unsigned long pa, unsigned long gpte);
extern void unshadow_table(unsigned long gpfn, unsigned int type);
extern int shadow_mode_enable(struct domain *p, unsigned int mode);

#ifdef CONFIG_VMX
extern void vmx_shadow_clear_state(struct mm_struct *);
extern void vmx_shadow_invlpg(struct mm_struct *, unsigned long);
#endif
#define __get_machine_to_phys(m, guest_gpfn, gpfn)          \
    if ((m)->shadow_mode == SHM_full_32)                    \
        (guest_gpfn) = machine_to_phys_mapping[(gpfn)];     \
    else                                                    \
        (guest_gpfn) = (gpfn);

#define __get_phys_to_machine(m, host_gpfn, gpfn)           \
    if ((m)->shadow_mode == SHM_full_32)                    \
        (host_gpfn) = phys_to_machine_mapping[(gpfn)];      \
    else                                                    \
        (host_gpfn) = (gpfn);
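
/*
 * NB: the two macros above expand to a bare if/else statement (they are not
 * wrapped in do { ... } while (0)), so they are only safe when used as a
 * complete statement of their own, e.g. (illustrative sketch):
 *
 *     unsigned long guest_gpfn;
 *     __get_machine_to_phys(m, guest_gpfn, gpfn);
 *
 * Under SHM_full_32 they translate between machine and pseudo-physical frame
 * numbers via the global mapping arrays; in the other modes the two spaces
 * coincide and the frame number is passed through unchanged.
 */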
extern void __shadow_mode_disable(struct domain *d);
static inline void shadow_mode_disable(struct domain *d)
{
    if ( shadow_mode(d->exec_domain[0]) )
        __shadow_mode_disable(d);
}

extern unsigned long shadow_l2_table(
    struct mm_struct *m, unsigned long gpfn);

static inline void shadow_invalidate(struct mm_struct *m)
{
    if ( m->shadow_mode != SHM_full_32 )
        BUG();
    memset(m->shadow_vtable, 0, PAGE_SIZE);
}
#define SHADOW_DEBUG      0
#define SHADOW_HASH_DEBUG 0

struct shadow_status {
    unsigned long pfn;            /* Guest pfn.             */
    unsigned long spfn_and_flags; /* Shadow pfn plus flags. */
    struct shadow_status *next;   /* Pull-to-front list.    */
};

#define shadow_ht_extra_size 128
#define shadow_ht_buckets    256
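
/*
 * The shadow hash table maps a guest pfn to its shadow pfn plus PSH_* flags.
 * It consists of shadow_ht_buckets embedded head nodes (m->shadow_ht[]); on
 * collision, overflow nodes are taken from a free list (m->shadow_ht_free)
 * that is replenished in blocks of shadow_ht_extra_size nodes, each block
 * chained through m->shadow_ht_extras so it can be freed later.  Lookups
 * pull the matching entry to the front of its bucket chain.
 */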
#ifdef VERBOSE
#define SH_LOG(_f, _a...)                                               \
    printk("DOM%u: (file=shadow.c, line=%d) " _f "\n",                  \
           current->domain->id , __LINE__ , ## _a )
#else
#define SH_LOG(_f, _a...)
#endif

#if SHADOW_DEBUG
#define SH_VLOG(_f, _a...)                                              \
    printk("DOM%u: (file=shadow.c, line=%d) " _f "\n",                  \
           current->domain->id , __LINE__ , ## _a )
#else
#define SH_VLOG(_f, _a...)
#endif

#if 0
#define SH_VVLOG(_f, _a...)                                             \
    printk("DOM%u: (file=shadow.c, line=%d) " _f "\n",                  \
           current->domain->id , __LINE__ , ## _a )
#else
#define SH_VVLOG(_f, _a...)
#endif
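
/*
 * L2 entry accessors.  Under SHM_full_32 (full 32-bit virtualisation) the
 * shadow and guest L2 tables are reached through the mapped copies held in
 * mm->shadow_vtable and mm->vpagetable; in the other shadow modes the guest
 * runs on Xen-maintained page tables and both are reached through the
 * linear_l2_table self-mapping.
 */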
static inline void __shadow_get_pl2e(struct mm_struct *m,
                                     unsigned long va, unsigned long *sl2e)
{
    if ( m->shadow_mode == SHM_full_32 ) {
        *sl2e = l2_pgentry_val(m->shadow_vtable[va >> L2_PAGETABLE_SHIFT]);
    }
    else
        *sl2e = l2_pgentry_val(linear_l2_table[va >> L2_PAGETABLE_SHIFT]);
}

static inline void __shadow_set_pl2e(struct mm_struct *m,
                                     unsigned long va, unsigned long value)
{
    if ( m->shadow_mode == SHM_full_32 ) {
        m->shadow_vtable[va >> L2_PAGETABLE_SHIFT] = mk_l2_pgentry(value);
    }
    else
        linear_l2_table[va >> L2_PAGETABLE_SHIFT] = mk_l2_pgentry(value);
}

static inline void __guest_get_pl2e(struct mm_struct *m,
                                    unsigned long va, unsigned long *l2e)
{
    if ( m->shadow_mode == SHM_full_32 ) {
        *l2e = l2_pgentry_val(m->vpagetable[va >> L2_PAGETABLE_SHIFT]);
    }
    else
        *l2e = l2_pgentry_val(linear_l2_table[va >> L2_PAGETABLE_SHIFT]);
}

static inline void __guest_set_pl2e(struct mm_struct *m,
                                    unsigned long va, unsigned long value)
{
    if ( m->shadow_mode == SHM_full_32 ) {
        unsigned long pfn;

        pfn = phys_to_machine_mapping[value >> PAGE_SHIFT];
        m->guest_pl2e_cache[va >> L2_PAGETABLE_SHIFT] =
            mk_l2_pgentry((pfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);

        m->vpagetable[va >> L2_PAGETABLE_SHIFT] = mk_l2_pgentry(value);
    }
    else
        linear_l2_table[va >> L2_PAGETABLE_SHIFT] = mk_l2_pgentry(value);
}
/************************************************************************/

static inline int __mark_dirty(struct mm_struct *m, unsigned int mfn)
{
    unsigned long pfn;
    int rc = 0;

    ASSERT(spin_is_locked(&m->shadow_lock));
    ASSERT(m->shadow_dirty_bitmap != NULL);

    pfn = machine_to_phys_mapping[mfn];

    /*
     * Values with the MSB set denote MFNs that aren't really part of the
     * domain's pseudo-physical memory map (e.g., the shared info frame).
     * Nothing to do here...
     */
    if ( unlikely(pfn & 0x80000000UL) )
        return rc;

    if ( likely(pfn < m->shadow_dirty_bitmap_size) )
    {
        /* N.B. Can use non-atomic TAS because protected by shadow_lock. */
        if ( !__test_and_set_bit(pfn, m->shadow_dirty_bitmap) )
        {
            m->shadow_dirty_count++;
            rc = 1;
        }
    }
#ifndef NDEBUG
    else if ( mfn < max_page )
    {
        SH_LOG("mark_dirty OOR! mfn=%x pfn=%lx max=%x (mm %p)",
               mfn, pfn, m->shadow_dirty_bitmap_size, m );
        SH_LOG("dom=%p caf=%08x taf=%08x\n",
               frame_table[mfn].u.inuse.domain,
               frame_table[mfn].count_info,
               frame_table[mfn].u.inuse.type_info );
    }
#endif

    return rc;
}
static inline int mark_dirty(struct mm_struct *m, unsigned int mfn)
{
    int rc;
    shadow_lock(m);
    rc = __mark_dirty(m, mfn);
    shadow_unlock(m);
    return rc;
}
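
/*
 * mark_dirty() is the variant for callers that do not already hold
 * shadow_lock.  It returns 1 if the bit for the frame was newly set in the
 * dirty bitmap, and 0 if it was already set, out of bitmap range, or the
 * frame is not part of the domain's pseudo-physical map.  Paths that already
 * run under the lock (e.g. get_shadow_status() below) call __mark_dirty()
 * directly.
 */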
/************************************************************************/

static inline void l1pte_write_fault(
    struct mm_struct *m, unsigned long *gpte_p, unsigned long *spte_p)
{
    unsigned long gpte = *gpte_p;
    unsigned long spte = *spte_p;

    ASSERT(gpte & _PAGE_RW);
    gpte |= _PAGE_DIRTY | _PAGE_ACCESSED;

    switch ( m->shadow_mode )
    {
    case SHM_test:
        spte = gpte | _PAGE_RW;
        break;

    case SHM_logdirty:
        spte = gpte | _PAGE_RW;
        __mark_dirty(m, gpte >> PAGE_SHIFT);
        break; /* do not fall through into the full-virtualisation case */

    case SHM_full_32:
    {
        unsigned long host_pfn, host_gpte;

        host_pfn = phys_to_machine_mapping[gpte >> PAGE_SHIFT];
        host_gpte = (host_pfn << PAGE_SHIFT) | (gpte & ~PAGE_MASK);
        spte = host_gpte | _PAGE_RW;
    }
    break;
    }

    SH_VVLOG("updating spte=%lx gpte=%lx", spte, gpte);

    *gpte_p = gpte;
    *spte_p = spte;
}
static inline void l1pte_read_fault(
    struct mm_struct *m, unsigned long *gpte_p, unsigned long *spte_p)
{
    unsigned long gpte = *gpte_p;
    unsigned long spte = *spte_p;

    gpte |= _PAGE_ACCESSED;

    switch ( m->shadow_mode )
    {
    case SHM_test:
        spte = (gpte & _PAGE_DIRTY) ? gpte : (gpte & ~_PAGE_RW);
        break;

    case SHM_logdirty:
        spte = gpte & ~_PAGE_RW;
        break;

    case SHM_full_32:
    {
        unsigned long host_pfn, host_gpte;

        host_pfn = phys_to_machine_mapping[gpte >> PAGE_SHIFT];
        host_gpte = (host_pfn << PAGE_SHIFT) | (gpte & ~PAGE_MASK);
        spte = (host_gpte & _PAGE_DIRTY) ? host_gpte : (host_gpte & ~_PAGE_RW);
    }
    break;
    }

    *gpte_p = gpte;
    *spte_p = spte;
}
static inline void l1pte_propagate_from_guest(
    struct mm_struct *m, unsigned long *gpte_p, unsigned long *spte_p)
{
    unsigned long gpte = *gpte_p;
    unsigned long spte = *spte_p;

    switch ( m->shadow_mode )
    {
    case SHM_test:
        spte = 0;
        if ( (gpte & (_PAGE_PRESENT|_PAGE_ACCESSED)) ==
             (_PAGE_PRESENT|_PAGE_ACCESSED) )
            spte = (gpte & _PAGE_DIRTY) ? gpte : (gpte & ~_PAGE_RW);
        break;

    case SHM_logdirty:
        spte = 0;
        if ( (gpte & (_PAGE_PRESENT|_PAGE_ACCESSED)) ==
             (_PAGE_PRESENT|_PAGE_ACCESSED) )
            spte = gpte & ~_PAGE_RW;
        break;

    case SHM_full_32:
    {
        unsigned long host_pfn, host_gpte;

        host_pfn = phys_to_machine_mapping[gpte >> PAGE_SHIFT];
        host_gpte = (host_pfn << PAGE_SHIFT) | (gpte & ~PAGE_MASK);
        spte = 0;

        if ( (host_gpte & (_PAGE_PRESENT|_PAGE_ACCESSED)) ==
             (_PAGE_PRESENT|_PAGE_ACCESSED) )
            spte = (host_gpte & _PAGE_DIRTY) ? host_gpte :
                   (host_gpte & ~_PAGE_RW);
    }
    break;
    }

    *gpte_p = gpte;
    *spte_p = spte;
}
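
/*
 * Summary of the shadow L1 entry policy implemented above:
 *   SHM_test:     the shadow PTE mirrors the guest PTE, but stays read-only
 *                 until the guest PTE is dirty, so the first write faults
 *                 and sets _PAGE_DIRTY via l1pte_write_fault().
 *   SHM_logdirty: the shadow PTE is always installed read-only; the write
 *                 fault handler grants _PAGE_RW and records the frame in the
 *                 dirty bitmap via __mark_dirty().
 *   SHM_full_32:  like SHM_test, but the guest "physical" frame number is
 *                 first translated to a machine frame via
 *                 phys_to_machine_mapping.
 * In all modes nothing is propagated unless the guest PTE is both present
 * and accessed.
 */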
static inline void l2pde_general(
    struct mm_struct *m,
    unsigned long *gpde_p,
    unsigned long *spde_p,
    unsigned long sl1pfn)
{
    unsigned long gpde = *gpde_p;
    unsigned long spde = *spde_p;

    spde = 0;

    if ( sl1pfn != 0 )
    {
        spde = (gpde & ~PAGE_MASK) | (sl1pfn << PAGE_SHIFT) |
            _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY;
        gpde |= _PAGE_ACCESSED | _PAGE_DIRTY;

        /* Detect linear p.t. mappings and write-protect them. */
        if ( (frame_table[sl1pfn].u.inuse.type_info & PGT_type_mask) ==
             PGT_l2_page_table )
        {
            if ( m->shadow_mode != SHM_full_32 )
                spde = gpde & ~_PAGE_RW;
        }
    }

    *gpde_p = gpde;
    *spde_p = spde;
}
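
/*
 * l2pde_general() builds the shadow L2 entry: it keeps the guest PDE's low
 * flag bits but points at the shadow L1 frame (sl1pfn) and forces
 * RW/ACCESSED/DIRTY, with A/D state folded back into the guest PDE instead.
 * If sl1pfn turns out to be an L2 page -- i.e. a linear page-table
 * self-mapping -- the shadow entry falls back to the guest PDE with _PAGE_RW
 * cleared, except under SHM_full_32.
 */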
/*********************************************************************/

#if SHADOW_HASH_DEBUG
static void shadow_audit(struct mm_struct *m, int print)
{
    int live = 0, free = 0, j = 0, abs;
    struct shadow_status *a;

    for ( j = 0; j < shadow_ht_buckets; j++ )
    {
        a = &m->shadow_ht[j];
        if ( a->pfn ) { live++; ASSERT(a->spfn_and_flags & PSH_pfn_mask); }
        ASSERT(a->pfn < 0x00100000UL);
        a = a->next;
        while ( a && (live < 9999) )
        {
            live++;
            if ( (a->pfn == 0) || (a->spfn_and_flags == 0) )
            {
                printk("XXX live=%d pfn=%08lx sp=%08lx next=%p\n",
                       live, a->pfn, a->spfn_and_flags, a->next);
                BUG();
            }
            ASSERT(a->pfn < 0x00100000UL);
            ASSERT(a->spfn_and_flags & PSH_pfn_mask);
            a = a->next;
        }
        ASSERT(live < 9999);
    }

    for ( a = m->shadow_ht_free; a != NULL; a = a->next )
        free++;

    if ( print )
        printk("Xlive=%d free=%d\n", live, free);

    abs = (perfc_value(shadow_l1_pages) + perfc_value(shadow_l2_pages)) - live;
    if ( (abs < -1) || (abs > 1) )
    {
        printk("live=%d free=%d l1=%d l2=%d\n", live, free,
               perfc_value(shadow_l1_pages), perfc_value(shadow_l2_pages) );
        BUG();
    }
}
#else
#define shadow_audit(p, print) ((void)0)
#endif
static inline struct shadow_status *hash_bucket(
    struct mm_struct *m, unsigned int gpfn)
{
    return &m->shadow_ht[gpfn % shadow_ht_buckets];
}
static inline unsigned long __shadow_status(
    struct mm_struct *m, unsigned int gpfn)
{
    struct shadow_status *p, *x, *head;

    x = head = hash_bucket(m, gpfn);
    p = NULL;

    SH_VVLOG("lookup gpfn=%08x bucket=%p", gpfn, x);
    shadow_audit(m, 0);

    do
    {
        ASSERT(x->pfn || ((x == head) && (x->next == NULL)));

        if ( x->pfn == gpfn )
        {
            /* Pull-to-front if 'x' isn't already the head item. */
            if ( unlikely(x != head) )
            {
                /* Delete 'x' from list and reinsert immediately after head. */
                p->next = x->next;
                x->next = head->next;
                head->next = x;

                /* Swap 'x' contents with head contents. */
                SWAP(head->pfn, x->pfn);
                SWAP(head->spfn_and_flags, x->spfn_and_flags);
            }

            return head->spfn_and_flags;
        }

        p = x;
        x = x->next;
    }
    while ( x != NULL );

    return 0;
}
/*
 * N.B. We can make this locking more fine grained (e.g., per shadow page) if
 * it ever becomes a problem, but since we need a spin lock on the hash table
 * anyway it's probably not worth being too clever.
 */
static inline unsigned long get_shadow_status(
    struct mm_struct *m, unsigned int gpfn )
{
    unsigned long res;

    ASSERT(m->shadow_mode);

    /*
     * If we get here we know that some sort of update has happened to the
     * underlying page table page: either a PTE has been updated, or the page
     * has changed type. If we're in log dirty mode, we should set the
     * appropriate bit in the dirty bitmap.
     * N.B. The VA update path doesn't use this and is handled independently.
     */

    shadow_lock(m);

    if ( m->shadow_mode == SHM_logdirty )
        __mark_dirty( m, gpfn );

    if ( !(res = __shadow_status(m, gpfn)) )
        shadow_unlock(m);

    return res;
}
static inline void put_shadow_status(
    struct mm_struct *m)
{
    shadow_unlock(m);
}
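
/*
 * Typical calling pattern (illustrative sketch, not a quote from callers):
 *
 *     unsigned long s;
 *     if ( (s = get_shadow_status(m, gpfn)) != 0 )
 *     {
 *         ... use the shadow frame (s & PSH_pfn_mask) ...
 *         put_shadow_status(m);       // releases the lock taken by get
 *     }
 *     // If get_shadow_status() returned 0 it has already dropped the lock,
 *     // so put_shadow_status() must NOT be called in that case.
 */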
static inline void delete_shadow_status(
    struct mm_struct *m, unsigned int gpfn)
{
    struct shadow_status *p, *x, *n, *head;

    ASSERT(spin_is_locked(&m->shadow_lock));
    ASSERT(gpfn != 0);

    head = hash_bucket(m, gpfn);

    SH_VVLOG("delete gpfn=%08x bucket=%p", gpfn, head);
    shadow_audit(m, 0);

    /* Match on head item? */
    if ( head->pfn == gpfn )
    {
        if ( (n = head->next) != NULL )
        {
            /* Overwrite head with contents of following node. */
            head->pfn            = n->pfn;
            head->spfn_and_flags = n->spfn_and_flags;

            /* Delete following node. */
            head->next           = n->next;

            /* Add deleted node to the free list. */
            n->pfn            = 0;
            n->spfn_and_flags = 0;
            n->next           = m->shadow_ht_free;
            m->shadow_ht_free = n;
        }
        else
        {
            /* This bucket is now empty. Initialise the head node. */
            head->pfn            = 0;
            head->spfn_and_flags = 0;
        }

        goto found;
    }

    p = head;
    x = head->next;

    do
    {
        if ( x->pfn == gpfn )
        {
            /* Delete matching node. */
            p->next = x->next;

            /* Add deleted node to the free list. */
            x->pfn            = 0;
            x->spfn_and_flags = 0;
            x->next           = m->shadow_ht_free;
            m->shadow_ht_free = x;

            goto found;
        }

        p = x;
        x = x->next;
    }
    while ( x != NULL );

    /* If we got here, it wasn't in the list! */
    BUG();

 found:
    shadow_audit(m, 0);
}
static inline void set_shadow_status(
    struct mm_struct *m, unsigned int gpfn, unsigned long s)
{
    struct shadow_status *x, *head, *extra;
    int i;

    ASSERT(spin_is_locked(&m->shadow_lock));
    ASSERT(gpfn != 0);
    ASSERT(s & PSH_shadowed);

    x = head = hash_bucket(m, gpfn);

    SH_VVLOG("set gpfn=%08x s=%08lx bucket=%p(%p)", gpfn, s, x, x->next);
    shadow_audit(m, 0);

    /*
     * STEP 1. If page is already in the table, update it in place.
     */

    do
    {
        if ( x->pfn == gpfn )
        {
            x->spfn_and_flags = s;
            goto done;
        }

        x = x->next;
    }
    while ( x != NULL );

    /*
     * STEP 2. The page must be inserted into the table.
     */

    /* If the bucket is empty then insert the new page as the head item. */
    if ( head->pfn == 0 )
    {
        head->pfn            = gpfn;
        head->spfn_and_flags = s;
        ASSERT(head->next == NULL);
        goto done;
    }

    /* We need to allocate a new node. Ensure the quicklist is non-empty. */
    if ( unlikely(m->shadow_ht_free == NULL) )
    {
        SH_LOG("Allocate more shadow hashtable blocks.");

        extra = xmalloc(
            sizeof(void *) + (shadow_ht_extra_size * sizeof(*x)));

        /* XXX Should be more graceful here. */
        if ( extra == NULL )
            BUG();

        memset(extra, 0, sizeof(void *) + (shadow_ht_extra_size * sizeof(*x)));

        /* Record the allocation block so it can be correctly freed later. */
        m->shadow_extras_count++;
        *((struct shadow_status **)&extra[shadow_ht_extra_size]) =
            m->shadow_ht_extras;
        m->shadow_ht_extras = &extra[0];

        /* Thread a free chain through the newly-allocated nodes. */
        for ( i = 0; i < (shadow_ht_extra_size - 1); i++ )
            extra[i].next = &extra[i+1];
        extra[i].next = NULL;

        /* Add the new nodes to the free list. */
        m->shadow_ht_free = &extra[0];
    }

    /* Allocate a new node from the quicklist. */
    x = m->shadow_ht_free;
    m->shadow_ht_free = x->next;

    /* Initialise the new node and insert directly after the head item. */
    x->pfn            = gpfn;
    x->spfn_and_flags = s;
    x->next           = head->next;
    head->next        = x;

 done:
    shadow_audit(m, 0);
}
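
/*
 * Layout of each allocation block chained on m->shadow_ht_extras:
 *
 *     [ shadow_ht_extra_size struct shadow_status nodes | prev-block ptr ]
 *
 * i.e. the pointer to the previously allocated block is stored in the
 * sizeof(void *) tail just past extra[shadow_ht_extra_size - 1], which is
 * why the allocation size is sizeof(void *) + shadow_ht_extra_size *
 * sizeof(*x).
 */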
#ifdef CONFIG_VMX
#include <asm/domain_page.h>

static inline void vmx_update_shadow_state(
    struct mm_struct *mm, unsigned long gpfn, unsigned long spfn)
{
    l2_pgentry_t *mpl2e = 0;
    l2_pgentry_t *gpl2e, *spl2e;

    /* unmap the old mappings */
    if ( mm->shadow_vtable )
        unmap_domain_mem(mm->shadow_vtable);
    if ( mm->vpagetable )
        unmap_domain_mem(mm->vpagetable);

    /* new mapping */
    mpl2e = (l2_pgentry_t *)
        map_domain_mem(pagetable_val(mm->monitor_table));

    mpl2e[SH_LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT] =
        mk_l2_pgentry((spfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
    __flush_tlb_one(SH_LINEAR_PT_VIRT_START);

    spl2e = (l2_pgentry_t *) map_domain_mem(spfn << PAGE_SHIFT);
    gpl2e = (l2_pgentry_t *) map_domain_mem(gpfn << PAGE_SHIFT);
    memset(spl2e, 0, ENTRIES_PER_L2_PAGETABLE * sizeof(l2_pgentry_t));

    mm->shadow_table  = mk_pagetable(spfn << PAGE_SHIFT);
    mm->shadow_vtable = spl2e;
    mm->vpagetable    = gpl2e; /* expect the guest did clean this up */
    unmap_domain_mem(mpl2e);
}
static inline void __shadow_mk_pagetable( struct mm_struct *mm )
{
    unsigned long gpfn = pagetable_val(mm->pagetable) >> PAGE_SHIFT;
    unsigned long spfn;

    SH_VLOG("0: __shadow_mk_pagetable(gpfn=%08lx)", gpfn);

    if ( mm->shadow_mode == SHM_full_32 )
    {
        unsigned long guest_gpfn;
        guest_gpfn = machine_to_phys_mapping[gpfn];

        SH_VVLOG("__shadow_mk_pagetable(guest_gpfn=%08lx, gpfn=%08lx)",
                 guest_gpfn, gpfn);

        spfn = __shadow_status(mm, gpfn) & PSH_pfn_mask;
        if ( unlikely(spfn == 0) ) {
            spfn = shadow_l2_table(mm, gpfn);
            mm->shadow_table = mk_pagetable(spfn << PAGE_SHIFT);
        } else {
            vmx_update_shadow_state(mm, gpfn, spfn);
        }
    } else {
        spfn = __shadow_status(mm, gpfn) & PSH_pfn_mask;

        if ( unlikely(spfn == 0) ) {
            spfn = shadow_l2_table(mm, gpfn);
        }
        mm->shadow_table = mk_pagetable(spfn << PAGE_SHIFT);
    }
}
#else
static inline void __shadow_mk_pagetable(struct mm_struct *mm)
{
    unsigned long gpfn = pagetable_val(mm->pagetable) >> PAGE_SHIFT;
    unsigned long spfn = __shadow_status(mm, gpfn);

    if ( unlikely(spfn == 0) )
        spfn = shadow_l2_table(mm, gpfn);

    mm->shadow_table = mk_pagetable(spfn << PAGE_SHIFT);
}
#endif /* CONFIG_VMX */
static inline void shadow_mk_pagetable(struct mm_struct *mm)
{
    if ( unlikely(mm->shadow_mode) )
    {
        SH_VVLOG("shadow_mk_pagetable( gptbase=%08lx, mode=%d )",
                 pagetable_val(mm->pagetable), mm->shadow_mode );

        shadow_lock(mm);
        __shadow_mk_pagetable(mm);
        shadow_unlock(mm);

        SH_VVLOG("leaving shadow_mk_pagetable:\n");

        SH_VVLOG("( gptbase=%08lx, mode=%d ) sh=%08lx",
                 pagetable_val(mm->pagetable), mm->shadow_mode,
                 pagetable_val(mm->shadow_table) );
    }
}
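
/*
 * shadow_mk_pagetable() (re)derives mm->shadow_table from mm->pagetable: it
 * is a no-op unless shadow mode is enabled; otherwise it takes the shadow
 * lock, looks the guest top-level frame up in the shadow hash table and, on
 * a miss, builds a fresh shadow L2 via shadow_l2_table().  It is presumably
 * invoked whenever the guest's page-table base changes (the callers live
 * outside this header).
 */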
#if SHADOW_DEBUG
extern int check_pagetable(struct mm_struct *m, pagetable_t pt, char *s);
#else
#define check_pagetable(m, pt, s) ((void)0)
#endif
#endif /* _XEN_SHADOW_H */