xen-vtx-unstable
changeset 5909:691cd6f65739
Really just basic preparation: switch over PAE builds to the new
shadow code, drop old dummy functions, add (fewer) new ones.
| author | kaf24@firebug.cl.cam.ac.uk |
| --- | --- |
| date | Fri Jul 29 10:23:07 2005 +0000 (2005-07-29) |
| parents | 0474ffc52ba7 |
| children | e922662b7839 |
| files | xen/arch/x86/Makefile xen/arch/x86/shadow.c xen/arch/x86/vmx.c xen/arch/x86/vmx_platform.c xen/include/asm-x86/shadow.h xen/include/asm-x86/shadow_64.h xen/include/asm-x86/shadow_public.h |
line diff
```diff
--- a/xen/arch/x86/Makefile	Fri Jul 29 10:22:03 2005 +0000
+++ b/xen/arch/x86/Makefile	Fri Jul 29 10:23:07 2005 +0000
@@ -13,11 +13,18 @@ OBJS := $(subst cpu/centaur.o,,$(OBJS))
 OBJS := $(subst cpu/cyrix.o,,$(OBJS))
 OBJS := $(subst cpu/rise.o,,$(OBJS))
 OBJS := $(subst cpu/transmeta.o,,$(OBJS))
-OBJS := $(subst shadow32.o,,$(OBJS))
-else
-OBJS := $(subst shadow.o,,$(OBJS))
-OBJS := $(subst shadow_public.o,,$(OBJS))
-OBJS := $(subst shadow_xxx.o,,$(OBJS))
+endif
+
+OBJS := $(patsubst shadow%.o,,$(OBJS))	# drop all
+ifeq ($(TARGET_SUBARCH),x86_64)
+ OBJS += shadow.o shadow_public.o	# x86_64: new code
+endif
+ifeq ($(TARGET_SUBARCH),x86_32)
+ ifneq ($(pae),n)
+  OBJS += shadow.o shadow_public.o	# x86_32p: new code
+ else
+  OBJS += shadow32.o			# x86_32: old code
+ endif
 endif
 
 OBJS := $(subst $(TARGET_SUBARCH)/asm-offsets.o,,$(OBJS))
```
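The object selection above lines up with the paging-level constant that the rest of the patch keys its `#if` guards on. A minimal sketch of that mapping, assuming the usual Xen convention of 2 levels for plain x86_32, 3 for x86_32 PAE and 4 for x86_64; the macro names and the derivation below are illustrative, not copied from the Xen config headers:

```c
/*
 * Illustrative sketch only: map the build target to a paging-level count.
 * Assumes the conventional values (x86_32 = 2, x86_32 PAE = 3, x86_64 = 4);
 * the real definitions live in Xen's own configuration headers.
 */
#if defined(__x86_64__)
#define CONFIG_PAGING_LEVELS 4   /* built with shadow.o + shadow_public.o */
#elif defined(CONFIG_X86_PAE)
#define CONFIG_PAGING_LEVELS 3   /* also switched to the new shadow code  */
#else
#define CONFIG_PAGING_LEVELS 2   /* old 2-level code stays in shadow32.o  */
#endif
```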
```diff
--- a/xen/arch/x86/shadow.c	Fri Jul 29 10:22:03 2005 +0000
+++ b/xen/arch/x86/shadow.c	Fri Jul 29 10:23:07 2005 +0000
@@ -41,7 +41,13 @@ extern void free_shadow_pages(struct dom
 static void mark_shadows_as_reflecting_snapshot(struct domain *d, unsigned long gpfn);
 #endif
 
-#if CONFIG_PAGING_LEVELS >= 4
+#if CONFIG_PAGING_LEVELS == 3
+#include <asm/shadow_64.h>
+static unsigned long shadow_l3_table(
+    struct domain *d, unsigned long gpfn, unsigned long gmfn);
+#endif
+
+#if CONFIG_PAGING_LEVELS == 4
 #include <asm/shadow_64.h>
 static unsigned long shadow_l4_table(
     struct domain *d, unsigned long gpfn, unsigned long gmfn);
@@ -1833,7 +1839,7 @@ static void shadow_update_pagetables(str
     unsigned long gpfn = __mfn_to_gpfn(d, gmfn);
     unsigned long smfn, old_smfn;
 
-#if defined (__i386__)
+#if CONFIG_PAGING_LEVELS == 2
     unsigned long hl2mfn;
 #endif
 
@@ -1890,7 +1896,7 @@ static void shadow_update_pagetables(str
         v->arch.shadow_vtable = map_domain_page(smfn);
     }
 
-#if defined (__i386__)
+#if CONFIG_PAGING_LEVELS == 2
     /*
      * arch.hl2_vtable
      */
@@ -1936,6 +1942,10 @@ static void shadow_update_pagetables(str
         local_flush_tlb();
     }
 #endif
+
+#if CONFIG_PAGING_LEVELS == 3
+    /* FIXME: PAE code to be written */
+#endif
 }
 
 struct shadow_ops MODE_A_HANDLER = {
@@ -2427,6 +2437,7 @@ static unsigned long shadow_l3_table(
     struct domain *d, unsigned long gpfn, unsigned long gmfn)
 {
     BUG(); /* not implemenated yet */
+    return 42;
 }
 #endif
```
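The `return 42;` added after BUG() in the shadow_l3_table() placeholder follows the same pattern as the dummy functions removed from shadow.h below: presumably BUG() is not (or at least not here) declared noreturn, so a throwaway return value is needed to keep the compiler from warning about control reaching the end of a non-void function. A minimal, standalone sketch of the idiom — the BUG() definition below is a stand-in, not Xen's:

```c
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for Xen's BUG(): report and abort.  Deliberately not marked
 * noreturn, to mirror the situation the dummy return value works around. */
#define BUG() \
    do { fprintf(stderr, "BUG at %s:%d\n", __FILE__, __LINE__); abort(); } while (0)

/* Placeholder for a not-yet-implemented path: the value 42 is never
 * produced at runtime, it exists only to satisfy the compiler. */
static unsigned long not_implemented_yet(void)
{
    BUG(); /* not implemented yet */
    return 42;
}

int main(void)
{
    return (int)not_implemented_yet();
}
```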
```diff
--- a/xen/arch/x86/vmx.c	Fri Jul 29 10:22:03 2005 +0000
+++ b/xen/arch/x86/vmx.c	Fri Jul 29 10:23:07 2005 +0000
@@ -38,7 +38,7 @@
 #include <asm/vmx_vmcs.h>
 #include <asm/vmx_intercept.h>
 #include <asm/shadow.h>
-#if CONFIG_PAGING_LEVELS >= 4
+#if CONFIG_PAGING_LEVELS >= 3
 #include <asm/shadow_64.h>
 #endif
 
```
```diff
--- a/xen/arch/x86/vmx_platform.c	Fri Jul 29 10:22:03 2005 +0000
+++ b/xen/arch/x86/vmx_platform.c	Fri Jul 29 10:23:07 2005 +0000
@@ -32,7 +32,7 @@
 #include <xen/lib.h>
 #include <xen/sched.h>
 #include <asm/current.h>
-#if CONFIG_PAGING_LEVELS >= 4
+#if CONFIG_PAGING_LEVELS >= 3
 #include <asm/shadow_64.h>
 #endif
 #ifdef CONFIG_VMX
```
```diff
--- a/xen/include/asm-x86/shadow.h	Fri Jul 29 10:22:03 2005 +0000
+++ b/xen/include/asm-x86/shadow.h	Fri Jul 29 10:23:07 2005 +0000
@@ -131,12 +131,12 @@ extern void shadow_l2_normal_pt_update(s
                                        unsigned long pa, l2_pgentry_t l2e,
                                        struct domain_mmap_cache *cache);
 #if CONFIG_PAGING_LEVELS >= 3
+#include <asm/page-guest32.h>
 extern void shadow_l3_normal_pt_update(struct domain *d,
                                        unsigned long pa, l3_pgentry_t l3e,
                                        struct domain_mmap_cache *cache);
 #endif
 #if CONFIG_PAGING_LEVELS >= 4
-#include <asm/page-guest32.h>
 extern void shadow_l4_normal_pt_update(struct domain *d,
                                        unsigned long pa, l4_pgentry_t l4e,
                                        struct domain_mmap_cache *cache);
@@ -631,82 +631,6 @@ static inline void shadow_sync_and_drop_
 }
 #endif
 
-#if CONFIG_PAGING_LEVELS == 3
-/* dummy functions, PAE has no shadow support yet */
-
-static inline void
-__shadow_get_l2e(
-    struct vcpu *v, unsigned long va, l2_pgentry_t *psl2e)
-{
-    BUG();
-}
-
-static inline void
-__shadow_set_l2e(
-    struct vcpu *v, unsigned long va, l2_pgentry_t value)
-{
-    BUG();
-}
-
-static inline void
-__guest_get_l2e(
-    struct vcpu *v, unsigned long va, l2_pgentry_t *pl2e)
-{
-    BUG();
-}
-
-static inline void
-__guest_set_l2e(
-    struct vcpu *v, unsigned long va, l2_pgentry_t value)
-{
-    BUG();
-}
-
-static inline void shadow_drop_references(
-    struct domain *d, struct pfn_info *page)
-{
-    if ( likely(!shadow_mode_refcounts(d)) ||
-         ((page->u.inuse.type_info & PGT_count_mask) == 0) )
-        return;
-    BUG();
-}
-
-static inline void shadow_sync_and_drop_references(
-    struct domain *d, struct pfn_info *page)
-{
-    if ( likely(!shadow_mode_refcounts(d)) )
-        return;
-    BUG();
-}
-
-static inline int l1pte_write_fault(
-    struct vcpu *v, l1_pgentry_t *gpte_p, l1_pgentry_t *spte_p,
-    unsigned long va)
-{
-    BUG();
-    return 42;
-}
-
-static inline int l1pte_read_fault(
-    struct domain *d, l1_pgentry_t *gpte_p, l1_pgentry_t *spte_p)
-{
-    BUG();
-    return 42;
-}
-
-void static inline
-shadow_set_l1e(unsigned long va, l1_pgentry_t new_spte, int create_l1_shadow)
-{
-    BUG();
-}
-
-static inline unsigned long gva_to_gpa(unsigned long gva)
-{
-    BUG();
-    return 42;
-}
-#endif
-
 /************************************************************************/
 
 /*
```
```diff
--- a/xen/include/asm-x86/shadow_64.h	Fri Jul 29 10:22:03 2005 +0000
+++ b/xen/include/asm-x86/shadow_64.h	Fri Jul 29 10:23:07 2005 +0000
@@ -85,8 +85,10 @@ static inline int table_offset_64(unsig
         return (((va) >> L2_PAGETABLE_SHIFT) & (L2_PAGETABLE_ENTRIES - 1));
     case 3:
         return (((va) >> L3_PAGETABLE_SHIFT) & (L3_PAGETABLE_ENTRIES - 1));
+#if CONFIG_PAGING_LEVELS >= 4
     case 4:
         return (((va) >> L4_PAGETABLE_SHIFT) & (L4_PAGETABLE_ENTRIES - 1));
+#endif
     default:
         //printk("<table_offset_64> level %d is too big\n", level);
         return -1;
```
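table_offset_64() extracts a page-table index by shifting the virtual address for the requested level; the `case 4` arm now needs a guard because the L4 shift and entry-count macros only exist on 4-level builds (and on a 3-level PAE build the top table has just 4 entries, so the layout genuinely differs). A standalone sketch of the same calculation, using the standard 4-level long-mode layout (4 KiB pages, 9 index bits per level) as an assumption rather than the Xen macros:

```c
#include <stdio.h>

/* Assumed long-mode layout: 4 KiB pages, 512 entries (9 bits) per level. */
#define PAGE_SHIFT          12
#define TABLE_ORDER          9
#define ENTRIES            (1UL << TABLE_ORDER)                /* 512 */
#define LEVEL_SHIFT(level) (PAGE_SHIFT + ((level) - 1) * TABLE_ORDER)

/* Same idea as table_offset_64(): index into the page table at 'level'. */
static int table_offset(unsigned long va, int level)
{
    if (level < 1 || level > 4)
        return -1;                     /* mirrors the default: branch */
    return (int)((va >> LEVEL_SHIFT(level)) & (ENTRIES - 1));
}

int main(void)
{
    unsigned long va = 0x0000123456789abcUL;
    for (int level = 4; level >= 1; level--)
        printf("level %d index: %d\n", level, table_offset(va, level));
    return 0;
}
```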
```diff
--- a/xen/include/asm-x86/shadow_public.h	Fri Jul 29 10:22:03 2005 +0000
+++ b/xen/include/asm-x86/shadow_public.h	Fri Jul 29 10:23:07 2005 +0000
@@ -21,7 +21,7 @@
 
 #ifndef _XEN_SHADOW_PUBLIC_H
 #define _XEN_SHADOW_PUBLIC_H
-#if CONFIG_PAGING_LEVELS >= 4
+#if CONFIG_PAGING_LEVELS >= 3
 #define MFN_PINNED(_x) (frame_table[_x].u.inuse.type_info & PGT_pinned)
 
 extern int alloc_p2m_table(struct domain *d);
@@ -31,10 +31,6 @@ extern void shadow_sync_and_drop_referen
 extern void shadow_drop_references(
     struct domain *d, struct pfn_info *page);
 
-extern void shadow_l4_normal_pt_update(struct domain *d,
-                       unsigned long pa, l4_pgentry_t l4e,
-                       struct domain_mmap_cache *cache);
-
 extern int shadow_set_guest_paging_levels(struct domain *d, int levels);
 
 extern void release_out_of_sync_entry(
@@ -56,4 +52,10 @@ struct shadow_ops {
 };
 #endif
 
+#if CONFIG_PAGING_LEVELS >= 4
+extern void shadow_l4_normal_pt_update(struct domain *d,
+                       unsigned long pa, l4_pgentry_t l4e,
+                       struct domain_mmap_cache *cache);
 #endif
+
+#endif
```
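The last hunk leaves the header ending with two `#endif`s: the existing one now closes the new `CONFIG_PAGING_LEVELS >= 4` block, and the freshly added one closes the include guard. A skeleton of the resulting structure, with the declarations elided, just to make the nesting easier to read:

```c
#ifndef _XEN_SHADOW_PUBLIC_H
#define _XEN_SHADOW_PUBLIC_H

#if CONFIG_PAGING_LEVELS >= 3
/* ... declarations shared by the 3- and 4-level shadow code ... */
#endif

#if CONFIG_PAGING_LEVELS >= 4
/* ... shadow_l4_normal_pt_update() only makes sense with 4 levels ... */
#endif

#endif /* _XEN_SHADOW_PUBLIC_H */
```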