debuggers.hg
changeset 21201:de7a50eb4854
p2m: merge ptp allocation
Signed-off-by: Christoph Egger <Christoph.Egger@amd.com>
| author | Keir Fraser <keir.fraser@citrix.com> |
| --- | --- |
| date | Tue Apr 13 12:20:48 2010 +0100 (2010-04-13) |
| parents | 2a4970daad74 |
| children | f7265414c27f |
| files | xen/arch/x86/mm/hap/p2m-ept.c xen/arch/x86/mm/hap/private.h xen/arch/x86/mm/p2m.c xen/include/asm-x86/p2m.h |
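
This changeset adds a common p2m_alloc_ptp() helper to xen/arch/x86/mm/p2m.c that allocates a p2m page-table page and performs the associated bookkeeping (linking the page into d->arch.p2m->pages and setting its type_info and count_info), declares it in xen/include/asm-x86/p2m.h, and switches the open-coded allocation sites in ept_set_middle_entry() and p2m_next_level() over to it. A small header-guard comment fix in xen/arch/x86/mm/hap/private.h rides along.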
line diff
```diff
--- a/xen/arch/x86/mm/hap/p2m-ept.c	Tue Apr 13 09:38:54 2010 +0100
+++ b/xen/arch/x86/mm/hap/p2m-ept.c	Tue Apr 13 12:20:48 2010 +0100
@@ -97,14 +97,10 @@ static int ept_set_middle_entry(struct d
 {
     struct page_info *pg;
 
-    pg = d->arch.p2m->alloc_page(d);
+    pg = p2m_alloc_ptp(d, 0);
     if ( pg == NULL )
         return 0;
 
-    pg->count_info = 1;
-    pg->u.inuse.type_info = 1 | PGT_validated;
-    page_list_add_tail(pg, &d->arch.p2m->pages);
-
     ept_entry->emt = 0;
     ept_entry->ipat = 0;
     ept_entry->sp_avail = 0;
```
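
After this hunk, the allocation path in ept_set_middle_entry() reduces to a single call. The sketch below is the resulting code path reconstructed from the diff, with explanatory comments added (the comments are not part of the patch); passing a type of 0 keeps the previous behaviour of marking the page validated without any PGT_l*_page_table type.

```c
    struct page_info *pg;

    /* p2m_alloc_ptp() now does the page-list and type_info/count_info
     * bookkeeping that used to be open-coded here.  EPT middle entries
     * pass type 0, matching the old "1 | PGT_validated" setting. */
    pg = p2m_alloc_ptp(d, 0);
    if ( pg == NULL )
        return 0;

    /* ...the EPT entry fields (emt, ipat, sp_avail, ...) are then set up
     * exactly as before. */
```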
```diff
--- a/xen/arch/x86/mm/hap/private.h	Tue Apr 13 09:38:54 2010 +0100
+++ b/xen/arch/x86/mm/hap/private.h	Tue Apr 13 12:20:48 2010 +0100
@@ -30,5 +30,4 @@ unsigned long hap_gva_to_gfn_3_levels(st
 unsigned long hap_gva_to_gfn_4_levels(struct vcpu *v, unsigned long gva,
                                       uint32_t *pfec);
 
-
-#endif /* __SVM_NPT_H__ */
+#endif /* __HAP_PRIVATE_H__ */
```
```diff
--- a/xen/arch/x86/mm/p2m.c	Tue Apr 13 09:38:54 2010 +0100
+++ b/xen/arch/x86/mm/p2m.c	Tue Apr 13 12:20:48 2010 +0100
@@ -134,6 +134,22 @@ p2m_find_entry(void *table, unsigned lon
     return (l1_pgentry_t *)table + index;
 }
 
+struct page_info *
+p2m_alloc_ptp(struct domain *d, unsigned long type)
+{
+    struct page_info *pg;
+
+    pg = d->arch.p2m->alloc_page(d);
+    if (pg == NULL)
+        return NULL;
+
+    page_list_add_tail(pg, &d->arch.p2m->pages);
+    pg->u.inuse.type_info = type | 1 | PGT_validated;
+    pg->count_info |= 1;
+
+    return pg;
+}
+
 // Walk one level of the P2M table, allocating a new table if required.
 // Returns 0 on error.
 //
@@ -156,15 +172,14 @@ p2m_next_level(struct domain *d, mfn_t *
     /* PoD: Not present doesn't imply empty. */
     if ( !l1e_get_flags(*p2m_entry) )
     {
-        struct page_info *pg = d->arch.p2m->alloc_page(d);
+        struct page_info *pg;
+
+        pg = p2m_alloc_ptp(d, type);
         if ( pg == NULL )
             return 0;
-        page_list_add_tail(pg, &d->arch.p2m->pages);
-        pg->u.inuse.type_info = type | 1 | PGT_validated;
-        pg->count_info |= 1;
 
         new_entry = l1e_from_pfn(mfn_x(page_to_mfn(pg)),
-                                 __PAGE_HYPERVISOR|_PAGE_USER);
+                                 __PAGE_HYPERVISOR | _PAGE_USER);
 
         switch ( type ) {
         case PGT_l3_page_table:
@@ -195,16 +210,15 @@ p2m_next_level(struct domain *d, mfn_t *
     if ( type == PGT_l2_page_table && (l1e_get_flags(*p2m_entry) & _PAGE_PSE) )
     {
         unsigned long flags, pfn;
-        struct page_info *pg = d->arch.p2m->alloc_page(d);
+        struct page_info *pg;
+
+        pg = p2m_alloc_ptp(d, PGT_l2_page_table);
         if ( pg == NULL )
             return 0;
-        page_list_add_tail(pg, &d->arch.p2m->pages);
-        pg->u.inuse.type_info = PGT_l2_page_table | 1 | PGT_validated;
-        pg->count_info = 1;
-        
+
         flags = l1e_get_flags(*p2m_entry);
         pfn = l1e_get_pfn(*p2m_entry);
-        
+
         l1_entry = map_domain_page(mfn_x(page_to_mfn(pg)));
         for ( i = 0; i < L2_PAGETABLE_ENTRIES; i++ )
         {
@@ -224,13 +238,12 @@ p2m_next_level(struct domain *d, mfn_t *
     if ( type == PGT_l1_page_table && (l1e_get_flags(*p2m_entry) & _PAGE_PSE) )
    {
         unsigned long flags, pfn;
-        struct page_info *pg = d->arch.p2m->alloc_page(d);
+        struct page_info *pg;
+
+        pg = p2m_alloc_ptp(d, PGT_l1_page_table);
         if ( pg == NULL )
             return 0;
-        page_list_add_tail(pg, &d->arch.p2m->pages);
-        pg->u.inuse.type_info = PGT_l1_page_table | 1 | PGT_validated;
-        pg->count_info |= 1;
-        
+
         /* New splintered mappings inherit the flags of the old superpage, 
          * with a little reorganisation for the _PAGE_PSE_PAT bit. */
         flags = l1e_get_flags(*p2m_entry);
```
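
For reference, here is the new helper exactly as introduced above, with comments added to spell out the bookkeeping it centralizes (the comments are mine, not part of the patch):

```c
struct page_info *
p2m_alloc_ptp(struct domain *d, unsigned long type)
{
    struct page_info *pg;

    /* Delegate the actual allocation to the per-domain p2m allocator. */
    pg = d->arch.p2m->alloc_page(d);
    if (pg == NULL)
        return NULL;

    /* Track the page on the domain's p2m page list so it can be freed
     * along with the rest of the p2m. */
    page_list_add_tail(pg, &d->arch.p2m->pages);
    /* Mark it as a validated page-table page of the requested type,
     * with a type count of 1. */
    pg->u.inuse.type_info = type | 1 | PGT_validated;
    /* Ensure the general reference count is non-zero. */
    pg->count_info |= 1;

    return pg;
}
```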
```diff
--- a/xen/include/asm-x86/p2m.h	Tue Apr 13 09:38:54 2010 +0100
+++ b/xen/include/asm-x86/p2m.h	Tue Apr 13 12:20:48 2010 +0100
@@ -444,6 +444,8 @@ int p2m_mem_paging_prep(struct domain *d
 /* Resume normal operation (in case a domain was paused) */
 void p2m_mem_paging_resume(struct domain *d);
 
+struct page_info *p2m_alloc_ptp(struct domain *d, unsigned long type);
+
 #endif /* _XEN_P2M_H */
 
 /*
```
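
With the prototype exported, every page-table-page allocation site follows the same pattern. A minimal sketch of a caller, condensed from the p2m_next_level() hunks above (comments added):

```c
    struct page_info *pg;

    /* One call replaces the old alloc_page() / page_list_add_tail() /
     * type_info / count_info sequence. */
    pg = p2m_alloc_ptp(d, PGT_l2_page_table);
    if ( pg == NULL )
        return 0;   /* p2m_next_level() reports allocation failure as 0 */
```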