debuggers.hg
changeset 18995:ebe11a452393
PoD memory 6/9: superpage splintering
Deal with splintering superpages in the PoD cache, and with
splintering superpage PoD entries in the p2m table.
Signed-off-by: George Dunlap <george.dunlap@eu.citrix.com>
author | Keir Fraser <keir.fraser@citrix.com> |
---|---|
date | Mon Jan 05 10:45:09 2009 +0000 (2009-01-05) |
parents | 489f35400ef2 |
children | 2090917489c5 |
files | xen/arch/x86/mm/p2m.c |
line diff
1.1 --- a/xen/arch/x86/mm/p2m.c Mon Jan 05 10:44:39 2009 +0000 1.2 +++ b/xen/arch/x86/mm/p2m.c Mon Jan 05 10:45:09 2009 +0000 1.3 @@ -323,6 +323,70 @@ p2m_pod_cache_add(struct domain *d, 1.4 return 0; 1.5 } 1.6 1.7 +/* Get a page of size order from the populate-on-demand cache. Will break 1.8 + * down 2-meg pages into singleton pages automatically. Returns null if 1.9 + * a superpage is requested and no superpages are available. Must be called 1.10 + * with the d->page_lock held. */ 1.11 +static struct page_info * p2m_pod_cache_get(struct domain *d, 1.12 + unsigned long order) 1.13 +{ 1.14 + struct p2m_domain *p2md = d->arch.p2m; 1.15 + struct page_info *p = NULL; 1.16 + int i; 1.17 + 1.18 + if ( order == 9 && list_empty(&p2md->pod.super) ) 1.19 + { 1.20 + return NULL; 1.21 + } 1.22 + else if ( order == 0 && list_empty(&p2md->pod.single) ) 1.23 + { 1.24 + unsigned long mfn; 1.25 + struct page_info *q; 1.26 + 1.27 + BUG_ON( list_empty(&p2md->pod.super) ); 1.28 + 1.29 + /* Break up a superpage to make single pages. NB count doesn't 1.30 + * need to be adjusted. 
*/ 1.31 + printk("%s: Breaking up superpage.\n", __func__); 1.32 + p = list_entry(p2md->pod.super.next, struct page_info, list); 1.33 + list_del(&p->list); 1.34 + mfn = mfn_x(page_to_mfn(p)); 1.35 + 1.36 + for ( i=0; i<(1<<9); i++ ) 1.37 + { 1.38 + q = mfn_to_page(_mfn(mfn+i)); 1.39 + list_add_tail(&q->list, &p2md->pod.single); 1.40 + } 1.41 + } 1.42 + 1.43 + switch ( order ) 1.44 + { 1.45 + case 9: 1.46 + BUG_ON( list_empty(&p2md->pod.super) ); 1.47 + p = list_entry(p2md->pod.super.next, struct page_info, list); 1.48 + p2md->pod.count -= 1 << order; /* Lock: page_alloc */ 1.49 + break; 1.50 + case 0: 1.51 + BUG_ON( list_empty(&p2md->pod.single) ); 1.52 + p = list_entry(p2md->pod.single.next, struct page_info, list); 1.53 + p2md->pod.count -= 1; 1.54 + break; 1.55 + default: 1.56 + BUG(); 1.57 + } 1.58 + 1.59 + list_del(&p->list); 1.60 + 1.61 + /* Put the pages back on the domain page_list */ 1.62 + for ( i = 0 ; i < (1 << order) ; i++ ) 1.63 + { 1.64 + BUG_ON(page_get_owner(p + i) != d); 1.65 + list_add_tail(&p[i].list, &d->page_list); 1.66 + } 1.67 + 1.68 + return p; 1.69 +} 1.70 + 1.71 void 1.72 p2m_pod_empty_cache(struct domain *d) 1.73 { 1.74 @@ -824,36 +888,15 @@ p2m_pod_demand_populate(struct domain *d 1.75 if ( p2md->pod.count == 0 ) 1.76 goto out_of_memory; 1.77 1.78 - /* FIXME -- use single pages / splinter superpages if need be */ 1.79 - switch ( order ) 1.80 - { 1.81 - case 9: 1.82 - BUG_ON( list_empty(&p2md->pod.super) ); 1.83 - p = list_entry(p2md->pod.super.next, struct page_info, list); 1.84 - p2md->pod.count -= 1 << order; /* Lock: page_alloc */ 1.85 - break; 1.86 - case 0: 1.87 - BUG_ON( list_empty(&p2md->pod.single) ); 1.88 - p = list_entry(p2md->pod.single.next, struct page_info, list); 1.89 - p2md->pod.count -= 1; 1.90 - break; 1.91 - default: 1.92 - BUG(); 1.93 - } 1.94 - 1.95 - list_del(&p->list); 1.96 + /* Get a page f/ the cache. 
A NULL return value indicates that the 1.97 + * 2-meg range should be marked singleton PoD, and retried */ 1.98 + if ( (p = p2m_pod_cache_get(d, order)) == NULL ) 1.99 + goto remap_and_retry; 1.100 1.101 mfn = page_to_mfn(p); 1.102 1.103 BUG_ON((mfn_x(mfn) & ((1 << order)-1)) != 0); 1.104 1.105 - /* Put the pages back on the domain page_list */ 1.106 - for ( i = 0 ; i < (1 << order) ; i++ ) 1.107 - { 1.108 - BUG_ON(page_get_owner(p + i) != d); 1.109 - list_add_tail(&p[i].list, &d->page_list); 1.110 - } 1.111 - 1.112 spin_unlock(&d->page_alloc_lock); 1.113 1.114 /* Fill in the entry in the p2m */ 1.115 @@ -897,6 +940,18 @@ out_of_memory: 1.116 printk("%s: Out of populate-on-demand memory!\n", __func__); 1.117 domain_crash(d); 1.118 return -1; 1.119 +remap_and_retry: 1.120 + BUG_ON(order != 9); 1.121 + spin_unlock(&d->page_alloc_lock); 1.122 + 1.123 + /* Remap this 2-meg region in singleton chunks */ 1.124 + gfn_aligned = (gfn>>order)<<order; 1.125 + for(i=0; i<(1<<order); i++) 1.126 + set_p2m_entry(d, gfn_aligned+i, _mfn(POPULATE_ON_DEMAND_MFN), 0, 1.127 + p2m_populate_on_demand); 1.128 + audit_p2m(d); 1.129 + p2m_unlock(p2md); 1.130 + return 0; 1.131 } 1.132 1.133 // Returns 0 on error (out of memory)