debuggers.hg
changeset 16433:8e98c3d6a55f
Log dirty radix tree code cleanup. Also do not dereference non-existent
pointer in paging_new_log_dirty_*() functions if allocation fails.
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author | Keir Fraser <keir.fraser@citrix.com> |
---|---|
date | Fri Nov 16 20:06:15 2007 +0000 (2007-11-16) |
parents | 86e4b37a06cc |
children | d2935f9c217f |
files | xen/arch/x86/mm/paging.c xen/arch/x86/mm/shadow/private.h |
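
The fix named in the second sentence of the summary concerns paging_new_log_dirty_page(): on allocation failure it returns _mfn(INVALID_MFN) without ever writing a mapping through its out-parameter, yet the old leaf/node wrappers dereferenced that pointer unconditionally. Below is a minimal standalone sketch of that bug class and of the guard the changeset adds; malloc()/memset() are stand-ins for the hypervisor's alloc_domheap_page()/map_domain_page()/clear_page(), and none of the names are Xen's.

```c
/* Standalone sketch of the bug class this changeset fixes -- NOT Xen code.
 * malloc()/memset() stand in for alloc_domheap_page()/map_domain_page()/
 * clear_page(); returning NULL stands in for returning _mfn(INVALID_MFN). */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096

/* Allocate a page and hand a mapping back through *mapping_p.
 * On failure the out-pointer is never written, just like the real
 * paging_new_log_dirty_page() when alloc_domheap_page() fails. */
static void *new_page(void **mapping_p)
{
    void *page = malloc(PAGE_SIZE);

    if ( page == NULL )
        return NULL;

    *mapping_p = page;
    return page;
}

/* Old shape: clear the mapping unconditionally, dereferencing an
 * unwritten pointer when allocation failed.  New shape: only touch the
 * mapping once the allocation is known to have succeeded. */
static void *new_zeroed_leaf(void **leaf_p)
{
    void *page = new_page(leaf_p);

    if ( page != NULL )              /* the added mfn_valid()-style guard */
        memset(*leaf_p, 0, PAGE_SIZE);

    return page;
}

int main(void)
{
    void *mapping = NULL;
    void *page = new_zeroed_leaf(&mapping);

    printf("leaf allocation %s\n", page ? "succeeded" : "failed");
    free(page);
    return 0;
}
```

The hunks below apply exactly this shape of guard: the freshly allocated page is only cleared or initialised once mfn_valid() confirms the allocation succeeded.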
line diff
```diff
--- a/xen/arch/x86/mm/paging.c	Fri Nov 16 19:07:46 2007 +0000
+++ b/xen/arch/x86/mm/paging.c	Fri Nov 16 20:06:15 2007 +0000
@@ -101,36 +101,37 @@ static mfn_t paging_new_log_dirty_page(s
     mfn_t mfn;
     struct page_info *page = alloc_domheap_page(NULL);
 
-    if ( unlikely(page == NULL) ) {
+    if ( unlikely(page == NULL) )
+    {
         d->arch.paging.log_dirty.failed_allocs++;
         return _mfn(INVALID_MFN);
     }
+
     d->arch.paging.log_dirty.allocs++;
     mfn = page_to_mfn(page);
     *mapping_p = map_domain_page(mfn_x(mfn));
+
     return mfn;
 }
 
-
 static mfn_t paging_new_log_dirty_leaf(struct domain *d, uint8_t **leaf_p)
 {
     mfn_t mfn = paging_new_log_dirty_page(d, (void **)leaf_p);
-    clear_page(*leaf_p);
+    if ( mfn_valid(mfn) )
+        clear_page(*leaf_p);
     return mfn;
 }
-
 
 static mfn_t paging_new_log_dirty_node(struct domain *d, mfn_t **node_p)
 {
     int i;
     mfn_t mfn = paging_new_log_dirty_page(d, (void **)node_p);
-    for (i = 0; i < LOGDIRTY_NODE_ENTRIES; i++)
-        (*node_p)[i] = _mfn(INVALID_MFN);
+    if ( mfn_valid(mfn) )
+        for ( i = 0; i < LOGDIRTY_NODE_ENTRIES; i++ )
+            (*node_p)[i] = _mfn(INVALID_MFN);
     return mfn;
 }
-
 
-/* allocate bitmap resources for log dirty */
 int paging_alloc_log_dirty_bitmap(struct domain *d)
 {
     mfn_t *mapping;
@@ -139,7 +140,8 @@ int paging_alloc_log_dirty_bitmap(struct
         return 0;
 
     d->arch.paging.log_dirty.top = paging_new_log_dirty_node(d, &mapping);
-    if ( unlikely(!mfn_valid(d->arch.paging.log_dirty.top)) ) {
+    if ( unlikely(!mfn_valid(d->arch.paging.log_dirty.top)) )
+    {
         /* Clear error indicator since we're reporting this one */
         d->arch.paging.log_dirty.failed_allocs = 0;
         return -ENOMEM;
@@ -149,45 +151,57 @@ int paging_alloc_log_dirty_bitmap(struct
     return 0;
 }
 
-
 static void paging_free_log_dirty_page(struct domain *d, mfn_t mfn)
 {
     d->arch.paging.log_dirty.allocs--;
     free_domheap_page(mfn_to_page(mfn));
 }
 
-/* free bitmap resources */
 void paging_free_log_dirty_bitmap(struct domain *d)
 {
+    mfn_t *l4, *l3, *l2;
     int i4, i3, i2;
 
-    if (mfn_valid(d->arch.paging.log_dirty.top)) {
-        mfn_t *l4 = map_domain_page(mfn_x(d->arch.paging.log_dirty.top));
-        printk("%s: used %d pages for domain %d dirty logging\n",
-               __FUNCTION__, d->arch.paging.log_dirty.allocs, d->domain_id);
-        for (i4 = 0; i4 < LOGDIRTY_NODE_ENTRIES; i4++) {
-            if (mfn_valid(l4[i4])) {
-                mfn_t *l3 = map_domain_page(mfn_x(l4[i4]));
-                for (i3 = 0; i3 < LOGDIRTY_NODE_ENTRIES; i3++) {
-                    if (mfn_valid(l3[i3])) {
-                        mfn_t *l2 = map_domain_page(mfn_x(l3[i3]));
-                        for (i2 = 0; i2 < LOGDIRTY_NODE_ENTRIES; i2++)
-                            if (mfn_valid(l2[i2]))
-                                paging_free_log_dirty_page(d, l2[i2]);
-                        unmap_domain_page(l2);
-                        paging_free_log_dirty_page(d, l3[i3]);
-                    }
-                }
-                unmap_domain_page(l3);
-                paging_free_log_dirty_page(d, l4[i4]);
-            }
+    if ( !mfn_valid(d->arch.paging.log_dirty.top) )
+        return;
+
+    dprintk(XENLOG_DEBUG, "%s: used %d pages for domain %d dirty logging\n",
+            __FUNCTION__, d->arch.paging.log_dirty.allocs, d->domain_id);
+
+    l4 = map_domain_page(mfn_x(d->arch.paging.log_dirty.top));
+
+    for ( i4 = 0; i4 < LOGDIRTY_NODE_ENTRIES; i4++ )
+    {
+        if ( !mfn_valid(l4[i4]) )
+            continue;
+
+        l3 = map_domain_page(mfn_x(l4[i4]));
+
+        for ( i3 = 0; i3 < LOGDIRTY_NODE_ENTRIES; i3++ )
+        {
+            if ( !mfn_valid(l3[i3]) )
+                continue;
+
+            l2 = map_domain_page(mfn_x(l3[i3]));
+
+            for ( i2 = 0; i2 < LOGDIRTY_NODE_ENTRIES; i2++ )
+                if ( mfn_valid(l2[i2]) )
+                    paging_free_log_dirty_page(d, l2[i2]);
+
+            unmap_domain_page(l2);
+            paging_free_log_dirty_page(d, l3[i3]);
        }
-        unmap_domain_page(l4);
-        paging_free_log_dirty_page(d, d->arch.paging.log_dirty.top);
-        d->arch.paging.log_dirty.top = _mfn(INVALID_MFN);
-        ASSERT(d->arch.paging.log_dirty.allocs == 0);
-        d->arch.paging.log_dirty.failed_allocs = 0;
+
+        unmap_domain_page(l3);
+        paging_free_log_dirty_page(d, l4[i4]);
     }
+
+    unmap_domain_page(l4);
+    paging_free_log_dirty_page(d, d->arch.paging.log_dirty.top);
+
+    d->arch.paging.log_dirty.top = _mfn(INVALID_MFN);
+    ASSERT(d->arch.paging.log_dirty.allocs == 0);
+    d->arch.paging.log_dirty.failed_allocs = 0;
 }
 
 int paging_log_dirty_enable(struct domain *d)
@@ -369,39 +383,52 @@ int paging_log_dirty_op(struct domain *d
 
     pages = 0;
     l4 = map_domain_page(mfn_x(d->arch.paging.log_dirty.top));
-    for ( i4 = 0; pages < sc->pages && i4 < LOGDIRTY_NODE_ENTRIES; i4++ ) {
+
+    for ( i4 = 0;
+          (pages < sc->pages) && (i4 < LOGDIRTY_NODE_ENTRIES);
+          i4++ )
+    {
         l3 = mfn_valid(l4[i4]) ? map_domain_page(mfn_x(l4[i4])) : NULL;
-        for ( i3 = 0; pages < sc->pages && i3 < LOGDIRTY_NODE_ENTRIES; i3++ ) {
-            l2 = l3 && mfn_valid(l3[i3]) ? map_domain_page(mfn_x(l3[i3])) : NULL;
-            for ( i2 = 0; pages < sc->pages && i2 < LOGDIRTY_NODE_ENTRIES; i2++ ) {
+        for ( i3 = 0;
+              (pages < sc->pages) && (i3 < LOGDIRTY_NODE_ENTRIES);
+              i3++ )
+        {
+            l2 = ((l3 && mfn_valid(l3[i3])) ?
+                  map_domain_page(mfn_x(l3[i3])) : NULL);
+            for ( i2 = 0;
+                  (pages < sc->pages) && (i2 < LOGDIRTY_NODE_ENTRIES);
+                  i2++ )
+            {
                 static uint8_t zeroes[PAGE_SIZE];
                 unsigned int bytes = PAGE_SIZE;
-                l1 = l2 && mfn_valid(l2[i2]) ? map_domain_page(mfn_x(l2[i2])) : zeroes;
+                l1 = ((l2 && mfn_valid(l2[i2])) ?
+                      map_domain_page(mfn_x(l2[i2])) : zeroes);
                 if ( unlikely(((sc->pages - pages + 7) >> 3) < bytes) )
                     bytes = (unsigned int)((sc->pages - pages + 7) >> 3);
-                if ( likely(peek) ) {
-                    if ( copy_to_guest_offset(sc->dirty_bitmap, pages >> 3, l1, bytes) != 0) {
+                if ( likely(peek) )
+                {
+                    if ( copy_to_guest_offset(sc->dirty_bitmap, pages >> 3,
+                                              l1, bytes) != 0 )
+                    {
                         rv = -EFAULT;
                         goto out;
                     }
                 }
-
                 if ( clean && l1 != zeroes )
                     clear_page(l1);
-
                 pages += bytes << 3;
-                if (l1 != zeroes)
+                if ( l1 != zeroes )
                     unmap_domain_page(l1);
             }
-            if (l2)
+            if ( l2 )
                 unmap_domain_page(l2);
         }
-        if (l3)
+        if ( l3 )
             unmap_domain_page(l3);
     }
     unmap_domain_page(l4);
 
-    if (pages < sc->pages)
+    if ( pages < sc->pages )
         sc->pages = pages;
 
     log_dirty_unlock(d);
```
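A small detail worth calling out in the paging_log_dirty_op() hunk above: each leaf page holds PAGE_SIZE bytes of dirty bitmap (one bit per guest frame), but only enough bytes to cover the bits the caller still wants are copied out, so the remaining bit count is rounded up to whole bytes via (sc->pages - pages + 7) >> 3. Here is a self-contained sketch of just that clamp, with hypothetical names standing in for sc->pages and the running pages counter:

```c
/* Sketch of the per-leaf copy clamp in paging_log_dirty_op() -- names are
 * hypothetical: pages_requested/pages_done stand in for sc->pages and the
 * running 'pages' counter in the real function. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u

static unsigned int leaf_copy_bytes(uint64_t pages_requested, uint64_t pages_done)
{
    unsigned int bytes = PAGE_SIZE;              /* a full leaf of bitmap */
    uint64_t remaining_bits = pages_requested - pages_done;

    /* Round the outstanding bit count up to whole bytes, as the hunk does
     * with (sc->pages - pages + 7) >> 3, and never copy more than that. */
    if ( ((remaining_bits + 7) >> 3) < bytes )
        bytes = (unsigned int)((remaining_bits + 7) >> 3);

    return bytes;
}

int main(void)
{
    printf("%u\n", leaf_copy_bytes(100, 0));     /* prints 13, not 4096 */
    return 0;
}
```

With 100 bits still to report, for example, the clamp yields 13 bytes rather than a full 4096-byte page.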
```diff
--- a/xen/arch/x86/mm/shadow/private.h	Fri Nov 16 19:07:46 2007 +0000
+++ b/xen/arch/x86/mm/shadow/private.h	Fri Nov 16 20:06:15 2007 +0000
@@ -503,7 +503,7 @@ sh_mfn_is_dirty(struct domain *d, mfn_t
     if ( unlikely(!VALID_M2P(pfn)) )
         return 0;
 
-    if (d->arch.paging.log_dirty.failed_allocs > 0)
+    if ( d->arch.paging.log_dirty.failed_allocs > 0 )
         /* If we have any failed allocations our dirty log is bogus.
          * Since we can't signal an error here, be conservative and
          * report "dirty" in this case. (The only current caller,
@@ -515,19 +515,19 @@ sh_mfn_is_dirty(struct domain *d, mfn_t
     l4 = map_domain_page(mfn_x(d->arch.paging.log_dirty.top));
     mfn = l4[L4_LOGDIRTY_IDX(pfn)];
     unmap_domain_page(l4);
-    if (!mfn_valid(mfn))
+    if ( !mfn_valid(mfn) )
         return 0;
 
     l3 = map_domain_page(mfn_x(mfn));
     mfn = l3[L3_LOGDIRTY_IDX(pfn)];
     unmap_domain_page(l3);
-    if (!mfn_valid(mfn))
+    if ( !mfn_valid(mfn) )
         return 0;
 
     l2 = map_domain_page(mfn_x(mfn));
     mfn = l2[L2_LOGDIRTY_IDX(pfn)];
     unmap_domain_page(l2);
-    if (!mfn_valid(mfn))
+    if ( !mfn_valid(mfn) )
         return 0;
 
     l1 = map_domain_page(mfn_x(mfn));
```
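sh_mfn_is_dirty() in the second file reads the state that paging.c maintains by walking the same four-level radix tree: the top (L4) node indexes L3 nodes, which index L2 nodes, which index leaf bitmap pages, and an invalid entry at any level means the frame was never marked dirty. The sketch below mirrors that lookup with ordinary pointers; the fan-out, index arithmetic and NULL-for-invalid encoding are illustrative assumptions, not Xen's LOGDIRTY_NODE_ENTRIES, mfn_valid() or L*_LOGDIRTY_IDX() definitions.

```c
/* Simplified model of the lookup in sh_mfn_is_dirty() -- NOT Xen's types.
 * 512-way nodes and NULL-for-invalid are assumptions standing in for
 * LOGDIRTY_NODE_ENTRIES, mfn_valid() and the L*_LOGDIRTY_IDX() macros. */
#include <stddef.h>
#include <stdint.h>

#define ENTRIES       512                /* assumed node fan-out */
#define BITS_PER_LEAF (4096 * 8)         /* one 4K leaf page of bitmap */

/* Interior nodes hold child pointers; the lowest level points at leaf
 * bitmap pages (one bit per guest page frame). */
struct node { void *slot[ENTRIES]; };

static size_t idx_l4(uint64_t pfn)
{ return (pfn / BITS_PER_LEAF / ENTRIES / ENTRIES) % ENTRIES; }
static size_t idx_l3(uint64_t pfn)
{ return (pfn / BITS_PER_LEAF / ENTRIES) % ENTRIES; }
static size_t idx_l2(uint64_t pfn)
{ return (pfn / BITS_PER_LEAF) % ENTRIES; }

static int test_dirty_bit(const struct node *l4, uint64_t pfn)
{
    const struct node *l3, *l2;
    const uint8_t *l1;

    if ( l4 == NULL )                    /* tree never allocated */
        return 0;
    if ( (l3 = l4->slot[idx_l4(pfn)]) == NULL )
        return 0;
    if ( (l2 = l3->slot[idx_l3(pfn)]) == NULL )
        return 0;
    if ( (l1 = l2->slot[idx_l2(pfn)]) == NULL )
        return 0;

    /* Test this frame's bit inside the leaf bitmap page. */
    return (l1[(pfn % BITS_PER_LEAF) / 8] >> (pfn % 8)) & 1;
}

int main(void)
{
    /* With no tree in place every frame reads back as clean. */
    return test_dirty_bit(NULL, 0x1234);
}
```

The early returns correspond to the mfn_valid() checks that this changeset merely reformats in private.h.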