/root/src/xen/xen/common/xmalloc_tlsf.c
Line | Count | Source (jump to first uncovered line) |
1 | | /* |
2 | | * Two Levels Segregate Fit memory allocator (TLSF) |
3 | | * Version 2.3.2 |
4 | | * |
5 | | * Written by Miguel Masmano Tello <mimastel@doctor.upv.es> |
6 | | * |
7 | | * Thanks to Ismael Ripoll for his suggestions and reviews |
8 | | * |
9 | | * Copyright (C) 2007, 2006, 2005, 2004 |
10 | | * |
11 | | * This code is released using a dual license strategy: GPL/LGPL |
12 | | * You can choose the licence that better fits your requirements. |
13 | | * |
14 | | * Released under the terms of the GNU General Public License Version 2.0 |
15 | | * Released under the terms of the GNU Lesser General Public License |
16 | | * Version 2.1 |
17 | | * |
18 | | * This is kernel port of TLSF allocator. |
19 | | * Original code can be found at: http://rtportal.upv.es/rtmalloc/ |
20 | | * Adapted for Linux by Nitin Gupta (nitingupta910@gmail.com) |
21 | | * (http://code.google.com/p/compcache/source/browse/trunk/sub-projects |
22 | | * /allocators/tlsf-kmod r229 dated Aug 27, 2008 |
23 | | * Adapted for Xen by Dan Magenheimer (dan.magenheimer@oracle.com) |
24 | | */ |
25 | | |
26 | | #include <xen/irq.h> |
27 | | #include <xen/mm.h> |
28 | | #include <xen/pfn.h> |
29 | | #include <asm/time.h> |
30 | | |
31 | | #define MAX_POOL_NAME_LEN 16 |
32 | | |
33 | | /* Some IMPORTANT TLSF parameters */ |
34 | 10.1k | #define MEM_ALIGN (sizeof(void *) * 2) |
35 | 2.04k | #define MEM_ALIGN_MASK (~(MEM_ALIGN - 1)) |
36 | | |
37 | | #define MAX_FLI (30) |
38 | 9.07k | #define MAX_LOG2_SLI (5) |
39 | 5.34k | #define MAX_SLI (1 << MAX_LOG2_SLI) |
40 | | |
41 | 3.32k | #define FLI_OFFSET (6) |
42 | | /* tlsf structure just will manage blocks bigger than 128 bytes */ |
43 | 7.36k | #define SMALL_BLOCK (128) |
44 | | #define REAL_FLI (MAX_FLI - FLI_OFFSET) |
45 | 8.74k | #define MIN_BLOCK_SIZE (sizeof(struct free_ptr)) |
46 | 6.60k | #define BHDR_OVERHEAD (sizeof(struct bhdr) - MIN_BLOCK_SIZE) |
47 | | |
48 | 6.51k | #define PTR_MASK (sizeof(void *) - 1) |
49 | 6.51k | #define BLOCK_SIZE_MASK (0xFFFFFFFF - PTR_MASK) |
50 | | |
51 | 5.30k | #define GET_NEXT_BLOCK(addr, r) ((struct bhdr *) \ |
52 | 5.30k | ((char *)(addr) + (r))) |
53 | 4.06k | #define ROUNDUP_SIZE(r) (((r) + MEM_ALIGN - 1) & MEM_ALIGN_MASK) |
54 | 47 | #define ROUNDDOWN_SIZE(r) ((r) & MEM_ALIGN_MASK) |
55 | 3 | #define ROUNDUP_PAGE(r) (((r) + PAGE_SIZE - 1) & PAGE_MASK) |
56 | | |
57 | | #define BLOCK_STATE (0x1) |
58 | 2.02k | #define PREV_STATE (0x2) |
59 | | |
60 | | /* bit 0 of the block size */ |
61 | 3.28k | #define FREE_BLOCK (0x1) |
62 | 47 | #define USED_BLOCK (0x0) |
63 | | |
64 | | /* bit 1 of the block size */ |
65 | 1.25k | #define PREV_FREE (0x2) |
66 | 2.06k | #define PREV_USED (0x0) |
67 | | |
68 | | static spinlock_t pool_list_lock; |
69 | | static struct list_head pool_list_head; |
70 | | |
71 | | struct free_ptr { |
72 | | struct bhdr *prev; |
73 | | struct bhdr *next; |
74 | | }; |
75 | | |
76 | | struct bhdr { |
77 | | /* All blocks in a region are linked in order of physical address */ |
78 | | struct bhdr *prev_hdr; |
79 | | /* |
80 | | * The size is stored in bytes |
81 | | * bit 0: block is free, if set |
82 | | * bit 1: previous block is free, if set |
83 | | */ |
84 | | u32 size; |
85 | | /* Free blocks in individual freelists are linked */ |
86 | | union { |
87 | | struct free_ptr free_ptr; |
88 | | u8 buffer[sizeof(struct free_ptr)]; |
89 | | } ptr; |
90 | | }; |
91 | | |
92 | | struct xmem_pool { |
93 | | /* First level bitmap (REAL_FLI bits) */ |
94 | | u32 fl_bitmap; |
95 | | |
96 | | /* Second level bitmap */ |
97 | | u32 sl_bitmap[REAL_FLI]; |
98 | | |
99 | | /* Free lists */ |
100 | | struct bhdr *matrix[REAL_FLI][MAX_SLI]; |
101 | | |
102 | | spinlock_t lock; |
103 | | |
104 | | unsigned long init_size; |
105 | | unsigned long max_size; |
106 | | unsigned long grow_size; |
107 | | |
108 | | /* Basic stats */ |
109 | | unsigned long used_size; |
110 | | unsigned long num_regions; |
111 | | |
112 | | /* User provided functions for expanding/shrinking pool */ |
113 | | xmem_pool_get_memory *get_mem; |
114 | | xmem_pool_put_memory *put_mem; |
115 | | |
116 | | struct list_head list; |
117 | | |
118 | | void *init_region; |
119 | | char name[MAX_POOL_NAME_LEN]; |
120 | | }; |
121 | | |
122 | | /* |
123 | | * Helping functions |
124 | | */ |
125 | | |
126 | | /** |
127 | | * Returns indexes (fl, sl) of the list used to serve request of size r |
128 | | */ |
129 | | static inline void MAPPING_SEARCH(unsigned long *r, int *fl, int *sl) |
130 | 2.11k | { |
131 | 2.11k | int t; |
132 | 2.11k | |
133 | 2.11k | if ( *r < SMALL_BLOCK ) |
134 | 1.71k | { |
135 | 1.71k | *fl = 0; |
136 | 1.71k | *sl = *r / (SMALL_BLOCK / MAX_SLI); |
137 | 1.71k | } |
138 | 2.11k | else |
139 | 403 | { |
140 | 403 | t = (1 << (flsl(*r) - 1 - MAX_LOG2_SLI)) - 1; |
141 | 403 | *r = *r + t; |
142 | 403 | *fl = flsl(*r) - 1; |
143 | 403 | *sl = (*r >> (*fl - MAX_LOG2_SLI)) - MAX_SLI; |
144 | 403 | *fl -= FLI_OFFSET; |
145 | 403 | /*if ((*fl -= FLI_OFFSET) < 0) // FL will be always >0! |
146 | 403 | *fl = *sl = 0; |
147 | 403 | */ |
148 | 403 | *r &= ~t; |
149 | 403 | } |
150 | 2.11k | } |
151 | | |
152 | | /** |
153 | | * Returns indexes (fl, sl) which is used as starting point to search |
154 | | * for a block of size r. It also rounds up requested size(r) to the |
155 | | * next list. |
156 | | */ |
157 | | static inline void MAPPING_INSERT(unsigned long r, int *fl, int *sl) |
158 | 3.22k | { |
159 | 3.22k | if ( r < SMALL_BLOCK ) |
160 | 306 | { |
161 | 306 | *fl = 0; |
162 | 306 | *sl = r / (SMALL_BLOCK / MAX_SLI); |
163 | 306 | } |
164 | 3.22k | else |
165 | 2.92k | { |
166 | 2.92k | *fl = flsl(r) - 1; |
167 | 2.92k | *sl = (r >> (*fl - MAX_LOG2_SLI)) - MAX_SLI; |
168 | 2.92k | *fl -= FLI_OFFSET; |
169 | 2.92k | } |
170 | 3.22k | } |
171 | | |
172 | | /** |
173 | | * Returns first block from a list that hold blocks larger than or |
174 | | * equal to the one pointed by the indexes (fl, sl) |
175 | | */ |
176 | | static inline struct bhdr *FIND_SUITABLE_BLOCK(struct xmem_pool *p, int *fl, |
177 | | int *sl) |
178 | 2.11k | { |
179 | 2.11k | u32 tmp = p->sl_bitmap[*fl] & (~0u << *sl); |
180 | 2.11k | struct bhdr *b = NULL; |
181 | 2.11k | |
182 | 2.11k | if ( tmp ) |
183 | 114 | { |
184 | 114 | *sl = ffs(tmp) - 1; |
185 | 114 | b = p->matrix[*fl][*sl]; |
186 | 114 | } |
187 | 2.11k | else |
188 | 2.00k | { |
189 | 2.00k | *fl = ffs(p->fl_bitmap & (~0u << (*fl + 1))) - 1; |
190 | 2.00k | if ( likely(*fl > 0) ) |
191 | 1.95k | { |
192 | 1.95k | *sl = ffs(p->sl_bitmap[*fl]) - 1; |
193 | 1.95k | b = p->matrix[*fl][*sl]; |
194 | 1.95k | } |
195 | 2.00k | } |
196 | 2.11k | |
197 | 2.11k | return b; |
198 | 2.11k | } |
199 | | |
200 | | /** |
201 | | * Remove first free block(b) from free list with indexes (fl, sl). |
202 | | */ |
203 | | static inline void EXTRACT_BLOCK_HDR(struct bhdr *b, struct xmem_pool *p, int fl, |
204 | | int sl) |
205 | 2.06k | { |
206 | 2.06k | p->matrix[fl][sl] = b->ptr.free_ptr.next; |
207 | 2.06k | if ( p->matrix[fl][sl] ) |
208 | 4 | { |
209 | 4 | p->matrix[fl][sl]->ptr.free_ptr.prev = NULL; |
210 | 4 | } |
211 | 2.06k | else |
212 | 2.06k | { |
213 | 2.06k | clear_bit(sl, &p->sl_bitmap[fl]); |
214 | 2.06k | if ( !p->sl_bitmap[fl] ) |
215 | 2.03k | clear_bit(fl, &p->fl_bitmap); |
216 | 2.06k | } |
217 | 2.06k | b->ptr.free_ptr = (struct free_ptr) {NULL, NULL}; |
218 | 2.06k | } |
219 | | |
220 | | /** |
221 | | * Removes block(b) from free list with indexes (fl, sl) |
222 | | */ |
223 | | static inline void EXTRACT_BLOCK(struct bhdr *b, struct xmem_pool *p, int fl, |
224 | | int sl) |
225 | 578 | { |
226 | 578 | if ( b->ptr.free_ptr.next ) |
227 | 0 | b->ptr.free_ptr.next->ptr.free_ptr.prev = |
228 | 0 | b->ptr.free_ptr.prev; |
229 | 578 | if ( b->ptr.free_ptr.prev ) |
230 | 0 | b->ptr.free_ptr.prev->ptr.free_ptr.next = |
231 | 0 | b->ptr.free_ptr.next; |
232 | 578 | if ( p->matrix[fl][sl] == b ) |
233 | 578 | { |
234 | 578 | p->matrix[fl][sl] = b->ptr.free_ptr.next; |
235 | 578 | if ( !p->matrix[fl][sl] ) |
236 | 578 | { |
237 | 578 | clear_bit(sl, &p->sl_bitmap[fl]); |
238 | 578 | if ( !p->sl_bitmap[fl] ) |
239 | 469 | clear_bit (fl, &p->fl_bitmap); |
240 | 578 | } |
241 | 578 | } |
242 | 578 | b->ptr.free_ptr = (struct free_ptr) {NULL, NULL}; |
243 | 578 | } |
244 | | |
245 | | /** |
246 | | * Insert block(b) in free list with indexes (fl, sl) |
247 | | */ |
248 | | static inline void INSERT_BLOCK(struct bhdr *b, struct xmem_pool *p, int fl, int sl) |
249 | 2.65k | { |
250 | 2.65k | b->ptr.free_ptr = (struct free_ptr) {NULL, p->matrix[fl][sl]}; |
251 | 2.65k | if ( p->matrix[fl][sl] ) |
252 | 6 | p->matrix[fl][sl]->ptr.free_ptr.prev = b; |
253 | 2.65k | p->matrix[fl][sl] = b; |
254 | 2.65k | set_bit(sl, &p->sl_bitmap[fl]); |
255 | 2.65k | set_bit(fl, &p->fl_bitmap); |
256 | 2.65k | } |
257 | | |
258 | | /** |
259 | | * Region is a virtually contiguous memory region and Pool is |
260 | | * collection of such regions |
261 | | */ |
262 | | static inline void ADD_REGION(void *region, unsigned long region_size, |
263 | | struct xmem_pool *pool) |
264 | 47 | { |
265 | 47 | int fl, sl; |
266 | 47 | struct bhdr *b, *lb; |
267 | 47 | |
268 | 47 | b = (struct bhdr *)(region); |
269 | 47 | b->prev_hdr = NULL; |
270 | 47 | b->size = ROUNDDOWN_SIZE(region_size - 2 * BHDR_OVERHEAD) |
271 | 47 | | FREE_BLOCK | PREV_USED; |
272 | 47 | MAPPING_INSERT(b->size & BLOCK_SIZE_MASK, &fl, &sl); |
273 | 47 | INSERT_BLOCK(b, pool, fl, sl); |
274 | 47 | /* The sentinel block: allows us to know when we're in the last block */ |
275 | 47 | lb = GET_NEXT_BLOCK(b->ptr.buffer, b->size & BLOCK_SIZE_MASK); |
276 | 47 | lb->prev_hdr = b; |
277 | 47 | lb->size = 0 | USED_BLOCK | PREV_FREE; |
278 | 47 | pool->used_size += BHDR_OVERHEAD; /* only sentinel block is "used" */ |
279 | 47 | pool->num_regions++; |
280 | 47 | } |
281 | | |
282 | | /* |
283 | | * TLSF pool-based allocator start. |
284 | | */ |
285 | | |
286 | | struct xmem_pool *xmem_pool_create( |
287 | | const char *name, |
288 | | xmem_pool_get_memory get_mem, |
289 | | xmem_pool_put_memory put_mem, |
290 | | unsigned long init_size, |
291 | | unsigned long max_size, |
292 | | unsigned long grow_size) |
293 | 1 | { |
294 | 1 | struct xmem_pool *pool; |
295 | 1 | int pool_bytes, pool_order; |
296 | 1 | |
297 | 1 | BUG_ON(max_size && (max_size < init_size)); |
298 | 1 | |
299 | 1 | pool_bytes = ROUNDUP_SIZE(sizeof(*pool)); |
300 | 1 | pool_order = get_order_from_bytes(pool_bytes); |
301 | 1 | |
302 | 1 | pool = (void *)alloc_xenheap_pages(pool_order, 0); |
303 | 1 | if ( pool == NULL ) |
304 | 0 | return NULL; |
305 | 1 | memset(pool, 0, pool_bytes); |
306 | 1 | |
307 | 1 | /* Round to next page boundary */ |
308 | 1 | init_size = ROUNDUP_PAGE(init_size); |
309 | 1 | max_size = ROUNDUP_PAGE(max_size); |
310 | 1 | grow_size = ROUNDUP_PAGE(grow_size); |
311 | 1 | |
312 | 1 | /* pool global overhead not included in used size */ |
313 | 1 | pool->used_size = 0; |
314 | 1 | |
315 | 1 | pool->init_size = init_size; |
316 | 1 | pool->max_size = max_size; |
317 | 1 | pool->grow_size = grow_size; |
318 | 1 | pool->get_mem = get_mem; |
319 | 1 | pool->put_mem = put_mem; |
320 | 1 | strlcpy(pool->name, name, sizeof(pool->name)); |
321 | 1 | |
322 | 1 | /* always obtain init_region lazily now to ensure it is get_mem'd |
323 | 1 | * in the same "context" as all other regions */ |
324 | 1 | |
325 | 1 | spin_lock_init(&pool->lock); |
326 | 1 | |
327 | 1 | spin_lock(&pool_list_lock); |
328 | 1 | list_add_tail(&pool->list, &pool_list_head); |
329 | 1 | spin_unlock(&pool_list_lock); |
330 | 1 | |
331 | 1 | return pool; |
332 | 1 | } |
333 | | |
334 | | unsigned long xmem_pool_get_used_size(struct xmem_pool *pool) |
335 | 0 | { |
336 | 0 | return pool->used_size; |
337 | 0 | } |
338 | | |
339 | | unsigned long xmem_pool_get_total_size(struct xmem_pool *pool) |
340 | 0 | { |
341 | 0 | unsigned long total; |
342 | 0 | total = ROUNDUP_SIZE(sizeof(*pool)) |
343 | 0 | + pool->init_size |
344 | 0 | + (pool->num_regions - 1) * pool->grow_size; |
345 | 0 | return total; |
346 | 0 | } |
347 | | |
348 | | void xmem_pool_destroy(struct xmem_pool *pool) |
349 | 0 | { |
350 | 0 | int pool_bytes, pool_order; |
351 | 0 |
|
352 | 0 | if ( pool == NULL ) |
353 | 0 | return; |
354 | 0 |
|
355 | 0 | /* User is destroying without ever allocating from this pool */ |
356 | 0 | if ( xmem_pool_get_used_size(pool) == BHDR_OVERHEAD ) |
357 | 0 | { |
358 | 0 | ASSERT(!pool->init_region); |
359 | 0 | pool->used_size -= BHDR_OVERHEAD; |
360 | 0 | } |
361 | 0 |
|
362 | 0 | /* Check for memory leaks in this pool */ |
363 | 0 | if ( xmem_pool_get_used_size(pool) ) |
364 | 0 | printk("memory leak in pool: %s (%p). " |
365 | 0 | "%lu bytes still in use.\n", |
366 | 0 | pool->name, pool, xmem_pool_get_used_size(pool)); |
367 | 0 |
|
368 | 0 | spin_lock(&pool_list_lock); |
369 | 0 | list_del_init(&pool->list); |
370 | 0 | spin_unlock(&pool_list_lock); |
371 | 0 |
|
372 | 0 | pool_bytes = ROUNDUP_SIZE(sizeof(*pool)); |
373 | 0 | pool_order = get_order_from_bytes(pool_bytes); |
374 | 0 | free_xenheap_pages(pool,pool_order); |
375 | 0 | } |
376 | | |
377 | | void *xmem_pool_alloc(unsigned long size, struct xmem_pool *pool) |
378 | 2.06k | { |
379 | 2.06k | struct bhdr *b, *b2, *next_b, *region; |
380 | 2.06k | int fl, sl; |
381 | 2.06k | unsigned long tmp_size; |
382 | 2.06k | |
383 | 2.06k | if ( pool->init_region == NULL ) |
384 | 1 | { |
385 | 1 | if ( (region = pool->get_mem(pool->init_size)) == NULL ) |
386 | 0 | goto out; |
387 | 1 | ADD_REGION(region, pool->init_size, pool); |
388 | 1 | pool->init_region = region; |
389 | 1 | } |
390 | 2.06k | |
391 | 2.06k | size = (size < MIN_BLOCK_SIZE) ? MIN_BLOCK_SIZE : ROUNDUP_SIZE(size); |
392 | 2.06k | /* Rounding up the requested size and calculating fl and sl */ |
393 | 2.06k | |
394 | 2.06k | spin_lock(&pool->lock); |
395 | 2.11k | retry_find: |
396 | 2.11k | MAPPING_SEARCH(&size, &fl, &sl); |
397 | 2.11k | |
398 | 2.11k | /* Searching a free block */ |
399 | 2.11k | if ( !(b = FIND_SUITABLE_BLOCK(pool, &fl, &sl)) ) |
400 | 46 | { |
401 | 46 | /* Not found */ |
402 | 46 | if ( size > (pool->grow_size - 2 * BHDR_OVERHEAD) ) |
403 | 0 | goto out_locked; |
404 | 46 | if ( pool->max_size && (pool->init_size + |
405 | 0 | pool->num_regions * pool->grow_size |
406 | 0 | > pool->max_size) ) |
407 | 0 | goto out_locked; |
408 | 46 | spin_unlock(&pool->lock); |
409 | 46 | if ( (region = pool->get_mem(pool->grow_size)) == NULL ) |
410 | 0 | goto out; |
411 | 46 | spin_lock(&pool->lock); |
412 | 46 | ADD_REGION(region, pool->grow_size, pool); |
413 | 46 | goto retry_find; |
414 | 46 | } |
415 | 2.06k | EXTRACT_BLOCK_HDR(b, pool, fl, sl); |
416 | 2.06k | |
417 | 2.06k | /*-- found: */ |
418 | 2.06k | next_b = GET_NEXT_BLOCK(b->ptr.buffer, b->size & BLOCK_SIZE_MASK); |
419 | 2.06k | /* Should the block be split? */ |
420 | 2.06k | tmp_size = (b->size & BLOCK_SIZE_MASK) - size; |
421 | 2.06k | if ( tmp_size >= sizeof(struct bhdr) ) |
422 | 2.02k | { |
423 | 2.02k | tmp_size -= BHDR_OVERHEAD; |
424 | 2.02k | b2 = GET_NEXT_BLOCK(b->ptr.buffer, size); |
425 | 2.02k | |
426 | 2.02k | b2->size = tmp_size | FREE_BLOCK | PREV_USED; |
427 | 2.02k | b2->prev_hdr = b; |
428 | 2.02k | |
429 | 2.02k | next_b->prev_hdr = b2; |
430 | 2.02k | |
431 | 2.02k | MAPPING_INSERT(tmp_size, &fl, &sl); |
432 | 2.02k | INSERT_BLOCK(b2, pool, fl, sl); |
433 | 2.02k | |
434 | 2.02k | b->size = size | (b->size & PREV_STATE); |
435 | 2.02k | } |
436 | 2.06k | else |
437 | 47 | { |
438 | 47 | next_b->size &= (~PREV_FREE); |
439 | 47 | b->size &= (~FREE_BLOCK); /* Now it's used */ |
440 | 47 | } |
441 | 2.06k | |
442 | 2.06k | pool->used_size += (b->size & BLOCK_SIZE_MASK) + BHDR_OVERHEAD; |
443 | 2.06k | |
444 | 2.06k | spin_unlock(&pool->lock); |
445 | 2.06k | return (void *)b->ptr.buffer; |
446 | 2.11k | |
447 | 2.11k | /* Failed alloc */ |
448 | 0 | out_locked: |
449 | 0 | spin_unlock(&pool->lock); |
450 | 0 |
|
451 | 0 | out: |
452 | 0 | return NULL; |
453 | 0 | } |
454 | | |
455 | | void xmem_pool_free(void *ptr, struct xmem_pool *pool) |
456 | 583 | { |
457 | 583 | struct bhdr *b, *tmp_b; |
458 | 583 | int fl = 0, sl = 0; |
459 | 583 | |
460 | 583 | if ( unlikely(ptr == NULL) ) |
461 | 0 | return; |
462 | 583 | |
463 | 583 | b = (struct bhdr *)((char *) ptr - BHDR_OVERHEAD); |
464 | 583 | |
465 | 583 | spin_lock(&pool->lock); |
466 | 583 | b->size |= FREE_BLOCK; |
467 | 583 | pool->used_size -= (b->size & BLOCK_SIZE_MASK) + BHDR_OVERHEAD; |
468 | 583 | b->ptr.free_ptr = (struct free_ptr) { NULL, NULL}; |
469 | 583 | tmp_b = GET_NEXT_BLOCK(b->ptr.buffer, b->size & BLOCK_SIZE_MASK); |
470 | 583 | if ( tmp_b->size & FREE_BLOCK ) |
471 | 463 | { |
472 | 463 | MAPPING_INSERT(tmp_b->size & BLOCK_SIZE_MASK, &fl, &sl); |
473 | 463 | EXTRACT_BLOCK(tmp_b, pool, fl, sl); |
474 | 463 | b->size += (tmp_b->size & BLOCK_SIZE_MASK) + BHDR_OVERHEAD; |
475 | 463 | } |
476 | 583 | if ( b->size & PREV_FREE ) |
477 | 115 | { |
478 | 115 | tmp_b = b->prev_hdr; |
479 | 115 | MAPPING_INSERT(tmp_b->size & BLOCK_SIZE_MASK, &fl, &sl); |
480 | 115 | EXTRACT_BLOCK(tmp_b, pool, fl, sl); |
481 | 115 | tmp_b->size += (b->size & BLOCK_SIZE_MASK) + BHDR_OVERHEAD; |
482 | 115 | b = tmp_b; |
483 | 115 | } |
484 | 583 | tmp_b = GET_NEXT_BLOCK(b->ptr.buffer, b->size & BLOCK_SIZE_MASK); |
485 | 583 | tmp_b->prev_hdr = b; |
486 | 583 | |
487 | 583 | MAPPING_INSERT(b->size & BLOCK_SIZE_MASK, &fl, &sl); |
488 | 583 | |
489 | 583 | if ( (b->prev_hdr == NULL) && ((tmp_b->size & BLOCK_SIZE_MASK) == 0) ) |
490 | 1 | { |
491 | 1 | pool->put_mem(b); |
492 | 1 | pool->num_regions--; |
493 | 1 | pool->used_size -= BHDR_OVERHEAD; /* sentinel block header */ |
494 | 1 | goto out; |
495 | 1 | } |
496 | 583 | |
497 | 582 | INSERT_BLOCK(b, pool, fl, sl); |
498 | 582 | |
499 | 582 | tmp_b->size |= PREV_FREE; |
500 | 582 | tmp_b->prev_hdr = b; |
501 | 583 | out: |
502 | 583 | spin_unlock(&pool->lock); |
503 | 583 | } |
504 | | |
505 | | int xmem_pool_maxalloc(struct xmem_pool *pool) |
506 | 0 | { |
507 | 0 | return pool->grow_size - (2 * BHDR_OVERHEAD); |
508 | 0 | } |
509 | | |
510 | | /* |
511 | | * Glue for xmalloc(). |
512 | | */ |
513 | | |
514 | | static struct xmem_pool *xenpool; |
515 | | |
516 | | static void *xmalloc_pool_get(unsigned long size) |
517 | 47 | { |
518 | 47 | ASSERT(size == PAGE_SIZE); |
519 | 47 | return alloc_xenheap_page(); |
520 | 47 | } |
521 | | |
522 | | static void xmalloc_pool_put(void *p) |
523 | 1 | { |
524 | 1 | free_xenheap_page(p); |
525 | 1 | } |
526 | | |
527 | | static void *xmalloc_whole_pages(unsigned long size, unsigned long align) |
528 | 9 | { |
529 | 9 | unsigned int i, order; |
530 | 9 | void *res, *p; |
531 | 9 | |
532 | 9 | order = get_order_from_bytes(max(align, size)); |
533 | 9 | |
534 | 9 | res = alloc_xenheap_pages(order, 0); |
535 | 9 | if ( res == NULL ) |
536 | 0 | return NULL; |
537 | 9 | |
538 | 28 | for ( p = res + PAGE_ALIGN(size), i = 0; i < order; ++i ) |
539 | 19 | if ( (unsigned long)p & (PAGE_SIZE << i) ) |
540 | 8 | { |
541 | 8 | free_xenheap_pages(p, i); |
542 | 8 | p += PAGE_SIZE << i; |
543 | 8 | } |
544 | 9 | |
545 | 9 | PFN_ORDER(virt_to_page(res)) = PFN_UP(size); |
546 | 9 | /* Check that there was no truncation: */ |
547 | 9 | ASSERT(PFN_ORDER(virt_to_page(res)) == PFN_UP(size)); |
548 | 9 | |
549 | 9 | return res; |
550 | 9 | } |
551 | | |
552 | | static void tlsf_init(void) |
553 | 1 | { |
554 | 1 | INIT_LIST_HEAD(&pool_list_head); |
555 | 1 | spin_lock_init(&pool_list_lock); |
556 | 1 | xenpool = xmem_pool_create( |
557 | 1 | "xmalloc", xmalloc_pool_get, xmalloc_pool_put, |
558 | 1 | PAGE_SIZE, 0, PAGE_SIZE); |
559 | 1 | BUG_ON(!xenpool); |
560 | 1 | } |
561 | | |
562 | | /* |
563 | | * xmalloc() |
564 | | */ |
565 | | |
566 | | #ifndef ZERO_BLOCK_PTR |
567 | | /* Return value for zero-size allocation, distinguished from NULL. */ |
568 | | #define ZERO_BLOCK_PTR ((void *)-1L) |
569 | | #endif |
570 | | |
571 | | void *_xmalloc(unsigned long size, unsigned long align) |
572 | 2.07k | { |
573 | 2.07k | void *p = NULL; |
574 | 2.07k | u32 pad; |
575 | 2.07k | |
576 | 2.07k | ASSERT(!in_irq()); |
577 | 2.07k | |
578 | 2.07k | if ( !size ) |
579 | 0 | return ZERO_BLOCK_PTR; |
580 | 2.07k | |
581 | 2.07k | ASSERT((align & (align - 1)) == 0); |
582 | 2.07k | if ( align < MEM_ALIGN ) |
583 | 1.98k | align = MEM_ALIGN; |
584 | 2.07k | size += align - MEM_ALIGN; |
585 | 2.07k | |
586 | 2.07k | if ( !xenpool ) |
587 | 1 | tlsf_init(); |
588 | 2.07k | |
589 | 2.07k | if ( size < PAGE_SIZE ) |
590 | 2.06k | p = xmem_pool_alloc(size, xenpool); |
591 | 2.07k | if ( p == NULL ) |
592 | 9 | return xmalloc_whole_pages(size - align + MEM_ALIGN, align); |
593 | 2.07k | |
594 | 2.07k | /* Add alignment padding. */ |
595 | 2.06k | if ( (pad = -(long)p & (align - 1)) != 0 ) |
596 | 83 | { |
597 | 83 | char *q = (char *)p + pad; |
598 | 83 | struct bhdr *b = (struct bhdr *)(q - BHDR_OVERHEAD); |
599 | 83 | ASSERT(q > (char *)p); |
600 | 83 | b->size = pad | 1; |
601 | 83 | p = q; |
602 | 83 | } |
603 | 2.06k | |
604 | 2.06k | ASSERT(((unsigned long)p & (align - 1)) == 0); |
605 | 2.06k | return p; |
606 | 2.07k | } |
607 | | |
608 | | void *_xzalloc(unsigned long size, unsigned long align) |
609 | 799 | { |
610 | 799 | void *p = _xmalloc(size, align); |
611 | 799 | |
612 | 799 | return p ? memset(p, 0, size) : p; |
613 | 799 | } |
614 | | |
615 | | void xfree(void *p) |
616 | 584 | { |
617 | 584 | struct bhdr *b; |
618 | 584 | |
619 | 584 | if ( p == NULL || p == ZERO_BLOCK_PTR ) |
620 | 1 | return; |
621 | 584 | |
622 | 583 | ASSERT(!in_irq()); |
623 | 583 | |
624 | 583 | if ( !((unsigned long)p & (PAGE_SIZE - 1)) ) |
625 | 0 | { |
626 | 0 | unsigned long size = PFN_ORDER(virt_to_page(p)); |
627 | 0 | unsigned int i, order = get_order_from_pages(size); |
628 | 0 |
|
629 | 0 | BUG_ON((unsigned long)p & ((PAGE_SIZE << order) - 1)); |
630 | 0 | PFN_ORDER(virt_to_page(p)) = 0; |
631 | 0 | for ( i = 0; ; ++i ) |
632 | 0 | { |
633 | 0 | if ( !(size & (1 << i)) ) |
634 | 0 | continue; |
635 | 0 | size -= 1 << i; |
636 | 0 | free_xenheap_pages(p + (size << PAGE_SHIFT), i); |
637 | 0 | if ( i + 1 >= order ) |
638 | 0 | return; |
639 | 0 | } |
640 | 0 | } |
641 | 583 | |
642 | 583 | /* Strip alignment padding. */ |
643 | 583 | b = (struct bhdr *)((char *) p - BHDR_OVERHEAD); |
644 | 583 | if ( b->size & 1 ) |
645 | 10 | { |
646 | 10 | p = (char *)p - (b->size & ~1u); |
647 | 10 | b = (struct bhdr *)((char *)p - BHDR_OVERHEAD); |
648 | 10 | ASSERT(!(b->size & 1)); |
649 | 10 | } |
650 | 583 | |
651 | 583 | xmem_pool_free(p, xenpool); |
652 | 583 | } |