xen/common/xmalloc_tlsf.c @ 22855:1d1eec7e1fb4
/*
 * Two Levels Segregate Fit memory allocator (TLSF)
 * Version 2.3.2
 *
 * Written by Miguel Masmano Tello <mimastel@doctor.upv.es>
 *
 * Thanks to Ismael Ripoll for his suggestions and reviews
 *
 * Copyright (C) 2007, 2006, 2005, 2004
 *
 * This code is released using a dual license strategy: GPL/LGPL
 * You can choose the licence that better fits your requirements.
 *
 * Released under the terms of the GNU General Public License Version 2.0
 * Released under the terms of the GNU Lesser General Public License
 * Version 2.1
 *
 * This is a kernel port of the TLSF allocator.
 * Original code can be found at: http://rtportal.upv.es/rtmalloc/
 * Adapted for Linux by Nitin Gupta (nitingupta910@gmail.com)
 * (http://code.google.com/p/compcache/source/browse/trunk/sub-projects
 * /allocators/tlsf-kmod r229 dated Aug 27, 2008)
 * Adapted for Xen by Dan Magenheimer (dan.magenheimer@oracle.com)
 */

#include <xen/config.h>
#include <xen/irq.h>
#include <xen/mm.h>
#include <asm/time.h>

#define MAX_POOL_NAME_LEN 16

/* Some IMPORTANT TLSF parameters */
#define MEM_ALIGN       (sizeof(void *) * 2)
#define MEM_ALIGN_MASK  (~(MEM_ALIGN - 1))

#define MAX_FLI         (30)
#define MAX_LOG2_SLI    (5)
#define MAX_SLI         (1 << MAX_LOG2_SLI)

#define FLI_OFFSET      (6)
/* The TLSF structure will only manage blocks bigger than 128 bytes */
#define SMALL_BLOCK     (128)
#define REAL_FLI        (MAX_FLI - FLI_OFFSET)
#define MIN_BLOCK_SIZE  (sizeof(struct free_ptr))
#define BHDR_OVERHEAD   (sizeof(struct bhdr) - MIN_BLOCK_SIZE)

#define PTR_MASK        (sizeof(void *) - 1)
#define BLOCK_SIZE_MASK (0xFFFFFFFF - PTR_MASK)

#define GET_NEXT_BLOCK(addr, r) ((struct bhdr *) \
                                 ((char *)(addr) + (r)))
#define ROUNDUP_SIZE(r)   (((r) + MEM_ALIGN - 1) & MEM_ALIGN_MASK)
#define ROUNDDOWN_SIZE(r) ((r) & MEM_ALIGN_MASK)
#define ROUNDUP_PAGE(r)   (((r) + PAGE_SIZE - 1) & PAGE_MASK)

#define BLOCK_STATE (0x1)
#define PREV_STATE  (0x2)

/* bit 0 of the block size */
#define FREE_BLOCK  (0x1)
#define USED_BLOCK  (0x0)

/* bit 1 of the block size */
#define PREV_FREE   (0x2)
#define PREV_USED   (0x0)

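/*
 * Illustrative example (not from the original source): a free 256-byte
 * block whose physical predecessor is in use stores
 *     size = 256 | FREE_BLOCK | PREV_USED = 0x101,
 * so (size & BLOCK_SIZE_MASK) recovers 256 while the low bits carry the
 * free/used state of this block and of the previous one.
 */
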
static spinlock_t pool_list_lock;
static struct list_head pool_list_head;

struct free_ptr {
    struct bhdr *prev;
    struct bhdr *next;
};

struct bhdr {
    /* All blocks in a region are linked in order of physical address */
    struct bhdr *prev_hdr;
    /*
     * The size is stored in bytes
     *  bit 0: block is free, if set
     *  bit 1: previous block is free, if set
     */
    u32 size;
    /* Free blocks in individual freelists are linked */
    union {
        struct free_ptr free_ptr;
        u8 buffer[sizeof(struct free_ptr)];
    } ptr;
};

struct xmem_pool {
    /* First level bitmap (REAL_FLI bits) */
    u32 fl_bitmap;

    /* Second level bitmap */
    u32 sl_bitmap[REAL_FLI];

    /* Free lists */
    struct bhdr *matrix[REAL_FLI][MAX_SLI];

    spinlock_t lock;

    unsigned long init_size;
    unsigned long max_size;
    unsigned long grow_size;

    /* Basic stats */
    unsigned long used_size;
    unsigned long num_regions;

    /* User provided functions for expanding/shrinking pool */
    xmem_pool_get_memory *get_mem;
    xmem_pool_put_memory *put_mem;

    struct list_head list;

    void *init_region;
    char name[MAX_POOL_NAME_LEN];
};

/*
 * Helping functions
 */

/**
 * Returns indexes (fl, sl) of the list used to serve a request of size r.
 * It also rounds the requested size (r) up so that any block on the
 * returned list can satisfy it.
 */
static inline void MAPPING_SEARCH(unsigned long *r, int *fl, int *sl)
{
    int t;

    if ( *r < SMALL_BLOCK )
    {
        *fl = 0;
        *sl = *r / (SMALL_BLOCK / MAX_SLI);
    }
    else
    {
        t = (1 << (fls(*r) - 1 - MAX_LOG2_SLI)) - 1;
        *r = *r + t;
        *fl = fls(*r) - 1;
        *sl = (*r >> (*fl - MAX_LOG2_SLI)) - MAX_SLI;
        *fl -= FLI_OFFSET;
        /*if ((*fl -= FLI_OFFSET) < 0) // FL will be always >0!
         *fl = *sl = 0;
         */
        *r &= ~t;
    }
}

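/*
 * Worked example (illustrative; assumes fls() returns the 1-based index
 * of the most significant set bit): for a request of r = 1000 bytes,
 *     t  = (1 << (fls(1000) - 1 - 5)) - 1 = 15
 *     r  = (1000 + 15) & ~15              = 1008
 *     fl = fls(1015) - 1 - FLI_OFFSET     = 9 - 6 = 3
 *     sl = (1015 >> (9 - 5)) - 32         = 63 - 32 = 31
 * i.e. the request is rounded up to 1008 bytes and served from list
 * matrix[3][31].
 */
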
/**
 * Returns indexes (fl, sl) of the list to which a block of size r
 * belongs; unlike MAPPING_SEARCH() the size is not rounded up.
 */
static inline void MAPPING_INSERT(unsigned long r, int *fl, int *sl)
{
    if ( r < SMALL_BLOCK )
    {
        *fl = 0;
        *sl = r / (SMALL_BLOCK / MAX_SLI);
    }
    else
    {
        *fl = fls(r) - 1;
        *sl = (r >> (*fl - MAX_LOG2_SLI)) - MAX_SLI;
        *fl -= FLI_OFFSET;
    }
}

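/*
 * Illustrative continuation of the example above: a free block of 1008
 * bytes maps to
 *     fl = fls(1008) - 1 - FLI_OFFSET = 9 - 6 = 3
 *     sl = (1008 >> (9 - 5)) - 32     = 63 - 32 = 31
 * so a block freed into matrix[3][31] is exactly where MAPPING_SEARCH()
 * looks first for a (rounded-up) 1008-byte request.
 */
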
/**
 * Returns the first block from a free list that holds blocks larger
 * than or equal to the one indexed by (fl, sl).
 */
static inline struct bhdr *FIND_SUITABLE_BLOCK(struct xmem_pool *p, int *fl,
                                               int *sl)
{
    u32 tmp = p->sl_bitmap[*fl] & (~0 << *sl);
    struct bhdr *b = NULL;

    if ( tmp )
    {
        *sl = ffs(tmp) - 1;
        b = p->matrix[*fl][*sl];
    }
    else
    {
        *fl = ffs(p->fl_bitmap & (~0 << (*fl + 1))) - 1;
        if ( likely(*fl > 0) )
        {
            *sl = ffs(p->sl_bitmap[*fl]) - 1;
            b = p->matrix[*fl][*sl];
        }
    }

    return b;
}

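/*
 * Illustrative example (values invented for this sketch): with fl = 3,
 * sl = 31 and sl_bitmap[3] = 0x00000003, the mask (~0 << 31) clears both
 * set bits, so no list at or above (3, 31) is populated.  The fallback
 * then scans fl_bitmap with (~0 << 4) for the next non-empty first-level
 * index and takes its lowest populated second-level list.
 */
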
/**
 * Remove the first free block (b) from the free list with indexes (fl, sl).
 */
static inline void EXTRACT_BLOCK_HDR(struct bhdr *b, struct xmem_pool *p, int fl,
                                     int sl)
{
    p->matrix[fl][sl] = b->ptr.free_ptr.next;
    if ( p->matrix[fl][sl] )
    {
        p->matrix[fl][sl]->ptr.free_ptr.prev = NULL;
    }
    else
    {
        clear_bit(sl, &p->sl_bitmap[fl]);
        if ( !p->sl_bitmap[fl] )
            clear_bit(fl, &p->fl_bitmap);
    }
    b->ptr.free_ptr = (struct free_ptr) {NULL, NULL};
}

/**
 * Removes block (b) from the free list with indexes (fl, sl)
 */
static inline void EXTRACT_BLOCK(struct bhdr *b, struct xmem_pool *p, int fl,
                                 int sl)
{
    if ( b->ptr.free_ptr.next )
        b->ptr.free_ptr.next->ptr.free_ptr.prev =
            b->ptr.free_ptr.prev;
    if ( b->ptr.free_ptr.prev )
        b->ptr.free_ptr.prev->ptr.free_ptr.next =
            b->ptr.free_ptr.next;
    if ( p->matrix[fl][sl] == b )
    {
        p->matrix[fl][sl] = b->ptr.free_ptr.next;
        if ( !p->matrix[fl][sl] )
        {
            clear_bit(sl, &p->sl_bitmap[fl]);
            if ( !p->sl_bitmap[fl] )
                clear_bit(fl, &p->fl_bitmap);
        }
    }
    b->ptr.free_ptr = (struct free_ptr) {NULL, NULL};
}

/**
 * Insert block (b) in the free list with indexes (fl, sl)
 */
static inline void INSERT_BLOCK(struct bhdr *b, struct xmem_pool *p, int fl, int sl)
{
    b->ptr.free_ptr = (struct free_ptr) {NULL, p->matrix[fl][sl]};
    if ( p->matrix[fl][sl] )
        p->matrix[fl][sl]->ptr.free_ptr.prev = b;
    p->matrix[fl][sl] = b;
    set_bit(sl, &p->sl_bitmap[fl]);
    set_bit(fl, &p->fl_bitmap);
}

/**
 * A region is a virtually contiguous chunk of memory; a pool is a
 * collection of such regions.
 */
static inline void ADD_REGION(void *region, unsigned long region_size,
                              struct xmem_pool *pool)
{
    int fl, sl;
    struct bhdr *b, *lb;

    b = (struct bhdr *)(region);
    b->prev_hdr = NULL;
    b->size = ROUNDDOWN_SIZE(region_size - 2 * BHDR_OVERHEAD)
        | FREE_BLOCK | PREV_USED;
    MAPPING_INSERT(b->size & BLOCK_SIZE_MASK, &fl, &sl);
    INSERT_BLOCK(b, pool, fl, sl);
    /* The sentinel block: allows us to know when we're in the last block */
    lb = GET_NEXT_BLOCK(b->ptr.buffer, b->size & BLOCK_SIZE_MASK);
    lb->prev_hdr = b;
    lb->size = 0 | USED_BLOCK | PREV_FREE;
    pool->used_size += BHDR_OVERHEAD; /* only the sentinel block is "used" */
    pool->num_regions++;
}

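/*
 * Resulting region layout (illustrative, not from the original source):
 *
 *   +--------+---------------------------------------+----------+
 *   | bhdr b |   free space (b->size, rounded down)  | sentinel |
 *   +--------+---------------------------------------+----------+
 *
 * The sentinel is a zero-sized "used" block at the end of the region;
 * its BHDR_OVERHEAD is the only space accounted to pool->used_size here.
 */
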
/*
 * TLSF pool-based allocator start.
 */

struct xmem_pool *xmem_pool_create(
    const char *name,
    xmem_pool_get_memory get_mem,
    xmem_pool_put_memory put_mem,
    unsigned long init_size,
    unsigned long max_size,
    unsigned long grow_size)
{
    struct xmem_pool *pool;
    int pool_bytes, pool_order;

    BUG_ON(max_size && (max_size < init_size));

    pool_bytes = ROUNDUP_SIZE(sizeof(*pool));
    pool_order = get_order_from_bytes(pool_bytes);

    pool = (void *)alloc_xenheap_pages(pool_order, 0);
    if ( pool == NULL )
        return NULL;
    memset(pool, 0, pool_bytes);

    /* Round to next page boundary */
    init_size = ROUNDUP_PAGE(init_size);
    max_size = ROUNDUP_PAGE(max_size);
    grow_size = ROUNDUP_PAGE(grow_size);

    /* pool global overhead not included in used size */
    pool->used_size = 0;

    pool->init_size = init_size;
    pool->max_size = max_size;
    pool->grow_size = grow_size;
    pool->get_mem = get_mem;
    pool->put_mem = put_mem;
    strlcpy(pool->name, name, sizeof(pool->name));

    /* always obtain init_region lazily now to ensure it is get_mem'd
     * in the same "context" as all other regions */

    spin_lock_init(&pool->lock);

    spin_lock(&pool_list_lock);
    list_add_tail(&pool->list, &pool_list_head);
    spin_unlock(&pool_list_lock);

    return pool;
}

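/*
 * Illustrative usage sketch, not part of the allocator: how a caller
 * might create a page-backed pool.  The names demo_get(), demo_put(),
 * demo_pool and demo_pool_init() are hypothetical and exist only for
 * this example (compare the real xmalloc glue at the end of this file).
 */
#if 0
static void *demo_get(unsigned long size)
{
    ASSERT(size == PAGE_SIZE);
    return alloc_xenheap_page();
}

static void demo_put(void *p)
{
    free_xenheap_page(p);
}

static struct xmem_pool *demo_pool;

static void demo_pool_init(void)
{
    /* max_size == 0 means the pool may keep growing by grow_size bytes. */
    demo_pool = xmem_pool_create("demo", demo_get, demo_put,
                                 PAGE_SIZE, 0, PAGE_SIZE);
    BUG_ON(demo_pool == NULL);
}
#endif
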
unsigned long xmem_pool_get_used_size(struct xmem_pool *pool)
{
    return pool->used_size;
}

unsigned long xmem_pool_get_total_size(struct xmem_pool *pool)
{
    unsigned long total;
    total = ROUNDUP_SIZE(sizeof(*pool))
        + pool->init_size
        + (pool->num_regions - 1) * pool->grow_size;
    return total;
}

void xmem_pool_destroy(struct xmem_pool *pool)
{
    int pool_bytes, pool_order;

    if ( pool == NULL )
        return;

    /* User is destroying without ever allocating from this pool */
    if ( xmem_pool_get_used_size(pool) == BHDR_OVERHEAD )
    {
        ASSERT(!pool->init_region);
        pool->used_size -= BHDR_OVERHEAD;
    }

    /* Check for memory leaks in this pool */
    if ( xmem_pool_get_used_size(pool) )
        printk("memory leak in pool: %s (%p). "
               "%lu bytes still in use.\n",
               pool->name, pool, xmem_pool_get_used_size(pool));

    spin_lock(&pool_list_lock);
    list_del_init(&pool->list);
    spin_unlock(&pool_list_lock);

    pool_bytes = ROUNDUP_SIZE(sizeof(*pool));
    pool_order = get_order_from_bytes(pool_bytes);
    free_xenheap_pages(pool, pool_order);
}

void *xmem_pool_alloc(unsigned long size, struct xmem_pool *pool)
{
    struct bhdr *b, *b2, *next_b, *region;
    int fl, sl;
    unsigned long tmp_size;

    if ( pool->init_region == NULL )
    {
        if ( (region = pool->get_mem(pool->init_size)) == NULL )
            goto out;
        ADD_REGION(region, pool->init_size, pool);
        pool->init_region = region;
    }

    size = (size < MIN_BLOCK_SIZE) ? MIN_BLOCK_SIZE : ROUNDUP_SIZE(size);
    /* Rounding up the requested size and calculating fl and sl */

    spin_lock(&pool->lock);
 retry_find:
    MAPPING_SEARCH(&size, &fl, &sl);

    /* Searching a free block */
    if ( !(b = FIND_SUITABLE_BLOCK(pool, &fl, &sl)) )
    {
        /* Not found */
        if ( size > (pool->grow_size - 2 * BHDR_OVERHEAD) )
            goto out_locked;
        if ( pool->max_size && (pool->init_size +
                                pool->num_regions * pool->grow_size
                                > pool->max_size) )
            goto out_locked;
        spin_unlock(&pool->lock);
        if ( (region = pool->get_mem(pool->grow_size)) == NULL )
            goto out;
        spin_lock(&pool->lock);
        ADD_REGION(region, pool->grow_size, pool);
        goto retry_find;
    }
    EXTRACT_BLOCK_HDR(b, pool, fl, sl);

    /*-- found: */
    next_b = GET_NEXT_BLOCK(b->ptr.buffer, b->size & BLOCK_SIZE_MASK);
    /* Should the block be split? */
    tmp_size = (b->size & BLOCK_SIZE_MASK) - size;
    if ( tmp_size >= sizeof(struct bhdr) )
    {
        tmp_size -= BHDR_OVERHEAD;
        b2 = GET_NEXT_BLOCK(b->ptr.buffer, size);

        b2->size = tmp_size | FREE_BLOCK | PREV_USED;
        b2->prev_hdr = b;

        next_b->prev_hdr = b2;

        MAPPING_INSERT(tmp_size, &fl, &sl);
        INSERT_BLOCK(b2, pool, fl, sl);

        b->size = size | (b->size & PREV_STATE);
    }
    else
    {
        next_b->size &= (~PREV_FREE);
        b->size &= (~FREE_BLOCK); /* Now it's used */
    }

    pool->used_size += (b->size & BLOCK_SIZE_MASK) + BHDR_OVERHEAD;

    spin_unlock(&pool->lock);
    return (void *)b->ptr.buffer;

    /* Failed alloc */
 out_locked:
    spin_unlock(&pool->lock);

 out:
    return NULL;
}

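/*
 * Illustrative sketch (demo_pool is the hypothetical pool from the
 * example above): objects obtained from xmem_pool_alloc() must be
 * returned to the same pool, e.g.
 *
 *     void *obj = xmem_pool_alloc(64, demo_pool);
 *     if ( obj != NULL )
 *         xmem_pool_free(obj, demo_pool);
 */
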
void xmem_pool_free(void *ptr, struct xmem_pool *pool)
{
    struct bhdr *b, *tmp_b;
    int fl = 0, sl = 0;

    if ( unlikely(ptr == NULL) )
        return;

    b = (struct bhdr *)((char *) ptr - BHDR_OVERHEAD);

    spin_lock(&pool->lock);
    b->size |= FREE_BLOCK;
    pool->used_size -= (b->size & BLOCK_SIZE_MASK) + BHDR_OVERHEAD;
    b->ptr.free_ptr = (struct free_ptr) { NULL, NULL};
    tmp_b = GET_NEXT_BLOCK(b->ptr.buffer, b->size & BLOCK_SIZE_MASK);
    if ( tmp_b->size & FREE_BLOCK )
    {
        MAPPING_INSERT(tmp_b->size & BLOCK_SIZE_MASK, &fl, &sl);
        EXTRACT_BLOCK(tmp_b, pool, fl, sl);
        b->size += (tmp_b->size & BLOCK_SIZE_MASK) + BHDR_OVERHEAD;
    }
    if ( b->size & PREV_FREE )
    {
        tmp_b = b->prev_hdr;
        MAPPING_INSERT(tmp_b->size & BLOCK_SIZE_MASK, &fl, &sl);
        EXTRACT_BLOCK(tmp_b, pool, fl, sl);
        tmp_b->size += (b->size & BLOCK_SIZE_MASK) + BHDR_OVERHEAD;
        b = tmp_b;
    }
    tmp_b = GET_NEXT_BLOCK(b->ptr.buffer, b->size & BLOCK_SIZE_MASK);
    tmp_b->prev_hdr = b;

    MAPPING_INSERT(b->size & BLOCK_SIZE_MASK, &fl, &sl);

    if ( (b->prev_hdr == NULL) && ((tmp_b->size & BLOCK_SIZE_MASK) == 0) )
    {
        pool->put_mem(b);
        pool->num_regions--;
        pool->used_size -= BHDR_OVERHEAD; /* sentinel block header */
        goto out;
    }

    INSERT_BLOCK(b, pool, fl, sl);

    tmp_b->size |= PREV_FREE;
    tmp_b->prev_hdr = b;
 out:
    spin_unlock(&pool->lock);
}

int xmem_pool_maxalloc(struct xmem_pool *pool)
{
    return pool->grow_size - (2 * BHDR_OVERHEAD);
}

/*
 * Glue for xmalloc().
 */

static struct xmem_pool *xenpool;

static void *xmalloc_pool_get(unsigned long size)
{
    ASSERT(size == PAGE_SIZE);
    return alloc_xenheap_page();
}

static void xmalloc_pool_put(void *p)
{
    free_xenheap_page(p);
}

static void *xmalloc_whole_pages(unsigned long size)
{
    struct bhdr *b;
    unsigned int pageorder = get_order_from_bytes(size + BHDR_OVERHEAD);

    b = alloc_xenheap_pages(pageorder, 0);
    if ( b == NULL )
        return NULL;

    b->size = (1 << (pageorder + PAGE_SHIFT));
    return (void *)b->ptr.buffer;
}

static void tlsf_init(void)
{
    INIT_LIST_HEAD(&pool_list_head);
    spin_lock_init(&pool_list_lock);
    xenpool = xmem_pool_create(
        "xmalloc", xmalloc_pool_get, xmalloc_pool_put,
        PAGE_SIZE, 0, PAGE_SIZE);
    BUG_ON(!xenpool);
}

/*
 * xmalloc()
 */

void *_xmalloc(unsigned long size, unsigned long align)
{
    void *p = NULL;
    u32 pad;

    ASSERT(!in_irq());

    ASSERT((align & (align - 1)) == 0);
    if ( align < MEM_ALIGN )
        align = MEM_ALIGN;
    size += align - MEM_ALIGN;

    if ( !xenpool )
        tlsf_init();

    if ( size < PAGE_SIZE )
        p = xmem_pool_alloc(size, xenpool);
    if ( p == NULL )
        p = xmalloc_whole_pages(size);

    /* Add alignment padding. */
    if ( (pad = -(long)p & (align - 1)) != 0 )
    {
        char *q = (char *)p + pad;
        struct bhdr *b = (struct bhdr *)(q - BHDR_OVERHEAD);
        ASSERT(q > (char *)p);
        b->size = pad | 1;
        p = q;
    }

    ASSERT(((unsigned long)p & (align - 1)) == 0);
    return p;
}

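/*
 * Worked example of the padding logic above (numbers invented for this
 * sketch): with p ending in 0xc8 and align == 64,
 *     pad = -(0xc8) & 63 = 56,
 * so q = p + 56 is 64-byte aligned and a fake block header written just
 * below q records "pad | 1".  xfree() uses the set low bit to detect
 * such a padding header and recover the original, unpadded pointer.
 */
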
void xfree(void *p)
{
    struct bhdr *b;

    ASSERT(!in_irq());

    if ( p == NULL )
        return;

    /* Strip alignment padding. */
    b = (struct bhdr *)((char *) p - BHDR_OVERHEAD);
    if ( b->size & 1 )
    {
        p = (char *)p - (b->size & ~1u);
        b = (struct bhdr *)((char *)p - BHDR_OVERHEAD);
        ASSERT(!(b->size & 1));
    }

    if ( b->size >= PAGE_SIZE )
        free_xenheap_pages((void *)b, get_order_from_bytes(b->size));
    else
        xmem_pool_free(p, xenpool);
}