debuggers.hg
changeset 3653:49103eca5edb
bitkeeper revision 1.1159.238.4 (4200cf15b8T6NKAN_1Kqx3nMy4I7WA)
slab.c in Linux is not a very nice piece of code: the version in Xen has
been hacked a certain amount and is not a vision of beauty either.
Given how rare and non-time-critical dynamic allocations are in Xen,
this replaces the 1800-line slab.c with a 160-line malloc.c which is
written as simply as possible for future enhancement.
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au> (authored)
Signed-off-by: ian.pratt@cl.cam.ac.uk
author | iap10@labyrinth.cl.cam.ac.uk |
---|---|
date | Wed Feb 02 13:01:09 2005 +0000 (2005-02-02) |
parents | 10a0f6b0a996 |
children | 552dd1f1c64c |
files | .rootkeys xen/arch/x86/setup.c xen/common/malloc.c xen/common/page_alloc.c xen/include/xen/domain.h xen/include/xen/lib.h xen/include/xen/list.h xen/include/xen/slab.h |
line diff
1.1 --- a/.rootkeys Wed Feb 02 12:54:40 2005 +0000 1.2 +++ b/.rootkeys Wed Feb 02 13:01:09 2005 +0000 1.3 @@ -923,6 +923,7 @@ 41262590gGIOn-1pvF5KpUu8Wb6_JA xen/commo 1.4 3ddb79bd9drcFPVxd4w2GPOIjLlXpA xen/common/kernel.c 1.5 3e4cd9d8LAAghUY0hNIK72uc2ch_Nw xen/common/keyhandler.c 1.6 3ddb79bduhSEZI8xa7IbGQCpap5y2A xen/common/lib.c 1.7 +4200cf14XGr26_PCC8NxREDhr7Hk5Q xen/common/malloc.c 1.8 41a61536SZbR6cj1ukWTb0DYU-vz9w xen/common/multicall.c 1.9 3ddb79bdD4SLmmdMD7yLW5HcUWucXw xen/common/page_alloc.c 1.10 3e54c38dkHAev597bPr71-hGzTdocg xen/common/perfc.c
2.1 --- a/xen/arch/x86/setup.c Wed Feb 02 12:54:40 2005 +0000 2.2 +++ b/xen/arch/x86/setup.c Wed Feb 02 13:01:09 2005 +0000 2.3 @@ -598,10 +598,6 @@ void __init __start_xen(multiboot_info_t 2.4 2.5 early_boot = 0; 2.6 2.7 - /* Initialise the slab allocator. */ 2.8 - xmem_cache_init(); 2.9 - xmem_cache_sizes_init(max_page); 2.10 - 2.11 start_of_day(); 2.12 2.13 grant_table_init();
3.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 3.2 +++ b/xen/common/malloc.c Wed Feb 02 13:01:09 2005 +0000 3.3 @@ -0,0 +1,164 @@ 3.4 +/* Simple allocator for Xen. If larger than a page, simply use the 3.5 + * page-order allocator. 3.6 + * 3.7 + * Copyright (C) 2005 Rusty Russell IBM Corporation 3.8 + * 3.9 + * This program is free software; you can redistribute it and/or modify 3.10 + * it under the terms of the GNU General Public License as published by 3.11 + * the Free Software Foundation; either version 2 of the License, or 3.12 + * (at your option) any later version. 3.13 + * 3.14 + * This program is distributed in the hope that it will be useful, 3.15 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 3.16 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 3.17 + * GNU General Public License for more details. 3.18 + * 3.19 + * You should have received a copy of the GNU General Public License 3.20 + * along with this program; if not, write to the Free Software 3.21 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 3.22 + */ 3.23 +#include <xen/mm.h> 3.24 +#include <xen/spinlock.h> 3.25 +#include <xen/ac_timer.h> 3.26 + 3.27 +#define BUG_ON(x) do { if (x) BUG(); }while(0) 3.28 + 3.29 +static LIST_HEAD(freelist); 3.30 +static spinlock_t freelist_lock = SPIN_LOCK_UNLOCKED; 3.31 + 3.32 +struct xmalloc_hdr 3.33 +{ 3.34 + /* Total including this hdr: negative means allocated. */ 3.35 + long size; 3.36 + union { 3.37 + struct list_head freelist; 3.38 + char data[0]; 3.39 + } u; 3.40 +}; 3.41 + 3.42 +static void maybe_split(struct xmalloc_hdr *hdr, size_t size, size_t block) 3.43 +{ 3.44 + size_t leftover = block - size; 3.45 + 3.46 + /* If enough left to make a block, put it on free list. 
*/ 3.47 + if (leftover >= sizeof(struct xmalloc_hdr)) { 3.48 + struct xmalloc_hdr *extra; 3.49 + 3.50 + extra = (void *)hdr + size; 3.51 + extra->size = leftover; 3.52 + list_add(&extra->u.freelist, &freelist); 3.53 + } else 3.54 + size = block; 3.55 + 3.56 + hdr->size = -size; 3.57 +} 3.58 + 3.59 +static void *xmalloc_new_page(size_t size) 3.60 +{ 3.61 + struct xmalloc_hdr *hdr; 3.62 + unsigned long flags; 3.63 + 3.64 + hdr = (void *)alloc_xenheap_pages(0); 3.65 + if (!hdr) 3.66 + return NULL; 3.67 + 3.68 + spin_lock_irqsave(&freelist_lock, flags); 3.69 + maybe_split(hdr, size, PAGE_SIZE); 3.70 + spin_unlock_irqrestore(&freelist_lock, flags); 3.71 + return hdr->u.data; 3.72 +} 3.73 + 3.74 +/* Big object? Just use page allocator. */ 3.75 +static void *xmalloc_whole_pages(size_t size) 3.76 +{ 3.77 + struct xmalloc_hdr *hdr; 3.78 + unsigned int pageorder = get_order(size); 3.79 + 3.80 + hdr = (void *)alloc_xenheap_pages(pageorder); 3.81 + if (!hdr) 3.82 + return NULL; 3.83 + 3.84 + hdr->size = -(1 << (pageorder + PAGE_SHIFT)); 3.85 + return hdr->u.data; 3.86 +} 3.87 + 3.88 +void *__xmalloc(size_t size, const char *file, unsigned int line) 3.89 +{ 3.90 + struct xmalloc_hdr *i; 3.91 + unsigned long flags; 3.92 + 3.93 + /* Add room for header, align to unsigned long. */ 3.94 + size += offsetof(struct xmalloc_hdr, u.data); 3.95 + size = ((size + sizeof(unsigned long)-1)&~(sizeof(unsigned long)-1)); 3.96 + 3.97 + /* Minimum size is size of freelist entry. */ 3.98 + if (size < sizeof(*i)) 3.99 + size = sizeof(*i); 3.100 + 3.101 + /* For big allocs, give them whole pages. 
*/ 3.102 + if (size >= PAGE_SIZE) 3.103 + return xmalloc_whole_pages(size); 3.104 + 3.105 + /* Search free list */ 3.106 + spin_lock_irqsave(&freelist_lock, flags); 3.107 + list_for_each_entry(i, &freelist, u.freelist) { 3.108 + if (i->size >= size) { 3.109 + list_del(&i->u.freelist); 3.110 + maybe_split(i, size, i->size); 3.111 + spin_unlock_irqrestore(&freelist_lock, flags); 3.112 + return i->u.data; 3.113 + } 3.114 + } 3.115 + spin_unlock_irqrestore(&freelist_lock, flags); 3.116 + 3.117 + /* Alloc a new page and return from that. */ 3.118 + return xmalloc_new_page(size); 3.119 +} 3.120 + 3.121 +void __xfree(const void *p, const char *file, unsigned int line) 3.122 +{ 3.123 + unsigned long flags; 3.124 + struct xmalloc_hdr *i, *tmp, *hdr; 3.125 + 3.126 + if (!p) 3.127 + return; 3.128 + 3.129 + hdr = container_of((void *)p, struct xmalloc_hdr, u.data); 3.130 + 3.131 + /* We know hdr will be on same page. */ 3.132 + BUG_ON(((long)p & PAGE_MASK) != ((long)hdr & PAGE_MASK)); 3.133 + 3.134 + /* Not previously freed. */ 3.135 + BUG_ON(hdr->size > 0); 3.136 + hdr->size = -hdr->size; 3.137 + 3.138 + /* Big allocs free directly. */ 3.139 + if (hdr->size >= PAGE_SIZE) { 3.140 + free_xenheap_pages((unsigned long)hdr, get_order(hdr->size)); 3.141 + return; 3.142 + } 3.143 + 3.144 + /* Merge with other free block, or put in list. */ 3.145 + spin_lock_irqsave(&freelist_lock, flags); 3.146 + list_for_each_entry_safe(i, tmp, &freelist, u.freelist) { 3.147 + /* We follow this block? Swallow it. */ 3.148 + if ((void *)i + i->size == (void *)hdr) { 3.149 + list_del(&i->u.freelist); 3.150 + i->size += hdr->size; 3.151 + hdr = i; 3.152 + } 3.153 + /* It follows us? Delete it and add it to us. */ 3.154 + if ((void *)hdr + hdr->size == (void *)i) { 3.155 + list_del(&i->u.freelist); 3.156 + hdr->size += i->size; 3.157 + } 3.158 + } 3.159 + 3.160 + /* Did we free entire page? 
*/ 3.161 + if (hdr->size == PAGE_SIZE) { 3.162 + BUG_ON((((unsigned long)hdr) & (PAGE_SIZE-1)) != 0); 3.163 + free_xenheap_pages((unsigned long)hdr, 0); 3.164 + } else 3.165 + list_add(&hdr->u.freelist, &freelist); 3.166 + spin_unlock_irqrestore(&freelist_lock, flags); 3.167 +}
4.1 --- a/xen/common/page_alloc.c Wed Feb 02 12:54:40 2005 +0000 4.2 +++ b/xen/common/page_alloc.c Wed Feb 02 13:01:09 2005 +0000 4.3 @@ -403,9 +403,8 @@ unsigned long alloc_xenheap_pages(unsign 4.4 { 4.5 unsigned long flags; 4.6 struct pfn_info *pg; 4.7 - int i, attempts = 0; 4.8 + int i; 4.9 4.10 - retry: 4.11 local_irq_save(flags); 4.12 pg = alloc_heap_pages(MEMZONE_XEN, order); 4.13 local_irq_restore(flags); 4.14 @@ -425,14 +424,7 @@ unsigned long alloc_xenheap_pages(unsign 4.15 return (unsigned long)page_to_virt(pg); 4.16 4.17 no_memory: 4.18 - if ( attempts++ < 8 ) 4.19 - { 4.20 - xmem_cache_reap(); 4.21 - goto retry; 4.22 - } 4.23 - 4.24 printk("Cannot handle page request order %d!\n", order); 4.25 - dump_slabinfo(); 4.26 return 0; 4.27 } 4.28
5.1 --- a/xen/include/xen/domain.h Wed Feb 02 12:54:40 2005 +0000 5.2 +++ b/xen/include/xen/domain.h Wed Feb 02 13:01:09 2005 +0000 5.3 @@ -6,8 +6,6 @@ 5.4 * Arch-specifics. 5.5 */ 5.6 5.7 -extern void domain_startofday(void); 5.8 - 5.9 extern struct domain *arch_alloc_domain_struct(void); 5.10 5.11 extern void arch_free_domain_struct(struct domain *d);
6.1 --- a/xen/include/xen/lib.h Wed Feb 02 12:54:40 2005 +0000 6.2 +++ b/xen/include/xen/lib.h Wed Feb 02 13:01:09 2005 +0000 6.3 @@ -20,6 +20,18 @@ struct domain; 6.4 6.5 void cmdline_parse(char *cmdline); 6.6 6.7 +/** 6.8 + * container_of - cast a member of a structure out to the containing structure 6.9 + * 6.10 + * @ptr: the pointer to the member. 6.11 + * @type: the type of the container struct this is embedded in. 6.12 + * @member: the name of the member within the struct. 6.13 + * 6.14 + */ 6.15 +#define container_of(ptr, type, member) ({ \ 6.16 + const typeof( ((type *)0)->member ) *__mptr = (ptr); \ 6.17 + (type *)( (char *)__mptr - offsetof(type,member) );}) 6.18 + 6.19 #define printk printf 6.20 void printf(const char *format, ...); 6.21 void panic(const char *format, ...);
7.1 --- a/xen/include/xen/list.h Wed Feb 02 12:54:40 2005 +0000 7.2 +++ b/xen/include/xen/list.h Wed Feb 02 13:01:09 2005 +0000 7.3 @@ -174,5 +174,17 @@ static __inline__ void list_splice(struc 7.4 pos = list_entry(pos->member.next, typeof(*pos), member), \ 7.5 prefetch(pos->member.next)) 7.6 7.7 +/** 7.8 + * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry 7.9 + * @pos: the type * to use as a loop counter. 7.10 + * @n: another type * to use as temporary storage 7.11 + * @head: the head for your list. 7.12 + * @member: the name of the list_struct within the struct. 7.13 + */ 7.14 +#define list_for_each_entry_safe(pos, n, head, member) \ 7.15 + for (pos = list_entry((head)->next, typeof(*pos), member), \ 7.16 + n = list_entry(pos->member.next, typeof(*pos), member); \ 7.17 + &pos->member != (head); \ 7.18 + pos = n, n = list_entry(n->member.next, typeof(*n), member)) 7.19 #endif /* _LINUX_LIST_H */ 7.20
8.1 --- a/xen/include/xen/slab.h Wed Feb 02 12:54:40 2005 +0000 8.2 +++ b/xen/include/xen/slab.h Wed Feb 02 13:01:09 2005 +0000 8.3 @@ -13,45 +13,14 @@ 8.4 #include <asm/slab.h> 8.5 8.6 #else 8.7 - 8.8 -typedef struct xmem_cache_s xmem_cache_t; 8.9 - 8.10 #include <xen/mm.h> 8.11 #include <xen/cache.h> 8.12 #include <xen/types.h> 8.13 8.14 -/* Flags to pass to xmem_cache_create(). */ 8.15 -/* NB. The first 3 are only valid when built with SLAB_DEBUG_SUPPORT. */ 8.16 -#define SLAB_DEBUG_INITIAL 0x00000200UL /* Call constructor */ 8.17 -#define SLAB_RED_ZONE 0x00000400UL /* Red zone objs in a cache */ 8.18 -#define SLAB_POISON 0x00000800UL /* Poison objects */ 8.19 -#define SLAB_NO_REAP 0x00001000UL /* never reap from the cache */ 8.20 -#define SLAB_HWCACHE_ALIGN 0x00002000UL /* align obj on a cache line */ 8.21 - 8.22 -/* Flags passed to a constructor function. */ 8.23 -#define SLAB_CTOR_CONSTRUCTOR 0x001UL /* if not set, then deconstructor */ 8.24 -#define SLAB_CTOR_ATOMIC 0x002UL /* tell cons. it can't sleep */ 8.25 -#define SLAB_CTOR_VERIFY 0x004UL /* tell cons. 
it's a verify call */ 8.26 - 8.27 -extern void xmem_cache_init(void); 8.28 -extern void xmem_cache_sizes_init(unsigned long); 8.29 - 8.30 -extern xmem_cache_t *xmem_find_general_cachep(size_t); 8.31 -extern xmem_cache_t *xmem_cache_create( 8.32 - const char *, size_t, size_t, unsigned long, 8.33 - void (*)(void *, xmem_cache_t *, unsigned long), 8.34 - void (*)(void *, xmem_cache_t *, unsigned long)); 8.35 -extern int xmem_cache_destroy(xmem_cache_t *); 8.36 -extern int xmem_cache_shrink(xmem_cache_t *); 8.37 -extern void *xmem_cache_alloc(xmem_cache_t *); 8.38 -extern void xmem_cache_free(xmem_cache_t *, void *); 8.39 - 8.40 -extern void *_xmalloc(size_t); 8.41 -extern void xfree(const void *); 8.42 - 8.43 -extern int xmem_cache_reap(void); 8.44 - 8.45 -extern void dump_slabinfo(); 8.46 +#define _xmalloc(size) __xmalloc(size, __FILE__, __LINE__) 8.47 +#define xfree(ptr) __xfree(ptr, __FILE__, __LINE__) 8.48 +extern void *__xmalloc(size_t size, const char *file, unsigned int line); 8.49 +extern void __xfree(const void *p, const char *file, unsigned int line); 8.50 8.51 /* Nicely typesafe for you. */ 8.52 #define xmalloc(type) ((type *)_xmalloc(sizeof(type)))