debuggers.hg
changeset 3691:32d29625d39b
bitkeeper revision 1.1159.212.88 (4203fbb5CoEmX_fzlXc90dxNw6gqoA)
Merge labyrinth.cl.cam.ac.uk:/auto/groups/xeno-xenod/BK/xen-unstable.bk
into labyrinth.cl.cam.ac.uk:/auto/groups/xeno/users/iap10/xeno-clone/xen-unstable.bkX
author | iap10@labyrinth.cl.cam.ac.uk |
---|---|
date | Fri Feb 04 22:48:21 2005 +0000 (2005-02-04) |
parents | 8c6281ec8860 6e6976c1a545 |
children | 924777207448 |
files | .rootkeys xen/arch/x86/setup.c xen/common/page_alloc.c xen/common/xmalloc.c xen/include/xen/domain.h xen/include/xen/list.h xen/include/xen/slab.h |
line diff
--- a/.rootkeys Fri Feb 04 17:35:22 2005 +0000
+++ b/.rootkeys Fri Feb 04 22:48:21 2005 +0000
@@ -940,6 +940,7 @@ 3ddb79bd0gVQYmL2zvuJnldvD0AGxQ xen/commo
 3e7f358awXBC3Vw-wFRwPw18qL1khg xen/common/string.c
 403a3edbejm33XLTGMuinKEwQBrOIg xen/common/trace.c
 3ddb79bd3zgV33PHdt-cgh3sxcb1hw xen/common/vsprintf.c
+4203fb92Qcy7mGpauBdq09J-WAqfoA xen/common/xmalloc.c
 3ddb79c0ppNeJtjC4va8j41ADCnchA xen/drivers/Makefile
 40715b2bi9gU43-cYzlmPDgreYQchw xen/drivers/acpi/Makefile
 40715b2bDxNCz5LFV8FAXihmYJZFUQ xen/drivers/acpi/acpi_ksyms.c
--- a/xen/arch/x86/setup.c Fri Feb 04 17:35:22 2005 +0000
+++ b/xen/arch/x86/setup.c Fri Feb 04 22:48:21 2005 +0000
@@ -597,10 +597,6 @@ void __init __start_xen(multiboot_info_t
 
     early_boot = 0;
 
-    /* Initialise the slab allocator. */
-    xmem_cache_init();
-    xmem_cache_sizes_init(max_page);
-
     start_of_day();
 
     grant_table_init();
--- a/xen/common/page_alloc.c Fri Feb 04 17:35:22 2005 +0000
+++ b/xen/common/page_alloc.c Fri Feb 04 22:48:21 2005 +0000
@@ -403,9 +403,8 @@ unsigned long alloc_xenheap_pages(unsign
 {
     unsigned long flags;
     struct pfn_info *pg;
-    int i, attempts = 0;
+    int i;
 
- retry:
     local_irq_save(flags);
     pg = alloc_heap_pages(MEMZONE_XEN, order);
     local_irq_restore(flags);
@@ -425,14 +424,7 @@ unsigned long alloc_xenheap_pages(unsign
     return (unsigned long)page_to_virt(pg);
 
  no_memory:
-    if ( attempts++ < 8 )
-    {
-        xmem_cache_reap();
-        goto retry;
-    }
-
     printk("Cannot handle page request order %d!\n", order);
-    dump_slabinfo();
     return 0;
 }
 
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/common/xmalloc.c Fri Feb 04 22:48:21 2005 +0000
@@ -0,0 +1,170 @@
+/* Simple allocator for Xen.  If larger than a page, simply use the
+ * page-order allocator.
+ *
+ * Copyright (C) 2005 Rusty Russell IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <xen/mm.h>
+#include <xen/spinlock.h>
+#include <xen/ac_timer.h>
+#include <xen/cache.h>
+
+#define BUG_ON(x) do { if (x) BUG(); }while(0)
+
+static LIST_HEAD(freelist);
+static spinlock_t freelist_lock = SPIN_LOCK_UNLOCKED;
+
+struct xmalloc_hdr
+{
+    /* Total including this hdr. */
+    size_t size;
+    struct list_head freelist;
+} __attribute__((__aligned__(SMP_CACHE_BYTES)));
+
+static void maybe_split(struct xmalloc_hdr *hdr, size_t size, size_t block)
+{
+    size_t leftover = block - size;
+
+    /* If enough left to make a block, put it on free list. */
+    if (leftover >= sizeof(struct xmalloc_hdr)) {
+        struct xmalloc_hdr *extra;
+
+        extra = (void *)hdr + size;
+        extra->size = leftover;
+        list_add(&extra->freelist, &freelist);
+    } else
+        size = block;
+
+    hdr->size = size;
+    /* Debugging aid. */
+    hdr->freelist.next = hdr->freelist.prev = NULL;
+}
+
+static void *xmalloc_new_page(size_t size)
+{
+    struct xmalloc_hdr *hdr;
+    unsigned long flags;
+
+    hdr = (void *)alloc_xenheap_pages(0);
+    if (!hdr)
+        return NULL;
+
+    spin_lock_irqsave(&freelist_lock, flags);
+    maybe_split(hdr, size, PAGE_SIZE);
+    spin_unlock_irqrestore(&freelist_lock, flags);
+    return hdr+1;
+}
+
+/* Big object?  Just use page allocator. */
+static void *xmalloc_whole_pages(size_t size)
+{
+    struct xmalloc_hdr *hdr;
+    unsigned int pageorder = get_order(size);
+
+    hdr = (void *)alloc_xenheap_pages(pageorder);
+    if (!hdr)
+        return NULL;
+
+    hdr->size = (1 << (pageorder + PAGE_SHIFT));
+    /* Debugging aid. */
+    hdr->freelist.next = hdr->freelist.prev = NULL;
+    return hdr+1;
+}
+
+/* Return size, increased to alignment with align. */
+static inline size_t align_up(size_t size, size_t align)
+{
+    return (size + align-1) & ~(align - 1);
+}
+
+void *_xmalloc(size_t size, size_t align)
+{
+    struct xmalloc_hdr *i;
+    unsigned long flags;
+
+    /* We currently always return cacheline aligned. */
+    BUG_ON(align > SMP_CACHE_BYTES);
+
+    /* Add room for header, pad to align next header. */
+    size += sizeof(struct xmalloc_hdr);
+    size = align_up(size, __alignof__(struct xmalloc_hdr));
+
+    /* For big allocs, give them whole pages. */
+    if (size >= PAGE_SIZE)
+        return xmalloc_whole_pages(size);
+
+    /* Search free list */
+    spin_lock_irqsave(&freelist_lock, flags);
+    list_for_each_entry(i, &freelist, freelist) {
+        if (i->size >= size) {
+            list_del(&i->freelist);
+            maybe_split(i, size, i->size);
+            spin_unlock_irqrestore(&freelist_lock, flags);
+            return i+1;
+        }
+    }
+    spin_unlock_irqrestore(&freelist_lock, flags);
+
+    /* Alloc a new page and return from that. */
+    return xmalloc_new_page(size);
+}
+
+void xfree(const void *p)
+{
+    unsigned long flags;
+    struct xmalloc_hdr *i, *tmp, *hdr;
+
+    if (!p)
+        return;
+
+    hdr = (struct xmalloc_hdr *)p - 1;
+
+    /* We know hdr will be on same page. */
+    BUG_ON(((long)p & PAGE_MASK) != ((long)hdr & PAGE_MASK));
+
+    /* Not previously freed. */
+    BUG_ON(hdr->freelist.next || hdr->freelist.prev);
+
+    /* Big allocs free directly. */
+    if (hdr->size >= PAGE_SIZE) {
+        free_xenheap_pages((unsigned long)hdr, get_order(hdr->size));
+        return;
+    }
+
+    /* Merge with other free block, or put in list. */
+    spin_lock_irqsave(&freelist_lock, flags);
+    list_for_each_entry_safe(i, tmp, &freelist, freelist) {
+        /* We follow this block?  Swallow it. */
+        if ((void *)i + i->size == (void *)hdr) {
+            list_del(&i->freelist);
+            i->size += hdr->size;
+            hdr = i;
+        }
+        /* It follows us?  Delete it and add it to us. */
+        if ((void *)hdr + hdr->size == (void *)i) {
+            list_del(&i->freelist);
+            hdr->size += i->size;
+        }
+    }
+
+    /* Did we free entire page? */
+    if (hdr->size == PAGE_SIZE) {
+        BUG_ON((((unsigned long)hdr) & (PAGE_SIZE-1)) != 0);
+        free_xenheap_pages((unsigned long)hdr, 0);
+    } else
+        list_add(&hdr->freelist, &freelist);
+    spin_unlock_irqrestore(&freelist_lock, flags);
+}
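For readers following the new allocator: every object handed out by _xmalloc() is preceded by a cacheline-aligned struct xmalloc_hdr, and align_up() rounds the padded size so that the header of the next block also lands on an aligned boundary. Below is a minimal, stand-alone sketch of that size arithmetic only; the struct and the SMP_CACHE_BYTES value are re-declared locally and are assumptions for illustration, not the hypervisor's build-time values.

```c
#include <stdio.h>
#include <stddef.h>

/* Stand-ins for the Xen definitions -- values are assumptions. */
#define SMP_CACHE_BYTES 64

struct list_head { struct list_head *next, *prev; };

struct xmalloc_hdr {
    size_t size;                  /* total block size, header included */
    struct list_head freelist;
} __attribute__((__aligned__(SMP_CACHE_BYTES)));

/* Same rounding helper as in xmalloc.c: align must be a power of two. */
static inline size_t align_up(size_t size, size_t align)
{
    return (size + align - 1) & ~(align - 1);
}

int main(void)
{
    size_t request = 100;         /* caller asks for 100 bytes */
    size_t size = request + sizeof(struct xmalloc_hdr);

    size = align_up(size, __alignof__(struct xmalloc_hdr));

    /* With a 64-byte header this prints "header 64 bytes, block 192 bytes":
     * 100 + 64 = 164, rounded up to 192, so whatever follows this block
     * starts on a cacheline boundary as well. */
    printf("header %zu bytes, block %zu bytes\n",
           sizeof(struct xmalloc_hdr), size);
    return 0;
}
```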
--- a/xen/include/xen/domain.h Fri Feb 04 17:35:22 2005 +0000
+++ b/xen/include/xen/domain.h Fri Feb 04 22:48:21 2005 +0000
@@ -6,8 +6,6 @@
  * Arch-specifics.
  */
 
-extern void domain_startofday(void);
-
 extern struct domain *arch_alloc_domain_struct(void);
 
 extern void arch_free_domain_struct(struct domain *d);
--- a/xen/include/xen/list.h Fri Feb 04 17:35:22 2005 +0000
+++ b/xen/include/xen/list.h Fri Feb 04 22:48:21 2005 +0000
@@ -174,5 +174,17 @@ static __inline__ void list_splice(struc
          pos = list_entry(pos->member.next, typeof(*pos), member), \
                  prefetch(pos->member.next))
 
+/**
+ * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry
+ * @pos:    the type * to use as a loop counter.
+ * @n:      another type * to use as temporary storage
+ * @head:   the head for your list.
+ * @member: the name of the list_struct within the struct.
+ */
+#define list_for_each_entry_safe(pos, n, head, member)                 \
+    for (pos = list_entry((head)->next, typeof(*pos), member),         \
+            n = list_entry(pos->member.next, typeof(*pos), member);    \
+         &pos->member != (head);                                       \
+         pos = n, n = list_entry(n->member.next, typeof(*n), member))
 #endif /* _LINUX_LIST_H */
 
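The macro added here mirrors the Linux list.h helper of the same name: the extra cursor @n is fetched before the loop body runs, so the body may safely list_del() the current entry. That is exactly what xfree() above does while coalescing free blocks. The following is a small user-space sketch of the pattern, with local copies of the list primitives and a hypothetical struct item, purely to illustrate why the _safe variant is needed when entries are unlinked mid-walk.

```c
#include <stdio.h>
#include <stddef.h>

/* Minimal user-space copies of the list primitives, for illustration only. */
struct list_head { struct list_head *next, *prev; };

#define LIST_HEAD_INIT(name) { &(name), &(name) }

static inline void list_add(struct list_head *new, struct list_head *head)
{
    new->next = head->next; new->prev = head;
    head->next->prev = new; head->next = new;
}

static inline void list_del(struct list_head *entry)
{
    entry->prev->next = entry->next;
    entry->next->prev = entry->prev;
}

#define list_entry(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

#define list_for_each_entry_safe(pos, n, head, member)                 \
    for (pos = list_entry((head)->next, typeof(*pos), member),         \
            n = list_entry(pos->member.next, typeof(*pos), member);    \
         &pos->member != (head);                                       \
         pos = n, n = list_entry(n->member.next, typeof(*n), member))

/* Hypothetical payload type. */
struct item { int value; struct list_head link; };

int main(void)
{
    struct list_head head = LIST_HEAD_INIT(head);
    struct item items[4] = { {1}, {2}, {3}, {4} };
    struct item *pos, *n;
    int i;

    for (i = 0; i < 4; i++)
        list_add(&items[i].link, &head);

    /* Safe variant: we may unlink the current entry because 'n' already
     * points at the next one; the plain iterator would chase a pointer
     * belonging to the entry we just removed. */
    list_for_each_entry_safe(pos, n, &head, link) {
        if (pos->value % 2 == 0)
            list_del(&pos->link);
        else
            printf("kept %d\n", pos->value);
    }
    return 0;
}
```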
--- a/xen/include/xen/slab.h Fri Feb 04 17:35:22 2005 +0000
+++ b/xen/include/xen/slab.h Fri Feb 04 22:48:21 2005 +0000
@@ -14,55 +14,31 @@
 
 #else
 
-typedef struct xmem_cache_s xmem_cache_t;
-
 #include <xen/mm.h>
 #include <xen/cache.h>
 #include <xen/types.h>
 
-/* Flags to pass to xmem_cache_create(). */
-/* NB. The first 3 are only valid when built with SLAB_DEBUG_SUPPORT. */
-#define SLAB_DEBUG_INITIAL  0x00000200UL /* Call constructor */
-#define SLAB_RED_ZONE       0x00000400UL /* Red zone objs in a cache */
-#define SLAB_POISON         0x00000800UL /* Poison objects */
-#define SLAB_NO_REAP        0x00001000UL /* never reap from the cache */
-#define SLAB_HWCACHE_ALIGN  0x00002000UL /* align obj on a cache line */
-
-/* Flags passed to a constructor function. */
-#define SLAB_CTOR_CONSTRUCTOR 0x001UL /* if not set, then deconstructor */
-#define SLAB_CTOR_ATOMIC      0x002UL /* tell cons. it can't sleep */
-#define SLAB_CTOR_VERIFY      0x004UL /* tell cons. it's a verify call */
+/* Allocate space for typed object. */
+#define xmalloc(_type) ((_type *)_xmalloc(sizeof(_type), __alignof__(_type)))
 
-extern void xmem_cache_init(void);
-extern void xmem_cache_sizes_init(unsigned long);
+/* Allocate space for array of typed objects. */
+#define xmalloc_array(_type, _num) ((_type *)_xmalloc_array(sizeof(_type), __alignof__(_type), _num))
 
-extern xmem_cache_t *xmem_find_general_cachep(size_t);
-extern xmem_cache_t *xmem_cache_create(
-    const char *, size_t, size_t, unsigned long,
-    void (*)(void *, xmem_cache_t *, unsigned long),
-    void (*)(void *, xmem_cache_t *, unsigned long));
-extern int xmem_cache_destroy(xmem_cache_t *);
-extern int xmem_cache_shrink(xmem_cache_t *);
-extern void *xmem_cache_alloc(xmem_cache_t *);
-extern void xmem_cache_free(xmem_cache_t *, void *);
+/* Allocate untyped storage. */
+#define xmalloc_bytes(_bytes) (_xmalloc(_bytes, SMP_CACHE_BYTES))
 
-extern void *_xmalloc(size_t);
+/* Free any of the above. */
 extern void xfree(const void *);
 
-extern int xmem_cache_reap(void);
-
-extern void dump_slabinfo();
-
-/* Allocate space for typed object. */
-#define xmalloc(_type) ((_type *)_xmalloc(sizeof(_type)))
-
-/* Allocate space for array of typed objects. */
-#define xmalloc_array(_type, _num) \
-((_type *)(((_num) > (UINT_MAX / sizeof(_type))) ? \
- NULL : _xmalloc((_num) * sizeof(_type))))
-
-/* Allocate untyped storage. */
-#define xmalloc_bytes(_bytes) (_xmalloc(_bytes))
+/* Underlying functions */
+extern void *_xmalloc(size_t size, size_t align);
+static inline void *_xmalloc_array(size_t size, size_t align, size_t num)
+{
+    /* Check for overflow. */
+    if (size && num > UINT_MAX / size)
+        return NULL;
+    return _xmalloc(size * num, align);
+}
 
 #endif /* __ARCH_HAS_SLAB_ALLOCATOR */
 
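With the slab caches gone, callers allocate through the typed macros instead of per-type caches. A hedged, user-space sketch of the new interface follows; _xmalloc is stubbed with malloc() here purely so the example runs outside Xen, and struct foo is a hypothetical caller-side type, not something defined by this changeset.

```c
#include <stdio.h>
#include <stdlib.h>
#include <limits.h>

/* User-space stand-ins; in the hypervisor these come from xen/slab.h
 * and xen/common/xmalloc.c. */
#define SMP_CACHE_BYTES 64

static void *_xmalloc(size_t size, size_t align)
{
    (void)align;                  /* the real allocator honours this */
    return malloc(size);
}

static inline void *_xmalloc_array(size_t size, size_t align, size_t num)
{
    /* Same overflow check as the new slab.h helper. */
    if (size && num > UINT_MAX / size)
        return NULL;
    return _xmalloc(size * num, align);
}

static void xfree(const void *p) { free((void *)p); }

#define xmalloc(_type)        ((_type *)_xmalloc(sizeof(_type), __alignof__(_type)))
#define xmalloc_array(_type, _num) \
    ((_type *)_xmalloc_array(sizeof(_type), __alignof__(_type), _num))
#define xmalloc_bytes(_bytes) (_xmalloc(_bytes, SMP_CACHE_BYTES))

/* Hypothetical caller-side type, for illustration. */
struct foo { int id; char name[32]; };

int main(void)
{
    struct foo *one   = xmalloc(struct foo);            /* single typed object    */
    struct foo *table = xmalloc_array(struct foo, 16);  /* overflow-checked array */
    void *raw         = xmalloc_bytes(256);             /* untyped storage        */

    printf("one=%p table=%p raw=%p\n", (void *)one, (void *)table, raw);

    xfree(raw);
    xfree(table);
    xfree(one);
    return 0;
}
```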