debuggers.hg

view xen/common/xmalloc.c @ 3691:32d29625d39b

bitkeeper revision 1.1159.212.88 (4203fbb5CoEmX_fzlXc90dxNw6gqoA)

Merge labyrinth.cl.cam.ac.uk:/auto/groups/xeno-xenod/BK/xen-unstable.bk
into labyrinth.cl.cam.ac.uk:/auto/groups/xeno/users/iap10/xeno-clone/xen-unstable.bkX
author iap10@labyrinth.cl.cam.ac.uk
date Fri Feb 04 22:48:21 2005 +0000 (2005-02-04)
parents 6e6976c1a545
children 4294cfa9fad3
/* Simple allocator for Xen.  If larger than a page, simply use the
 * page-order allocator.
 *
 * Copyright (C) 2005 Rusty Russell IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <xen/mm.h>
#include <xen/spinlock.h>
#include <xen/ac_timer.h>
#include <xen/cache.h>

#define BUG_ON(x) do { if (x) BUG(); } while(0)

static LIST_HEAD(freelist);
static spinlock_t freelist_lock = SPIN_LOCK_UNLOCKED;

struct xmalloc_hdr
{
    /* Total including this hdr. */
    size_t size;
    struct list_head freelist;
} __attribute__((__aligned__(SMP_CACHE_BYTES)));

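/*
 * Note: the aligned attribute pads the header to a whole number of cache
 * lines, and _xmalloc() below rounds every block size up to that same
 * alignment, so the pointer handed back to callers (hdr + 1) is always
 * cacheline aligned as well.
 */
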
static void maybe_split(struct xmalloc_hdr *hdr, size_t size, size_t block)
{
    size_t leftover = block - size;

    /* If enough left to make a block, put it on free list. */
    if (leftover >= sizeof(struct xmalloc_hdr)) {
        struct xmalloc_hdr *extra;

        extra = (void *)hdr + size;
        extra->size = leftover;
        list_add(&extra->freelist, &freelist);
    } else
        size = block;

    hdr->size = size;
    /* Debugging aid. */
    hdr->freelist.next = hdr->freelist.prev = NULL;
}

static void *xmalloc_new_page(size_t size)
{
    struct xmalloc_hdr *hdr;
    unsigned long flags;

    hdr = (void *)alloc_xenheap_pages(0);
    if (!hdr)
        return NULL;

    spin_lock_irqsave(&freelist_lock, flags);
    maybe_split(hdr, size, PAGE_SIZE);
    spin_unlock_irqrestore(&freelist_lock, flags);
    return hdr+1;
}

/* Big object?  Just use page allocator. */
static void *xmalloc_whole_pages(size_t size)
{
    struct xmalloc_hdr *hdr;
    unsigned int pageorder = get_order(size);

    hdr = (void *)alloc_xenheap_pages(pageorder);
    if (!hdr)
        return NULL;

    hdr->size = (1 << (pageorder + PAGE_SHIFT));
    /* Debugging aid. */
    hdr->freelist.next = hdr->freelist.prev = NULL;
    return hdr+1;
}

/* Return size, increased to alignment with align. */
static inline size_t align_up(size_t size, size_t align)
{
    return (size + align-1) & ~(align - 1);
}

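/*
 * Example: align_up(13, 8) == 16.  The mask trick assumes align is a
 * power of two, which holds for the alignof/cacheline values used here.
 */
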
void *_xmalloc(size_t size, size_t align)
{
    struct xmalloc_hdr *i;
    unsigned long flags;

    /* We currently always return cacheline aligned. */
    BUG_ON(align > SMP_CACHE_BYTES);

    /* Add room for header, pad to align next header. */
    size += sizeof(struct xmalloc_hdr);
    size = align_up(size, __alignof__(struct xmalloc_hdr));

    /* For big allocs, give them whole pages. */
    if (size >= PAGE_SIZE)
        return xmalloc_whole_pages(size);

    /* Search free list */
    spin_lock_irqsave(&freelist_lock, flags);
    list_for_each_entry(i, &freelist, freelist) {
        if (i->size >= size) {
            list_del(&i->freelist);
            maybe_split(i, size, i->size);
            spin_unlock_irqrestore(&freelist_lock, flags);
            return i+1;
        }
    }
    spin_unlock_irqrestore(&freelist_lock, flags);

    /* Alloc a new page and return from that. */
    return xmalloc_new_page(size);
}

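/*
 * Note: _xmalloc() above does a first-fit scan of the free list with the
 * lock held and interrupts disabled (spin_lock_irqsave), so it is usable
 * from IRQ context at the cost of an O(number of free blocks) search.
 */
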
void xfree(const void *p)
{
    unsigned long flags;
    struct xmalloc_hdr *i, *tmp, *hdr;

    if (!p)
        return;

    hdr = (struct xmalloc_hdr *)p - 1;

    /* We know hdr will be on same page. */
    BUG_ON(((long)p & PAGE_MASK) != ((long)hdr & PAGE_MASK));

    /* Not previously freed. */
    BUG_ON(hdr->freelist.next || hdr->freelist.prev);

    /* Big allocs free directly. */
    if (hdr->size >= PAGE_SIZE) {
        free_xenheap_pages((unsigned long)hdr, get_order(hdr->size));
        return;
    }

    /* Merge with other free block, or put in list. */
    spin_lock_irqsave(&freelist_lock, flags);
    list_for_each_entry_safe(i, tmp, &freelist, freelist) {
        /* We follow this block?  Swallow it. */
        if ((void *)i + i->size == (void *)hdr) {
            list_del(&i->freelist);
            i->size += hdr->size;
            hdr = i;
        }
        /* It follows us?  Delete it and add it to us. */
        if ((void *)hdr + hdr->size == (void *)i) {
            list_del(&i->freelist);
            hdr->size += i->size;
        }
    }

    /* Did we free entire page? */
    if (hdr->size == PAGE_SIZE) {
        BUG_ON((((unsigned long)hdr) & (PAGE_SIZE-1)) != 0);
        free_xenheap_pages((unsigned long)hdr, 0);
    } else
        list_add(&hdr->freelist, &freelist);
    spin_unlock_irqrestore(&freelist_lock, flags);
}
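
For reference, callers normally reach this allocator through typed wrapper macros rather than calling _xmalloc() directly. Below is a minimal caller sketch; the wrapper macro shown here mirrors the xmalloc(type) style Xen exposes, but the macro definition, the struct foo type and make_foo() are illustrative assumptions, not part of this file.

/* Illustrative only: typed wrapper around _xmalloc(), in the style of Xen's
 * xmalloc(type) macro.  Releases go through xfree() from this file. */
#define xmalloc(type) ((type *)_xmalloc(sizeof(type), __alignof__(type)))

struct foo {
    int a, b;
};

static struct foo *make_foo(void)
{
    /* Cacheline-aligned block of at least sizeof(struct foo) bytes. */
    struct foo *f = xmalloc(struct foo);

    if (!f)
        return NULL;

    f->a = 1;
    f->b = 2;
    return f;    /* caller eventually releases with xfree(f) */
}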