
xen/common/xmalloc.c @ 3705:4294cfa9fad3 (debuggers.hg)

bitkeeper revision 1.1159.212.95 (4204aa0ee0re5Xx1zWrJ9ejxzgRs3w)

Various cleanups. Remove PDB pending simpler GDB stub and/or NetBSD debugger.
Force emacs mode to appropriate tabbing in various files.
Signed-off-by: keir.fraser@cl.cam.ac.uk
author   kaf24@scramble.cl.cam.ac.uk
date     Sat Feb 05 11:12:14 2005 +0000
parents  32d29625d39b
children 88957a238191
/* -*- Mode:C; c-basic-offset:8; tab-width:8; indent-tabs-mode:t -*- */
/******************************************************************************
 * Simple allocator for Xen.  If larger than a page, simply use the
 * page-order allocator.
 *
 * Copyright (C) 2005 Rusty Russell IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <xen/mm.h>
#include <xen/spinlock.h>
#include <xen/ac_timer.h>
#include <xen/cache.h>

#define BUG_ON(x) do { if (x) BUG(); } while(0)

static LIST_HEAD(freelist);
static spinlock_t freelist_lock = SPIN_LOCK_UNLOCKED;

struct xmalloc_hdr
{
        /* Total including this hdr. */
        size_t size;
        struct list_head freelist;
} __attribute__((__aligned__(SMP_CACHE_BYTES)));
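
/*
 * Note: the header is padded out to SMP_CACHE_BYTES and _xmalloc()
 * returns hdr+1, so every pointer handed out is cache-line aligned;
 * that is what lets _xmalloc() assert align <= SMP_CACHE_BYTES.  It
 * also makes every block size a multiple of the header alignment, so
 * the leftover block carved off by maybe_split() stays aligned too.
 */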
static void maybe_split(struct xmalloc_hdr *hdr, size_t size, size_t block)
{
        size_t leftover = block - size;

        /* If enough left to make a block, put it on free list. */
        if (leftover >= sizeof(struct xmalloc_hdr)) {
                struct xmalloc_hdr *extra;

                extra = (void *)hdr + size;
                extra->size = leftover;
                list_add(&extra->freelist, &freelist);
        } else
                size = block;

        hdr->size = size;
        /* Debugging aid. */
        hdr->freelist.next = hdr->freelist.prev = NULL;
}
static void *xmalloc_new_page(size_t size)
{
        struct xmalloc_hdr *hdr;
        unsigned long flags;

        hdr = (void *)alloc_xenheap_pages(0);
        if (!hdr)
                return NULL;

        spin_lock_irqsave(&freelist_lock, flags);
        maybe_split(hdr, size, PAGE_SIZE);
        spin_unlock_irqrestore(&freelist_lock, flags);
        return hdr+1;
}
/* Big object?  Just use page allocator. */
static void *xmalloc_whole_pages(size_t size)
{
        struct xmalloc_hdr *hdr;
        unsigned int pageorder = get_order(size);

        hdr = (void *)alloc_xenheap_pages(pageorder);
        if (!hdr)
                return NULL;

        hdr->size = (1 << (pageorder + PAGE_SHIFT));
        /* Debugging aid. */
        hdr->freelist.next = hdr->freelist.prev = NULL;
        return hdr+1;
}
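
/*
 * Note: hdr->size doubles as the routing tag for xfree(): whole-page
 * allocations record a size of at least PAGE_SIZE, which is how
 * xfree() knows to return them straight to free_xenheap_pages()
 * instead of the free list.
 */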
/* Return size, increased to alignment with align. */
static inline size_t align_up(size_t size, size_t align)
{
        return (size + align-1) & ~(align - 1);
}
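
/* Example: align_up(13, 8) = (13 + 7) & ~7 = 16.  The mask trick
 * requires align to be a power of two, as the cache-line and
 * __alignof__ values used here always are. */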
void *_xmalloc(size_t size, size_t align)
{
        struct xmalloc_hdr *i;
        unsigned long flags;

        /* We currently always return cacheline aligned. */
        BUG_ON(align > SMP_CACHE_BYTES);

        /* Add room for header, pad to align next header. */
        size += sizeof(struct xmalloc_hdr);
        size = align_up(size, __alignof__(struct xmalloc_hdr));

        /* For big allocs, give them whole pages. */
        if (size >= PAGE_SIZE)
                return xmalloc_whole_pages(size);

        /* Search free list */
        spin_lock_irqsave(&freelist_lock, flags);
        list_for_each_entry(i, &freelist, freelist) {
                if (i->size >= size) {
                        list_del(&i->freelist);
                        maybe_split(i, size, i->size);
                        spin_unlock_irqrestore(&freelist_lock, flags);
                        return i+1;
                }
        }
        spin_unlock_irqrestore(&freelist_lock, flags);

        /* Alloc a new page and return from that. */
        return xmalloc_new_page(size);
}
void xfree(const void *p)
{
        unsigned long flags;
        struct xmalloc_hdr *i, *tmp, *hdr;

        if (!p)
                return;

        hdr = (struct xmalloc_hdr *)p - 1;

        /* We know hdr will be on same page. */
        BUG_ON(((long)p & PAGE_MASK) != ((long)hdr & PAGE_MASK));

        /* Not previously freed. */
        BUG_ON(hdr->freelist.next || hdr->freelist.prev);

        /* Big allocs free directly. */
        if (hdr->size >= PAGE_SIZE) {
                free_xenheap_pages((unsigned long)hdr, get_order(hdr->size));
                return;
        }

        /* Merge with other free block, or put in list. */
        spin_lock_irqsave(&freelist_lock, flags);
        list_for_each_entry_safe(i, tmp, &freelist, freelist) {
                /* We follow this block?  Swallow it. */
                if ((void *)i + i->size == (void *)hdr) {
                        list_del(&i->freelist);
                        i->size += hdr->size;
                        hdr = i;
                }
                /* It follows us?  Delete it and add it to us. */
                if ((void *)hdr + hdr->size == (void *)i) {
                        list_del(&i->freelist);
                        hdr->size += i->size;
                }
        }

        /* Did we free entire page? */
        if (hdr->size == PAGE_SIZE) {
                BUG_ON((((unsigned long)hdr) & (PAGE_SIZE-1)) != 0);
                free_xenheap_pages((unsigned long)hdr, 0);
        } else
                list_add(&hdr->freelist, &freelist);
        spin_unlock_irqrestore(&freelist_lock, flags);
}
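
For illustration, a minimal caller might look like the sketch below. Only the
_xmalloc()/xfree() entry points are defined by this file; the struct and the
helper functions here are hypothetical, and the type-safe xmalloc(type)
wrapper mentioned in the comment is assumed to be defined elsewhere in the
tree as _xmalloc(sizeof(type), __alignof__(type)).

/* Hypothetical usage sketch; not part of xmalloc.c. */
struct foo {
        int bar;
        char name[32];
};

static struct foo *make_foo(void)
{
        /* What an assumed xmalloc(struct foo) wrapper would expand to. */
        struct foo *f = _xmalloc(sizeof(struct foo), __alignof__(struct foo));

        if (!f)
                return NULL;    /* allocation can fail */
        f->bar = 1;
        return f;
}

static void destroy_foo(struct foo *f)
{
        xfree(f);               /* xfree(NULL) is a safe no-op */
}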