debuggers.hg

view xen/include/xen/tmem_xen.h @ 20812:277bfc2d47b1

tmem: Reduce verbosity on failed memory allocations.

Reduce tmem complaints, per Jan's concerns in this thread:
http://lists.xensource.com/archives/html/xen-devel/2010-01/msg00155.html
Now complain only if tmem actually has memory to relinquish and the
memory request has order > 0.

Signed-off-by: Dan Magenheimer <dan.magenheimer@oracle.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Jan 08 11:25:22 2010 +0000 (2010-01-08)
parents 18342df0f9dc
children a3fa6d444b25
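
The change itself lands in the allocator's failure path, not in this header. A minimal sketch of the condition described above, assuming a caller that has the failed request's `order` in hand; the printk text is illustrative, not the patch itself:

    /* complain only when tmem holds relinquishable memory and the failed
     * request was for more than one page (order > 0) */
    if ( order > 0 && tmh_freeable_pages() )
        printk("tmem: unable to satisfy an order-%u allocation\n", order);

tmh_freeable_pages() is defined later in this header.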
line source
/******************************************************************************
 * tmem_xen.h
 *
 * Xen-specific Transcendent memory
 *
 * Copyright (c) 2009, Dan Magenheimer, Oracle Corp.
 */

#ifndef __XEN_TMEM_XEN_H__
#define __XEN_TMEM_XEN_H__

#include <xen/config.h>
#include <xen/mm.h> /* heap alloc/free */
#include <xen/xmalloc.h> /* xmalloc/xfree */
#include <xen/sched.h>  /* struct domain */
#include <xen/guest_access.h> /* copy_from_guest */
#include <xen/hash.h> /* hash_long */
#include <public/tmem.h>
#ifdef CONFIG_COMPAT
#include <compat/tmem.h>
#endif

struct tmem_host_dependent_client {
    struct domain *domain;
    struct xmem_pool *persistent_pool;
};
typedef struct tmem_host_dependent_client tmh_client_t;

#define IS_PAGE_ALIGNED(addr) \
    ((void *)((((unsigned long)addr + (PAGE_SIZE - 1)) & PAGE_MASK)) == addr)
#define IS_VALID_PAGE(_pi)  ( mfn_valid(page_to_mfn(_pi)) )
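/* IS_PAGE_ALIGNED rounds addr up to the next page boundary and checks that
 * this leaves it unchanged: with 4KiB pages, 0x2000 rounds to 0x2000
 * (aligned) while 0x2001 rounds to 0x3000 (not aligned). IS_VALID_PAGE
 * checks that the MFN behind a page_info refers to valid memory. */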
extern struct xmem_pool *tmh_mempool;
extern unsigned int tmh_mempool_maxalloc;
extern struct page_list_head tmh_page_list;
extern spinlock_t tmh_page_list_lock;
extern unsigned long tmh_page_list_pages;
extern atomic_t freeable_page_count;

extern spinlock_t tmem_lock;
extern spinlock_t tmem_spinlock;
extern rwlock_t tmem_rwlock;

extern void tmh_copy_page(char *to, char *from);
extern int tmh_init(void);
extern tmh_client_t *tmh_client_init(void);
extern void tmh_client_destroy(tmh_client_t *);
#define tmh_hash hash_long

extern void tmh_release_avail_pages_to_host(void);
extern void tmh_scrub_page(struct page_info *pi, unsigned int memflags);

extern int opt_tmem_compress;
static inline int tmh_compression_enabled(void)
{
    return opt_tmem_compress;
}

extern int opt_tmem_shared_auth;
static inline int tmh_shared_auth(void)
{
    return opt_tmem_shared_auth;
}

extern int opt_tmem;
static inline int tmh_enabled(void)
{
    return opt_tmem;
}

extern int opt_tmem_lock;

extern int opt_tmem_flush_dups;

/*
 * Memory free page list management
 */

static inline struct page_info *tmh_page_list_get(void)
{
    struct page_info *pi;

    spin_lock(&tmh_page_list_lock);
    if ( (pi = page_list_remove_head(&tmh_page_list)) != NULL )
        tmh_page_list_pages--;
    spin_unlock(&tmh_page_list_lock);
    ASSERT((pi == NULL) || IS_VALID_PAGE(pi));
    return pi;
}

static inline void tmh_page_list_put(struct page_info *pi)
{
    ASSERT(IS_VALID_PAGE(pi));
    spin_lock(&tmh_page_list_lock);
    page_list_add(pi, &tmh_page_list);
    tmh_page_list_pages++;
    spin_unlock(&tmh_page_list_lock);
}

static inline unsigned long tmh_avail_pages(void)
{
    return tmh_page_list_pages;
}
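/* Hedged usage sketch (illustrative only, hence #if 0; the function name
 * is hypothetical): a round trip through the free-page list above. */
#if 0
static void example_page_list_roundtrip(void)
{
    struct page_info *pi = tmh_page_list_get(); /* NULL if list is empty */

    if ( pi != NULL )
        tmh_page_list_put(pi); /* give it back; counters stay balanced */
}
#endif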
/*
 * Memory allocation for persistent data
 */

static inline bool_t domain_fully_allocated(struct domain *d)
{
    return ( d->tot_pages >= d->max_pages );
}
#define tmh_client_memory_fully_allocated(_pool) \
    domain_fully_allocated(_pool->client->tmh->domain)

static inline void *_tmh_alloc_subpage_thispool(struct xmem_pool *cmem_mempool,
                                                size_t size, size_t align)
{
#if 0
    if ( d->tot_pages >= d->max_pages )
        return NULL;
#endif
#ifdef __i386__
    return _xmalloc(size,align);
#else
    ASSERT( size < tmh_mempool_maxalloc );
    if ( cmem_mempool == NULL )
        return NULL;
    return xmem_pool_alloc(size, cmem_mempool);
#endif
}
#define tmh_alloc_subpage_thispool(_pool, _s, _a) \
    _tmh_alloc_subpage_thispool(_pool->client->tmh->persistent_pool, \
                                _s, _a)
static inline void _tmh_free_subpage_thispool(struct xmem_pool *cmem_mempool,
                                              void *ptr, size_t size)
{
#ifdef __i386__
    xfree(ptr);
#else
    ASSERT( size < tmh_mempool_maxalloc );
    ASSERT( cmem_mempool != NULL );
    xmem_pool_free(ptr,cmem_mempool);
#endif
}
#define tmh_free_subpage_thispool(_pool, _p, _s) \
    _tmh_free_subpage_thispool(_pool->client->tmh->persistent_pool, _p, _s)
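/* Hedged sketch of pairing the per-pool subpage allocator with its free
 * routine; struct tmem_pool and the caller are hypothetical here (the
 * real callers live in tmem.c), hence #if 0. */
#if 0
static void example_persistent_subpage(struct tmem_pool *pool)
{
    void *p = tmh_alloc_subpage_thispool(pool, 64, sizeof(void *));

    if ( p != NULL ) /* NULL if the client has no persistent pool */
        tmh_free_subpage_thispool(pool, p, 64);
}
#endif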
static inline struct page_info *_tmh_alloc_page_thispool(struct domain *d)
{
    struct page_info *pi;

    /* Note that this tot_pages check is not protected by d->page_alloc_lock,
     * so it may race and periodically fail in donate_page or
     * alloc_domheap_pages. That's OK... neither failure is a problem,
     * though it is chatty if log_lvl is set. */
    if ( d->tot_pages >= d->max_pages )
        return NULL;

    if ( tmh_page_list_pages )
    {
        if ( (pi = tmh_page_list_get()) != NULL )
        {
            if ( donate_page(d,pi,0) == 0 )
                goto out;
            else
                tmh_page_list_put(pi);
        }
    }

    pi = alloc_domheap_pages(d,0,MEMF_tmem);

out:
    ASSERT((pi == NULL) || IS_VALID_PAGE(pi));
    return pi;
}
#define tmh_alloc_page_thispool(_pool) \
    _tmh_alloc_page_thispool(_pool->client->tmh->domain)

static inline void _tmh_free_page_thispool(struct page_info *pi)
{
    struct domain *d = page_get_owner(pi);

    ASSERT(IS_VALID_PAGE(pi));
    if ( (d == NULL) || steal_page(d,pi,0) == 0 )
        tmh_page_list_put(pi);
    else
    {
        scrub_one_page(pi);
        ASSERT((pi->count_info & ~(PGC_allocated | 1)) == 0);
        free_domheap_pages(pi,0);
    }
}
#define tmh_free_page_thispool(_pool,_pg) \
    _tmh_free_page_thispool(_pg)
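/* Hedged sketch (illustrative, #if 0): a full-page allocation charged to
 * the pool's owning domain, then returned. */
#if 0
static void example_pool_page(struct tmem_pool *pool)
{
    struct page_info *pi = tmh_alloc_page_thispool(pool);

    if ( pi != NULL ) /* NULL if the domain is at its max_pages limit */
        tmh_free_page_thispool(pool, pi);
}
#endif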
/*
 * Memory allocation for ephemeral (non-persistent) data
 */

static inline void *tmh_alloc_subpage(void *pool, size_t size,
                                      size_t align)
{
#ifdef __i386__
    ASSERT( size < PAGE_SIZE );
    return _xmalloc(size, align);
#else
    ASSERT( size < tmh_mempool_maxalloc );
    ASSERT( tmh_mempool != NULL );
    return xmem_pool_alloc(size, tmh_mempool);
#endif
}

static inline void tmh_free_subpage(void *ptr, size_t size)
{
#ifdef __i386__
    ASSERT( size < PAGE_SIZE );
    xfree(ptr);
#else
    ASSERT( size < tmh_mempool_maxalloc );
    xmem_pool_free(ptr,tmh_mempool);
#endif
}

static inline struct page_info *tmh_alloc_page(void *pool, int no_heap)
{
    struct page_info *pi = tmh_page_list_get();

    if ( pi == NULL && !no_heap )
        pi = alloc_domheap_pages(0,0,MEMF_tmem);
    ASSERT((pi == NULL) || IS_VALID_PAGE(pi));
    if ( pi != NULL && !no_heap )
        atomic_inc(&freeable_page_count);
    return pi;
}

static inline void tmh_free_page(struct page_info *pi)
{
    ASSERT(IS_VALID_PAGE(pi));
    tmh_page_list_put(pi);
    atomic_dec(&freeable_page_count);
}
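/* Hedged sketch (illustrative, #if 0): ephemeral allocations come from the
 * shared tmh_mempool / page list rather than any one domain's allocation. */
#if 0
static void example_ephemeral(void)
{
    struct page_info *pi = tmh_alloc_page(NULL, 0); /* may fall back to heap */
    void *obj = tmh_alloc_subpage(NULL, 32, sizeof(void *));

    if ( obj != NULL )
        tmh_free_subpage(obj, 32);
    if ( pi != NULL )
        tmh_free_page(pi);
}
#endif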
static inline unsigned int tmem_subpage_maxsize(void)
{
    return tmh_mempool_maxalloc;
}

static inline unsigned long tmh_freeable_pages(void)
{
    return tmh_avail_pages() + _atomic_read(freeable_page_count);
}

static inline unsigned long tmh_free_mb(void)
{
    return (tmh_avail_pages() + total_free_pages()) >> (20 - PAGE_SHIFT);
}
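/* Worked example for tmh_free_mb: with 4KiB pages, PAGE_SHIFT is 12, so the
 * shift by (20 - PAGE_SHIFT) = 8 divides by the 256 pages per MiB;
 * e.g. 131072 free pages >> 8 == 512 MiB. */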
/*
 * Memory allocation for "infrastructure" data
 */

static inline void *tmh_alloc_infra(size_t size, size_t align)
{
    return _xmalloc(size,align);
}

static inline void tmh_free_infra(void *p)
{
    xfree(p);
}

#define tmh_lock_all  opt_tmem_lock
#define tmh_flush_dups  opt_tmem_flush_dups
#define tmh_called_from_tmem(_memflags) (_memflags & MEMF_tmem)
277 /* "Client" (==domain) abstraction */
279 struct client;
280 typedef domid_t cli_id_t;
281 typedef struct domain tmh_cli_ptr_t;
282 typedef struct page_info pfp_t;
284 /* this appears to be unreliable when a domain is being shut down */
285 static inline struct client *tmh_client_from_cli_id(cli_id_t cli_id)
286 {
287 struct domain *d = get_domain_by_id(cli_id); /* incs d->refcnt! */
288 if (d == NULL)
289 return NULL;
290 return (struct client *)(d->tmem);
291 }
293 static inline struct client *tmh_client_from_current(void)
294 {
295 return (struct client *)(current->domain->tmem);
296 }
298 #define tmh_client_is_dying(_client) (!!_client->tmh->domain->is_dying)
300 static inline cli_id_t tmh_get_cli_id_from_current(void)
301 {
302 return current->domain->domain_id;
303 }
305 static inline tmh_cli_ptr_t *tmh_get_cli_ptr_from_current(void)
306 {
307 return current->domain;
308 }
310 static inline void tmh_set_client_from_id(struct client *client,cli_id_t cli_id)
311 {
312 struct domain *d = get_domain_by_id(cli_id);
313 d->tmem = client;
314 }
316 static inline bool_t tmh_current_is_privileged(void)
317 {
318 return IS_PRIV(current->domain);
319 }
321 /* these typedefs are in the public/tmem.h interface
322 typedef XEN_GUEST_HANDLE(void) cli_mfn_t;
323 typedef XEN_GUEST_HANDLE(char) cli_va_t;
324 */
325 typedef XEN_GUEST_HANDLE(tmem_op_t) tmem_cli_op_t;
static inline int tmh_get_tmemop_from_client(tmem_op_t *op, tmem_cli_op_t uops)
{
#ifdef CONFIG_COMPAT
    if ( is_pv_32on64_vcpu(current) )
    {
        int rc;
        enum XLAT_tmem_op_u u;
        tmem_op_compat_t cop;

        rc = copy_from_guest(&cop, guest_handle_cast(uops, void), 1);
        if ( rc )
            return rc;
        switch ( cop.cmd )
        {
        case TMEM_NEW_POOL:    u = XLAT_tmem_op_u_new;  break;
        case TMEM_CONTROL:     u = XLAT_tmem_op_u_ctrl; break;
        case TMEM_AUTH:        u = XLAT_tmem_op_u_new;  break;
        case TMEM_RESTORE_NEW: u = XLAT_tmem_op_u_new;  break;
        default:               u = XLAT_tmem_op_u_gen;  break;
        }
#define XLAT_tmem_op_HNDL_u_ctrl_buf(_d_, _s_) \
        guest_from_compat_handle((_d_)->u.ctrl.buf, (_s_)->u.ctrl.buf)
        XLAT_tmem_op(op, &cop);
#undef XLAT_tmem_op_HNDL_u_ctrl_buf
        return 0;
    }
#endif
    return copy_from_guest(op, uops, 1);
}
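/* Hedged sketch (illustrative, #if 0): how a hypercall entry point might use
 * the helper above; the function name is hypothetical (the real dispatcher
 * is do_tmem_op() in tmem.c). */
#if 0
static long example_tmem_op_entry(tmem_cli_op_t uops)
{
    tmem_op_t op;

    if ( tmh_get_tmemop_from_client(&op, uops) != 0 )
        return -EFAULT; /* guest buffer could not be copied/translated */
    /* ... dispatch on op.cmd ... */
    return 0;
}
#endif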
static inline void tmh_copy_to_client_buf_offset(tmem_cli_va_t clibuf, int off,
                                                 char *tmembuf, int len)
{
    copy_to_guest_offset(clibuf,off,tmembuf,len);
}

#define TMH_CLI_ID_NULL ((cli_id_t)((domid_t)-1L))

#define tmh_cli_id_str "domid"
#define tmh_client_str "domain"

extern int tmh_decompress_to_client(tmem_cli_mfn_t,void*,size_t,void*);

extern int tmh_compress_from_client(tmem_cli_mfn_t,void**,size_t *,void*);

extern int tmh_copy_from_client(pfp_t *pfp,
    tmem_cli_mfn_t cmfn, uint32_t tmem_offset,
    uint32_t pfn_offset, uint32_t len, void *cva);

extern int tmh_copy_to_client(tmem_cli_mfn_t cmfn, pfp_t *pfp,
    uint32_t tmem_offset, uint32_t pfn_offset, uint32_t len, void *cva);
#define TMEM_PERF
#ifdef TMEM_PERF
#define DECL_CYC_COUNTER(x) \
    uint64_t x##_sum_cycles = 0, x##_count = 0; \
    uint32_t x##_min_cycles = 0x7fffffff, x##_max_cycles = 0;
#define EXTERN_CYC_COUNTER(x) \
    extern uint64_t x##_sum_cycles, x##_count; \
    extern uint32_t x##_min_cycles, x##_max_cycles;
#define DECL_LOCAL_CYC_COUNTER(x) \
    int64_t x##_start = 0
#define START_CYC_COUNTER(x) x##_start = get_cycles()
#define DUP_START_CYC_COUNTER(x,y) x##_start = y##_start
/* following might race, but since it is advisory only, don't care */
#define END_CYC_COUNTER(x) \
    do { \
      x##_start = get_cycles() - x##_start; \
      if (x##_start > 0 && x##_start < 1000000000) { \
        x##_sum_cycles += x##_start; x##_count++; \
        if ((uint32_t)x##_start < x##_min_cycles) x##_min_cycles = x##_start; \
        if ((uint32_t)x##_start > x##_max_cycles) x##_max_cycles = x##_start; \
      } \
    } while (0)
#define END_CYC_COUNTER_CLI(x,y) \
    do { \
      x##_start = get_cycles() - x##_start; \
      if (x##_start > 0 && x##_start < 1000000000) { \
        x##_sum_cycles += x##_start; x##_count++; \
        if ((uint32_t)x##_start < x##_min_cycles) x##_min_cycles = x##_start; \
        if ((uint32_t)x##_start > x##_max_cycles) x##_max_cycles = x##_start; \
        y->total_cycles += x##_start; \
      } \
    } while (0)
#define RESET_CYC_COUNTER(x) { x##_sum_cycles = 0, x##_count = 0; \
    x##_min_cycles = 0x7fffffff, x##_max_cycles = 0; }
#define SCNPRINTF_CYC_COUNTER(buf,size,x,tag) \
    scnprintf(buf,size, \
    tag"n:%"PRIu64","tag"t:%"PRIu64","tag"x:%"PRId32","tag"m:%"PRId32",", \
    x##_count,x##_sum_cycles,x##_max_cycles,x##_min_cycles)
#else
#define DECL_CYC_COUNTER(x)
#define EXTERN_CYC_COUNTER(x) \
    extern uint64_t x##_sum_cycles, x##_count; \
    extern uint32_t x##_min_cycles, x##_max_cycles;
#define DECL_LOCAL_CYC_COUNTER(x) do { } while (0)
#define START_CYC_COUNTER(x) do { } while (0)
#define DUP_START_CYC_COUNTER(x,y) do { } while (0)
#define END_CYC_COUNTER(x) do { } while (0)
#define END_CYC_COUNTER_CLI(x,y) do { } while (0)
#define SCNPRINTF_CYC_COUNTER(buf,size,x,tag) (0)
#define RESET_CYC_COUNTER(x) do { } while (0)
#endif
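/* Hedged usage sketch of the counter macros (illustrative, #if 0): declare a
 * counter at file scope, bracket the timed region, then format the
 * statistics; the names and "ex" tag are hypothetical. */
#if 0
DECL_CYC_COUNTER(example_op);

static int example_timed_op(char *buf, int size)
{
    DECL_LOCAL_CYC_COUNTER(example_op);

    START_CYC_COUNTER(example_op);
    /* ... the work being measured ... */
    END_CYC_COUNTER(example_op);
    return SCNPRINTF_CYC_COUNTER(buf, size, example_op, "ex");
}
#endif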
#endif /* __XEN_TMEM_XEN_H__ */