debuggers.hg
changeset 22904:9a6458e0c3f5
libxc: maintain a small, per-handle, cache of hypercall buffer memory
Constantly m(un)locking memory can have significant overhead on
systems with large numbers of CPUs. This was previously fixed by
20841:fbe8f32fa257 but this was dropped during the transition to
hypercall buffers.
Introduce a small cache of single page hypercall buffer allocations
which can be reused to avoid this overhead.
Add some statistics tracking to the hypercall buffer allocations.
The cache size of 4 was chosen based on these statistics since they
indicated that 2 pages was sufficient to satisfy all concurrent single
page hypercall buffer allocations seen during "xl create", "xl
shutdown" and "xl destroy" of both a PV and HVM guest, therefore 4
pages should cover the majority of important cases.
This fixes http://bugzilla.xensource.com/bugzilla/show_bug.cgi?id=1719.
Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
Reported-by: Zheng, Shaohui <shaohui.zheng@intel.com>
Tested-by: Haitao Shan <maillists.shan@gmail.com>
Committed-by: Ian Jackson <ian.jackson@eu.citrix.com>
Constantly m(un)locking memory can have significant overhead on
systems with large numbers of CPUs. This was previously fixed by
20841:fbe8f32fa257 but this was dropped during the transition to
hypercall buffers.
Introduce a small cache of single page hypercall buffer allocations
which can be reused to avoid this overhead.
Add some statistics tracking to the hypercall buffer allocations.
The cache size of 4 was chosen based on these statistics since they
indicated that 2 pages was sufficient to satisfy all concurrent single
page hypercall buffer allocations seen during "xl create", "xl
shutdown" and "xl destroy" of both a PV and HVM guest, therefore 4
pages should cover the majority of important cases.
This fixes http://bugzilla.xensource.com/bugzilla/show_bug.cgi?id=1719.
Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
Reported-by: Zheng, Shaohui <shaohui.zheng@intel.com>
Tested-by: Haitao Shan <maillists.shan@gmail.com>
Committed-by: Ian Jackson <ian.jackson@eu.citrix.com>
author | Ian Campbell <ian.campbell@citrix.com> |
---|---|
date | Tue Feb 01 19:26:36 2011 +0000 (2011-02-01) |
parents | 74cd0f668546 |
children | 842ff5b82889 |
files | tools/libxc/xc_hcall_buf.c tools/libxc/xc_private.c tools/libxc/xc_private.h |
line diff
1.1 --- a/tools/libxc/xc_hcall_buf.c Tue Feb 01 19:25:08 2011 +0000 1.2 +++ b/tools/libxc/xc_hcall_buf.c Tue Feb 01 19:26:36 2011 +0000 1.3 @@ -18,6 +18,7 @@ 1.4 1.5 #include <stdlib.h> 1.6 #include <malloc.h> 1.7 +#include <pthread.h> 1.8 1.9 #include "xc_private.h" 1.10 #include "xg_private.h" 1.11 @@ -28,31 +29,137 @@ xc_hypercall_buffer_t XC__HYPERCALL_BUFF 1.12 HYPERCALL_BUFFER_INIT_NO_BOUNCE 1.13 }; 1.14 1.15 +pthread_mutex_t hypercall_buffer_cache_mutex = PTHREAD_MUTEX_INITIALIZER; 1.16 + 1.17 +static void hypercall_buffer_cache_lock(xc_interface *xch) 1.18 +{ 1.19 + if ( xch->flags & XC_OPENFLAG_NON_REENTRANT ) 1.20 + return; 1.21 + pthread_mutex_lock(&hypercall_buffer_cache_mutex); 1.22 +} 1.23 + 1.24 +static void hypercall_buffer_cache_unlock(xc_interface *xch) 1.25 +{ 1.26 + if ( xch->flags & XC_OPENFLAG_NON_REENTRANT ) 1.27 + return; 1.28 + pthread_mutex_unlock(&hypercall_buffer_cache_mutex); 1.29 +} 1.30 + 1.31 +static void *hypercall_buffer_cache_alloc(xc_interface *xch, int nr_pages) 1.32 +{ 1.33 + void *p = NULL; 1.34 + 1.35 + hypercall_buffer_cache_lock(xch); 1.36 + 1.37 + xch->hypercall_buffer_total_allocations++; 1.38 + xch->hypercall_buffer_current_allocations++; 1.39 + if ( xch->hypercall_buffer_current_allocations > xch->hypercall_buffer_maximum_allocations ) 1.40 + xch->hypercall_buffer_maximum_allocations = xch->hypercall_buffer_current_allocations; 1.41 + 1.42 + if ( nr_pages > 1 ) 1.43 + { 1.44 + xch->hypercall_buffer_cache_toobig++; 1.45 + } 1.46 + else if ( xch->hypercall_buffer_cache_nr > 0 ) 1.47 + { 1.48 + p = xch->hypercall_buffer_cache[--xch->hypercall_buffer_cache_nr]; 1.49 + xch->hypercall_buffer_cache_hits++; 1.50 + } 1.51 + else 1.52 + { 1.53 + xch->hypercall_buffer_cache_misses++; 1.54 + } 1.55 + 1.56 + hypercall_buffer_cache_unlock(xch); 1.57 + 1.58 + return p; 1.59 +} 1.60 + 1.61 +static int hypercall_buffer_cache_free(xc_interface *xch, void *p, int nr_pages) 1.62 +{ 1.63 + int rc = 0; 1.64 + 1.65 + 
hypercall_buffer_cache_lock(xch); 1.66 + 1.67 + xch->hypercall_buffer_total_releases++; 1.68 + xch->hypercall_buffer_current_allocations--; 1.69 + 1.70 + if ( nr_pages == 1 && xch->hypercall_buffer_cache_nr < HYPERCALL_BUFFER_CACHE_SIZE ) 1.71 + { 1.72 + xch->hypercall_buffer_cache[xch->hypercall_buffer_cache_nr++] = p; 1.73 + rc = 1; 1.74 + } 1.75 + 1.76 + hypercall_buffer_cache_unlock(xch); 1.77 + 1.78 + return rc; 1.79 +} 1.80 + 1.81 +static void do_hypercall_buffer_free_pages(void *ptr, int nr_pages) 1.82 +{ 1.83 +#ifndef __sun__ 1.84 + (void) munlock(ptr, nr_pages * PAGE_SIZE); 1.85 +#endif 1.86 + 1.87 + free(ptr); 1.88 +} 1.89 + 1.90 +void xc__hypercall_buffer_cache_release(xc_interface *xch) 1.91 +{ 1.92 + void *p; 1.93 + 1.94 + hypercall_buffer_cache_lock(xch); 1.95 + 1.96 + DBGPRINTF("hypercall buffer: total allocations:%d total releases:%d", 1.97 + xch->hypercall_buffer_total_allocations, 1.98 + xch->hypercall_buffer_total_releases); 1.99 + DBGPRINTF("hypercall buffer: current allocations:%d maximum allocations:%d", 1.100 + xch->hypercall_buffer_current_allocations, 1.101 + xch->hypercall_buffer_maximum_allocations); 1.102 + DBGPRINTF("hypercall buffer: cache current size:%d", 1.103 + xch->hypercall_buffer_cache_nr); 1.104 + DBGPRINTF("hypercall buffer: cache hits:%d misses:%d toobig:%d", 1.105 + xch->hypercall_buffer_cache_hits, 1.106 + xch->hypercall_buffer_cache_misses, 1.107 + xch->hypercall_buffer_cache_toobig); 1.108 + 1.109 + while ( xch->hypercall_buffer_cache_nr > 0 ) 1.110 + { 1.111 + p = xch->hypercall_buffer_cache[--xch->hypercall_buffer_cache_nr]; 1.112 + do_hypercall_buffer_free_pages(p, 1); 1.113 + } 1.114 + 1.115 + hypercall_buffer_cache_unlock(xch); 1.116 +} 1.117 + 1.118 void *xc__hypercall_buffer_alloc_pages(xc_interface *xch, xc_hypercall_buffer_t *b, int nr_pages) 1.119 { 1.120 size_t size = nr_pages * PAGE_SIZE; 1.121 - void *p; 1.122 + void *p = hypercall_buffer_cache_alloc(xch, nr_pages); 1.123 + 1.124 + if ( !p ) { 1.125 #if 
defined(_POSIX_C_SOURCE) && !defined(__sun__) 1.126 - int ret; 1.127 - ret = posix_memalign(&p, PAGE_SIZE, size); 1.128 - if (ret != 0) 1.129 - return NULL; 1.130 + int ret; 1.131 + ret = posix_memalign(&p, PAGE_SIZE, size); 1.132 + if (ret != 0) 1.133 + return NULL; 1.134 #elif defined(__NetBSD__) || defined(__OpenBSD__) 1.135 - p = valloc(size); 1.136 + p = valloc(size); 1.137 #else 1.138 - p = memalign(PAGE_SIZE, size); 1.139 + p = memalign(PAGE_SIZE, size); 1.140 #endif 1.141 1.142 - if (!p) 1.143 - return NULL; 1.144 + if (!p) 1.145 + return NULL; 1.146 1.147 #ifndef __sun__ 1.148 - if ( mlock(p, size) < 0 ) 1.149 - { 1.150 - free(p); 1.151 - return NULL; 1.152 + if ( mlock(p, size) < 0 ) 1.153 + { 1.154 + free(p); 1.155 + return NULL; 1.156 + } 1.157 +#endif 1.158 } 1.159 -#endif 1.160 1.161 b->hbuf = p; 1.162 1.163 @@ -65,11 +172,8 @@ void xc__hypercall_buffer_free_pages(xc_ 1.164 if ( b->hbuf == NULL ) 1.165 return; 1.166 1.167 -#ifndef __sun__ 1.168 - (void) munlock(b->hbuf, nr_pages * PAGE_SIZE); 1.169 -#endif 1.170 - 1.171 - free(b->hbuf); 1.172 + if ( !hypercall_buffer_cache_free(xch, b->hbuf, nr_pages) ) 1.173 + do_hypercall_buffer_free_pages(b->hbuf, nr_pages); 1.174 } 1.175 1.176 struct allocation_header {
2.1 --- a/tools/libxc/xc_private.c Tue Feb 01 19:25:08 2011 +0000 2.2 +++ b/tools/libxc/xc_private.c Tue Feb 01 19:26:36 2011 +0000 2.3 @@ -126,6 +126,16 @@ static struct xc_interface_core *xc_inte 2.4 xch->error_handler = logger; xch->error_handler_tofree = 0; 2.5 xch->dombuild_logger = dombuild_logger; xch->dombuild_logger_tofree = 0; 2.6 2.7 + xch->hypercall_buffer_cache_nr = 0; 2.8 + 2.9 + xch->hypercall_buffer_total_allocations = 0; 2.10 + xch->hypercall_buffer_total_releases = 0; 2.11 + xch->hypercall_buffer_current_allocations = 0; 2.12 + xch->hypercall_buffer_maximum_allocations = 0; 2.13 + xch->hypercall_buffer_cache_hits = 0; 2.14 + xch->hypercall_buffer_cache_misses = 0; 2.15 + xch->hypercall_buffer_cache_toobig = 0; 2.16 + 2.17 xch->ops_handle = XC_OSDEP_OPEN_ERROR; 2.18 xch->ops = NULL; 2.19 2.20 @@ -172,6 +182,8 @@ static int xc_interface_close_common(xc_ 2.21 { 2.22 int rc = 0; 2.23 2.24 + xc__hypercall_buffer_cache_release(xch); 2.25 + 2.26 xtl_logger_destroy(xch->dombuild_logger_tofree); 2.27 xtl_logger_destroy(xch->error_handler_tofree); 2.28
3.1 --- a/tools/libxc/xc_private.h Tue Feb 01 19:25:08 2011 +0000 3.2 +++ b/tools/libxc/xc_private.h Tue Feb 01 19:26:36 2011 +0000 3.3 @@ -75,6 +75,28 @@ struct xc_interface_core { 3.4 FILE *dombuild_logger_file; 3.5 const char *currently_progress_reporting; 3.6 3.7 + /* 3.8 + * A simple cache of unused, single page, hypercall buffers 3.9 + * 3.10 + * Protected by a global lock. 3.11 + */ 3.12 +#define HYPERCALL_BUFFER_CACHE_SIZE 4 3.13 + int hypercall_buffer_cache_nr; 3.14 + void *hypercall_buffer_cache[HYPERCALL_BUFFER_CACHE_SIZE]; 3.15 + 3.16 + /* 3.17 + * Hypercall buffer statistics. All protected by the global 3.18 + * hypercall_buffer_cache lock. 3.19 + */ 3.20 + int hypercall_buffer_total_allocations; 3.21 + int hypercall_buffer_total_releases; 3.22 + int hypercall_buffer_current_allocations; 3.23 + int hypercall_buffer_maximum_allocations; 3.24 + int hypercall_buffer_cache_hits; 3.25 + int hypercall_buffer_cache_misses; 3.26 + int hypercall_buffer_cache_toobig; 3.27 + 3.28 + /* Low lovel OS interface */ 3.29 xc_osdep_info_t osdep; 3.30 xc_osdep_ops *ops; /* backend operations */ 3.31 xc_osdep_handle ops_handle; /* opaque data for xc_osdep_ops */ 3.32 @@ -158,6 +180,11 @@ void xc__hypercall_bounce_post(xc_interf 3.33 #define xc_hypercall_bounce_post(_xch, _name) xc__hypercall_bounce_post(_xch, HYPERCALL_BUFFER(_name)) 3.34 3.35 /* 3.36 + * Release hypercall buffer cache 3.37 + */ 3.38 +void xc__hypercall_buffer_cache_release(xc_interface *xch); 3.39 + 3.40 +/* 3.41 * Hypercall interfaces. 3.42 */ 3.43