changeset 22396:477dda47486e
tools: change cpumaps to uint8_t
Cpumap types in the tools (libxc and libxl) are changed to be byte-based,
matching the interface to the hypervisor.
To make handling easier, the size of the cpumaps used is always derived from
the number of physical cpus supported by the hypervisor. This eliminates the
need to keep track of the cpumap size in external interfaces.
In libxl a macro for iterating over a cpumap is added (libxl_for_each_cpu).
Interfaces changed:
libxl_set_vcpuaffinity()
libxl_cpumap_alloc()
xc_vcpu_setaffinity()
xc_vcpu_getaffinity()
xc_cpupool_freeinfo()
Signed-off-by: Juergen Gross <juergen.gross@ts.fujitsu.com>
Acked-by: Ian Campbell <ian.campbell@citrix.com>
Signed-off-by: Ian Jackson <ian.jackson@eu.citrix.com>
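
To illustrate the new byte-based convention, here is a minimal caller sketch. It is hypothetical usage, not part of this changeset: the helper name pin_vcpu_to_cpu and the error handling are illustrative only; the xc_* calls are the ones introduced or changed by this patch.

```c
/* Minimal sketch of the new libxc calling convention: the caller no
 * longer passes a cpumap size; it allocates a map sized from the
 * hypervisor's cpu count and hands it over as an xc_cpumap_t.
 * pin_vcpu_to_cpu() is a hypothetical helper, not part of the patch. */
#include <stdlib.h>
#include <xenctrl.h>

int pin_vcpu_to_cpu(xc_interface *xch, uint32_t domid, int vcpu, int cpu)
{
    xc_cpumap_t cpumap = xc_cpumap_alloc(xch); /* zeroed, xc_get_cpumap_size() bytes */
    int rc;

    if ( cpumap == NULL )
        return -1;
    cpumap[cpu / 8] |= 1 << (cpu % 8);         /* byte-based bitmap */
    rc = xc_vcpu_setaffinity(xch, domid, vcpu, cpumap);
    free(cpumap);                              /* calloc()ed, so plain free() */
    return rc;
}
```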
| author | Juergen Gross <juergen.gross@ts.fujitsu.com> |
|---|---|
| date | Wed Nov 03 12:10:46 2010 +0000 (2010-11-03) |
| parents | 3f98881703aa |
| children | 66c0e432398f |
| files | tools/libxc/xc_cpupool.c tools/libxc/xc_domain.c tools/libxc/xc_misc.c tools/libxc/xenctrl.h tools/libxl/libxl.c tools/libxl/libxl.h tools/libxl/libxl_utils.c tools/libxl/libxl_utils.h tools/libxl/xl_cmdimpl.c tools/python/xen/lowlevel/xc/xc.c |
line diff
--- a/tools/libxc/xc_cpupool.c	Wed Nov 03 11:58:25 2010 +0000
+++ b/tools/libxc/xc_cpupool.c	Wed Nov 03 12:10:46 2010 +0000
@@ -34,11 +34,6 @@ static int do_sysctl_save(xc_interface *
     return ret;
 }
 
-static int get_cpumap_size(xc_interface *xch)
-{
-    return (xc_get_max_cpus(xch) + 7) / 8;
-}
-
 int xc_cpupool_create(xc_interface *xch,
                       uint32_t *ppoolid,
                       uint32_t sched_id)
@@ -75,12 +70,10 @@ xc_cpupoolinfo_t *xc_cpupool_getinfo(xc_
     int err = 0;
     xc_cpupoolinfo_t *info = NULL;
     int local_size;
-    int cpumap_size;
-    int size;
     DECLARE_SYSCTL;
     DECLARE_HYPERCALL_BUFFER(uint8_t, local);
 
-    local_size = get_cpumap_size(xch);
+    local_size = xc_get_cpumap_size(xch);
     if (!local_size)
     {
         PERROR("Could not get number of cpus");
@@ -93,9 +86,6 @@ xc_cpupoolinfo_t *xc_cpupool_getinfo(xc_
         return NULL;
     }
 
-    cpumap_size = (local_size + sizeof(*info->cpumap) - 1) / sizeof(*info->cpumap);
-    size = sizeof(xc_cpupoolinfo_t) + cpumap_size * sizeof(*info->cpumap);
-
     sysctl.cmd = XEN_SYSCTL_cpupool_op;
     sysctl.u.cpupool_op.op = XEN_SYSCTL_CPUPOOL_OP_INFO;
     sysctl.u.cpupool_op.cpupool_id = poolid;
@@ -107,18 +97,19 @@ xc_cpupoolinfo_t *xc_cpupool_getinfo(xc_
     if ( err < 0 )
         goto out;
 
-    info = malloc(size);
+    info = calloc(1, sizeof(xc_cpupoolinfo_t));
     if ( !info )
         goto out;
 
-    memset(info, 0, size);
-    info->cpumap_size = local_size * 8;
-    info->cpumap = (uint64_t *)(info + 1);
-
+    info->cpumap = xc_cpumap_alloc(xch);
+    if (!info->cpumap) {
+        free(info);
+        goto out;
+    }
     info->cpupool_id = sysctl.u.cpupool_op.cpupool_id;
     info->sched_id = sysctl.u.cpupool_op.sched_id;
     info->n_dom = sysctl.u.cpupool_op.n_dom;
-    bitmap_byte_to_64(info->cpumap, local, local_size * 8);
+    memcpy(info->cpumap, local, local_size);
 
 out:
     xc_hypercall_buffer_free(xch, local);
@@ -126,6 +117,13 @@ out:
     return info;
 }
 
+void xc_cpupool_infofree(xc_interface *xch,
+                         xc_cpupoolinfo_t *info)
+{
+    free(info->cpumap);
+    free(info);
+}
+
 int xc_cpupool_addcpu(xc_interface *xch,
                       uint32_t poolid,
                       int cpu)
@@ -165,19 +163,19 @@ int xc_cpupool_movedomain(xc_interface *
     return do_sysctl_save(xch, &sysctl);
 }
 
-uint64_t * xc_cpupool_freeinfo(xc_interface *xch,
-                               int *cpusize)
+xc_cpumap_t xc_cpupool_freeinfo(xc_interface *xch)
 {
     int err = -1;
-    uint64_t *cpumap = NULL;
+    xc_cpumap_t cpumap = NULL;
+    int mapsize;
     DECLARE_SYSCTL;
     DECLARE_HYPERCALL_BUFFER(uint8_t, local);
 
-    *cpusize = get_cpumap_size(xch);
-    if (*cpusize == 0)
+    mapsize = xc_get_cpumap_size(xch);
+    if (mapsize == 0)
         return NULL;
 
-    local = xc_hypercall_buffer_alloc(xch, local, *cpusize);
+    local = xc_hypercall_buffer_alloc(xch, local, mapsize);
     if ( local == NULL ) {
         PERROR("Could not allocate locked memory for xc_cpupool_freeinfo");
         return NULL;
@@ -186,18 +184,18 @@ uint64_t * xc_cpupool_freeinf
     sysctl.cmd = XEN_SYSCTL_cpupool_op;
     sysctl.u.cpupool_op.op = XEN_SYSCTL_CPUPOOL_OP_FREEINFO;
     set_xen_guest_handle(sysctl.u.cpupool_op.cpumap.bitmap, local);
-    sysctl.u.cpupool_op.cpumap.nr_cpus = *cpusize * 8;
+    sysctl.u.cpupool_op.cpumap.nr_cpus = mapsize * 8;
 
     err = do_sysctl_save(xch, &sysctl);
 
     if ( err < 0 )
         goto out;
 
-    cpumap = calloc((*cpusize + sizeof(*cpumap) - 1) / sizeof(*cpumap), sizeof(*cpumap));
+    cpumap = xc_cpumap_alloc(xch);
     if (cpumap == NULL)
         goto out;
 
-    bitmap_byte_to_64(cpumap, local, *cpusize * 8);
+    memcpy(cpumap, local, mapsize);
 
 out:
     xc_hypercall_buffer_free(xch, local);
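
As a usage note: xc_cpupool_freeinfo() now returns a freshly allocated xc_cpumap_t and no longer reports a size through an out parameter. A hedged caller sketch (print_free_cpus is a hypothetical helper, not part of the changeset):

```c
/* Sketch: enumerate cpus not assigned to any cpupool via the reworked
 * xc_cpupool_freeinfo(). The map length comes from xc_get_cpumap_size(). */
#include <stdio.h>
#include <stdlib.h>
#include <xenctrl.h>

void print_free_cpus(xc_interface *xch)
{
    xc_cpumap_t map = xc_cpupool_freeinfo(xch);
    int i, nbits = xc_get_cpumap_size(xch) * 8;

    if ( map == NULL )
        return;
    for ( i = 0; i < nbits; i++ )
        if ( map[i / 8] & (1 << (i % 8)) )   /* byte-based bit test */
            printf("cpu %d is free\n", i);
    free(map);
}
```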
--- a/tools/libxc/xc_domain.c	Wed Nov 03 11:58:25 2010 +0000
+++ b/tools/libxc/xc_domain.c	Wed Nov 03 12:10:46 2010 +0000
@@ -113,11 +113,19 @@ int xc_domain_shutdown(xc_interface *xch
 int xc_vcpu_setaffinity(xc_interface *xch,
                         uint32_t domid,
                         int vcpu,
-                        uint64_t *cpumap, int cpusize)
+                        xc_cpumap_t cpumap)
 {
     DECLARE_DOMCTL;
     DECLARE_HYPERCALL_BUFFER(uint8_t, local);
     int ret = -1;
+    int cpusize;
+
+    cpusize = xc_get_cpumap_size(xch);
+    if (!cpusize)
+    {
+        PERROR("Could not get number of cpus");
+        goto out;
+    }
 
     local = xc_hypercall_buffer_alloc(xch, local, cpusize);
     if ( local == NULL )
@@ -130,7 +138,7 @@ int xc_vcpu_setaffinity(xc_interface *xc
     domctl.domain = (domid_t)domid;
     domctl.u.vcpuaffinity.vcpu = vcpu;
 
-    bitmap_64_to_byte(local, cpumap, cpusize * 8);
+    memcpy(local, cpumap, cpusize);
 
     set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap.bitmap, local);
 
@@ -148,14 +156,22 @@ int xc_vcpu_setaffinity(xc_interface *xc
 int xc_vcpu_getaffinity(xc_interface *xch,
                         uint32_t domid,
                         int vcpu,
-                        uint64_t *cpumap, int cpusize)
+                        xc_cpumap_t cpumap)
 {
     DECLARE_DOMCTL;
     DECLARE_HYPERCALL_BUFFER(uint8_t, local);
     int ret = -1;
+    int cpusize;
+
+    cpusize = xc_get_cpumap_size(xch);
+    if (!cpusize)
+    {
+        PERROR("Could not get number of cpus");
+        goto out;
+    }
 
     local = xc_hypercall_buffer_alloc(xch, local, cpusize);
-    if(local == NULL)
+    if (local == NULL)
     {
         PERROR("Could not allocate memory for getvcpuaffinity domctl hypercall");
         goto out;
@@ -170,7 +186,7 @@ int xc_vcpu_getaffinity(xc_interface *xc
 
     ret = do_domctl(xch, &domctl);
 
-    bitmap_byte_to_64(cpumap, local, cpusize * 8);
+    memcpy(cpumap, local, cpusize);
 
     xc_hypercall_buffer_free(xch, local);
 out:
--- a/tools/libxc/xc_misc.c	Wed Nov 03 11:58:25 2010 +0000
+++ b/tools/libxc/xc_misc.c	Wed Nov 03 12:10:46 2010 +0000
@@ -35,6 +35,21 @@ int xc_get_max_cpus(xc_interface *xch)
     return max_cpus;
 }
 
+int xc_get_cpumap_size(xc_interface *xch)
+{
+    return (xc_get_max_cpus(xch) + 7) / 8;
+}
+
+xc_cpumap_t xc_cpumap_alloc(xc_interface *xch)
+{
+    int sz;
+
+    sz = xc_get_cpumap_size(xch);
+    if (sz == 0)
+        return NULL;
+    return calloc(1, sz);
+}
+
 int xc_readconsolering(xc_interface *xch,
                        char *buffer,
                        unsigned int *pnr_chars,
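
As a worked example of the rounding above: on a host whose hypervisor reports 9 physical cpus, xc_get_cpumap_size() yields (9 + 7) / 8 = 2, so xc_cpumap_alloc() returns a zeroed two-byte map covering cpus 0 through 15.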
--- a/tools/libxc/xenctrl.h	Wed Nov 03 11:58:25 2010 +0000
+++ b/tools/libxc/xenctrl.h	Wed Nov 03 12:10:46 2010 +0000
@@ -281,6 +281,20 @@ void xc__hypercall_buffer_free_pages(xc_
 #define xc_hypercall_buffer_free_pages(_xch, _name, _nr) xc__hypercall_buffer_free_pages(_xch, HYPERCALL_BUFFER(_name), _nr)
 
 /*
+ * CPUMAP handling
+ */
+typedef uint8_t *xc_cpumap_t;
+
+/* return maximum number of cpus the hypervisor supports */
+int xc_get_max_cpus(xc_interface *xch);
+
+/* return array size for cpumap */
+int xc_get_cpumap_size(xc_interface *xch);
+
+/* allocate a cpumap */
+xc_cpumap_t xc_cpumap_alloc(xc_interface *xch);
+
+/*
  * DOMAIN DEBUGGING FUNCTIONS
  */
 
@@ -348,9 +362,6 @@ typedef union
 } start_info_any_t;
 
 
-/* return maximum number of cpus the hypervisor supports */
-int xc_get_max_cpus(xc_interface *xch);
-
 int xc_domain_create(xc_interface *xch,
                      uint32_t ssidref,
                      xen_domain_handle_t handle,
@@ -462,13 +473,11 @@ int xc_watchdog(xc_interface *xch,
 int xc_vcpu_setaffinity(xc_interface *xch,
                         uint32_t domid,
                         int vcpu,
-                        uint64_t *cpumap,
-                        int cpusize);
+                        xc_cpumap_t cpumap);
 int xc_vcpu_getaffinity(xc_interface *xch,
                         uint32_t domid,
                         int vcpu,
-                        uint64_t *cpumap,
-                        int cpusize);
+                        xc_cpumap_t cpumap);
 
 /**
  * This function will return information about one or more domains. It is
@@ -670,8 +679,7 @@ typedef struct xc_cpupoolinfo {
     uint32_t cpupool_id;
     uint32_t sched_id;
     uint32_t n_dom;
-    uint32_t cpumap_size;    /* max number of cpus in map */
-    uint64_t *cpumap;
+    xc_cpumap_t cpumap;
 } xc_cpupoolinfo_t;
 
 /**
@@ -701,12 +709,20 @@ int xc_cpupool_destroy(xc_interface *xch
  * starting at the given id.
  * @parm xc_handle a handle to an open hypervisor interface
  * @parm poolid lowest id for which info is returned
- * return cpupool info ptr (obtained by malloc)
+ * return cpupool info ptr (to be freed via xc_cpupool_infofree)
  */
 xc_cpupoolinfo_t *xc_cpupool_getinfo(xc_interface *xch,
                                      uint32_t poolid);
 
 /**
+ * Free cpupool info. Used to free info obtained via xc_cpupool_getinfo.
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm info area to free
+ */
+void xc_cpupool_infofree(xc_interface *xch,
+                         xc_cpupoolinfo_t *info);
+
+/**
  * Add cpu to a cpupool. cpu may be -1 indicating the first unassigned.
  *
  * @parm xc_handle a handle to an open hypervisor interface
@@ -746,11 +762,9 @@ int xc_cpupool_movedomain(xc_interface *
  * Return map of cpus not in any cpupool.
  *
  * @parm xc_handle a handle to an open hypervisor interface
- * @parm cpusize where to store array size in bytes
  * return cpumap array on success, NULL else
 */
-uint64_t *xc_cpupool_freeinfo(xc_interface *xch,
-                              int *cpusize);
+xc_cpumap_t xc_cpupool_freeinfo(xc_interface *xch);
 
 
 /*
--- a/tools/libxl/libxl.c	Wed Nov 03 11:58:25 2010 +0000
+++ b/tools/libxl/libxl.c	Wed Nov 03 12:10:46 2010 +0000
@@ -610,16 +610,11 @@ int libxl_domain_info(libxl_ctx *ctx, li
 libxl_cpupoolinfo * libxl_list_cpupool(libxl_ctx *ctx, int *nb_pool)
 {
     libxl_cpupoolinfo *ptr, *tmp;
-    int i, m, ncpu;
+    int i;
     xc_cpupoolinfo_t *info;
     uint32_t poolid;
 
     ptr = NULL;
-    ncpu = xc_get_max_cpus(ctx->xch);
-    if (!ncpu) {
-        LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "getting max cpu number");
-        return NULL;
-    }
 
     poolid = 0;
     for (i = 0;; i++) {
@@ -630,19 +625,20 @@ libxl_cpupoolinfo * libxl_list_cpupool(l
         if (!tmp) {
             LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "allocating cpupool info");
             free(ptr);
+            xc_cpupool_infofree(ctx->xch, info);
             return NULL;
         }
         ptr = tmp;
         ptr[i].poolid = info->cpupool_id;
         ptr[i].sched_id = info->sched_id;
         ptr[i].n_dom = info->n_dom;
-        if (libxl_cpumap_alloc(&ptr[i].cpumap, ncpu))
+        if (libxl_cpumap_alloc(ctx, &ptr[i].cpumap)) {
+            xc_cpupool_infofree(ctx->xch, info);
             break;
-        for (m = 0; m < ptr[i].cpumap.size / sizeof(*ptr[i].cpumap.map); m++)
-            ptr[i].cpumap.map[m] = (info->cpumap_size > (m * sizeof(*ptr[i].cpumap.map))) ?
-                info->cpumap[m] : 0;
+        }
+        memcpy(ptr[i].cpumap.map, info->cpumap, ptr[i].cpumap.size);
         poolid = info->cpupool_id + 1;
-        free(info);
+        xc_cpupool_infofree(ctx->xch, info);
     }
 
     *nb_pool = i;
@@ -3229,14 +3225,14 @@ libxl_vcpuinfo *libxl_list_vcpu(libxl_ct
         LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "getting infolist");
         return NULL;
     }
-    *nrcpus = xc_get_max_cpus(ctx->xch);
+    *nrcpus = libxl_get_max_cpus(ctx);
     ret = ptr = calloc(domaininfo.max_vcpu_id + 1, sizeof (libxl_vcpuinfo));
     if (!ptr) {
        return NULL;
    }
 
    for (*nb_vcpu = 0; *nb_vcpu <= domaininfo.max_vcpu_id; ++*nb_vcpu, ++ptr) {
-        if (libxl_cpumap_alloc(&ptr->cpumap, *nrcpus)) {
+        if (libxl_cpumap_alloc(ctx, &ptr->cpumap)) {
            LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "allocating cpumap");
            return NULL;
        }
@@ -3244,8 +3240,7 @@ libxl_vcpuinfo *libxl_list_vcpu(libxl_ct
            LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "getting vcpu info");
            return NULL;
        }
-        if (xc_vcpu_getaffinity(ctx->xch, domid, *nb_vcpu,
-            ptr->cpumap.map, ((*nrcpus) + 7) / 8) == -1) {
+        if (xc_vcpu_getaffinity(ctx->xch, domid, *nb_vcpu, ptr->cpumap.map) == -1) {
            LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "getting vcpu affinity");
            return NULL;
        }
@@ -3260,9 +3255,9 @@ libxl_vcpuinfo *libxl_list_vcpu(libxl_ct
 }
 
 int libxl_set_vcpuaffinity(libxl_ctx *ctx, uint32_t domid, uint32_t vcpuid,
-                           uint64_t *cpumap, int nrcpus)
+                           libxl_cpumap *cpumap)
 {
-    if (xc_vcpu_setaffinity(ctx->xch, domid, vcpuid, cpumap, (nrcpus + 7) / 8)) {
+    if (xc_vcpu_setaffinity(ctx->xch, domid, vcpuid, cpumap->map)) {
         LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "setting vcpu affinity");
         return ERROR_FAIL;
     }
@@ -3933,7 +3928,11 @@ int libxl_get_freecpus(libxl_ctx *ctx, l
 {
     int ncpus;
 
-    cpumap->map = xc_cpupool_freeinfo(ctx->xch, &ncpus);
+    ncpus = libxl_get_max_cpus(ctx);
+    if (ncpus == 0)
+        return ERROR_FAIL;
+
+    cpumap->map = xc_cpupool_freeinfo(ctx->xch);
     if (cpumap->map == NULL)
         return ERROR_FAIL;
 
@@ -3963,8 +3962,8 @@ int libxl_create_cpupool(libxl_ctx *ctx,
         return ERROR_FAIL;
     }
 
-    for (i = 0; i < cpumap.size * 8; i++)
-        if (cpumap.map[i / 64] & (1L << (i % 64))) {
+    libxl_for_each_cpu(i, cpumap)
+        if (libxl_cpumap_test(&cpumap, i)) {
             rc = xc_cpupool_addcpu(ctx->xch, *poolid, i);
             if (rc) {
                 LIBXL__LOG_ERRNOVAL(ctx, LIBXL__LOG_ERROR, rc,
@@ -3996,6 +3995,7 @@ int libxl_destroy_cpupool(libxl_ctx
     int rc, i;
     xc_cpupoolinfo_t *info;
     xs_transaction_t t;
+    libxl_cpumap cpumap;
 
     info = xc_cpupool_getinfo(ctx->xch, poolid);
     if (info == NULL)
@@ -4005,14 +4005,19 @@ int libxl_destroy_cpupool(libxl_ctx
     if ((info->cpupool_id != poolid) || (info->n_dom))
         goto out;
 
-    for (i = 0; i < info->cpumap_size; i++)
-        if (info->cpumap[i / 64] & (1L << (i % 64))) {
+    rc = ERROR_NOMEM;
+    if (libxl_cpumap_alloc(ctx, &cpumap))
+        goto out;
+
+    memcpy(cpumap.map, info->cpumap, cpumap.size);
+    libxl_for_each_cpu(i, cpumap)
+        if (libxl_cpumap_test(&cpumap, i)) {
             rc = xc_cpupool_removecpu(ctx->xch, poolid, i);
             if (rc) {
                 LIBXL__LOG_ERRNOVAL(ctx, LIBXL__LOG_ERROR, rc,
                     "Error removing cpu from cpupool");
                 rc = ERROR_FAIL;
-                goto out;
+                goto out1;
             }
         }
 
@@ -4020,7 +4025,7 @@ int libxl_destroy_cpupool(libxl_ctx
     if (rc) {
         LIBXL__LOG_ERRNOVAL(ctx, LIBXL__LOG_ERROR, rc, "Could not destroy cpupool");
         rc = ERROR_FAIL;
-        goto out;
+        goto out1;
     }
 
     for (;;) {
@@ -4034,8 +4039,10 @@ int libxl_destroy_cpupool(libxl_ctx
 
     rc = 0;
 
+out1:
+    libxl_cpumap_destroy(&cpumap);
 out:
-    free(info);
+    xc_cpupool_infofree(ctx->xch, info);
 
     return rc;
 }
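
A hedged sketch of the reworked libxl affinity call, mirroring what xl's vcpupin does after this change (the wrapper pin_domain_vcpu is hypothetical, not part of the changeset):

```c
/* Sketch: pin one vcpu using the new libxl_cpumap-based interface.
 * The caller owns the map; error paths are abbreviated. */
#include <libxl.h>

int pin_domain_vcpu(libxl_ctx *ctx, uint32_t domid, uint32_t vcpuid, int cpu)
{
    libxl_cpumap cpumap;
    int rc;

    if ( libxl_cpumap_alloc(ctx, &cpumap) ) /* sized from libxl_get_max_cpus() */
        return ERROR_FAIL;
    libxl_cpumap_set(&cpumap, cpu);
    rc = libxl_set_vcpuaffinity(ctx, domid, vcpuid, &cpumap);
    libxl_cpumap_destroy(&cpumap);
    return rc;
}
```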
--- a/tools/libxl/libxl.h	Wed Nov 03 11:58:25 2010 +0000
+++ b/tools/libxl/libxl.h	Wed Nov 03 12:10:46 2010 +0000
@@ -145,7 +145,7 @@ typedef uint32_t libxl_hwcap[8];
 
 typedef struct {
     uint32_t size;          /* number of bytes in map */
-    uint64_t *map;
+    uint8_t *map;
 } libxl_cpumap;
 void libxl_cpumap_destroy(libxl_cpumap *map);
 
@@ -464,7 +464,7 @@ int libxl_get_physinfo(libxl_ctx *ctx, l
 libxl_vcpuinfo *libxl_list_vcpu(libxl_ctx *ctx, uint32_t domid,
                                 int *nb_vcpu, int *nrcpus);
 int libxl_set_vcpuaffinity(libxl_ctx *ctx, uint32_t domid, uint32_t vcpuid,
-                           uint64_t *cpumap, int nrcpus);
+                           libxl_cpumap *cpumap);
 int libxl_set_vcpuonline(libxl_ctx *ctx, uint32_t domid, uint32_t bitmask);
 
 int libxl_get_sched_id(libxl_ctx *ctx);
--- a/tools/libxl/libxl_utils.c	Wed Nov 03 11:58:25 2010 +0000
+++ b/tools/libxl/libxl_utils.c	Wed Nov 03 12:10:46 2010 +0000
@@ -708,15 +708,20 @@ out:
     return rc;
 }
 
-int libxl_cpumap_alloc(libxl_cpumap *cpumap, int max_cpus)
+int libxl_cpumap_alloc(libxl_ctx *ctx, libxl_cpumap *cpumap)
 {
-    int elems;
+    int max_cpus;
+    int sz;
 
-    elems = (max_cpus + 63) / 64;
-    cpumap->map = calloc(elems, sizeof(*cpumap->map));
+    max_cpus = libxl_get_max_cpus(ctx);
+    if (max_cpus == 0)
+        return ERROR_FAIL;
+
+    sz = (max_cpus + 7) / 8;
+    cpumap->map = calloc(sz, sizeof(*cpumap->map));
     if (!cpumap->map)
         return ERROR_NOMEM;
-    cpumap->size = elems * 8; /* size in bytes */
+    cpumap->size = sz;
     return 0;
 }
 
@@ -729,21 +734,21 @@ int libxl_cpumap_test(libxl_cpumap *cpum
 {
     if (cpu >= cpumap->size * 8)
         return 0;
-    return (cpumap->map[cpu / 64] & (1L << (cpu & 63))) ? 1 : 0;
+    return (cpumap->map[cpu / 8] & (1 << (cpu & 7))) ? 1 : 0;
 }
 
 void libxl_cpumap_set(libxl_cpumap *cpumap, int cpu)
 {
     if (cpu >= cpumap->size * 8)
         return;
-    cpumap->map[cpu / 64] |= 1L << (cpu & 63);
+    cpumap->map[cpu / 8] |= 1 << (cpu & 7);
 }
 
 void libxl_cpumap_reset(libxl_cpumap *cpumap, int cpu)
 {
     if (cpu >= cpumap->size * 8)
         return;
-    cpumap->map[cpu / 64] &= ~(1L << (cpu & 63));
+    cpumap->map[cpu / 8] &= ~(1 << (cpu & 7));
 }
 
 int libxl_get_max_cpus(libxl_ctx *ctx)
--- a/tools/libxl/libxl_utils.h	Wed Nov 03 11:58:25 2010 +0000
+++ b/tools/libxl/libxl_utils.h	Wed Nov 03 12:10:46 2010 +0000
@@ -76,9 +76,11 @@ int libxl_devid_to_device_net2(libxl_ctx
  * return -1 if there are an error */
 int libxl_check_device_model_version(libxl_ctx *ctx, char *path);
 
-int libxl_cpumap_alloc(libxl_cpumap *cpumap, int max_cpus);
+int libxl_cpumap_alloc(libxl_ctx *ctx, libxl_cpumap *cpumap);
 int libxl_cpumap_test(libxl_cpumap *cpumap, int cpu);
 void libxl_cpumap_set(libxl_cpumap *cpumap, int cpu);
 void libxl_cpumap_reset(libxl_cpumap *cpumap, int cpu);
+#define libxl_for_each_cpu(var, map) for (var = 0; var < (map).size * 8; var++)
+
 #endif
 
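
The new macro expands to a plain for loop over the size * 8 bit positions of a map. A usage sketch matching its use in xl_cmdimpl.c (count_set_cpus is a hypothetical helper):

```c
/* Sketch: count the set bits of a populated libxl_cpumap using the new
 * libxl_for_each_cpu macro together with libxl_cpumap_test(). */
#include <libxl.h>
#include <libxl_utils.h>

int count_set_cpus(libxl_cpumap *cpumap)
{
    int i, n = 0;

    libxl_for_each_cpu(i, *cpumap)          /* i runs over 0 .. size * 8 - 1 */
        if ( libxl_cpumap_test(cpumap, i) )
            n++;
    return n;
}
```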
--- a/tools/libxl/xl_cmdimpl.c	Wed Nov 03 11:58:25 2010 +0000
+++ b/tools/libxl/xl_cmdimpl.c	Wed Nov 03 12:10:46 2010 +0000
@@ -3534,8 +3534,8 @@ static void print_vcpuinfo(uint32_t tdom
                            uint32_t nr_cpus)
 {
     int i, l;
-    uint64_t *cpumap;
-    uint64_t pcpumap;
+    uint8_t *cpumap;
+    uint8_t pcpumap;
     char *domname;
 
     /* NAME  ID  VCPU */
@@ -3555,14 +3555,14 @@ static void print_vcpuinfo(uint32_t tdom
     /* TIM */
     printf("%9.1f ", ((float)vcpuinfo->vcpu_time / 1e9));
     /* CPU AFFINITY */
-    pcpumap = nr_cpus > 64 ? (uint64_t)-1 : ((1ULL << nr_cpus) - 1);
+    pcpumap = nr_cpus > 8 ? (uint8_t)-1 : ((1 << nr_cpus) - 1);
     for (cpumap = vcpuinfo->cpumap.map; nr_cpus; ++cpumap) {
         if (*cpumap < pcpumap) {
             break;
         }
-        if (nr_cpus > 64) {
+        if (nr_cpus > 8) {
             pcpumap = -1;
-            nr_cpus -= 64;
+            nr_cpus -= 8;
         } else {
             pcpumap = ((1 << nr_cpus) - 1);
             nr_cpus = 0;
@@ -3593,7 +3593,7 @@ static void print_vcpuinfo(uint32_t tdom
                 }
             }
             printf("\n");
-            nr_cpus = nr_cpus > 64 ? nr_cpus - 64 : 0;
+            nr_cpus = nr_cpus > 8 ? nr_cpus - 8 : 0;
         }
     }
 }
@@ -3678,11 +3678,11 @@ int main_vcpulist(int argc, char **argv)
 static void vcpupin(char *d, const char *vcpu, char *cpu)
 {
     libxl_vcpuinfo *vcpuinfo;
-    uint64_t *cpumap = NULL;
+    libxl_cpumap cpumap;
 
     uint32_t vcpuid, cpuida, cpuidb;
     char *endptr, *toka, *tokb;
-    int i, nb_vcpu, cpusize, cpumapsize;
+    int i, nb_vcpu;
 
     vcpuid = strtoul(vcpu, &endptr, 10);
     if (vcpu == endptr) {
@@ -3695,63 +3695,54 @@ static void vcpupin(char *d, const char
 
     find_domain(d);
 
-    if ((cpusize = libxl_get_max_cpus(&ctx)) == 0) {
-        fprintf(stderr, "libxl_get_max_cpus failed.\n");
-        goto vcpupin_out1;
-    }
-    cpumapsize = (cpusize + sizeof (uint64_t) - 1) / sizeof (uint64_t);
-
-    cpumap = calloc(cpumapsize, sizeof (uint64_t));
-    if (!cpumap) {
-        goto vcpupin_out1;
+    if (libxl_cpumap_alloc(&ctx, &cpumap)) {
+        goto vcpupin_out;
     }
     if (strcmp(cpu, "all")) {
         for (toka = strtok(cpu, ","), i = 0; toka; toka = strtok(NULL, ","), ++i) {
             cpuida = strtoul(toka, &endptr, 10);
             if (toka == endptr) {
                 fprintf(stderr, "Error: Invalid argument.\n");
-                goto vcpupin_out;
+                goto vcpupin_out1;
             }
             if (*endptr == '-') {
                 tokb = endptr + 1;
                 cpuidb = strtoul(tokb, &endptr, 10);
                 if ((tokb == endptr) || (cpuida > cpuidb)) {
                     fprintf(stderr, "Error: Invalid argument.\n");
-                    goto vcpupin_out;
+                    goto vcpupin_out1;
                 }
                 while (cpuida <= cpuidb) {
-                    cpumap[cpuida / 64] |= (1 << (cpuida % 64));
+                    libxl_cpumap_set(&cpumap, cpuida);
                     ++cpuida;
                 }
             } else {
-                cpumap[cpuida / 64] |= (1 << (cpuida % 64));
+                libxl_cpumap_set(&cpumap, cpuida);
             }
         }
     }
     else {
-        memset(cpumap, -1, sizeof (uint64_t) * cpumapsize);
+        memset(cpumap.map, -1, cpumap.size);
     }
 
     if (vcpuid != -1) {
-        if (libxl_set_vcpuaffinity(&ctx, domid, vcpuid,
-                                   cpumap, cpusize) == -1) {
+        if (libxl_set_vcpuaffinity(&ctx, domid, vcpuid, &cpumap) == -1) {
             fprintf(stderr, "Could not set affinity for vcpu `%u'.\n", vcpuid);
         }
     }
     else {
         if (!(vcpuinfo = libxl_list_vcpu(&ctx, domid, &nb_vcpu, &i))) {
             fprintf(stderr, "libxl_list_vcpu failed.\n");
-            goto vcpupin_out;
+            goto vcpupin_out1;
         }
         for (; nb_vcpu > 0; --nb_vcpu, ++vcpuinfo) {
-            if (libxl_set_vcpuaffinity(&ctx, domid, vcpuinfo->vcpuid,
-                                       cpumap, cpusize) == -1) {
+            if (libxl_set_vcpuaffinity(&ctx, domid, vcpuinfo->vcpuid, &cpumap) == -1) {
                 fprintf(stderr, "libxl_set_vcpuaffinity failed on vcpu `%u'.\n", vcpuinfo->vcpuid);
             }
         }
     }
 vcpupin_out1:
-    free(cpumap);
+    libxl_cpumap_destroy(&cpumap);
 vcpupin_out:
     ;
 }
@@ -3903,7 +3894,7 @@ static void output_physinfo(void)
         printf("free_memory            : %"PRIu64"\n", info.free_pages / i);
     }
     if (!libxl_get_freecpus(&ctx, &cpumap)) {
-        for (i = 0; i < cpumap.size * 8; i++)
+        libxl_for_each_cpu(i, cpumap)
             if (libxl_cpumap_test(&cpumap, i))
                 n++;
         printf("free_cpus              : %d\n", n);
@@ -5455,7 +5446,7 @@ int main_cpupoolcreate(int argc, char **
         fprintf(stderr, "libxl_get_freecpus failed\n");
         return -ERROR_FAIL;
     }
-    if (libxl_cpumap_alloc(&cpumap, freemap.size * 8)) {
+    if (libxl_cpumap_alloc(&ctx, &cpumap)) {
         fprintf(stderr, "Failed to allocate cpumap\n");
         return -ERROR_FAIL;
     }
@@ -5474,7 +5465,7 @@ int main_cpupoolcreate(int argc, char **
     } else {
         n_cpus = 1;
         n = 0;
-        for (i = 0; i < freemap.size * 8; i++)
+        libxl_for_each_cpu(i, freemap)
             if (libxl_cpumap_test(&freemap, i)) {
                 n++;
                 libxl_cpumap_set(&cpumap, i);
@@ -5584,8 +5575,8 @@ int main_cpupoollist(int argc, char **ar
             printf("%-19s", name);
             free(name);
             n = 0;
-            for (c = 0; c < poolinfo[p].cpumap.size * 8; c++)
-                if (poolinfo[p].cpumap.map[c / 64] & (1L << (c % 64))) {
+            libxl_for_each_cpu(c, poolinfo[p].cpumap)
+                if (libxl_cpumap_test(&poolinfo[p].cpumap, c)) {
                     if (n && opt_cpus) printf(",");
                     if (opt_cpus) printf("%d", c);
                     n++;
--- a/tools/python/xen/lowlevel/xc/xc.c	Wed Nov 03 11:58:25 2010 +0000
+++ b/tools/python/xen/lowlevel/xc/xc.c	Wed Nov 03 12:10:46 2010 +0000
@@ -226,10 +226,8 @@ static PyObject *pyxc_vcpu_setaffinity(X
 {
     uint32_t dom;
     int vcpu = 0, i;
-    uint64_t  *cpumap;
+    xc_cpumap_t cpumap;
     PyObject *cpulist = NULL;
-    int nr_cpus, size;
-    uint64_t cpumap_size = sizeof(*cpumap);
 
     static char *kwd_list[] = { "domid", "vcpu", "cpumap", NULL };
 
@@ -237,29 +235,20 @@ static PyObject *pyxc_vcpu_setaffinity(X
                                       &dom, &vcpu, &cpulist) )
         return NULL;
 
-    nr_cpus = xc_get_max_cpus(self->xc_handle);
-    if ( nr_cpus == 0 )
-        return pyxc_error_to_exception(self->xc_handle);
-
-    size = (nr_cpus + cpumap_size * 8 - 1)/ (cpumap_size * 8);
-    cpumap = malloc(cpumap_size * size);
+    cpumap = xc_cpumap_alloc(self->xc_handle);
    if(cpumap == NULL)
        return pyxc_error_to_exception(self->xc_handle);
 
    if ( (cpulist != NULL) && PyList_Check(cpulist) )
    {
-        for ( i = 0; i < size; i++)
-        {
-            cpumap[i] = 0ULL;
-        }
        for ( i = 0; i < PyList_Size(cpulist); i++ )
        {
            long cpu = PyInt_AsLong(PyList_GetItem(cpulist, i));
-            cpumap[cpu / (cpumap_size * 8)] |= (uint64_t)1 << (cpu % (cpumap_size * 8));
+            cpumap[cpu / 8] |= 1 << (cpu % 8);
        }
    }
 
-    if ( xc_vcpu_setaffinity(self->xc_handle, dom, vcpu, cpumap, size * cpumap_size) != 0 )
+    if ( xc_vcpu_setaffinity(self->xc_handle, dom, vcpu, cpumap) != 0 )
    {
        free(cpumap);
        return pyxc_error_to_exception(self->xc_handle);
@@ -385,9 +374,8 @@ static PyObject *pyxc_vcpu_getinfo(XcObj
     uint32_t dom, vcpu = 0;
     xc_vcpuinfo_t info;
     int rc, i;
-    uint64_t *cpumap;
-    int nr_cpus, size;
-    uint64_t cpumap_size = sizeof(*cpumap);
+    xc_cpumap_t cpumap;
+    int nr_cpus;
 
     static char *kwd_list[] = { "domid", "vcpu", NULL };
 
@@ -403,12 +391,11 @@ static PyObject *pyxc_vcpu_getinfo(XcObj
     if ( rc < 0 )
         return pyxc_error_to_exception(self->xc_handle);
 
-    size = (nr_cpus + cpumap_size * 8 - 1)/ (cpumap_size * 8);
-    if((cpumap = malloc(cpumap_size * size)) == NULL)
-        return pyxc_error_to_exception(self->xc_handle);
-    memset(cpumap, 0, cpumap_size * size);
-
-    rc = xc_vcpu_getaffinity(self->xc_handle, dom, vcpu, cpumap, cpumap_size * size);
+    cpumap = xc_cpumap_alloc(self->xc_handle);
+    if(cpumap == NULL)
+        return pyxc_error_to_exception(self->xc_handle);
+
+    rc = xc_vcpu_getaffinity(self->xc_handle, dom, vcpu, cpumap);
     if ( rc < 0 )
     {
         free(cpumap);
@@ -424,12 +411,12 @@ static PyObject *pyxc_vcpu_getinfo(XcObj
     cpulist = PyList_New(0);
     for ( i = 0; i < nr_cpus; i++ )
     {
-        if (*(cpumap + i / (cpumap_size * 8)) & 1 ) {
+        if (*(cpumap + i / 8) & 1 ) {
             PyObject *pyint = PyInt_FromLong(i);
             PyList_Append(cpulist, pyint);
             Py_DECREF(pyint);
         }
-        cpumap[i / (cpumap_size * 8)] >>= 1;
+        cpumap[i / 8] >>= 1;
     }
     PyDict_SetItemString(info_dict, "cpumap", cpulist);
     Py_DECREF(cpulist);
@@ -1931,22 +1918,27 @@ static PyObject *pyxc_dom_set_memshr(XcO
     return zero;
 }
 
-static PyObject *cpumap_to_cpulist(uint64_t *cpumap, int cpusize)
+static PyObject *cpumap_to_cpulist(XcObject *self, xc_cpumap_t cpumap)
 {
     PyObject *cpulist = NULL;
     int i;
+    int nr_cpus;
+
+    nr_cpus = xc_get_max_cpus(self->xc_handle);
+    if ( nr_cpus == 0 )
+        return pyxc_error_to_exception(self->xc_handle);
 
     cpulist = PyList_New(0);
-    for ( i = 0; i < cpusize; i++ )
+    for ( i = 0; i < nr_cpus; i++ )
     {
-        if ( *cpumap & (1L << (i % 64)) )
+        if ( *cpumap & (1 << (i % 8)) )
         {
             PyObject* pyint = PyInt_FromLong(i);
 
             PyList_Append(cpulist, pyint);
             Py_DECREF(pyint);
         }
-        if ( (i % 64) == 63 )
+        if ( (i % 8) == 7 )
             cpumap++;
     }
     return cpulist;
@@ -2003,10 +1995,9 @@ static PyObject *pyxc_cpupool_getinfo(Xc
                                   "cpupool",         (int)info->cpupool_id,
                                   "sched",           info->sched_id,
                                   "n_dom",           info->n_dom,
-                                  "cpulist",         cpumap_to_cpulist(info->cpumap,
-                                                                      info->cpumap_size));
+                                  "cpulist",         cpumap_to_cpulist(self, info->cpumap));
         pool = info->cpupool_id + 1;
-        free(info);
+        xc_cpupool_infofree(self->xc_handle, info);
 
         if ( info_dict == NULL )
         {
@@ -2082,15 +2073,14 @@ static PyObject *pyxc_cpupool_movedomain
 
 static PyObject *pyxc_cpupool_freeinfo(XcObject *self)
 {
-    uint64_t *cpumap;
-    int mapsize;
+    xc_cpumap_t cpumap;
     PyObject *info = NULL;
 
-    cpumap = xc_cpupool_freeinfo(self->xc_handle, &mapsize);
+    cpumap = xc_cpupool_freeinfo(self->xc_handle);
     if (!cpumap)
         return pyxc_error_to_exception(self->xc_handle);
 
-    info = cpumap_to_cpulist(cpumap, mapsize * 8);
+    info = cpumap_to_cpulist(self, cpumap);
 
     free(cpumap);
 