
changeset 22317:ee0e2acc0d99

tools: cpupools: Support arbitrary numbers of physical cpus

To be able to support arbitrary numbers of physical cpus it was necessary to
include the size of cpumaps in the xc interfaces for cpu pools.
The affected interfaces are:
  the definition of xc_cpupoolinfo_t
  xc_cpupool_getinfo()
  xc_cpupool_freeinfo()
xc_cpupool_getinfo() and xc_cpupool_freeinfo() now allocate the needed buffer
themselves and return it; the caller is responsible for freeing it.

Signed-off-by: Juergen Gross <juergen.gross@ts.fujitsu.com>
Signed-off-by: Ian Jackson <ian.jackson@eu.citrix.com>
author Juergen Gross <juergen.gross@ts.fujitsu.com>
date Thu Oct 21 18:32:56 2010 +0100 (2010-10-21)
parents bfb3c97ef507
children 87e8339826f7
files tools/libxc/xc_cpupool.c tools/libxc/xc_misc.c tools/libxc/xenctrl.h tools/libxl/libxl.c tools/libxl/libxl.h tools/libxl/libxl_utils.c tools/libxl/xl_cmdimpl.c tools/python/xen/lowlevel/xc/xc.c
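
The practical effect of the change is that both cpupool query functions now hand
back buffers allocated by libxc. A minimal caller sketch, not part of the
changeset (the handle xch and the helper name are placeholders):

    /* Hypothetical usage of the reworked interfaces.  xc_cpupool_getinfo()
     * returns a malloc()ed xc_cpupoolinfo_t with its cpumap appended;
     * xc_cpupool_freeinfo() returns a malloc()ed cpumap and stores its size
     * in bytes in *cpusize.  The caller frees both. */
    #include <stdio.h>
    #include <stdlib.h>
    #include <xenctrl.h>

    static void dump_pool(xc_interface *xch, uint32_t poolid)
    {
        xc_cpupoolinfo_t *info = xc_cpupool_getinfo(xch, poolid);
        uint64_t *freemap;
        int cpusize;

        if ( info )
        {
            printf("pool %u: sched %u, %u domains, map covers %u cpus\n",
                   info->cpupool_id, info->sched_id, info->n_dom,
                   info->cpumap_size);
            free(info);
        }

        freemap = xc_cpupool_freeinfo(xch, &cpusize);
        if ( freemap )
        {
            printf("free cpumap: %d bytes\n", cpusize);
            free(freemap);
        }
    }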
line diff
     1.1 --- a/tools/libxc/xc_cpupool.c	Thu Oct 21 18:14:50 2010 +0100
     1.2 +++ b/tools/libxc/xc_cpupool.c	Thu Oct 21 18:32:56 2010 +0100
     1.3 @@ -34,6 +34,11 @@ static int do_sysctl_save(xc_interface *
     1.4      return ret;
     1.5  }
     1.6  
     1.7 +static int get_cpumap_size(xc_interface *xch)
     1.8 +{
     1.9 +    return (xc_get_max_cpus(xch) + 7) / 8;
    1.10 +}
    1.11 +
    1.12  int xc_cpupool_create(xc_interface *xch,
    1.13                        uint32_t *ppoolid,
    1.14                        uint32_t sched_id)
    1.15 @@ -64,50 +69,61 @@ int xc_cpupool_destroy(xc_interface *xch
    1.16      return do_sysctl_save(xch, &sysctl);
    1.17  }
    1.18  
    1.19 -int xc_cpupool_getinfo(xc_interface *xch, 
    1.20 -                       uint32_t first_poolid,
    1.21 -                       uint32_t n_max, 
    1.22 -                       xc_cpupoolinfo_t *info)
    1.23 +xc_cpupoolinfo_t *xc_cpupool_getinfo(xc_interface *xch, 
    1.24 +                       uint32_t poolid)
    1.25  {
    1.26      int err = 0;
    1.27 -    int p;
    1.28 -    uint32_t poolid = first_poolid;
    1.29 -    uint8_t local[sizeof (info->cpumap)];
    1.30 +    xc_cpupoolinfo_t *info;
    1.31 +    uint8_t *local;
    1.32 +    int local_size;
    1.33 +    int cpumap_size;
    1.34 +    int size;
    1.35      DECLARE_SYSCTL;
    1.36  
    1.37 -    memset(info, 0, n_max * sizeof(xc_cpupoolinfo_t));
    1.38 -
    1.39 -    for (p = 0; p < n_max; p++)
    1.40 +    local_size = get_cpumap_size(xch);
    1.41 +    if (!local_size)
    1.42      {
    1.43 -        sysctl.cmd = XEN_SYSCTL_cpupool_op;
    1.44 -        sysctl.u.cpupool_op.op = XEN_SYSCTL_CPUPOOL_OP_INFO;
    1.45 -        sysctl.u.cpupool_op.cpupool_id = poolid;
    1.46 -        set_xen_guest_handle(sysctl.u.cpupool_op.cpumap.bitmap, local);
    1.47 -        sysctl.u.cpupool_op.cpumap.nr_cpus = sizeof(info->cpumap) * 8;
    1.48 +        PERROR("Could not get number of cpus");
    1.49 +        return NULL;
    1.50 +    }
    1.51 +    local = alloca(local_size);
    1.52 +    cpumap_size = (local_size + sizeof(*info->cpumap) - 1) / sizeof(*info->cpumap);
    1.53 +    size = sizeof(xc_cpupoolinfo_t) + cpumap_size * sizeof(*info->cpumap);
    1.54 +    info = malloc(size);
    1.55 +    if ( !info )
    1.56 +        return NULL;
    1.57 +
    1.58 +    memset(info, 0, size);
    1.59 +    info->cpumap_size = local_size * 8;
    1.60 +    info->cpumap = (uint64_t *)(info + 1);
    1.61  
    1.62 -        if ( (err = lock_pages(xch, local, sizeof(local))) != 0 )
    1.63 -        {
    1.64 -            PERROR("Could not lock memory for Xen hypercall");
    1.65 -            break;
    1.66 -        }
    1.67 -        err = do_sysctl_save(xch, &sysctl);
    1.68 -        unlock_pages(xch, local, sizeof (local));
    1.69 +    sysctl.cmd = XEN_SYSCTL_cpupool_op;
    1.70 +    sysctl.u.cpupool_op.op = XEN_SYSCTL_CPUPOOL_OP_INFO;
    1.71 +    sysctl.u.cpupool_op.cpupool_id = poolid;
    1.72 +    set_xen_guest_handle(sysctl.u.cpupool_op.cpumap.bitmap, local);
    1.73 +    sysctl.u.cpupool_op.cpumap.nr_cpus = local_size * 8;
    1.74  
    1.75 -        if ( err < 0 )
    1.76 -            break;
    1.77 +    if ( (err = lock_pages(xch, local, local_size)) != 0 )
    1.78 +    {
    1.79 +        PERROR("Could not lock memory for Xen hypercall");
    1.80 +        free(info);
    1.81 +        return NULL;
    1.82 +    }
    1.83 +    err = do_sysctl_save(xch, &sysctl);
    1.84 +    unlock_pages(xch, local, local_size);
    1.85  
    1.86 -        info->cpupool_id = sysctl.u.cpupool_op.cpupool_id;
    1.87 -        info->sched_id = sysctl.u.cpupool_op.sched_id;
    1.88 -        info->n_dom = sysctl.u.cpupool_op.n_dom;
    1.89 -        bitmap_byte_to_64(&(info->cpumap), local, sizeof(local) * 8);
    1.90 -        poolid = sysctl.u.cpupool_op.cpupool_id + 1;
    1.91 -        info++;
    1.92 +    if ( err < 0 )
    1.93 +    {
    1.94 +        free(info);
    1.95 +        return NULL;
    1.96      }
    1.97  
    1.98 -    if ( p == 0 )
    1.99 -        return err;
   1.100 +    info->cpupool_id = sysctl.u.cpupool_op.cpupool_id;
   1.101 +    info->sched_id = sysctl.u.cpupool_op.sched_id;
   1.102 +    info->n_dom = sysctl.u.cpupool_op.n_dom;
   1.103 +    bitmap_byte_to_64(info->cpumap, local, local_size * 8);
   1.104  
   1.105 -    return p;
   1.106 +    return info;
   1.107  }
   1.108  
   1.109  int xc_cpupool_addcpu(xc_interface *xch,
   1.110 @@ -149,31 +165,41 @@ int xc_cpupool_movedomain(xc_interface *
   1.111      return do_sysctl_save(xch, &sysctl);
   1.112  }
   1.113  
   1.114 -int xc_cpupool_freeinfo(xc_interface *xch,
   1.115 -                        uint64_t *cpumap)
   1.116 +uint64_t * xc_cpupool_freeinfo(xc_interface *xch,
   1.117 +                        int *cpusize)
   1.118  {
   1.119      int err;
   1.120 -    uint8_t local[sizeof (*cpumap)];
   1.121 +    uint8_t *local;
   1.122 +    uint64_t *cpumap;
   1.123      DECLARE_SYSCTL;
   1.124  
   1.125 +    *cpusize = get_cpumap_size(xch);
   1.126 +    if (*cpusize == 0)
   1.127 +        return NULL;
   1.128 +    local = alloca(*cpusize);
   1.129 +    cpumap = calloc((*cpusize + sizeof(*cpumap) - 1) / sizeof(*cpumap), sizeof(*cpumap));
   1.130 +    if (cpumap == NULL)
   1.131 +        return NULL;
   1.132 +
   1.133      sysctl.cmd = XEN_SYSCTL_cpupool_op;
   1.134      sysctl.u.cpupool_op.op = XEN_SYSCTL_CPUPOOL_OP_FREEINFO;
   1.135      set_xen_guest_handle(sysctl.u.cpupool_op.cpumap.bitmap, local);
   1.136 -    sysctl.u.cpupool_op.cpumap.nr_cpus = sizeof(*cpumap) * 8;
   1.137 +    sysctl.u.cpupool_op.cpumap.nr_cpus = *cpusize * 8;
   1.138  
   1.139 -    if ( (err = lock_pages(xch, local, sizeof(local))) != 0 )
   1.140 +    if ( (err = lock_pages(xch, local, *cpusize)) != 0 )
   1.141      {
   1.142          PERROR("Could not lock memory for Xen hypercall");
   1.143 -        return err;
   1.144 +        free(cpumap);
   1.145 +        return NULL;
   1.146      }
   1.147  
   1.148      err = do_sysctl_save(xch, &sysctl);
   1.149 -    unlock_pages(xch, local, sizeof (local));
   1.150 -
   1.151 -    if (err < 0)
   1.152 -        return err;
   1.153 +    unlock_pages(xch, local, *cpusize);
   1.154 +    bitmap_byte_to_64(cpumap, local, *cpusize * 8);
   1.155  
   1.156 -    bitmap_byte_to_64(cpumap, local, sizeof(local) * 8);
   1.157 +    if (err >= 0)
   1.158 +        return cpumap;
   1.159  
   1.160 -    return 0;
   1.161 +    free(cpumap);
   1.162 +    return NULL;
   1.163  }
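
Because xc_cpupool_getinfo() now returns information for a single pool,
enumeration is done by advancing the pool id past the pool just returned,
exactly as the libxl and python callers below do. A sketch of that loop,
assuming an open handle xch:

    /* Sketch only: walk all cpupools.  The INFO sysctl returns the first
     * existing pool with an id not below the one requested, so restart just
     * above the pool we last saw and stop when NULL is returned. */
    uint32_t poolid = 0;
    xc_cpupoolinfo_t *info;

    while ( (info = xc_cpupool_getinfo(xch, poolid)) != NULL )
    {
        printf("cpupool %u: scheduler %u, %u domains\n",
               info->cpupool_id, info->sched_id, info->n_dom);
        poolid = info->cpupool_id + 1;
        free(info);
    }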
     2.1 --- a/tools/libxc/xc_misc.c	Thu Oct 21 18:14:50 2010 +0100
     2.2 +++ b/tools/libxc/xc_misc.c	Thu Oct 21 18:32:56 2010 +0100
     2.3 @@ -21,6 +21,20 @@
     2.4  #include "xc_private.h"
     2.5  #include <xen/hvm/hvm_op.h>
     2.6  
     2.7 +int xc_get_max_cpus(xc_interface *xch)
     2.8 +{
     2.9 +    static int max_cpus = 0;
    2.10 +    xc_physinfo_t physinfo;
    2.11 +
    2.12 +    if ( max_cpus )
    2.13 +        return max_cpus;
    2.14 +
    2.15 +    if ( !xc_physinfo(xch, &physinfo) )
    2.16 +        max_cpus = physinfo.max_cpu_id + 1;
    2.17 +
    2.18 +    return max_cpus;
    2.19 +}
    2.20 +
    2.21  int xc_readconsolering(xc_interface *xch,
    2.22                         char *buffer,
    2.23                         unsigned int *pnr_chars,
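
xc_get_max_cpus() caches the hypervisor's cpu limit (max_cpu_id + 1) on first
use, and cpumap buffer sizes are derived from it, as get_cpumap_size() in
xc_cpupool.c does. A sketch of the same computation at a call site (xch is an
assumed open handle):

    /* Illustrative only: one bit per physical cpu, rounded up to bytes. */
    int max_cpus = xc_get_max_cpus(xch);      /* 0 signals failure */
    int map_bytes = (max_cpus + 7) / 8;
    uint8_t *map = max_cpus ? calloc(map_bytes, 1) : NULL;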
     3.1 --- a/tools/libxc/xenctrl.h	Thu Oct 21 18:14:50 2010 +0100
     3.2 +++ b/tools/libxc/xenctrl.h	Thu Oct 21 18:32:56 2010 +0100
     3.3 @@ -216,6 +216,9 @@ typedef union
     3.4  } start_info_any_t;
     3.5  
     3.6  
     3.7 +/* return maximum number of cpus the hypervisor supports */
     3.8 +int xc_get_max_cpus(xc_interface *xch);
     3.9 +
    3.10  int xc_domain_create(xc_interface *xch,
    3.11                       uint32_t ssidref,
    3.12                       xen_domain_handle_t handle,
    3.13 @@ -535,7 +538,8 @@ typedef struct xc_cpupoolinfo {
    3.14      uint32_t cpupool_id;
    3.15      uint32_t sched_id;
    3.16      uint32_t n_dom;
    3.17 -    uint64_t cpumap;
    3.18 +    uint32_t cpumap_size;    /* max number of cpus in map */
    3.19 +    uint64_t *cpumap;
    3.20  } xc_cpupoolinfo_t;
    3.21  
    3.22  /**
    3.23 @@ -564,15 +568,11 @@ int xc_cpupool_destroy(xc_interface *xch
    3.24   * Get cpupool info. Returns info for up to the specified number of cpupools
    3.25   * starting at the given id.
    3.26   * @parm xc_handle a handle to an open hypervisor interface
    3.27 - * @parm first_poolid lowest id for which info is returned
    3.28 - * @parm n_max maximum number of cpupools to return info
    3.29 - * @parm info pointer to xc_cpupoolinfo_t array
    3.30 - * return number of cpupool infos
    3.31 + * @parm poolid lowest id for which info is returned
    3.32 + * return cpupool info ptr (obtained by malloc)
    3.33   */
    3.34 -int xc_cpupool_getinfo(xc_interface *xch,
    3.35 -                       uint32_t first_poolid,
    3.36 -                       uint32_t n_max,
    3.37 -                       xc_cpupoolinfo_t *info);
    3.38 +xc_cpupoolinfo_t *xc_cpupool_getinfo(xc_interface *xch,
    3.39 +                       uint32_t poolid);
    3.40  
    3.41  /**
    3.42   * Add cpu to a cpupool. cpu may be -1 indicating the first unassigned.
    3.43 @@ -614,11 +614,11 @@ int xc_cpupool_movedomain(xc_interface *
    3.44   * Return map of cpus not in any cpupool.
    3.45   *
    3.46   * @parm xc_handle a handle to an open hypervisor interface
    3.47 - * @parm cpumap pointer where to store the cpumap
    3.48 - * return 0 on success, -1 on failure
    3.49 + * @parm cpusize where to store array size in bytes
    3.50 + * return cpumap array on success, NULL else
    3.51   */
    3.52 -int xc_cpupool_freeinfo(xc_interface *xch,
    3.53 -                        uint64_t *cpumap);
    3.54 +uint64_t *xc_cpupool_freeinfo(xc_interface *xch,
    3.55 +                        int *cpusize);
    3.56  
    3.57  
    3.58  /*
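
With the cpumap now carried as a pointer plus cpumap_size (the number of cpus
the map describes), callers index into the uint64_t array instead of shifting a
single word. A small sketch (the helper name is hypothetical):

    /* Sketch: is physical cpu 'cpu' a member of the pool described by info? */
    static int cpupool_has_cpu(const xc_cpupoolinfo_t *info, unsigned int cpu)
    {
        if ( cpu >= info->cpumap_size )
            return 0;
        return (info->cpumap[cpu / 64] >> (cpu % 64)) & 1;
    }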
     4.1 --- a/tools/libxl/libxl.c	Thu Oct 21 18:14:50 2010 +0100
     4.2 +++ b/tools/libxl/libxl.c	Thu Oct 21 18:32:56 2010 +0100
     4.3 @@ -609,27 +609,31 @@ int libxl_domain_info(libxl_ctx *ctx, li
     4.4  
     4.5  libxl_poolinfo * libxl_list_pool(libxl_ctx *ctx, int *nb_pool)
     4.6  {
     4.7 -    libxl_poolinfo *ptr;
     4.8 -    int i, ret;
     4.9 -    xc_cpupoolinfo_t info[256];
    4.10 -    int size = 256;
    4.11 -
    4.12 -    ptr = calloc(size, sizeof(libxl_poolinfo));
    4.13 -    if (!ptr) {
    4.14 -        LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "allocating cpupool info");
    4.15 -        return NULL;
    4.16 +    libxl_poolinfo *ptr, *tmp;
    4.17 +    int i;
    4.18 +    xc_cpupoolinfo_t *info;
    4.19 +    uint32_t poolid;
    4.20 +
    4.21 +    ptr = NULL;
    4.22 +
    4.23 +    poolid = 0;
    4.24 +    for (i = 0;; i++) {
    4.25 +        info = xc_cpupool_getinfo(ctx->xch, poolid);
    4.26 +        if (info == NULL)
    4.27 +            break;
    4.28 +        tmp = realloc(ptr, (i + 1) * sizeof(libxl_poolinfo));
    4.29 +        if (!tmp) {
    4.30 +            LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "allocating cpupool info");
    4.31 +            free(ptr);
    4.32 +            return NULL;
    4.33 +        }
    4.34 +        ptr = tmp;
    4.35 +        ptr[i].poolid = info->cpupool_id;
    4.36 +        poolid = info->cpupool_id + 1;
    4.37 +        free(info);
    4.38      }
    4.39  
    4.40 -    ret = xc_cpupool_getinfo(ctx->xch, 0, 256, info);
    4.41 -    if (ret<0) {
    4.42 -        LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "getting cpupool info");
    4.43 -        return NULL;
    4.44 -    }
    4.45 -
    4.46 -    for (i = 0; i < ret; i++) {
    4.47 -        ptr[i].poolid = info[i].cpupool_id;
    4.48 -    }
    4.49 -    *nb_pool = ret;
    4.50 +    *nb_pool = i;
    4.51      return ptr;
    4.52  }
    4.53  
    4.54 @@ -3207,24 +3211,19 @@ libxl_vcpuinfo *libxl_list_vcpu(libxl_ct
    4.55      libxl_vcpuinfo *ptr, *ret;
    4.56      xc_domaininfo_t domaininfo;
    4.57      xc_vcpuinfo_t vcpuinfo;
    4.58 -    xc_physinfo_t physinfo = { 0 };
    4.59      unsigned num_cpuwords;
    4.60  
    4.61      if (xc_domain_getinfolist(ctx->xch, domid, 1, &domaininfo) != 1) {
    4.62          LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "getting infolist");
    4.63          return NULL;
    4.64      }
    4.65 -    if (xc_physinfo(ctx->xch, &physinfo) == -1) {
    4.66 -        LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "getting physinfo");
    4.67 -        return NULL;
    4.68 -    }
    4.69 -    *nrcpus = physinfo.max_cpu_id + 1;
    4.70 +    *nrcpus = xc_get_max_cpus(ctx->xch);
    4.71      ret = ptr = calloc(domaininfo.max_vcpu_id + 1, sizeof (libxl_vcpuinfo));
    4.72      if (!ptr) {
    4.73          return NULL;
    4.74      }
    4.75  
    4.76 -    num_cpuwords = ((physinfo.max_cpu_id + 64) / 64);
    4.77 +    num_cpuwords = ((*nrcpus + 63) / 64);
    4.78      for (*nb_vcpu = 0; *nb_vcpu <= domaininfo.max_vcpu_id; ++*nb_vcpu, ++ptr) {
    4.79          ptr->cpumap = malloc(num_cpuwords * sizeof(*ptr->cpumap));
    4.80          if (!ptr->cpumap) {
     5.1 --- a/tools/libxl/libxl.h	Thu Oct 21 18:14:50 2010 +0100
     5.2 +++ b/tools/libxl/libxl.h	Thu Oct 21 18:32:56 2010 +0100
     5.3 @@ -249,6 +249,9 @@ int libxl_domain_shutdown(libxl_ctx *ctx
     5.4  int libxl_domain_destroy(libxl_ctx *ctx, uint32_t domid, int force);
     5.5  int libxl_domain_preserve(libxl_ctx *ctx, uint32_t domid, libxl_domain_create_info *info, const char *name_suffix, libxl_uuid new_uuid);
     5.6  
     5.7 +/* get max. number of cpus supported by hypervisor */
     5.8 +int libxl_get_max_cpus(libxl_ctx *ctx);
     5.9 +
    5.10  /*
    5.11   * Run the configured bootloader for a PV domain and update
    5.12   * info->kernel, info->u.pv.ramdisk and info->u.pv.cmdline as
     6.1 --- a/tools/libxl/libxl_utils.c	Thu Oct 21 18:14:50 2010 +0100
     6.2 +++ b/tools/libxl/libxl_utils.c	Thu Oct 21 18:32:56 2010 +0100
     6.3 @@ -675,3 +675,8 @@ out:
     6.4      libxl__free_all(&gc);
     6.5      return rc;
     6.6  }
     6.7 +
     6.8 +int libxl_get_max_cpus(libxl_ctx *ctx)
     6.9 +{
    6.10 +    return xc_get_max_cpus(ctx->xch);
    6.11 +}
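
libxl users can size their own cpumaps from the new wrapper in the same way
libxl_list_vcpu and the vcpupin change below do. A sketch, assuming an
initialised libxl_ctx named ctx:

    /* Sketch: one bit per cpu, stored in 64-bit words. */
    int ncpus = libxl_get_max_cpus(&ctx);            /* 0 on failure */
    unsigned int num_cpuwords = (ncpus + 63) / 64;
    uint64_t *cpumap = ncpus ? calloc(num_cpuwords, sizeof(*cpumap)) : NULL;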
     7.1 --- a/tools/libxl/xl_cmdimpl.c	Thu Oct 21 18:14:50 2010 +0100
     7.2 +++ b/tools/libxl/xl_cmdimpl.c	Thu Oct 21 18:32:56 2010 +0100
     7.3 @@ -3639,12 +3639,11 @@ int main_vcpulist(int argc, char **argv)
     7.4  static void vcpupin(char *d, const char *vcpu, char *cpu)
     7.5  {
     7.6      libxl_vcpuinfo *vcpuinfo;
     7.7 -    libxl_physinfo physinfo;
     7.8      uint64_t *cpumap = NULL;
     7.9  
    7.10      uint32_t vcpuid, cpuida, cpuidb;
    7.11      char *endptr, *toka, *tokb;
    7.12 -    int i, nb_vcpu, cpusize;
    7.13 +    int i, nb_vcpu, cpusize, cpumapsize;
    7.14  
    7.15      vcpuid = strtoul(vcpu, &endptr, 10);
    7.16      if (vcpu == endptr) {
    7.17 @@ -3657,12 +3656,13 @@ static void vcpupin(char *d, const char 
    7.18  
    7.19      find_domain(d);
    7.20  
    7.21 -    if (libxl_get_physinfo(&ctx, &physinfo) != 0) {
    7.22 -        fprintf(stderr, "libxl_get_physinfo failed.\n");
    7.23 +    if ((cpusize = libxl_get_max_cpus(&ctx)) == 0) {
    7.24 +        fprintf(stderr, "libxl_get_max_cpus failed.\n");
    7.25          goto vcpupin_out1;
    7.26      }
    7.27 -
    7.28 -    cpumap = calloc(physinfo.max_cpu_id + 1, sizeof (uint64_t));
    7.29 +    cpumapsize = (cpusize + sizeof (uint64_t) - 1) / sizeof (uint64_t);
    7.30 +
    7.31 +    cpumap = calloc(cpumapsize, sizeof (uint64_t));
    7.32      if (!cpumap) {
    7.33          goto vcpupin_out1;
    7.34      }
    7.35 @@ -3690,24 +3690,24 @@ static void vcpupin(char *d, const char 
    7.36          }
    7.37      }
    7.38      else {
    7.39 -        memset(cpumap, -1, sizeof (uint64_t) * (physinfo.max_cpu_id + 1));
    7.40 +        memset(cpumap, -1, sizeof (uint64_t) * cpumapsize);
    7.41      }
    7.42  
    7.43      if (vcpuid != -1) {
    7.44          if (libxl_set_vcpuaffinity(&ctx, domid, vcpuid,
    7.45 -                                   cpumap, physinfo.max_cpu_id + 1) == -1) {
    7.46 +                                   cpumap, cpusize) == -1) {
    7.47              fprintf(stderr, "Could not set affinity for vcpu `%u'.\n", vcpuid);
    7.48          }
    7.49      }
    7.50      else {
    7.51 -        if (!(vcpuinfo = libxl_list_vcpu(&ctx, domid, &nb_vcpu, &cpusize))) {
    7.52 +        if (!(vcpuinfo = libxl_list_vcpu(&ctx, domid, &nb_vcpu, &i))) {
    7.53              fprintf(stderr, "libxl_list_vcpu failed.\n");
    7.54              goto vcpupin_out;
    7.55          }
    7.56          for (; nb_vcpu > 0; --nb_vcpu, ++vcpuinfo) {
    7.57              if (libxl_set_vcpuaffinity(&ctx, domid, vcpuinfo->vcpuid,
    7.58 -                                       cpumap, physinfo.max_cpu_id + 1) == -1) {
    7.59 -                fprintf(stderr, "libxl_list_vcpu failed on vcpu `%u'.\n", vcpuinfo->vcpuid);
    7.60 +                                       cpumap, cpusize) == -1) {
    7.61 +                fprintf(stderr, "libxl_set_vcpuaffinity failed on vcpu `%u'.\n", vcpuinfo->vcpuid);
    7.62              }
    7.63          }
    7.64      }
     8.1 --- a/tools/python/xen/lowlevel/xc/xc.c	Thu Oct 21 18:14:50 2010 +0100
     8.2 +++ b/tools/python/xen/lowlevel/xc/xc.c	Thu Oct 21 18:32:56 2010 +0100
     8.3 @@ -229,7 +229,6 @@ static PyObject *pyxc_vcpu_setaffinity(X
     8.4      uint64_t  *cpumap;
     8.5      PyObject *cpulist = NULL;
     8.6      int nr_cpus, size;
     8.7 -    xc_physinfo_t info = {0}; 
     8.8      uint64_t cpumap_size = sizeof(*cpumap); 
     8.9  
    8.10      static char *kwd_list[] = { "domid", "vcpu", "cpumap", NULL };
    8.11 @@ -238,10 +237,9 @@ static PyObject *pyxc_vcpu_setaffinity(X
    8.12                                        &dom, &vcpu, &cpulist) )
    8.13          return NULL;
    8.14  
    8.15 -    if ( xc_physinfo(self->xc_handle, &info) != 0 )
    8.16 +    nr_cpus = xc_get_max_cpus(self->xc_handle);
    8.17 +    if ( nr_cpus == 0 )
    8.18          return pyxc_error_to_exception(self->xc_handle);
    8.19 -  
    8.20 -    nr_cpus = info.nr_cpus;
    8.21  
    8.22      size = (nr_cpus + cpumap_size * 8 - 1)/ (cpumap_size * 8);
    8.23      cpumap = malloc(cpumap_size * size);
    8.24 @@ -389,7 +387,6 @@ static PyObject *pyxc_vcpu_getinfo(XcObj
    8.25      int rc, i;
    8.26      uint64_t *cpumap;
    8.27      int nr_cpus, size;
    8.28 -    xc_physinfo_t pinfo = { 0 };
    8.29      uint64_t cpumap_size = sizeof(*cpumap);
    8.30  
    8.31      static char *kwd_list[] = { "domid", "vcpu", NULL };
    8.32 @@ -398,9 +395,9 @@ static PyObject *pyxc_vcpu_getinfo(XcObj
    8.33                                        &dom, &vcpu) )
    8.34          return NULL;
    8.35  
    8.36 -    if ( xc_physinfo(self->xc_handle, &pinfo) != 0 ) 
    8.37 +    nr_cpus = xc_get_max_cpus(self->xc_handle);
    8.38 +    if ( nr_cpus == 0 )
    8.39          return pyxc_error_to_exception(self->xc_handle);
    8.40 -    nr_cpus = pinfo.nr_cpus;
    8.41  
    8.42      rc = xc_vcpu_getinfo(self->xc_handle, dom, vcpu, &info);
    8.43      if ( rc < 0 )
    8.44 @@ -1906,22 +1903,23 @@ static PyObject *pyxc_dom_set_memshr(XcO
    8.45      return zero;
    8.46  }
    8.47  
    8.48 -static PyObject *cpumap_to_cpulist(uint64_t cpumap)
    8.49 +static PyObject *cpumap_to_cpulist(uint64_t *cpumap, int cpusize)
    8.50  {
    8.51      PyObject *cpulist = NULL;
    8.52 -    uint32_t i;
    8.53 +    int i;
    8.54  
    8.55      cpulist = PyList_New(0);
    8.56 -    for ( i = 0; cpumap != 0; i++ )
    8.57 +    for ( i = 0; i < cpusize; i++ )
    8.58      {
    8.59 -        if ( cpumap & 1 )
    8.60 +        if ( *cpumap & (1L << (i % 64)) )
    8.61          {
    8.62              PyObject* pyint = PyInt_FromLong(i);
    8.63  
    8.64              PyList_Append(cpulist, pyint);
    8.65              Py_DECREF(pyint);
    8.66          }
    8.67 -        cpumap >>= 1;
    8.68 +        if ( (i % 64) == 63 )
    8.69 +            cpumap++;
    8.70      }
    8.71      return cpulist;
    8.72  }
    8.73 @@ -1959,55 +1957,39 @@ static PyObject *pyxc_cpupool_destroy(Xc
    8.74      return zero;
    8.75  }
    8.76  
    8.77 -static PyObject *pyxc_cpupool_getinfo(XcObject *self,
    8.78 -                                      PyObject *args,
    8.79 -                                      PyObject *kwds)
    8.80 +static PyObject *pyxc_cpupool_getinfo(XcObject *self)
    8.81  {
    8.82      PyObject *list, *info_dict;
    8.83  
    8.84 -    uint32_t first_pool = 0;
    8.85 -    int max_pools = 1024, nr_pools, i;
    8.86 +    uint32_t pool;
    8.87      xc_cpupoolinfo_t *info;
    8.88  
    8.89 -    static char *kwd_list[] = { "first_pool", "max_pools", NULL };
    8.90 -
    8.91 -    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "|ii", kwd_list,
    8.92 -                                      &first_pool, &max_pools) )
    8.93 -        return NULL;
    8.94 -
    8.95 -    info = calloc(max_pools, sizeof(xc_cpupoolinfo_t));
    8.96 -    if (info == NULL)
    8.97 -        return PyErr_NoMemory();
    8.98 -
    8.99 -    nr_pools = xc_cpupool_getinfo(self->xc_handle, first_pool, max_pools, info);
   8.100 -
   8.101 -    if (nr_pools < 0)
   8.102 +    list = PyList_New(0);
   8.103 +    for (pool = 0;;)
   8.104      {
   8.105 -        free(info);
   8.106 -        return pyxc_error_to_exception(self->xc_handle);
   8.107 -    }
   8.108 -
   8.109 -    list = PyList_New(nr_pools);
   8.110 -    for ( i = 0 ; i < nr_pools; i++ )
   8.111 -    {
   8.112 +        info = xc_cpupool_getinfo(self->xc_handle, pool);
   8.113 +        if (info == NULL)
   8.114 +            break;
   8.115          info_dict = Py_BuildValue(
   8.116              "{s:i,s:i,s:i,s:N}",
   8.117 -            "cpupool",         (int)info[i].cpupool_id,
   8.118 -            "sched",           info[i].sched_id,
   8.119 -            "n_dom",           info[i].n_dom,
   8.120 -            "cpulist",         cpumap_to_cpulist(info[i].cpumap));
   8.121 +            "cpupool",         (int)info->cpupool_id,
   8.122 +            "sched",           info->sched_id,
   8.123 +            "n_dom",           info->n_dom,
   8.124 +            "cpulist",         cpumap_to_cpulist(info->cpumap,
   8.125 +                                                 info->cpumap_size));
   8.126 +        pool = info->cpupool_id + 1;
   8.127 +        free(info);
   8.128 +
   8.129          if ( info_dict == NULL )
   8.130          {
   8.131              Py_DECREF(list);
   8.132 -            if ( info_dict != NULL ) { Py_DECREF(info_dict); }
   8.133 -            free(info);
   8.134              return NULL;
   8.135          }
   8.136 -        PyList_SetItem(list, i, info_dict);
   8.137 +
   8.138 +        PyList_Append(list, info_dict);
   8.139 +        Py_DECREF(info_dict);
   8.140      }
   8.141  
   8.142 -    free(info);
   8.143 -
   8.144      return list;
   8.145  }
   8.146  
   8.147 @@ -2072,12 +2054,19 @@ static PyObject *pyxc_cpupool_movedomain
   8.148  
   8.149  static PyObject *pyxc_cpupool_freeinfo(XcObject *self)
   8.150  {
   8.151 -    uint64_t cpumap;
   8.152 -
   8.153 -    if (xc_cpupool_freeinfo(self->xc_handle, &cpumap) != 0)
   8.154 +    uint64_t *cpumap;
   8.155 +    int mapsize;
   8.156 +    PyObject *info = NULL;
   8.157 +
   8.158 +    cpumap = xc_cpupool_freeinfo(self->xc_handle, &mapsize);
   8.159 +    if (!cpumap)
   8.160          return pyxc_error_to_exception(self->xc_handle);
   8.161  
   8.162 -    return cpumap_to_cpulist(cpumap);
   8.163 +    info = cpumap_to_cpulist(cpumap, mapsize * 8);
   8.164 +
   8.165 +    free(cpumap);
   8.166 +
   8.167 +    return info;
   8.168  }
   8.169  
   8.170  static PyObject *pyflask_context_to_sid(PyObject *self, PyObject *args,
   8.171 @@ -2832,14 +2821,9 @@ static PyMethodDef pyxc_methods[] = {
   8.172  
   8.173      { "cpupool_getinfo",
   8.174        (PyCFunction)pyxc_cpupool_getinfo,
   8.175 -      METH_VARARGS | METH_KEYWORDS, "\n"
   8.176 +      METH_NOARGS, "\n"
   8.177        "Get information regarding a set of cpupools, in increasing id order.\n"
   8.178 -      " first_pool [int, 0]:    First cpupool to retrieve info about.\n"
   8.179 -      " max_pools  [int, 1024]: Maximum number of cpupools to retrieve info"
   8.180 -      " about.\n\n"
   8.181 -      "Returns: [list of dicts] if list length is less than 'max_pools'\n"
   8.182 -      "         parameter then there was an error, or the end of the\n"
   8.183 -      "         cpupool-id space was reached.\n"
   8.184 +      "Returns: [list of dicts]\n"
   8.185        " pool     [int]: Identifier of cpupool to which this info pertains\n"
   8.186        " sched    [int]:  Scheduler used for this cpupool\n"
   8.187        " n_dom    [int]:  Number of Domains in this cpupool\n"