debuggers.hg

changeset 21086:b64a8d2a80ad

libxc: Support setting affinity for more than 64 CPUs.

There can be more than 64 CPUs on new Intel platforms, especially on NUMA
systems, so we need to break the pCPU limit (previously 64) when setting
the affinity of a VCPU.

Signed-off-by: James (song wei) <jsong@novell.com>
author Keir Fraser <keir.fraser@citrix.com>
date Wed Mar 17 09:17:27 2010 +0000 (2010-03-17)
parents 95f5a4ce8f24
children 066c3eead6ec
files tools/libxc/xc_domain.c tools/libxc/xenctrl.h tools/python/xen/lowlevel/xc/xc.c
line diff
     1.1 --- a/tools/libxc/xc_domain.c	Wed Mar 17 08:35:13 2010 +0000
     1.2 +++ b/tools/libxc/xc_domain.c	Wed Mar 17 09:17:27 2010 +0000
     1.3 @@ -98,23 +98,28 @@ int xc_domain_shutdown(int xc_handle,
     1.4  int xc_vcpu_setaffinity(int xc_handle,
     1.5                          uint32_t domid,
     1.6                          int vcpu,
     1.7 -                        uint64_t cpumap)
     1.8 +                        uint64_t *cpumap, int cpusize)
     1.9  {
    1.10      DECLARE_DOMCTL;
    1.11      int ret = -1;
    1.12 -    uint8_t local[sizeof (cpumap)];
    1.13 +    uint8_t *local = malloc(cpusize); 
    1.14  
    1.15 +    if(local == NULL)
    1.16 +    {
    1.17 +        PERROR("Could not alloc memory for Xen hypercall");
    1.18 +        goto out;
    1.19 +    }
    1.20      domctl.cmd = XEN_DOMCTL_setvcpuaffinity;
    1.21      domctl.domain = (domid_t)domid;
    1.22      domctl.u.vcpuaffinity.vcpu    = vcpu;
    1.23  
    1.24 -    bitmap_64_to_byte(local, &cpumap, sizeof(cpumap) * 8);
    1.25 +    bitmap_64_to_byte(local, cpumap, cpusize * 8);
    1.26  
    1.27      set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap.bitmap, local);
    1.28  
    1.29 -    domctl.u.vcpuaffinity.cpumap.nr_cpus = sizeof(cpumap) * 8;
    1.30 +    domctl.u.vcpuaffinity.cpumap.nr_cpus = cpusize * 8;
    1.31      
    1.32 -    if ( lock_pages(local, sizeof(local)) != 0 )
    1.33 +    if ( lock_pages(local, cpusize) != 0 )
    1.34      {
    1.35          PERROR("Could not lock memory for Xen hypercall");
    1.36          goto out;
    1.37 @@ -122,9 +127,10 @@ int xc_vcpu_setaffinity(int xc_handle,
    1.38  
    1.39      ret = do_domctl(xc_handle, &domctl);
    1.40  
    1.41 -    unlock_pages(local, sizeof(local));
    1.42 +    unlock_pages(local, cpusize);
    1.43  
    1.44   out:
    1.45 +    free(local);
    1.46      return ret;
    1.47  }
    1.48  
    1.49 @@ -132,18 +138,26 @@ int xc_vcpu_setaffinity(int xc_handle,
    1.50  int xc_vcpu_getaffinity(int xc_handle,
    1.51                          uint32_t domid,
    1.52                          int vcpu,
    1.53 -                        uint64_t *cpumap)
    1.54 +                        uint64_t *cpumap,
    1.55 +                        int cpusize)
    1.56  {
    1.57      DECLARE_DOMCTL;
    1.58      int ret = -1;
    1.59 -    uint8_t local[sizeof (cpumap)];
    1.60 +    uint8_t * local = malloc(cpusize);
    1.61 +
    1.62 +    if(local == NULL)
    1.63 +    {
    1.64 +        PERROR("Could not alloc memory for Xen hypercall");
    1.65 +        goto out;
    1.66 +    }
    1.67  
    1.68      domctl.cmd = XEN_DOMCTL_getvcpuaffinity;
    1.69      domctl.domain = (domid_t)domid;
    1.70      domctl.u.vcpuaffinity.vcpu = vcpu;
    1.71  
    1.72 +
    1.73      set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap.bitmap, local);
    1.74 -    domctl.u.vcpuaffinity.cpumap.nr_cpus = sizeof(cpumap) * 8;
    1.75 +    domctl.u.vcpuaffinity.cpumap.nr_cpus = cpusize * 8;
    1.76      
    1.77      if ( lock_pages(local, sizeof(local)) != 0 )
    1.78      {
    1.79 @@ -154,8 +168,9 @@ int xc_vcpu_getaffinity(int xc_handle,
    1.80      ret = do_domctl(xc_handle, &domctl);
    1.81  
    1.82      unlock_pages(local, sizeof (local));
    1.83 -    bitmap_byte_to_64(cpumap, local, sizeof(local) * 8);
    1.84 - out:
    1.85 +    bitmap_byte_to_64(cpumap, local, cpusize * 8);
    1.86 +out:
    1.87 +    free(local);
    1.88      return ret;
    1.89  }
    1.90  
     2.1 --- a/tools/libxc/xenctrl.h	Wed Mar 17 08:35:13 2010 +0000
     2.2 +++ b/tools/libxc/xenctrl.h	Wed Mar 17 09:17:27 2010 +0000
     2.3 @@ -309,11 +309,13 @@ int xc_domain_shutdown(int xc_handle,
     2.4  int xc_vcpu_setaffinity(int xc_handle,
     2.5                          uint32_t domid,
     2.6                          int vcpu,
     2.7 -                        uint64_t cpumap);
     2.8 +                        uint64_t *cpumap,
     2.9 +                        int cpusize);
    2.10  int xc_vcpu_getaffinity(int xc_handle,
    2.11                          uint32_t domid,
    2.12                          int vcpu,
    2.13 -                        uint64_t *cpumap);
    2.14 +                        uint64_t *cpumap,
    2.15 +                        int cpusize);
    2.16  
    2.17  /**
    2.18   * This function will return information about one or more domains. It is
     3.1 --- a/tools/python/xen/lowlevel/xc/xc.c	Wed Mar 17 08:35:13 2010 +0000
     3.2 +++ b/tools/python/xen/lowlevel/xc/xc.c	Wed Mar 17 09:17:27 2010 +0000
     3.3 @@ -215,35 +215,54 @@ static PyObject *pyxc_vcpu_setaffinity(X
     3.4  {
     3.5      uint32_t dom;
     3.6      int vcpu = 0, i;
     3.7 -    uint64_t  cpumap = ~0ULL;
     3.8 +    uint64_t  *cpumap;
     3.9      PyObject *cpulist = NULL;
    3.10 +    int nr_cpus, size;
    3.11 +    xc_physinfo_t info; 
    3.12 +    xc_cpu_to_node_t map[1];
    3.13 +    uint64_t cpumap_size = sizeof(cpumap); 
    3.14  
    3.15      static char *kwd_list[] = { "domid", "vcpu", "cpumap", NULL };
    3.16 +    
    3.17  
    3.18      if ( !PyArg_ParseTupleAndKeywords(args, kwds, "i|iO", kwd_list, 
    3.19                                        &dom, &vcpu, &cpulist) )
    3.20          return NULL;
    3.21  
    3.22 +    set_xen_guest_handle(info.cpu_to_node, map);
    3.23 +    info.max_cpu_id = 1;
    3.24 +    if ( xc_physinfo(self->xc_handle, &info) != 0 )
    3.25 +        return pyxc_error_to_exception();
    3.26 +  
    3.27 +    nr_cpus = info.nr_cpus;
    3.28 +
    3.29 +    size = (nr_cpus + cpumap_size * 8 - 1)/ (cpumap_size * 8);
    3.30 +    cpumap = malloc(cpumap_size * size);
    3.31 +    if(cpumap == NULL)
    3.32 +        return pyxc_error_to_exception();
    3.33 +    
    3.34 +
    3.35      if ( (cpulist != NULL) && PyList_Check(cpulist) )
    3.36      {
    3.37 -        cpumap = 0ULL;
    3.38 +        for ( i = 0; i < size; i++)
    3.39 +        {
    3.40 +            cpumap[i] = 0ULL;
    3.41 +        }
    3.42          for ( i = 0; i < PyList_Size(cpulist); i++ ) 
    3.43          {
    3.44              long cpu = PyInt_AsLong(PyList_GetItem(cpulist, i));
    3.45 -            if ( cpu >= 64 )
    3.46 -            {
    3.47 -                errno = EINVAL;
    3.48 -                PyErr_SetFromErrno(xc_error_obj);
    3.49 -                return NULL;
    3.50 -            }
    3.51 -            cpumap |= (uint64_t)1 << cpu;
    3.52 +            *(cpumap + cpu / (cpumap_size * 8)) |= (uint64_t)1 << (cpu % (cpumap_size * 8));
    3.53          }
    3.54      }
    3.55    
    3.56 -    if ( xc_vcpu_setaffinity(self->xc_handle, dom, vcpu, cpumap) != 0 )
    3.57 +    if ( xc_vcpu_setaffinity(self->xc_handle, dom, vcpu, cpumap, size * cpumap_size) != 0 )
    3.58 +    {
    3.59 +        free(cpumap);
    3.60          return pyxc_error_to_exception();
    3.61 -    
    3.62 +    }
    3.63 +
    3.64      Py_INCREF(zero);
    3.65 +    free(cpumap); 
    3.66      return zero;
    3.67  }
    3.68  
    3.69 @@ -362,7 +381,11 @@ static PyObject *pyxc_vcpu_getinfo(XcObj
    3.70      uint32_t dom, vcpu = 0;
    3.71      xc_vcpuinfo_t info;
    3.72      int rc, i;
    3.73 -    uint64_t cpumap;
    3.74 +    uint64_t *cpumap;
    3.75 +    int nr_cpus, size;
    3.76 +    xc_physinfo_t pinfo = { 0 };
    3.77 +    xc_cpu_to_node_t map[1];
    3.78 +    uint64_t cpumap_size = sizeof(cpumap);
    3.79  
    3.80      static char *kwd_list[] = { "domid", "vcpu", NULL };
    3.81      
    3.82 @@ -370,12 +393,25 @@ static PyObject *pyxc_vcpu_getinfo(XcObj
    3.83                                        &dom, &vcpu) )
    3.84          return NULL;
    3.85  
    3.86 +    set_xen_guest_handle(pinfo.cpu_to_node, map);
    3.87 +    pinfo.max_cpu_id = 1;
    3.88 +    if ( xc_physinfo(self->xc_handle, &pinfo) != 0 ) 
    3.89 +        return pyxc_error_to_exception();
    3.90 +    nr_cpus = pinfo.nr_cpus;
    3.91      rc = xc_vcpu_getinfo(self->xc_handle, dom, vcpu, &info);
    3.92      if ( rc < 0 )
    3.93          return pyxc_error_to_exception();
    3.94 -    rc = xc_vcpu_getaffinity(self->xc_handle, dom, vcpu, &cpumap);
    3.95 +    size = (nr_cpus + cpumap_size * 8 - 1)/ (cpumap_size * 8); 
    3.96 +
    3.97 +    if((cpumap = malloc(cpumap_size * size)) == NULL)
    3.98 +        return pyxc_error_to_exception(); 
    3.99 +
   3.100 +    rc = xc_vcpu_getaffinity(self->xc_handle, dom, vcpu, cpumap, cpumap_size * size);
   3.101      if ( rc < 0 )
   3.102 +    {
   3.103 +        free(cpumap);
   3.104          return pyxc_error_to_exception();
   3.105 +    }
   3.106  
   3.107      info_dict = Py_BuildValue("{s:i,s:i,s:i,s:L,s:i}",
   3.108                                "online",   info.online,
   3.109 @@ -385,17 +421,18 @@ static PyObject *pyxc_vcpu_getinfo(XcObj
   3.110                                "cpu",      info.cpu);
   3.111  
   3.112      cpulist = PyList_New(0);
   3.113 -    for ( i = 0; cpumap != 0; i++ )
   3.114 +    for ( i = 0; i < size * cpumap_size * 8; i++ )
   3.115      {
   3.116 -        if ( cpumap & 1 ) {
   3.117 +        if (*(cpumap + i / (cpumap_size * 8)) & 1 ) {
   3.118              PyObject *pyint = PyInt_FromLong(i);
   3.119              PyList_Append(cpulist, pyint);
   3.120              Py_DECREF(pyint);
   3.121          }
   3.122 -        cpumap >>= 1;
   3.123 +        *(cpumap + i / (cpumap_size * 8)) >>= 1;
   3.124      }
   3.125      PyDict_SetItemString(info_dict, "cpumap", cpulist);
   3.126      Py_DECREF(cpulist);
   3.127 +    free(cpumap);
   3.128      return info_dict;
   3.129  }
   3.130