view tools/python/xen/xend/XendDomainInfo.py @ 20667:f9392f6eda79

tools: improve NUMA guest placement when ballooning

the "guest to a single NUMA node" constrain algorithm does not work
well when we do ballooning. Ballooning and NUMA don't play together
anyway, as Dom0 and thus ballooning is not NUMA aware, I am working on
this but it will not be ready for the Xen 4.0 release window. The
usual ballooning situation will result in an empty candidate list, as
no node has enough free memory to host the guest. In this case the
code will simply pick the first node: again and again, because all
nodes without enough memory will be ultimately penalized with the same
maxint value (regardless of the actual load). The attached patch will
change this to use a relative penalty in case of not-enough memory, so
that low-load low-memory nodes will be used at one point. A half
loaded node has shown to be a good value, as an unbalanced system is
much worse than non-local memory access for guests. Regardless of
that you should restrict the Dom0 on a NUMA system to a reasonable
memory size, so that ballooning is not necessary most of the time. In
this case the guest's memory will be NUMA local.

Signed-off-by: Andre Przywara <andre.przywara@amd.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Dec 11 08:59:54 2009 +0000 (2009-12-11)
parents 2e5032921b07
children 73ff2d056e36
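
To illustrate the scoring change described in the commit message, here
is a minimal sketch in xend-style Python. It is an illustration only:
the pick_node() name and the node fields ('load', 'capacity',
'free_kb') are assumptions made for the example, not the actual
placement code in this file.

    def pick_node(nodes, mem_needed_kb):
        """Return the index of the best NUMA node for a new guest.

        Illustrative sketch. Every node is scored by its current
        load; a node without enough free memory is no longer ruled
        out with a flat maxint penalty (which made all drained nodes
        tie, so node 0 always won), but instead gets a relative
        penalty equivalent to a half-loaded node.
        """
        best_idx = -1
        best_score = None
        for idx, node in enumerate(nodes):
            score = node['load']                 # e.g. guests already placed here
            if node['free_kb'] < mem_needed_kb:
                score += node['capacity'] / 2.0  # relative penalty, not maxint
            if best_score is None or score < best_score:
                best_idx, best_score = idx, score
        return best_idx

With the old flat maxint penalty every too-small node scored the same
and the first node was picked unconditionally; the relative penalty
breaks that tie by load while still preferring nodes that can hold the
guest's memory entirely.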
line source
1 #===========================================================================
2 # This library is free software; you can redistribute it and/or
3 # modify it under the terms of version 2.1 of the GNU Lesser General Public
4 # License as published by the Free Software Foundation.
5 #
6 # This library is distributed in the hope that it will be useful,
7 # but WITHOUT ANY WARRANTY; without even the implied warranty of
8 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
9 # Lesser General Public License for more details.
10 #
11 # You should have received a copy of the GNU Lesser General Public
12 # License along with this library; if not, write to the Free Software
13 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
14 #============================================================================
15 # Copyright (C) 2004, 2005 Mike Wray <mike.wray@hp.com>
16 # Copyright (C) 2005-2007 XenSource Ltd
17 #============================================================================
19 """Representation of a single domain.
20 Includes support for domain construction, using
21 open-ended configurations.
23 Author: Mike Wray <mike.wray@hp.com>
25 """
27 import logging
28 import time
29 import threading
30 import thread
31 import re
32 import copy
33 import os
34 import stat
35 import traceback
36 from types import StringTypes
38 import xen.lowlevel.xc
39 from xen.util import asserts, auxbin
40 from xen.util.blkif import blkdev_uname_to_file, blkdev_uname_to_taptype
41 import xen.util.xsm.xsm as security
42 from xen.util import xsconstants
43 from xen.util import mkdir
44 from xen.util.pci import serialise_pci_opts, pci_opts_list_to_sxp, \
45 pci_dict_to_bdf_str, pci_dict_to_xc_str, \
46 pci_convert_sxp_to_dict, pci_convert_dict_to_sxp, \
47 pci_dict_cmp, PCI_DEVFN, PCI_SLOT, PCI_FUNC, parse_hex
49 from xen.xend import balloon, sxp, uuid, image, arch
50 from xen.xend import XendOptions, XendNode, XendConfig
52 from xen.xend.XendConfig import scrub_password
53 from xen.xend.XendBootloader import bootloader, bootloader_tidy
54 from xen.xend.XendError import XendError, VmError
55 from xen.xend.XendDevices import XendDevices
56 from xen.xend.XendTask import XendTask
57 from xen.xend.xenstore.xstransact import xstransact, complete
58 from xen.xend.xenstore.xsutil import GetDomainPath, IntroduceDomain, SetTarget, ResumeDomain
59 from xen.xend.xenstore.xswatch import xswatch
60 from xen.xend.XendConstants import *
61 from xen.xend.XendAPIConstants import *
62 from xen.xend.server.DevConstants import xenbusState
63 from xen.xend.server.BlktapController import TAPDISK_DEVICE, parseDeviceString
65 from xen.xend.XendVMMetrics import XendVMMetrics
67 from xen.xend import XendAPIStore
68 from xen.xend.XendPPCI import XendPPCI
69 from xen.xend.XendDPCI import XendDPCI
70 from xen.xend.XendPSCSI import XendPSCSI
71 from xen.xend.XendDSCSI import XendDSCSI, XendDSCSI_HBA
73 MIGRATE_TIMEOUT = 30.0
74 BOOTLOADER_LOOPBACK_DEVICE = '/dev/xvdp'
76 xc = xen.lowlevel.xc.xc()
77 xoptions = XendOptions.instance()
79 log = logging.getLogger("xend.XendDomainInfo")
80 #log.setLevel(logging.TRACE)
83 def create(config):
84 """Creates and start a VM using the supplied configuration.
86 @param config: A configuration object involving lists of tuples.
87 @type config: list of lists, eg ['vm', ['image', 'xen.gz']]
89 @rtype: XendDomainInfo
90 @return: An up and running XendDomainInfo instance
91 @raise VmError: Invalid configuration or failure to start.
92 """
93 from xen.xend import XendDomain
94 domconfig = XendConfig.XendConfig(sxp_obj = config)
95 othervm = XendDomain.instance().domain_lookup_nr(domconfig["name_label"])
96 if othervm is None or othervm.domid is None:
97 othervm = XendDomain.instance().domain_lookup_nr(domconfig["uuid"])
98 if othervm is not None and othervm.domid is not None:
99 raise VmError("Domain '%s' already exists with ID '%d'" % (domconfig["name_label"], othervm.domid))
100 log.debug("XendDomainInfo.create(%s)", scrub_password(config))
101 vm = XendDomainInfo(domconfig)
102 try:
103 vm.start()
104 except:
105 log.exception('Domain construction failed')
106 vm.destroy()
107 raise
109 return vm
111 def create_from_dict(config_dict):
112 """Creates and start a VM using the supplied configuration.
114 @param config_dict: An configuration dictionary.
116 @rtype: XendDomainInfo
117 @return: An up and running XendDomainInfo instance
118 @raise VmError: Invalid configuration or failure to start.
119 """
121 log.debug("XendDomainInfo.create_from_dict(%s)",
122 scrub_password(config_dict))
123 vm = XendDomainInfo(XendConfig.XendConfig(xapi = config_dict))
124 try:
125 vm.start()
126 except:
127 log.exception('Domain construction failed')
128 vm.destroy()
129 raise
130 return vm
132 def recreate(info, priv):
133 """Create the VM object for an existing domain. The domain must not
134 be dying, as the paths in the store should already have been removed,
135 and asking us to recreate them causes problems.
137 @param info: Parsed configuration
138 @type info: Dictionary
139 @param priv: Is a privileged domain (Dom 0)
140 @type priv: bool
142 @rtype: XendDomainInfo
143 @return: An up and running XendDomainInfo instance
144 @raise VmError: Invalid configuration.
145 @raise XendError: Errors with configuration.
146 """
148 log.debug("XendDomainInfo.recreate(%s)", scrub_password(info))
150 assert not info['dying']
152 xeninfo = XendConfig.XendConfig(dominfo = info)
153 xeninfo['is_control_domain'] = priv
154 xeninfo['is_a_template'] = False
155 xeninfo['auto_power_on'] = False
156 domid = xeninfo['domid']
157 uuid1 = uuid.fromString(xeninfo['uuid'])
158 needs_reinitialising = False
160 dompath = GetDomainPath(domid)
161 if not dompath:
162 raise XendError('No domain path in store for existing '
163 'domain %d' % domid)
165 log.info("Recreating domain %d, UUID %s. at %s" %
166 (domid, xeninfo['uuid'], dompath))
168 # need to verify the path and uuid if not Domain-0
169 # if the required uuid and vm aren't set, then that means
170 # we need to recreate the dom with our own values
171 #
172 # NOTE: this is probably not desirable, really we should just
173 # abort or ignore, but there may be cases where xenstore's
174 # entry disappears (eg. xenstore-rm /)
175 #
176 try:
177 vmpath = xstransact.Read(dompath, "vm")
178 if not vmpath:
179 if not priv:
180 log.warn('/local/domain/%d/vm is missing. recreate is '
181 'confused, trying our best to recover' % domid)
182 needs_reinitialising = True
183 raise XendError('reinit')
185 uuid2_str = xstransact.Read(vmpath, "uuid")
186 if not uuid2_str:
187 log.warn('%s/uuid/ is missing. recreate is confused, '
188 'trying our best to recover' % vmpath)
189 needs_reinitialising = True
190 raise XendError('reinit')
192 uuid2 = uuid.fromString(uuid2_str)
193 if uuid1 != uuid2:
194 log.warn('UUID in /vm does not match the UUID in /dom/%d. '
195 'Trying our best to recover' % domid)
196 needs_reinitialising = True
197 except XendError:
198 pass # our best shot at 'goto' in python :)
200 vm = XendDomainInfo(xeninfo, domid, dompath, augment = True, priv = priv,
201 vmpath = vmpath)
203 if needs_reinitialising:
204 vm._recreateDom()
205 vm._removeVm()
206 vm._storeVmDetails()
207 vm._storeDomDetails()
209 vm.image = image.create(vm, vm.info)
210 vm.image.recreate()
212 vm._registerWatches()
213 vm.refreshShutdown(xeninfo)
215 # register the domain in the list
216 from xen.xend import XendDomain
217 XendDomain.instance().add_domain(vm)
219 return vm
222 def restore(config):
223 """Create a domain and a VM object to do a restore.
225 @param config: Domain SXP configuration
226 @type config: list of lists. (see C{create})
228 @rtype: XendDomainInfo
229 @return: An up and running XendDomainInfo instance
230 @raise VmError: Invalid configuration or failure to start.
231 @raise XendError: Errors with configuration.
232 """
234 log.debug("XendDomainInfo.restore(%s)", scrub_password(config))
235 vm = XendDomainInfo(XendConfig.XendConfig(sxp_obj = config),
236 resume = True)
237 try:
238 vm.resume()
239 return vm
240 except:
241 vm.destroy()
242 raise
244 def createDormant(domconfig):
245 """Create a dormant/inactive XenDomainInfo without creating VM.
246 This is for creating instances of persistent domains that are not
247 yet started.
249 @param domconfig: Parsed configuration
250 @type domconfig: XendConfig object
252 @rtype: XendDomainInfo
253 @return: A dormant XendDomainInfo instance
254 @raise XendError: Errors with configuration.
255 """
257 log.debug("XendDomainInfo.createDormant(%s)", scrub_password(domconfig))
259 # domid does not make sense for non-running domains.
260 domconfig.pop('domid', None)
261 vm = XendDomainInfo(domconfig)
262 return vm
264 def domain_by_name(name):
265 """Get domain by name
267 @param name: Name of the domain
268 @type name: string
269 @return: XendDomainInfo or None
270 """
271 from xen.xend import XendDomain
272 return XendDomain.instance().domain_lookup_by_name_nr(name)
275 def shutdown_reason(code):
276 """Get a shutdown reason from a code.
278 @param code: shutdown code
279 @type code: int
280 @return: shutdown reason
281 @rtype: string
282 """
283 return DOMAIN_SHUTDOWN_REASONS.get(code, "?")
285 def dom_get(dom):
286 """Get info from xen for an existing domain.
288 @param dom: domain id
289 @type dom: int
290 @return: info or None
291 @rtype: dictionary
292 """
293 try:
294 domlist = xc.domain_getinfo(dom, 1)
295 if domlist and dom == domlist[0]['domid']:
296 return domlist[0]
297 except Exception, err:
298 # ignore missing domain
299 log.trace("domain_getinfo(%d) failed, ignoring: %s", dom, str(err))
300 return None
302 from xen.xend.server.pciif import parse_pci_name, PciDevice,\
303 get_assigned_pci_devices, get_all_assigned_pci_devices
306 def do_FLR(domid, is_hvm):
307 dev_str_list = get_assigned_pci_devices(domid)
309 for dev_str in dev_str_list:
310 try:
311 dev = PciDevice(parse_pci_name(dev_str))
312 except Exception, e:
313 raise VmError("pci: failed to locate device and "+
314 "parse it's resources - "+str(e))
315 dev.do_FLR(is_hvm, xoptions.get_pci_dev_assign_strict_check())
317 class XendDomainInfo:
318 """An object represents a domain.
320 @TODO: try to unify dom and domid; they mean the same thing, but
321 xc refers to it as dom, and everywhere else, including
322 xenstore it is domid. The best way is to change xc's
323 python interface.
325 @ivar info: Parsed configuration
326 @type info: dictionary
327 @ivar domid: Domain ID (if VM has started)
328 @type domid: int or None
329 @ivar guest_bitsize: the bitsize of guest
330 @type guest_bitsize: int or None
331 @ivar alloc_mem: the memory allocated to the domain at boot
332 @type alloc_mem: int or None
333 @ivar vmpath: XenStore path to this VM.
334 @type vmpath: string
335 @ivar dompath: XenStore path to this Domain.
336 @type dompath: string
337 @ivar image: Reference to the VM Image.
338 @type image: xen.xend.image.ImageHandler
339 @ivar store_port: event channel to xenstored
340 @type store_port: int
341 @ivar console_port: event channel to xenconsoled
342 @type console_port: int
343 @ivar store_mfn: xenstored mfn
344 @type store_mfn: int
345 @ivar console_mfn: xenconsoled mfn
346 @type console_mfn: int
347 @ivar notes: OS image notes
348 @type notes: dictionary
349 @ivar vmWatch: reference to a watch on the xenstored vmpath
350 @type vmWatch: xen.xend.xenstore.xswatch
351 @ivar shutdownWatch: reference to watch on the xenstored domain shutdown
352 @type shutdownWatch: xen.xend.xenstore.xswatch
353 @ivar shutdownStartTime: UNIX Time when domain started shutting down.
354 @type shutdownStartTime: float or None
355 @ivar restart_in_progress: Is a domain restart thread running?
356 @type restart_in_progress: bool
357 # @ivar state: Domain state
358 # @type state: enum(DOM_STATE_HALTED, DOM_STATE_RUNNING, ...)
359 @ivar state_updated: lock for self.state
360 @type state_updated: threading.Condition
361 @ivar refresh_shutdown_lock: lock for polling shutdown state
362 @type refresh_shutdown_lock: threading.Condition
363 @ivar _deviceControllers: device controller cache for this domain
364 @type _deviceControllers: dict 'string' to DevControllers
365 """
367 def __init__(self, info, domid = None, dompath = None, augment = False,
368 priv = False, resume = False, vmpath = None):
369 """Constructor for a domain
371 @param info: parsed configuration
372 @type info: dictionary
373 @keyword domid: Set initial domain id (if any)
374 @type domid: int
375 @keyword dompath: Set initial dompath (if any)
376 @type dompath: string
377 @keyword augment: Augment given info with xenstored VM info
378 @type augment: bool
379 @keyword priv: Is a privileged domain (Dom 0)
380 @type priv: bool
381 @keyword resume: Is this domain being resumed?
382 @type resume: bool
383 """
385 self.info = info
386 if domid == None:
387 self.domid = self.info.get('domid')
388 else:
389 self.domid = domid
390 self.guest_bitsize = None
391 self.alloc_mem = None
393 #REMOVE: uuid is now generated in XendConfig
394 #if not self._infoIsSet('uuid'):
395 # self.info['uuid'] = uuid.toString(uuid.create())
397 # Find a unique /vm/<uuid>/<integer> path if not specified.
398 # This avoids conflict between pre-/post-migrate domains when doing
399 # localhost relocation.
400 self.vmpath = vmpath
401 i = 0
402 while self.vmpath == None:
403 self.vmpath = XS_VMROOT + self.info['uuid']
404 if i != 0:
405 self.vmpath = self.vmpath + '-' + str(i)
406 try:
407 if self._readVm("uuid"):
408 self.vmpath = None
409 i = i + 1
410 except:
411 pass
413 self.dompath = dompath
415 self.image = None
416 self.store_port = None
417 self.store_mfn = None
418 self.console_port = None
419 self.console_mfn = None
421 self.native_protocol = None
423 self.vmWatch = None
424 self.shutdownWatch = None
425 self.shutdownStartTime = None
426 self._resume = resume
427 self.restart_in_progress = False
429 self.state_updated = threading.Condition()
430 self.refresh_shutdown_lock = threading.Condition()
431 self._stateSet(DOM_STATE_HALTED)
433 self._deviceControllers = {}
435 for state in DOM_STATES_OLD:
436 self.info[state] = 0
438 if augment:
439 self._augmentInfo(priv)
441 self._checkName(self.info['name_label'])
443 self.metrics = XendVMMetrics(uuid.createString(), self)
446 #
447 # Public functions available through XMLRPC
448 #
451 def start(self, is_managed = False):
452 """Attempts to start the VM by do the appropriate
453 initialisation if it not started.
454 """
455 from xen.xend import XendDomain
457 if self._stateGet() in (XEN_API_VM_POWER_STATE_HALTED, XEN_API_VM_POWER_STATE_SUSPENDED, XEN_API_VM_POWER_STATE_CRASHED):
458 try:
459 XendTask.log_progress(0, 30, self._constructDomain)
460 XendTask.log_progress(31, 60, self._initDomain)
462 XendTask.log_progress(61, 70, self._storeVmDetails)
463 XendTask.log_progress(71, 80, self._storeDomDetails)
464 XendTask.log_progress(81, 90, self._registerWatches)
465 XendTask.log_progress(91, 100, self.refreshShutdown)
467 xendomains = XendDomain.instance()
469 # save running configuration if XendDomains believe domain is
470 # persistent
471 if is_managed:
472 xendomains.managed_config_save(self)
473 except:
474 log.exception('VM start failed')
475 self.destroy()
476 raise
477 else:
478 raise XendError('VM already running')
480 def resume(self):
481 """Resumes a domain that has come back from suspension."""
482 state = self._stateGet()
483 if state in (DOM_STATE_SUSPENDED, DOM_STATE_HALTED):
484 try:
485 self._constructDomain()
487 try:
488 self._setCPUAffinity()
489 except:
490 # usually a CPU we want to set affinity to does not exist
491 # we just ignore it so that the domain can still be restored
492 log.warn("Cannot restore CPU affinity")
494 self._setSchedParams()
495 self._storeVmDetails()
496 self._createChannels()
497 self._createDevices()
498 self._storeDomDetails()
499 self._endRestore()
500 except:
501 log.exception('VM resume failed')
502 self.destroy()
503 raise
504 else:
505 raise XendError('VM is not suspended; it is %s'
506 % XEN_API_VM_POWER_STATE[state])
508 def shutdown(self, reason):
509 """Shutdown a domain by signalling this via xenstored."""
510 log.debug('XendDomainInfo.shutdown(%s)', reason)
511 if self._stateGet() in (DOM_STATE_SHUTDOWN, DOM_STATE_HALTED,):
512 raise XendError('Domain cannot be shutdown')
514 if self.domid == 0:
515 raise XendError('Domain 0 cannot be shutdown')
517 if reason not in DOMAIN_SHUTDOWN_REASONS.values():
518 raise XendError('Invalid reason: %s' % reason)
519 self.storeDom("control/shutdown", reason)
521 # HVM domain shuts itself down only if it has PV drivers
522 if self.info.is_hvm():
523 hvm_pvdrv = xc.hvm_get_param(self.domid, HVM_PARAM_CALLBACK_IRQ)
524 hvm_s_state = xc.hvm_get_param(self.domid, HVM_PARAM_ACPI_S_STATE)
525 if not hvm_pvdrv or hvm_s_state != 0:
526 code = REVERSE_DOMAIN_SHUTDOWN_REASONS[reason]
527 log.info("HVM save:remote shutdown dom %d!", self.domid)
528 xc.domain_shutdown(self.domid, code)
530 def pause(self):
531 """Pause domain
533 @raise XendError: Failed pausing a domain
534 """
535 try:
536 if(self.domid):
537 # get all blktap2 devices
538 dev = xstransact.List(self.vmpath + '/device/tap2')
539 for x in dev:
540 path = self.getDeviceController('tap2').readBackend(x, 'params')
541 if path and path.startswith(TAPDISK_DEVICE):
542 try:
543 _minor, _dev, ctrl = parseDeviceString(path)
544 #pause the disk
545 f = open(ctrl + '/pause', 'w')
546 f.write('pause')
547 f.close()
548 except:
549 pass
550 except Exception, ex:
551 log.warn('Could not pause blktap disk: %s' % str(ex))
553 try:
554 xc.domain_pause(self.domid)
555 self._stateSet(DOM_STATE_PAUSED)
556 except Exception, ex:
557 log.exception(ex)
558 raise XendError("Domain unable to be paused: %s" % str(ex))
560 def unpause(self):
561 """Unpause domain
563 @raise XendError: Failed unpausing a domain
564 """
565 try:
566 if(self.domid):
567 dev = xstransact.List(self.vmpath + '/device/tap2')
568 for x in dev:
569 path = self.getDeviceController('tap2').readBackend(x, 'params')
570 if path and path.startswith(TAPDISK_DEVICE):
571 try:
572 #Figure out the sysfs path.
573 _minor, _dev, ctrl = parseDeviceString(path)
574 #unpause the disk
575 if os.path.exists(ctrl + '/resume'):
576 f = open(ctrl + '/resume', 'w')
577 f.write('resume')
578 f.close()
579 except:
580 pass
582 except Exception, ex:
583 log.warn('Could not unpause blktap disk: %s' % str(ex))
585 try:
586 xc.domain_unpause(self.domid)
587 self._stateSet(DOM_STATE_RUNNING)
588 except Exception, ex:
589 log.exception(ex)
590 raise XendError("Domain unable to be unpaused: %s" % str(ex))
592 def send_sysrq(self, key):
593 """ Send a Sysrq equivalent key via xenstored."""
594 if self._stateGet() not in (DOM_STATE_RUNNING, DOM_STATE_PAUSED):
595 raise XendError("Domain '%s' is not started" % self.info['name_label'])
597 asserts.isCharConvertible(key)
598 self.storeDom("control/sysrq", '%c' % key)
600 def pci_device_configure_boot(self):
602 if not self.info.is_hvm():
603 return
605 devid = '0'
606 first = True
607 dev_info = self._getDeviceInfo_pci(devid)
608 if dev_info is None:
609 return
611 # get the virtual slot info from xenstore
612 dev_uuid = sxp.child_value(dev_info, 'uuid')
613 pci_conf = self.info['devices'][dev_uuid][1]
614 pci_devs = pci_conf['devs']
616 # Keep a set of keys that are done rather than
617 # just iterating through set(map(..., pci_devs))
618 # to preserve any order information present.
619 done = set()
620 for key in map(lambda x: x['key'], pci_devs):
621 if key in done:
622 continue
623 done |= set([key])
624 dev = filter(lambda x: x['key'] == key, pci_devs)
626 head_dev = dev.pop()
627 dev_sxp = pci_convert_dict_to_sxp(head_dev, 'Initialising',
628 'Booting')
629 self.pci_device_configure(dev_sxp, first_dev = first)
630 first = False
632 # That is all for single-function virtual devices
633 if len(dev) == 0:
634 continue
636 if int(head_dev['vdevfn'], 16) & AUTO_PHP_SLOT:
637 new_dev_info = self._getDeviceInfo_pci(devid)
638 if new_dev_info is None:
639 continue
640 new_dev_uuid = sxp.child_value(new_dev_info, 'uuid')
641 new_pci_conf = self.info['devices'][new_dev_uuid][1]
642 new_pci_devs = new_pci_conf['devs']
644 new_head_dev = filter(lambda x: pci_dict_cmp(x, head_dev),
645 new_pci_devs)[0]
647 if int(new_head_dev['vdevfn'], 16) & AUTO_PHP_SLOT:
648 continue
650 vdevfn = PCI_SLOT(int(new_head_dev['vdevfn'], 16))
651 new_dev = []
652 for i in dev:
653 i['vdevfn'] = '0x%02x' % \
654 PCI_DEVFN(vdevfn,
655 PCI_FUNC(int(i['vdevfn'], 16)))
656 new_dev.append(i)
658 dev = new_dev
660 for i in dev:
661 dev_sxp = pci_convert_dict_to_sxp(i, 'Initialising', 'Booting')
662 self.pci_device_configure(dev_sxp)
664 def hvm_pci_device_create(self, dev_config):
665 log.debug("XendDomainInfo.hvm_pci_device_create: %s"
666 % scrub_password(dev_config))
668 if not self.info.is_hvm():
669 raise VmError("hvm_pci_device_create called on non-HVM guest")
671 #all the PCI devs share one conf node
672 devid = '0'
674 new_dev = dev_config['devs'][0]
675 dev_info = self._getDeviceInfo_pci(devid)#from self.info['devices']
677 #check conflict before triggering the hotplug event
678 if dev_info is not None:
679 dev_uuid = sxp.child_value(dev_info, 'uuid')
680 pci_conf = self.info['devices'][dev_uuid][1]
681 pci_devs = pci_conf['devs']
682 for x in pci_devs:
683 if (int(x['vdevfn'], 16) == int(new_dev['vdevfn'], 16) and
684 not int(x['vdevfn'], 16) & AUTO_PHP_SLOT):
685 raise VmError("vdevfn %s already have a device." %
686 (new_dev['vdevfn']))
688 if (pci_dict_cmp(x, new_dev)):
689 raise VmError("device is already inserted")
691 # Test whether the devices can be assigned.
692 self.pci_device_check_attachability(new_dev)
694 return self.hvm_pci_device_insert_dev(new_dev)
696 def pci_device_check_attachability(self, new_dev):
697 # Test whether the devices can be assigned
699 pci_name = pci_dict_to_bdf_str(new_dev)
700 _all_assigned_pci_devices = get_all_assigned_pci_devices(self.domid)
701 if pci_name in _all_assigned_pci_devices:
702 raise VmError("failed to assign device %s that has"
703 " already been assigned to other domain." % pci_name)
705 # Test whether the device is owned by pciback or pci-stub.
706 try:
707 pci_device = PciDevice(new_dev)
708 except Exception, e:
709 raise VmError("pci: failed to locate device and "+
710 "parse its resources - "+str(e))
711 if pci_device.driver!='pciback' and pci_device.driver!='pci-stub':
712 raise VmError(("pci: PCI Backend and pci-stub don't own device %s")\
713 %pci_device.name)
715 strict_check = xoptions.get_pci_dev_assign_strict_check()
716 # Check non-page-aligned MMIO BAR.
717 if pci_device.has_non_page_aligned_bar and strict_check:
718 raise VmError("pci: %s: non-page-aligned MMIO BAR found." % \
719 pci_device.name)
721 # PV guests need fewer checks.
722 if not self.info.is_hvm():
723 return
725 if not strict_check:
726 return
728 # Check if there is an intermediate PCIe switch between the device
729 # and the Root Complex.
730 if pci_device.is_behind_switch_lacking_acs():
731 err_msg = 'pci: to avoid potential security issue, %s is not'+\
732 ' allowed to be assigned to a guest since it is behind a'+\
733 ' PCIe switch that does not support or enable ACS.'
734 raise VmError(err_msg % pci_device.name)
736 # Check the co-assignment.
737 # To pci-attach a device D to domN, we should ensure each of D's
738 # co-assignment devices hasn't been assigned, or has been assigned to
739 # domN.
740 coassignment_list = pci_device.find_coassigned_devices()
741 pci_device.devs_check_driver(coassignment_list)
742 assigned_pci_device_str_list = self._get_assigned_pci_devices()
743 for pci_str in coassignment_list:
744 if not (pci_str in _all_assigned_pci_devices):
745 continue
746 if not pci_str in assigned_pci_device_str_list:
747 raise VmError(("pci: failed to pci-attach %s to domain %s" + \
748 " because one of its co-assignment device %s has been" + \
749 " assigned to other domain." \
750 )% (pci_device.name, self.info['name_label'], pci_str))
752 def hvm_pci_device_insert(self, dev_config):
753 log.debug("XendDomainInfo.hvm_pci_device_insert: %s"
754 % scrub_password(dev_config))
756 if not self.info.is_hvm():
757 raise VmError("hvm_pci_device_create called on non-HVM guest")
759 new_dev = dev_config['devs'][0]
761 return self.hvm_pci_device_insert_dev(new_dev)
763 def hvm_pci_device_insert_dev(self, new_dev):
764 log.debug("XendDomainInfo.hvm_pci_device_insert_dev: %s"
765 % scrub_password(new_dev))
767 if self.domid is not None:
768 opts = ''
769 if new_dev.has_key('opts'):
770 opts = ',' + serialise_pci_opts(new_dev['opts'])
772 bdf_str = "%s@%02x%s" % (pci_dict_to_bdf_str(new_dev),
773 int(new_dev['vdevfn'], 16), opts)
774 log.debug("XendDomainInfo.hvm_pci_device_insert_dev: %s" % bdf_str)
775 bdf = xc.assign_device(self.domid, pci_dict_to_xc_str(new_dev))
776 if bdf > 0:
777 raise VmError("Failed to assign device to IOMMU (%s)" % bdf_str)
778 log.debug("pci: assign device %s" % bdf_str)
779 self.image.signalDeviceModel('pci-ins', 'pci-inserted', bdf_str)
781 vdevfn = xstransact.Read("/local/domain/0/device-model/%i/parameter"
782 % self.getDomid())
783 try:
784 vdevfn_int = int(vdevfn, 16)
785 except ValueError:
786 raise VmError(("Cannot pass-through PCI function '%s'. " +
787 "Device model reported an error: %s") %
788 (bdf_str, vdevfn))
789 else:
790 vdevfn = new_dev['vdevfn']
792 return vdevfn
795 def device_create(self, dev_config):
796 """Create a new device.
798 @param dev_config: device configuration
799 @type dev_config: SXP object (parsed config)
800 """
801 log.debug("XendDomainInfo.device_create: %s" % scrub_password(dev_config))
802 dev_type = sxp.name(dev_config)
803 dev_uuid = self.info.device_add(dev_type, cfg_sxp = dev_config)
804 dev_config_dict = self.info['devices'][dev_uuid][1]
805 log.debug("XendDomainInfo.device_create: %s" % scrub_password(dev_config_dict))
807 if dev_type == 'vif':
808 for x in dev_config:
809 if x != 'vif' and x[0] == 'mac':
810 if not re.match('^([0-9a-f]{2}:){5}[0-9a-f]{2}$', x[1], re.I):
811 log.error("Virtual network interface creation error - invalid MAC Address entered: %s", x[1])
812 raise VmError("Cannot create a new virtual network interface - MAC address is not valid!");
814 if self.domid is not None:
815 try:
816 dev_config_dict['devid'] = devid = \
817 self._createDevice(dev_type, dev_config_dict)
818 if dev_type == 'tap2':
819 # createDevice may create a blktap1 device if blktap2 is not
820 # installed or if the blktap driver is not supported in
821 # blktap1
822 dev_type = self.getBlockDeviceClass(devid)
823 self._waitForDevice(dev_type, devid)
824 except VmError, ex:
825 del self.info['devices'][dev_uuid]
826 if dev_type == 'pci':
827 for dev in dev_config_dict['devs']:
828 XendAPIStore.deregister(dev['uuid'], 'DPCI')
829 elif dev_type == 'vscsi':
830 for dev in dev_config_dict['devs']:
831 XendAPIStore.deregister(dev['uuid'], 'DSCSI')
832 elif dev_type == 'tap' or dev_type == 'tap2':
833 self.info['vbd_refs'].remove(dev_uuid)
834 else:
835 self.info['%s_refs' % dev_type].remove(dev_uuid)
836 raise ex
837 else:
838 devid = None
840 xen.xend.XendDomain.instance().managed_config_save(self)
841 return self.getDeviceController(dev_type).sxpr(devid)
844 def pci_device_configure(self, dev_sxp, devid = 0, first_dev = False):
845 """Configure an existing pci device.
847 @param dev_sxp: device configuration
848 @type dev_sxp: SXP object (parsed config)
849 @param devid: device id
850 @type devid: int
851 @return: Returns True if successfully updated device
852 @rtype: boolean
853 """
854 log.debug("XendDomainInfo.pci_device_configure: %s"
855 % scrub_password(dev_sxp))
857 dev_class = sxp.name(dev_sxp)
859 if dev_class != 'pci':
860 return False
862 pci_state = sxp.child_value(dev_sxp, 'state')
863 pci_sub_state = sxp.child_value(dev_sxp, 'sub_state')
864 existing_dev_info = self._getDeviceInfo_pci(devid)
866 if existing_dev_info is None and pci_state != 'Initialising':
867 raise XendError("Cannot detach when pci platform does not exist")
869 pci_dev = sxp.children(dev_sxp, 'dev')[0]
870 dev_config = pci_convert_sxp_to_dict(dev_sxp)
871 dev = dev_config['devs'][0]
873 stubdomid = self.getStubdomDomid()
874 # Do HVM specific processing
875 if self.info.is_hvm():
876 from xen.xend import XendDomain
877 if pci_state == 'Initialising':
878 if stubdomid is not None :
879 XendDomain.instance().domain_lookup(stubdomid).pci_device_configure(dev_sxp[:])
881 # HVM PCI device attachment
882 if pci_sub_state == 'Booting':
883 vdevfn = self.hvm_pci_device_insert(dev_config)
884 else:
885 vdevfn = self.hvm_pci_device_create(dev_config)
886 # Update vdevfn
887 dev['vdevfn'] = vdevfn
888 for n in sxp.children(pci_dev):
889 if(n[0] == 'vdevfn'):
890 n[1] = vdevfn
891 else:
892 # HVM PCI device detachment
893 existing_dev_uuid = sxp.child_value(existing_dev_info, 'uuid')
894 existing_pci_conf = self.info['devices'][existing_dev_uuid][1]
895 existing_pci_devs = existing_pci_conf['devs']
896 new_devs = filter(lambda x: pci_dict_cmp(x, dev),
897 existing_pci_devs)
898 if len(new_devs) == 0:
899 raise VmError("Device %s is not connected" %
900 pci_dict_to_bdf_str(dev))
901 new_dev = new_devs[0]
902 # Only tell qemu-dm to unplug function 0.
903 # When unplugging a function, all functions in the
904 # same vslot must be unplugged, and function 0 must
905 # be one of the functions present when a vslot is
906 # hot-plugged. Telling qemu-dm to unplug function 0
907 # also tells it to unplug all other functions in the
908 # same vslot.
909 if (PCI_FUNC(int(new_dev['vdevfn'], 16)) == 0):
910 self.hvm_destroyPCIDevice(new_dev)
911 if stubdomid is not None :
912 XendDomain.instance().domain_lookup(stubdomid).pci_device_configure(dev_sxp[:])
913 # Update vdevfn
914 dev['vdevfn'] = new_dev['vdevfn']
915 for n in sxp.children(pci_dev):
916 if(n[0] == 'vdevfn'):
917 n[1] = new_dev['vdevfn']
918 else:
919 # Do PV specific checking
920 if pci_state == 'Initialising':
921 # PV PCI device attachment
922 self.pci_device_check_attachability(dev)
924 # If pci platform does not exist, create and exit.
925 if existing_dev_info is None :
926 self.device_create(dev_sxp)
927 return True
929 if first_dev is True :
930 existing_dev_uuid = sxp.child_value(existing_dev_info, 'uuid')
931 existing_pci_conf = self.info['devices'][existing_dev_uuid][1]
932 devid = self._createDevice('pci', existing_pci_conf)
933 self.info['devices'][existing_dev_uuid][1]['devid'] = devid
935 if self.domid is not None:
936 # use DevController.reconfigureDevice to change device config
937 dev_control = self.getDeviceController(dev_class)
938 dev_uuid = dev_control.reconfigureDevice(devid, dev_config)
939 if not self.info.is_hvm() and not self.info.is_stubdom():
940 # in PV case, wait until backend state becomes connected.
941 dev_control.waitForDevice_reconfigure(devid)
942 num_devs = dev_control.cleanupDevice(devid)
944 # update XendConfig with new device info
945 if dev_uuid:
946 new_dev_sxp = dev_control.configuration(devid)
947 self.info.device_update(dev_uuid, new_dev_sxp)
949 # If there is no device left, destroy pci and remove config.
950 if num_devs == 0:
951 if self.info.is_hvm():
952 self.destroyDevice('pci', devid, True)
953 else:
954 self.destroyDevice('pci', devid)
955 del self.info['devices'][dev_uuid]
956 else:
957 new_dev_sxp = ['pci']
958 for cur_dev in sxp.children(existing_dev_info, 'dev'):
959 if pci_state == 'Closing':
960 if int(dev['domain'], 16) == int(sxp.child_value(cur_dev, 'domain'), 16) and \
961 int(dev['bus'], 16) == int(sxp.child_value(cur_dev, 'bus'), 16) and \
962 int(dev['slot'], 16) == int(sxp.child_value(cur_dev, 'slot'), 16) and \
963 int(dev['func'], 16) == int(sxp.child_value(cur_dev, 'func'), 16):
964 continue
965 new_dev_sxp.append(cur_dev)
967 if pci_state == 'Initialising' and pci_sub_state != 'Booting':
968 for new_dev in sxp.children(dev_sxp, 'dev'):
969 new_dev_sxp.append(new_dev)
971 dev_uuid = sxp.child_value(existing_dev_info, 'uuid')
972 self.info.device_update(dev_uuid, new_dev_sxp)
974 # If there is no device left, remove config.
975 if len(sxp.children(new_dev_sxp, 'dev')) == 0:
976 del self.info['devices'][dev_uuid]
978 xen.xend.XendDomain.instance().managed_config_save(self)
980 return True
982 def vscsi_device_configure(self, dev_sxp):
983 """Configure an existing vscsi device.
984 (adapted from the pci function above)
985 """
986 def _is_vscsi_defined(dev_info, p_devs = None, v_devs = None):
987 if not dev_info:
988 return False
989 for dev in sxp.children(dev_info, 'dev'):
990 if p_devs is not None:
991 if sxp.child_value(dev, 'p-dev') in p_devs:
992 return True
993 if v_devs is not None:
994 if sxp.child_value(dev, 'v-dev') in v_devs:
995 return True
996 return False
998 def _vscsi_be(be):
999 be_xdi = xen.xend.XendDomain.instance().domain_lookup_nr(be)
1000 if be_xdi is not None:
1001 be_domid = be_xdi.getDomid()
1002 if be_domid is not None:
1003 return str(be_domid)
1004 return str(be)
1006 dev_class = sxp.name(dev_sxp)
1007 if dev_class != 'vscsi':
1008 return False
1010 dev_config = self.info.vscsi_convert_sxp_to_dict(dev_sxp)
1011 devs = dev_config['devs']
1012 v_devs = [d['v-dev'] for d in devs]
1013 state = devs[0]['state']
1014 req_devid = int(devs[0]['devid'])
1015 cur_dev_sxp = self._getDeviceInfo_vscsi(req_devid)
1017 if state == xenbusState['Initialising']:
1018 # new create
1019 # If request devid does not exist, create and exit.
1020 p_devs = [d['p-dev'] for d in devs]
1021 for dev_type, dev_info in self.info.all_devices_sxpr():
1022 if dev_type != 'vscsi':
1023 continue
1024 if _is_vscsi_defined(dev_info, p_devs = p_devs):
1025 raise XendError('The physical device "%s" is already defined' % \
1026 p_devs[0])
1027 if cur_dev_sxp is None:
1028 self.device_create(dev_sxp)
1029 return True
1031 if _is_vscsi_defined(cur_dev_sxp, v_devs = v_devs):
1032 raise XendError('The virtual device "%s" is already defined' % \
1033 v_devs[0])
1035 if int(dev_config['feature-host']) != \
1036 int(sxp.child_value(cur_dev_sxp, 'feature-host')):
1037 raise XendError('The physical device "%s" cannot be defined '
1038 'because the mode is different' % devs[0]['p-dev'])
1040 new_be = dev_config.get('backend', None)
1041 if new_be is not None:
1042 cur_be = sxp.child_value(cur_dev_sxp, 'backend', None)
1043 if cur_be is None:
1044 cur_be = xen.xend.XendDomain.DOM0_ID
1045 new_be_dom = _vscsi_be(new_be)
1046 cur_be_dom = _vscsi_be(cur_be)
1047 if new_be_dom != cur_be_dom:
1048 raise XendError('The physical device "%s" cannot be defined '
1049 'because the backend is different' % devs[0]['p-dev'])
1051 elif state == xenbusState['Closing']:
1052 if not _is_vscsi_defined(cur_dev_sxp, v_devs = v_devs):
1053 raise XendError("Cannot detach vscsi device does not exist")
1055 if self.domid is not None:
1056 # use DevController.reconfigureDevice to change device config
1057 dev_control = self.getDeviceController(dev_class)
1058 dev_uuid = dev_control.reconfigureDevice(req_devid, dev_config)
1059 dev_control.waitForDevice_reconfigure(req_devid)
1060 num_devs = dev_control.cleanupDevice(req_devid)
1062 # update XendConfig with new device info
1063 if dev_uuid:
1064 new_dev_sxp = dev_control.configuration(req_devid)
1065 self.info.device_update(dev_uuid, new_dev_sxp)
1067 # If there is no device left, destroy vscsi and remove config.
1068 if num_devs == 0:
1069 self.destroyDevice('vscsi', req_devid)
1070 del self.info['devices'][dev_uuid]
1072 else:
1073 new_dev_sxp = ['vscsi']
1074 cur_mode = sxp.children(cur_dev_sxp, 'feature-host')[0]
1075 new_dev_sxp.append(cur_mode)
1076 try:
1077 cur_be = sxp.children(cur_dev_sxp, 'backend')[0]
1078 new_dev_sxp.append(cur_be)
1079 except IndexError:
1080 pass
1082 for cur_dev in sxp.children(cur_dev_sxp, 'dev'):
1083 if state == xenbusState['Closing']:
1084 if int(cur_mode[1]) == 1:
1085 continue
1086 if sxp.child_value(cur_dev, 'v-dev') in v_devs:
1087 continue
1088 new_dev_sxp.append(cur_dev)
1090 if state == xenbusState['Initialising']:
1091 for new_dev in sxp.children(dev_sxp, 'dev'):
1092 new_dev_sxp.append(new_dev)
1094 dev_uuid = sxp.child_value(cur_dev_sxp, 'uuid')
1095 self.info.device_update(dev_uuid, new_dev_sxp)
1097 # If there is only 'vscsi' in new_dev_sxp, remove the config.
1098 if len(sxp.children(new_dev_sxp, 'dev')) == 0:
1099 del self.info['devices'][dev_uuid]
1101 xen.xend.XendDomain.instance().managed_config_save(self)
1103 return True
1105 def vusb_device_configure(self, dev_sxp, devid):
1106 """Configure a virtual root port.
1107 """
1108 dev_class = sxp.name(dev_sxp)
1109 if dev_class != 'vusb':
1110 return False
1112 dev_config = {}
1113 ports = sxp.child(dev_sxp, 'port')
1114 for port in ports[1:]:
1115 try:
1116 num, bus = port
1117 dev_config['port-%i' % int(num)] = str(bus)
1118 except TypeError:
1119 pass
1121 dev_control = self.getDeviceController(dev_class)
1122 dev_control.reconfigureDevice(devid, dev_config)
1124 return True
1126 def device_configure(self, dev_sxp, devid = None):
1127 """Configure an existing device.
1129 @param dev_config: device configuration
1130 @type dev_config: SXP object (parsed config)
1131 @param devid: device id
1132 @type devid: int
1133 @return: Returns True if successfully updated device
1134 @rtype: boolean
1135 """
1137 # convert device sxp to a dict
1138 dev_class = sxp.name(dev_sxp)
1139 dev_config = {}
1141 if dev_class == 'pci':
1142 return self.pci_device_configure(dev_sxp)
1144 if dev_class == 'vscsi':
1145 return self.vscsi_device_configure(dev_sxp)
1147 if dev_class == 'vusb':
1148 return self.vusb_device_configure(dev_sxp, devid)
1150 for opt_val in dev_sxp[1:]:
1151 try:
1152 dev_config[opt_val[0]] = opt_val[1]
1153 except IndexError:
1154 pass
1156 dev_control = self.getDeviceController(dev_class)
1157 if devid is None:
1158 dev = dev_config.get('dev', '')
1159 if not dev:
1160 raise VmError('Block device must have virtual details specified')
1161 if 'ioemu:' in dev:
1162 (_, dev) = dev.split(':', 1)
1163 try:
1164 (dev, _) = dev.split(':', 1) # Remove ":disk" or ":cdrom"
1165 except ValueError:
1166 pass
1167 devid = dev_control.convertToDeviceNumber(dev)
1168 dev_info = self._getDeviceInfo_vbd(devid)
1169 if dev_info is None:
1170 raise VmError("Device %s not connected" % devid)
1171 dev_uuid = sxp.child_value(dev_info, 'uuid')
1173 if self.domid is not None:
1174 # use DevController.reconfigureDevice to change device config
1175 dev_control.reconfigureDevice(devid, dev_config)
1176 else:
1177 (_, new_b, new_f) = dev_control.getDeviceDetails(dev_config)
1178 if (new_f['device-type'] == 'cdrom' and
1179 sxp.child_value(dev_info, 'dev').endswith(':cdrom') and
1180 new_b['mode'] == 'r' and
1181 sxp.child_value(dev_info, 'mode') == 'r'):
1182 pass
1183 else:
1184 raise VmError('Refusing to reconfigure device %s:%d to %s' %
1185 (dev_class, devid, dev_config))
1187 # update XendConfig with new device info
1188 self.info.device_update(dev_uuid, dev_sxp)
1189 xen.xend.XendDomain.instance().managed_config_save(self)
1191 return True
1193 def waitForDevices(self):
1194 """Wait for this domain's configured devices to connect.
1196 @raise VmError: if any device fails to initialise.
1197 """
1198 for devclass in XendDevices.valid_devices():
1199 self.getDeviceController(devclass).waitForDevices()
1201 def hvm_destroyPCIDevice(self, pci_dev):
1202 log.debug("hvm_destroyPCIDevice: %s", pci_dev)
1204 if not self.info.is_hvm():
1205 raise VmError("hvm_destroyPCIDevice called on non-HVM guest")
1207 # Check the co-assignment.
1208 # To pci-detach a device D from domN, we should ensure: for each DD in the
1209 # list of D's co-assignment devices, DD is not assigned (to domN).
1211 from xen.xend.server.pciif import PciDevice
1212 try:
1213 pci_device = PciDevice(pci_dev)
1214 except Exception, e:
1215 raise VmError("pci: failed to locate device and "+
1216 "parse its resources - "+str(e))
1217 coassignment_list = pci_device.find_coassigned_devices()
1218 coassignment_list.remove(pci_device.name)
1219 assigned_pci_device_str_list = self._get_assigned_pci_devices()
1220 for pci_str in coassignment_list:
1221 if pci_str in assigned_pci_device_str_list:
1222 raise VmError(("pci: failed to pci-detach %s from domain %s" + \
1223 " because one of its co-assignment device %s is still " + \
1224 " assigned to the domain." \
1225 )% (pci_device.name, self.info['name_label'], pci_str))
1228 bdf_str = pci_dict_to_bdf_str(pci_dev)
1229 log.info("hvm_destroyPCIDevice:%s:%s!", pci_dev, bdf_str)
1230 if self.domid is not None:
1231 self.image.signalDeviceModel('pci-rem', 'pci-removed', bdf_str)
1233 return 0
1235 def destroyDevice(self, deviceClass, devid, force = False, rm_cfg = False):
1236 log.debug("XendDomainInfo.destroyDevice: deviceClass = %s, device = %s",
1237 deviceClass, devid)
1239 if rm_cfg:
1240 # Convert devid to device number. A device number is
1241 # needed to remove its configuration.
1242 dev = self.getDeviceController(deviceClass).convertToDeviceNumber(devid)
1244 # Save current sxprs. A device number and a backend
1245 # path are needed to remove its configuration but sxprs
1246 # do not have those after calling destroyDevice.
1247 sxprs = self.getDeviceSxprs(deviceClass)
1249 rc = None
1250 if self.domid is not None:
1252 #new blktap implementation may need a sysfs write after everything is torn down.
1253 if deviceClass == 'tap2':
1254 dev = self.getDeviceController(deviceClass).convertToDeviceNumber(devid)
1255 path = self.getDeviceController(deviceClass).readBackend(dev, 'params')
1256 frontpath = self.getDeviceController(deviceClass).frontendPath(dev)
1257 backpath = xstransact.Read(frontpath, "backend")
1258 thread.start_new_thread(self.getDeviceController(deviceClass).finishDeviceCleanup, (backpath, path))
1260 rc = self.getDeviceController(deviceClass).destroyDevice(devid, force)
1261 if not force and rm_cfg:
1262 # The backend path, other than the device itself,
1263 # has to be passed because its accompanied frontend
1264 # path may be void until its removal is actually
1265 # issued. It is probable because destroyDevice is
1266 # issued first.
1267 for dev_num, dev_info in sxprs:
1268 dev_num = int(dev_num)
1269 if dev_num == dev:
1270 for x in dev_info:
1271 if x[0] == 'backend':
1272 backend = x[1]
1273 break
1274 break
1275 self._waitForDevice_destroy(deviceClass, devid, backend)
1277 if rm_cfg and deviceClass != "vif2":
1278 if deviceClass == 'vif':
1279 if self.domid is not None:
1280 mac = ''
1281 for dev_num, dev_info in sxprs:
1282 dev_num = int(dev_num)
1283 if dev_num == dev:
1284 for x in dev_info:
1285 if x[0] == 'mac':
1286 mac = x[1]
1287 break
1288 break
1289 dev_info = self._getDeviceInfo_vif(mac)
1290 else:
1291 _, dev_info = sxprs[dev]
1292 else: # 'vbd' or 'tap' or 'tap2'
1293 dev_info = self._getDeviceInfo_vbd(dev)
1294 # To remove the UUID of the device from refs,
1295 # deviceClass must always be 'vbd'.
1296 deviceClass = 'vbd'
1297 if dev_info is None:
1298 raise XendError("Device %s is not defined" % devid)
1300 dev_uuid = sxp.child_value(dev_info, 'uuid')
1301 del self.info['devices'][dev_uuid]
1302 self.info['%s_refs' % deviceClass].remove(dev_uuid)
1303 xen.xend.XendDomain.instance().managed_config_save(self)
1305 return rc
1307 def getDeviceSxprs(self, deviceClass):
1308 if deviceClass == 'pci':
1309 dev_info = self._getDeviceInfo_pci('0')#from self.info['devices']
1310 if dev_info is None:
1311 return []
1312 dev_uuid = sxp.child_value(dev_info, 'uuid')
1313 pci_devs = self.info['devices'][dev_uuid][1]['devs']
1314 return pci_devs
1315 if self._stateGet() in (DOM_STATE_RUNNING, DOM_STATE_PAUSED, DOM_STATE_CRASHED):
1316 return self.getDeviceController(deviceClass).sxprs()
1317 else:
1318 sxprs = []
1319 dev_num = 0
1320 for dev_type, dev_info in self.info.all_devices_sxpr():
1321 if (deviceClass == 'vbd' and dev_type not in ['vbd', 'tap', 'tap2']) or \
1322 (deviceClass != 'vbd' and dev_type != deviceClass):
1323 continue
1325 if deviceClass == 'vscsi':
1326 vscsi_devs = ['devs', []]
1327 for vscsi_dev in sxp.children(dev_info, 'dev'):
1328 vscsi_dev.append(['frontstate', None])
1329 vscsi_devs[1].append(vscsi_dev)
1330 dev_num = int(sxp.child_value(vscsi_dev, 'devid'))
1331 vscsi_mode = sxp.children(dev_info, 'feature-host')[0]
1332 sxprs.append([dev_num, [vscsi_devs, vscsi_mode]])
1333 elif deviceClass == 'vbd':
1334 dev = sxp.child_value(dev_info, 'dev')
1335 if 'ioemu:' in dev:
1336 (_, dev) = dev.split(':', 1)
1337 try:
1338 (dev_name, _) = dev.split(':', 1) # Remove ":disk" or ":cdrom"
1339 except ValueError:
1340 dev_name = dev
1341 dev_num = self.getDeviceController('vbd').convertToDeviceNumber(dev_name)
1342 sxprs.append([dev_num, dev_info])
1343 else:
1344 sxprs.append([dev_num, dev_info])
1345 dev_num += 1
1346 return sxprs
1348 def getBlockDeviceClass(self, devid):
1349 # if the domain is running we can get the device class from xenstore.
1350 # This is more accurate, as blktap1 devices show up as blktap2 devices
1351 # in the config.
1352 if self._stateGet() in (DOM_STATE_RUNNING, DOM_STATE_PAUSED, DOM_STATE_CRASHED):
1353 # All block devices have a vbd frontend, so we know the frontend path
1354 dev = self.getDeviceController('vbd').convertToDeviceNumber(devid)
1355 frontendPath = "%s/device/vbd/%s" % (self.dompath, dev)
1356 for devclass in XendDevices.valid_devices():
1357 for dev in xstransact.List("%s/device/%s" % (self.vmpath, devclass)):
1358 devFrontendPath = xstransact.Read("%s/device/%s/%s/frontend" % (self.vmpath, devclass, dev))
1359 if frontendPath == devFrontendPath:
1360 return devclass
1362 else: # the domain is not active so we must get the device class
1363 # from the config
1364 # To get a device number from the devid,
1365 # we temporarily use the device controller of VBD.
1366 dev = self.getDeviceController('vbd').convertToDeviceNumber(devid)
1367 dev_info = self._getDeviceInfo_vbd(dev)
1368 if dev_info:
1369 return dev_info[0]
1371 def _getDeviceInfo_vif(self, mac):
1372 for dev_type, dev_info in self.info.all_devices_sxpr():
1373 if dev_type != 'vif':
1374 continue
1375 if mac == sxp.child_value(dev_info, 'mac'):
1376 return dev_info
1378 def _getDeviceInfo_vbd(self, devid):
1379 for dev_type, dev_info in self.info.all_devices_sxpr():
1380 if dev_type != 'vbd' and dev_type != 'tap' and dev_type != 'tap2':
1381 continue
1382 dev = sxp.child_value(dev_info, 'dev')
1383 dev = dev.split(':')[0]
1384 dev = self.getDeviceController(dev_type).convertToDeviceNumber(dev)
1385 if devid == dev:
1386 return dev_info
1388 def _getDeviceInfo_pci(self, devid):
1389 for dev_type, dev_info in self.info.all_devices_sxpr():
1390 if dev_type != 'pci':
1391 continue
1392 return dev_info
1393 return None
1395 def _getDeviceInfo_vscsi(self, devid):
1396 devid = int(devid)
1397 for dev_type, dev_info in self.info.all_devices_sxpr():
1398 if dev_type != 'vscsi':
1399 continue
1400 devs = sxp.children(dev_info, 'dev')
1401 if devid == int(sxp.child_value(devs[0], 'devid')):
1402 return dev_info
1403 return None
1405 def _getDeviceInfo_vusb(self, devid):
1406 for dev_type, dev_info in self.info.all_devices_sxpr():
1407 if dev_type != 'vusb':
1408 continue
1409 return dev_info
1410 return None
1412 def _get_assigned_pci_devices(self, devid = 0):
1413 if self.domid is not None:
1414 return get_assigned_pci_devices(self.domid)
1416 dev_info = self._getDeviceInfo_pci(devid)
1417 if dev_info is None:
1418 return []
1419 dev_uuid = sxp.child_value(dev_info, 'uuid')
1420 pci_conf = self.info['devices'][dev_uuid][1]
1421 return map(pci_dict_to_bdf_str, pci_conf['devs'])
1423 def setMemoryTarget(self, target):
1424 """Set the memory target of this domain.
1425 @param target: In MiB.
1426 """
1427 log.debug("Setting memory target of domain %s (%s) to %d MiB.",
1428 self.info['name_label'], str(self.domid), target)
1430 MiB = 1024 * 1024
1431 memory_cur = self.get_memory_dynamic_max() / MiB
1433 if self.domid == 0:
1434 dom0_min_mem = xoptions.get_dom0_min_mem()
1435 if target < memory_cur and dom0_min_mem > target:
1436 raise XendError("memory_dynamic_max too small")
1438 self._safe_set_memory('memory_dynamic_min', target * MiB)
1439 self._safe_set_memory('memory_dynamic_max', target * MiB)
1441 if self.domid >= 0:
1442 if target > memory_cur:
1443 balloon.free((target - memory_cur) * 1024, self)
1444 self.storeVm("memory", target)
1445 self.storeDom("memory/target", target << 10)
1446 xc.domain_set_target_mem(self.domid,
1447 (target * 1024))
1448 xen.xend.XendDomain.instance().managed_config_save(self)
1450 def setMemoryMaximum(self, limit):
1451 """Set the maximum memory limit of this domain
1452 @param limit: In MiB.
1453 """
1454 log.debug("Setting memory maximum of domain %s (%s) to %d MiB.",
1455 self.info['name_label'], str(self.domid), limit)
1457 maxmem_cur = self.get_memory_static_max()
1458 MiB = 1024 * 1024
1459 self._safe_set_memory('memory_static_max', limit * MiB)
1461 if self.domid >= 0:
1462 maxmem = int(limit) * 1024
1463 try:
1464 return xc.domain_setmaxmem(self.domid, maxmem)
1465 except Exception, ex:
1466 self._safe_set_memory('memory_static_max', maxmem_cur)
1467 raise XendError(str(ex))
1468 xen.xend.XendDomain.instance().managed_config_save(self)
1471 def getVCPUInfo(self):
1472 try:
1473 # We include the domain name and ID, to help xm.
1474 sxpr = ['domain',
1475 ['domid', self.domid],
1476 ['name', self.info['name_label']],
1477 ['vcpu_count', self.info['VCPUs_max']]]
1479 for i in range(0, self.info['VCPUs_max']):
1480 if self.domid is not None:
1481 info = xc.vcpu_getinfo(self.domid, i)
1483 sxpr.append(['vcpu',
1484 ['number', i],
1485 ['online', info['online']],
1486 ['blocked', info['blocked']],
1487 ['running', info['running']],
1488 ['cpu_time', info['cpu_time'] / 1e9],
1489 ['cpu', info['cpu']],
1490 ['cpumap', info['cpumap']]])
1491 else:
1492 sxpr.append(['vcpu',
1493 ['number', i],
1494 ['online', 0],
1495 ['blocked', 0],
1496 ['running', 0],
1497 ['cpu_time', 0.0],
1498 ['cpu', -1],
1499 ['cpumap', self.info['cpus'][i] and \
1500 self.info['cpus'][i] or range(64)]])
1502 return sxpr
1504 except RuntimeError, exn:
1505 raise XendError(str(exn))
1508 def getDomInfo(self):
1509 return dom_get(self.domid)
1512 # internal functions ... TODO: re-categorised
1515 def _augmentInfo(self, priv):
1516 """Augment self.info, as given to us through L{recreate}, with
1517 values taken from the store. This recovers those values known
1518 to xend but not to the hypervisor.
1519 """
1520 augment_entries = XendConfig.LEGACY_XENSTORE_VM_PARAMS[:]
1521 if priv:
1522 augment_entries.remove('memory')
1523 augment_entries.remove('maxmem')
1524 augment_entries.remove('vcpus')
1525 augment_entries.remove('vcpu_avail')
1527 vm_config = self._readVMDetails([(k, XendConfig.LEGACY_CFG_TYPES[k])
1528 for k in augment_entries])
1530 # make returned lists into a dictionary
1531 vm_config = dict(zip(augment_entries, vm_config))
1533 for arg in augment_entries:
1534 val = vm_config[arg]
1535 if val != None:
1536 if arg in XendConfig.LEGACY_CFG_TO_XENAPI_CFG:
1537 xapiarg = XendConfig.LEGACY_CFG_TO_XENAPI_CFG[arg]
1538 self.info[xapiarg] = val
1539 elif arg == "memory":
1540 self.info["static_memory_min"] = val
1541 elif arg == "maxmem":
1542 self.info["static_memory_max"] = val
1543 else:
1544 self.info[arg] = val
1546 # read CPU Affinity
1547 self.info['cpus'] = []
1548 vcpus_info = self.getVCPUInfo()
1549 for vcpu_info in sxp.children(vcpus_info, 'vcpu'):
1550 self.info['cpus'].append(sxp.child_value(vcpu_info, 'cpumap'))
1552 # For dom0, we ignore any stored value for the vcpus fields, and
1553 # read the current value from Xen instead. This allows boot-time
1554 # settings to take precedence over any entries in the store.
1555 if priv:
1556 xeninfo = dom_get(self.domid)
1557 self.info['VCPUs_max'] = xeninfo['online_vcpus']
1558 self.info['vcpu_avail'] = (1 << xeninfo['online_vcpus']) - 1
1560 # read image value
1561 image_sxp = self._readVm('image')
1562 if image_sxp:
1563 self.info.update_with_image_sxp(sxp.from_string(image_sxp))
1565 # read devices
1566 devices = []
1567 for devclass in XendDevices.valid_devices():
1568 devconfig = self.getDeviceController(devclass).configurations()
1569 if devconfig:
1570 devices.extend(devconfig)
1572 if not self.info['devices'] and devices is not None:
1573 for device in devices:
1574 self.info.device_add(device[0], cfg_sxp = device)
1576 self._update_consoles()
1578 def _update_consoles(self, transaction = None):
1579 if self.domid == None or self.domid == 0:
1580 return
1582 # Update VT100 port if it exists
1583 if transaction is None:
1584 self.console_port = self.readDom('console/port')
1585 else:
1586 self.console_port = self.readDomTxn(transaction, 'console/port')
1587 if self.console_port is not None:
1588 serial_consoles = self.info.console_get_all('vt100')
1589 if not serial_consoles:
1590 cfg = self.info.console_add('vt100', self.console_port)
1591 self._createDevice('console', cfg)
1592 else:
1593 console_uuid = serial_consoles[0].get('uuid')
1594 self.info.console_update(console_uuid, 'location',
1595 self.console_port)
1598 # Update VNC port if it exists and write to xenstore
1599 if transaction is None:
1600 vnc_port = self.readDom('console/vnc-port')
1601 else:
1602 vnc_port = self.readDomTxn(transaction, 'console/vnc-port')
1603 if vnc_port is not None:
1604 for dev_uuid, (dev_type, dev_info) in self.info['devices'].items():
1605 if dev_type == 'vfb':
1606 old_location = dev_info.get('location')
1607 listen_host = dev_info.get('vnclisten', \
1608 XendOptions.instance().get_vnclisten_address())
1609 new_location = '%s:%s' % (listen_host, str(vnc_port))
1610 if old_location == new_location:
1611 break
1613 dev_info['location'] = new_location
1614 self.info.device_update(dev_uuid, cfg_xenapi = dev_info)
1615 vfb_ctrl = self.getDeviceController('vfb')
1616 vfb_ctrl.reconfigureDevice(0, dev_info)
1617 break
1620 # Function to update xenstore /vm/*
1623 def _readVm(self, *args):
1624 return xstransact.Read(self.vmpath, *args)
1626 def _writeVm(self, *args):
1627 return xstransact.Write(self.vmpath, *args)
1629 def _removeVm(self, *args):
1630 return xstransact.Remove(self.vmpath, *args)
1632 def _gatherVm(self, *args):
1633 return xstransact.Gather(self.vmpath, *args)
1635 def _listRecursiveVm(self, *args):
1636 return xstransact.ListRecursive(self.vmpath, *args)
1638 def storeVm(self, *args):
1639 return xstransact.Store(self.vmpath, *args)
1641 def permissionsVm(self, *args):
1642 return xstransact.SetPermissions(self.vmpath, *args)
1645 # Function to update xenstore /dom/*
1648 def readDom(self, *args):
1649 return xstransact.Read(self.dompath, *args)
1651 def gatherDom(self, *args):
1652 return xstransact.Gather(self.dompath, *args)
1654 def _writeDom(self, *args):
1655 return xstransact.Write(self.dompath, *args)
1657 def _removeDom(self, *args):
1658 return xstransact.Remove(self.dompath, *args)
1660 def storeDom(self, *args):
1661 return xstransact.Store(self.dompath, *args)
1664 def readDomTxn(self, transaction, *args):
1665 paths = map(lambda x: self.dompath + "/" + x, args)
1666 return transaction.read(*paths)
1668 def gatherDomTxn(self, transaction, *args):
1669 paths = map(lambda x: self.dompath + "/" + x, args)
1670 return transaction.gather(*paths)
1672 def _writeDomTxn(self, transaction, *args):
1673 paths = map(lambda x: self.dompath + "/" + x, args)
1674 return transaction.write(*paths)
1676 def _removeDomTxn(self, transaction, *args):
1677 paths = map(lambda x: self.dompath + "/" + x, args)
1678 return transaction.remove(*paths)
1680 def storeDomTxn(self, transaction, *args):
1681 paths = map(lambda x: self.dompath + "/" + x, args)
1682 return transaction.store(*paths)
1685 def _recreateDom(self):
1686 complete(self.dompath, lambda t: self._recreateDomFunc(t))
1688 def _recreateDomFunc(self, t):
1689 t.remove()
1690 t.mkdir()
1691 t.set_permissions({'dom' : self.domid, 'read' : True})
1692 t.write('vm', self.vmpath)
1693 # NB. Solaris guests use guest/ and hvmpv/ xenstore directories
1694 # XCP Windows paravirtualized guests use data/
1695 for i in [ 'device', 'control', 'error', 'memory', 'guest', \
1696 'hvmpv', 'data' ]:
1697 t.mkdir(i)
1698 t.set_permissions(i, {'dom' : self.domid})
1700 def _storeDomDetails(self):
1701 to_store = {
1702 'domid': str(self.domid),
1703 'vm': self.vmpath,
1704 'name': self.info['name_label'],
1705 'console/limit': str(xoptions.get_console_limit() * 1024),
1706 'memory/target': str(self.info['memory_dynamic_max'] / 1024),
1707 'description': str(self.info['description']),
1708 }
1710 def f(n, v):
1711 if v is not None:
1712 if type(v) == bool:
1713 to_store[n] = v and "1" or "0"
1714 else:
1715 to_store[n] = str(v)
1717 # Figure out if we need to tell xenconsoled to ignore this guest's
1718 # console - device model will handle console if it is running
1719 constype = "ioemu"
1720 if 'device_model' not in self.info['platform']:
1721 constype = "xenconsoled"
1723 f('console/port', self.console_port)
1724 f('console/ring-ref', self.console_mfn)
1725 f('console/type', constype)
1726 f('store/port', self.store_port)
1727 f('store/ring-ref', self.store_mfn)
1729 if arch.type == "x86":
1730 f('control/platform-feature-multiprocessor-suspend', True)
1732 # elfnotes
1733 for n, v in self.info.get_notes().iteritems():
1734 n = n.lower().replace('_', '-')
1735 if n == 'features':
1736 for v in v.split('|'):
1737 v = v.replace('_', '-')
1738 if v.startswith('!'):
1739 f('image/%s/%s' % (n, v[1:]), False)
1740 else:
1741 f('image/%s/%s' % (n, v), True)
1742 else:
1743 f('image/%s' % n, v)
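# --- Editor's note: illustrative example, not part of the original file ---
# The ELF 'features' note is a '|'-separated list in which a '!' prefix
# marks a feature as disabled. For a hypothetical note value
#   'writable_page_tables|!writable_descriptor_tables'
# the loop above records (before f() stringifies the booleans):
#   image/features/writable-page-tables       -> True
#   image/features/writable-descriptor-tables -> False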
1745 if self.info.has_key('security_label'):
1746 f('security_label', self.info['security_label'])
1748 to_store.update(self._vcpuDomDetails())
1750 log.debug("Storing domain details: %s", scrub_password(to_store))
1752 self._writeDom(to_store)
1754 def _vcpuDomDetails(self):
1755 def availability(n):
1756 if self.info['vcpu_avail'] & (1 << n):
1757 return 'online'
1758 else:
1759 return 'offline'
1761 result = {}
1762 for v in range(0, self.info['VCPUs_max']):
1763 result["cpu/%d/availability" % v] = availability(v)
1764 return result
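# --- Editor's note: illustrative sketch, not part of the original file ---
# vcpu_avail is a bitmask in which bit n set means VCPU n is online.
# A standalone rendering of the mapping built above:
def _example_vcpu_availability(vcpu_avail, vcpus_max):
    result = {}
    for v in range(0, vcpus_max):
        online = bool(vcpu_avail & (1 << v))
        result["cpu/%d/availability" % v] = online and 'online' or 'offline'
    return result
# _example_vcpu_availability(5, 4) marks VCPUs 0 and 2 online and
# VCPUs 1 and 3 offline, since 5 == 0b0101.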
1767 # xenstore watches
1770 def _registerWatches(self):
1771 """Register a watch on this VM's entries in the store, and the
1772 domain's control/shutdown node, so that when they are changed
1773 externally, we keep up to date. This should only be called by {@link
1774 #create}, {@link #recreate}, or {@link #restore}, once the domain's
1775 details have been written, but before the new instance is returned."""
1776 self.vmWatch = xswatch(self.vmpath, self._storeChanged)
1777 self.shutdownWatch = xswatch(self.dompath + '/control/shutdown',
1778 self._handleShutdownWatch)
1780 def _storeChanged(self, _):
1781 log.trace("XendDomainInfo.storeChanged");
1783 changed = False
1785 # Check whether values in the configuration have
1786 # changed in Xenstore.
1788 cfg_vm = ['name', 'on_poweroff', 'on_reboot', 'on_crash',
1789 'rtc/timeoffset']
1791 vm_details = self._readVMDetails([(k,XendConfig.LEGACY_CFG_TYPES[k])
1792 for k in cfg_vm])
1794 # convert two lists into a python dictionary
1795 vm_details = dict(zip(cfg_vm, vm_details))
1797 for arg, val in vm_details.items():
1798 if arg in XendConfig.LEGACY_CFG_TO_XENAPI_CFG:
1799 xapiarg = XendConfig.LEGACY_CFG_TO_XENAPI_CFG[arg]
1800 if val != None and val != self.info[xapiarg]:
1801 self.info[xapiarg] = val
1802 changed = True
1803 elif arg == "memory":
1804 if val != None and val != self.info["static_memory_min"]:
1805 self.info["static_memory_min"] = val
1806 changed = True
1807 elif arg == "maxmem":
1808 if val != None and val != self.info["static_memory_max"]:
1809 self.info["static_memory_max"] = val
1810 changed = True
1812 # Check whether image definition has been updated
1813 image_sxp = self._readVm('image')
1814 if image_sxp and image_sxp != sxp.to_string(self.info.image_sxpr()):
1815 self.info.update_with_image_sxp(sxp.from_string(image_sxp))
1816 changed = True
1818 # Update the rtc_timeoffset to be preserved across reboot.
1819 # NB. No need to update xenstore domain section.
1820 val = int(vm_details.get("rtc/timeoffset", 0))
1821 self.info["platform"]["rtc_timeoffset"] = val
1823 if changed:
1824 # Update the domain section of the store, as this contains some
1825 # parameters derived from the VM configuration.
1826 self.refresh_shutdown_lock.acquire()
1827 try:
1828 state = self._stateGet()
1829 if state not in (DOM_STATE_SHUTDOWN, DOM_STATE_HALTED,):
1830 self._storeDomDetails()
1831 finally:
1832 self.refresh_shutdown_lock.release()
1834 return 1
1836 def _handleShutdownWatch(self, _):
1837 log.debug('XendDomainInfo.handleShutdownWatch')
1839 reason = self.readDom('control/shutdown')
1841 if reason and reason != 'suspend':
1842 sst = self.readDom('xend/shutdown_start_time')
1843 now = time.time()
1844 if sst:
1845 self.shutdownStartTime = float(sst)
1846 timeout = float(sst) + SHUTDOWN_TIMEOUT - now
1847 else:
1848 self.shutdownStartTime = now
1849 self.storeDom('xend/shutdown_start_time', now)
1850 timeout = SHUTDOWN_TIMEOUT
1852 log.trace(
1853 "Scheduling refreshShutdown on domain %d in %ds.",
1854 self.domid, timeout)
1855 threading.Timer(timeout, self.refreshShutdown).start()
1857 return True
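# --- Editor's note: illustrative example, not part of the original file ---
# Timeout bookkeeping in _handleShutdownWatch above: if a prior watch
# already stored xend/shutdown_start_time 20 seconds ago and
# SHUTDOWN_TIMEOUT were, say, 60 seconds, the refreshShutdown check is
# scheduled 40 seconds from now; a fresh shutdown gets the full timeout.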
1861 # Public Attributes for the VM
1865 def getDomid(self):
1866 return self.domid
1868 def getStubdomDomid(self):
1869 dom_list = xstransact.List('/local/domain')
1870 for d in dom_list:
1871 target = xstransact.Read('/local/domain/' + d + '/target')
1872 if target is not None and int(target) == self.domid:
1873 return int(d)
1874 return None
1876 def setName(self, name, to_store = True):
1877 self._checkName(name)
1878 self.info['name_label'] = name
1879 if to_store:
1880 self.storeVm("name", name)
1882 def getName(self):
1883 return self.info['name_label']
1885 def getDomainPath(self):
1886 return self.dompath
1888 def getShutdownReason(self):
1889 return self.readDom('control/shutdown')
1891 def getStorePort(self):
1892 """For use only by image.py and XendCheckpoint.py."""
1893 return self.store_port
1895 def getConsolePort(self):
1896 """For use only by image.py and XendCheckpoint.py"""
1897 return self.console_port
1899 def getFeatures(self):
1900 """For use only by image.py."""
1901 return self.info['features']
1903 def getVCpuCount(self):
1904 return self.info['VCPUs_max']
1906 def setVCpuCount(self, vcpus):
1907 def vcpus_valid(n):
1908 if vcpus <= 0:
1909 raise XendError('A VCPU count of zero or less is invalid')
1910 if self.domid >= 0 and vcpus > self.info['VCPUs_max']:
1911 raise XendError('Cannot set vcpus greater than max vcpus on running domain')
1912 vcpus_valid(vcpus)
1914 self.info['vcpu_avail'] = (1 << vcpus) - 1
1915 if self.domid >= 0:
1916 self.storeVm('vcpu_avail', self.info['vcpu_avail'])
1917 self._writeDom(self._vcpuDomDetails())
1918 self.info['VCPUs_live'] = vcpus
1919 else:
1920 if self.info['VCPUs_max'] > vcpus:
1921 # decreasing
1922 del self.info['cpus'][vcpus:]
1923 elif self.info['VCPUs_max'] < vcpus:
1924 # increasing
1925 for c in range(self.info['VCPUs_max'], vcpus):
1926 self.info['cpus'].append(list())
1927 self.info['VCPUs_max'] = vcpus
1928 xen.xend.XendDomain.instance().managed_config_save(self)
1929 log.info("Set VCPU count on domain %s to %d", self.info['name_label'],
1930 vcpus)
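# --- Editor's note: illustrative example, not part of the original file ---
# (1 << vcpus) - 1 above builds an all-online availability mask: for
# vcpus = 3 it yields 0b111 == 7, i.e. VCPUs 0-2 online.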
1932 def getMemoryTarget(self):
1933 """Get this domain's target memory size, in KB."""
1934 return self.info['memory_dynamic_max'] / 1024
1936 def getMemoryMaximum(self):
1937 """Get this domain's maximum memory size, in KB."""
1938 # remember, info now stores memory in bytes
1939 return self.info['memory_static_max'] / 1024
1941 def getResume(self):
1942 return str(self._resume)
1944 def setResume(self, isresume):
1945 self._resume = isresume
1947 def getCpus(self):
1948 return self.info['cpus']
1950 def setCpus(self, cpumap):
1951 self.info['cpus'] = cpumap
1953 def getCap(self):
1954 return self.info['vcpus_params']['cap']
1956 def setCap(self, cpu_cap):
1957 self.info['vcpus_params']['cap'] = cpu_cap
1959 def getWeight(self):
1960 return self.info['vcpus_params']['weight']
1962 def setWeight(self, cpu_weight):
1963 self.info['vcpus_params']['weight'] = cpu_weight
1965 def getRestartCount(self):
1966 return self._readVm('xend/restart_count')
1968 def refreshShutdown(self, xeninfo = None):
1969 """ Checks the domain for whether a shutdown is required.
1971 Called from XendDomainInfo and also image.py for HVM images.
1972 """
1974 # If set at the end of this method, a restart is required, with the
1975 # given reason. This restart has to be done out of the scope of
1976 # refresh_shutdown_lock.
1977 restart_reason = None
1979 self.refresh_shutdown_lock.acquire()
1980 try:
1981 if xeninfo is None:
1982 xeninfo = dom_get(self.domid)
1983 if xeninfo is None:
1984 # The domain no longer exists. This will occur if we have
1985 # scheduled a timer to check for shutdown timeouts and the
1986 # shutdown succeeded. It will also occur if someone
1987 # destroys a domain beneath us. We clean up the domain,
1988 # just in case, but we can't clean up the VM, because that
1989 # VM may have migrated to a different domain on this
1990 # machine.
1991 self.cleanupDomain()
1992 self._stateSet(DOM_STATE_HALTED)
1993 return
1995 if xeninfo['dying']:
1996 # Dying means that a domain has been destroyed, but has not
1997 # yet been cleaned up by Xen. This state could persist
1998 # indefinitely if, for example, another domain has some of its
1999 # pages mapped. We might like to diagnose this problem in the
2000 # future, but for now all we do is make sure that it's not us
2001 # holding the pages, by calling cleanupDomain. We can't
2002 # clean up the VM, as above.
2003 self.cleanupDomain()
2004 self._stateSet(DOM_STATE_SHUTDOWN)
2005 return
2007 elif xeninfo['crashed']:
2008 if self.readDom('xend/shutdown_completed'):
2009 # We've seen this shutdown already, but we are preserving
2010 # the domain for debugging. Leave it alone.
2011 return
2013 log.warn('Domain has crashed: name=%s id=%d.',
2014 self.info['name_label'], self.domid)
2015 self._writeVm(LAST_SHUTDOWN_REASON, 'crash')
2017 restart_reason = 'crash'
2018 self._stateSet(DOM_STATE_HALTED)
2020 elif xeninfo['shutdown']:
2021 self._stateSet(DOM_STATE_SHUTDOWN)
2022 if self.readDom('xend/shutdown_completed'):
2023 # We've seen this shutdown already, but we are preserving
2024 # the domain for debugging. Leave it alone.
2025 return
2027 else:
2028 reason = shutdown_reason(xeninfo['shutdown_reason'])
2030 log.info('Domain has shutdown: name=%s id=%d reason=%s.',
2031 self.info['name_label'], self.domid, reason)
2032 self._writeVm(LAST_SHUTDOWN_REASON, reason)
2034 self._clearRestart()
2036 if reason == 'suspend':
2037 self._stateSet(DOM_STATE_SUSPENDED)
2038 # Don't destroy the domain. XendCheckpoint will do
2039 # this once it has finished. However, stop watching
2040 # the VM path now, otherwise we will end up with one
2041 # watch for the old domain, and one for the new.
2042 self._unwatchVm()
2043 elif reason in ('poweroff', 'reboot'):
2044 restart_reason = reason
2045 else:
2046 self.destroy()
2048 elif self.dompath is None:
2049 # We have yet to manage to call introduceDomain on this
2050 # domain. This can happen if a restore is in progress, or has
2051 # failed. Ignore this domain.
2052 pass
2053 else:
2054 # Domain is alive. If we are shutting it down, log a message
2055 # if it seems unresponsive.
2056 if xeninfo['paused']:
2057 self._stateSet(DOM_STATE_PAUSED)
2058 else:
2059 self._stateSet(DOM_STATE_RUNNING)
2061 if self.shutdownStartTime:
2062 timeout = (SHUTDOWN_TIMEOUT - time.time() +
2063 self.shutdownStartTime)
2064 if (timeout < 0 and not self.readDom('xend/unresponsive')):
2065 log.info(
2066 "Domain shutdown timeout expired: name=%s id=%s",
2067 self.info['name_label'], self.domid)
2068 self.storeDom('xend/unresponsive', 'True')
2069 finally:
2070 self.refresh_shutdown_lock.release()
2072 if restart_reason and not self.restart_in_progress:
2073 self.restart_in_progress = True
2074 threading.Thread(target = self._maybeRestart,
2075 args = (restart_reason,)).start()
2079 # Restart functions - handling whether we come back up on shutdown.
2082 def _clearRestart(self):
2083 self._removeDom("xend/shutdown_start_time")
2085 def _maybeDumpCore(self, reason):
2086 if reason == 'crash':
2087 if xoptions.get_enable_dump() or self.get_on_crash() \
2088 in ['coredump_and_destroy', 'coredump_and_restart']:
2089 try:
2090 self.dumpCore()
2091 except XendError:
2092 # This error has been logged -- there's nothing more
2093 # we can do in this context.
2094 pass
2096 def _maybeRestart(self, reason):
2097 # Before taking configured action, dump core if configured to do so.
2099 self._maybeDumpCore(reason)
2101 # Dispatch to the correct method based upon the configured on_{reason}
2102 # behaviour.
2103 actions = {"destroy" : self.destroy,
2104 "restart" : self._restart,
2105 "preserve" : self._preserve,
2106 "rename-restart" : self._renameRestart,
2107 "coredump-destroy" : self.destroy,
2108 "coredump-restart" : self._restart}
2110 action_conf = {
2111 'poweroff': 'actions_after_shutdown',
2112 'reboot': 'actions_after_reboot',
2113 'crash': 'actions_after_crash',
2114 }
2116 action_target = self.info.get(action_conf.get(reason))
2117 func = actions.get(action_target, None)
2118 if func and callable(func):
2119 func()
2120 else:
2121 self.destroy() # default to destroy
2123 def _renameRestart(self):
2124 self._restart(True)
2126 def _restart(self, rename = False):
2127 """Restart the domain after it has exited.
2129 @param rename True if the old domain is to be renamed and preserved,
2130 False if it is to be destroyed.
2131 """
2132 from xen.xend import XendDomain
2134 if self._readVm(RESTART_IN_PROGRESS):
2135 log.error('Xend failed during restart of domain %s. '
2136 'Refusing to restart to avoid loops.',
2137 str(self.domid))
2138 self.destroy()
2139 return
2141 old_domid = self.domid
2142 self._writeVm(RESTART_IN_PROGRESS, 'True')
2144 elapse = time.time() - self.info['start_time']
2145 if elapse < MINIMUM_RESTART_TIME:
2146 log.error('VM %s restarting too fast (Elapsed time: %f seconds). '
2147 'Refusing to restart to avoid loops.',
2148 self.info['name_label'], elapse)
2149 self.destroy()
2150 return
2152 prev_vm_xend = self._listRecursiveVm('xend')
2153 new_dom_info = self.info
2154 try:
2155 if rename:
2156 new_dom_info = self._preserveForRestart()
2157 else:
2158 self._unwatchVm()
2159 self.destroy()
2161 # new_dom's VM will be the same as this domain's VM, except where
2162 # the rename flag has instructed us to call preserveForRestart.
2163 # In that case, it is important that we remove the
2164 # RESTART_IN_PROGRESS node from the new domain, not the old one,
2165 # once the new one is available.
2167 new_dom = None
2168 try:
2169 new_dom = XendDomain.instance().domain_create_from_dict(
2170 new_dom_info)
2171 for x in prev_vm_xend[0][1]:
2172 new_dom._writeVm('xend/%s' % x[0], x[1])
2173 new_dom.waitForDevices()
2174 new_dom.unpause()
2175 rst_cnt = new_dom._readVm('xend/restart_count')
2176 rst_cnt = int(rst_cnt) + 1
2177 new_dom._writeVm('xend/restart_count', str(rst_cnt))
2178 new_dom._removeVm(RESTART_IN_PROGRESS)
2179 except:
2180 if new_dom:
2181 new_dom._removeVm(RESTART_IN_PROGRESS)
2182 new_dom.destroy()
2183 else:
2184 self._removeVm(RESTART_IN_PROGRESS)
2185 raise
2186 except:
2187 log.exception('Failed to restart domain %s.', str(old_domid))
2189 def _preserveForRestart(self):
2190 """Preserve a domain that has been shut down, by giving it a new UUID,
2191 cloning the VM details, and giving it a new name. This allows us to
2192 keep this domain for debugging, but restart a new one in its place
2193 preserving the restart semantics (name and UUID preserved).
2194 """
2196 new_uuid = uuid.createString()
2197 new_name = 'Domain-%s' % new_uuid
2198 log.info("Renaming dead domain %s (%d, %s) to %s (%s).",
2199 self.info['name_label'], self.domid, self.info['uuid'],
2200 new_name, new_uuid)
2201 self._unwatchVm()
2202 self._releaseDevices()
2203 # Remove existing vm node in xenstore
2204 self._removeVm()
2205 new_dom_info = self.info.copy()
2206 new_dom_info['name_label'] = self.info['name_label']
2207 new_dom_info['uuid'] = self.info['uuid']
2208 self.info['name_label'] = new_name
2209 self.info['uuid'] = new_uuid
2210 self.vmpath = XS_VMROOT + new_uuid
2211 # Write out new vm node to xenstore
2212 self._storeVmDetails()
2213 self._preserve()
2214 return new_dom_info
2217 def _preserve(self):
2218 log.info("Preserving dead domain %s (%d).", self.info['name_label'],
2219 self.domid)
2220 self._unwatchVm()
2221 self.storeDom('xend/shutdown_completed', 'True')
2222 self._stateSet(DOM_STATE_HALTED)
2225 # Debugging ..
2228 def dumpCore(self, corefile = None):
2229 """Create a core dump for this domain.
2231 @raise: XendError if core dumping failed.
2232 """
2234 if not corefile:
2235 # To prohibit directory traversal
2236 based_name = os.path.basename(self.info['name_label'])
2238 coredir = "/var/xen/dump/%s" % (based_name)
2239 if not os.path.exists(coredir):
2240 try:
2241 mkdir.parents(coredir, stat.S_IRWXU)
2242 except Exception, ex:
2243 log.error("Cannot create directory: %s" % str(ex))
2245 if not os.path.isdir(coredir):
2246 # Use former directory to dump core
2247 coredir = '/var/xen/dump'
2249 this_time = time.strftime("%Y-%m%d-%H%M.%S", time.localtime())
2250 corefile = "%s/%s-%s.%s.core" % (coredir, this_time,
2251 self.info['name_label'], self.domid)
2253 if os.path.isdir(corefile):
2254 raise XendError("Cannot dump core in a directory: %s" %
2255 corefile)
2257 try:
2258 try:
2259 self._writeVm(DUMPCORE_IN_PROGRESS, 'True')
2260 xc.domain_dumpcore(self.domid, corefile)
2261 except RuntimeError, ex:
2262 corefile_incomp = corefile+'-incomplete'
2263 try:
2264 os.rename(corefile, corefile_incomp)
2265 except:
2266 pass
2268 log.error("core dump failed: id = %s name = %s: %s",
2269 self.domid, self.info['name_label'], str(ex))
2270 raise XendError("Failed to dump core: %s" % str(ex))
2271 finally:
2272 self._removeVm(DUMPCORE_IN_PROGRESS)
2275 # Device creation/deletion functions
2278 def _createDevice(self, deviceClass, devConfig):
2279 return self.getDeviceController(deviceClass).createDevice(devConfig)
2281 def _waitForDevice(self, deviceClass, devid):
2282 return self.getDeviceController(deviceClass).waitForDevice(devid)
2284 def _waitForDeviceUUID(self, dev_uuid):
2285 deviceClass, config = self.info['devices'].get(dev_uuid)
2286 self._waitForDevice(deviceClass, config['devid'])
2288 def _waitForDevice_destroy(self, deviceClass, devid, backpath):
2289 return self.getDeviceController(deviceClass).waitForDevice_destroy(
2290 devid, backpath)
2292 def _reconfigureDevice(self, deviceClass, devid, devconfig):
2293 return self.getDeviceController(deviceClass).reconfigureDevice(
2294 devid, devconfig)
2296 def _createDevices(self):
2297 """Create the devices for a vm.
2299 @raise: VmError for invalid devices
2300 """
2301 if self.image:
2302 self.image.prepareEnvironment()
2304 vscsi_uuidlist = {}
2305 vscsi_devidlist = []
2306 ordered_refs = self.info.ordered_device_refs()
2307 for dev_uuid in ordered_refs:
2308 devclass, config = self.info['devices'][dev_uuid]
2309 if devclass in XendDevices.valid_devices() and devclass != 'vscsi':
2310 log.info("createDevice: %s : %s" % (devclass, scrub_password(config)))
2311 dev_uuid = config.get('uuid')
2312 if devclass != 'pci' or not self.info.is_hvm() :
2313 devid = self._createDevice(devclass, config)
2315 # store devid in XendConfig for caching reasons
2316 if dev_uuid in self.info['devices']:
2317 self.info['devices'][dev_uuid][1]['devid'] = devid
2319 elif devclass == 'vscsi':
2320 vscsi_config = config.get('devs', [])[0]
2321 devid = vscsi_config.get('devid', '')
2322 dev_uuid = config.get('uuid')
2323 vscsi_uuidlist[devid] = dev_uuid
2324 vscsi_devidlist.append(devid)
2326 # The devids must be sorted so /dev/sdxx devices are created in order in the guest.
2327 if len(vscsi_uuidlist) > 0:
2328 vscsi_devidlist.sort()
2329 for vscsiid in vscsi_devidlist:
2330 dev_uuid = vscsi_uuidlist[vscsiid]
2331 devclass, config = self.info['devices'][dev_uuid]
2332 log.info("createDevice: %s : %s" % (devclass, scrub_password(config)))
2333 dev_uuid = config.get('uuid')
2334 devid = self._createDevice(devclass, config)
2335 # store devid in XendConfig for caching reasons
2336 if dev_uuid in self.info['devices']:
2337 self.info['devices'][dev_uuid][1]['devid'] = devid
2340 if self.image:
2341 self.image.createDeviceModel()
2343 # If we have pass-through devs, we need the virtual pci slot info from qemu
2344 self.pci_device_configure_boot()
2346 def _releaseDevices(self, suspend = False):
2347 """Release all domain's devices. Nothrow guarantee."""
2348 if self.image:
2349 try:
2350 log.debug("Destroying device model")
2351 self.image.destroyDeviceModel()
2352 except Exception, e:
2353 log.exception("Device model destroy failed %s" % str(e))
2354 else:
2355 log.debug("No device model")
2357 log.debug("Releasing devices")
2358 t = xstransact("%s/device" % self.vmpath)
2359 try:
2360 for devclass in XendDevices.valid_devices():
2361 for dev in t.list(devclass):
2362 try:
2363 log.debug("Removing %s", dev);
2364 self.destroyDevice(devclass, dev, False);
2365 except:
2366 # Log and swallow any exceptions in removal --
2367 # there's nothing more we can do.
2368 log.exception("Device release failed: %s; %s; %s",
2369 self.info['name_label'],
2370 devclass, dev)
2371 finally:
2372 t.abort()
2374 def getDeviceController(self, name):
2375 """Get the device controller for this domain, and if it
2376 doesn't exist, create it.
2378 @param name: device class name
2379 @type name: string
2380 @rtype: subclass of DevController
2381 """
2382 if name not in self._deviceControllers:
2383 devController = XendDevices.make_controller(name, self)
2384 if not devController:
2385 raise XendError("Unknown device type: %s" % name)
2386 self._deviceControllers[name] = devController
2388 return self._deviceControllers[name]
2391 # Migration functions (public)
2394 def testMigrateDevices(self, network, dst):
2395 """ Notify all device about intention of migration
2396 @raise: XendError for a device that cannot be migrated
2397 """
2398 for (n, c) in self.info.all_devices_sxpr():
2399 rc = self.migrateDevice(n, c, network, dst, DEV_MIGRATE_TEST, self.getName())
2400 if rc != 0:
2401 raise XendError("Device of type '%s' refuses migration." % n)
2403 def migrateDevices(self, network, dst, step, domName=''):
2404 """Notify the devices about migration
2405 """
2406 ctr = 0
2407 try:
2408 for (dev_type, dev_conf) in self.info.all_devices_sxpr():
2409 self.migrateDevice(dev_type, dev_conf, network, dst,
2410 step, domName)
2411 ctr = ctr + 1
2412 except:
2413 for dev_type, dev_conf in self.info.all_devices_sxpr():
2414 if ctr == 0:
2415 step = step - 1
2416 ctr = ctr - 1
2417 self._recoverMigrateDevice(dev_type, dev_conf, network,
2418 dst, step, domName)
2419 raise
2421 def migrateDevice(self, deviceClass, deviceConfig, network, dst,
2422 step, domName=''):
2423 return self.getDeviceController(deviceClass).migrate(deviceConfig,
2424 network, dst, step, domName)
2426 def _recoverMigrateDevice(self, deviceClass, deviceConfig, network,
2427 dst, step, domName=''):
2428 return self.getDeviceController(deviceClass).recover_migrate(
2429 deviceConfig, network, dst, step, domName)
2431 def setChangeHomeServer(self, chs):
2432 if chs is not None:
2433 self.info['change_home_server'] = bool(chs)
2434 else:
2435 if self.info.has_key('change_home_server'):
2436 del self.info['change_home_server']
2439 ## private:
2441 def _constructDomain(self):
2442 """Construct the domain.
2444 @raise: VmError on error
2445 """
2447 log.debug('XendDomainInfo.constructDomain')
2449 self.shutdownStartTime = None
2450 self.restart_in_progress = False
2452 hap = 0
2453 hvm = self.info.is_hvm()
2454 if hvm:
2455 hap = self.info.is_hap()
2456 info = xc.xeninfo()
2457 if 'hvm' not in info['xen_caps']:
2458 raise VmError("HVM guest support is unavailable: is VT/AMD-V "
2459 "supported by your CPU and enabled in your "
2460 "BIOS?")
2462 # Hack to pre-reserve some memory for initial domain creation.
2463 # There is an implicit memory overhead for any domain creation. This
2464 # overhead is greater for some types of domain than others. For
2465 # example, an x86 HVM domain will have a default shadow-pagetable
2466 # allocation of 1MB. We free up 4MB here to be on the safe side.
2467 # 2MB memory allocation was not enough in some cases, so it's 4MB now
2468 balloon.free(4*1024, self) # 4MB should be plenty
2470 ssidref = 0
2471 if security.on() == xsconstants.XS_POLICY_USE:
2472 ssidref = security.calc_dom_ssidref_from_info(self.info)
2473 if security.has_authorization(ssidref) == False:
2474 raise VmError("VM is not authorized to run.")
2476 s3_integrity = 0
2477 if self.info.has_key('s3_integrity'):
2478 s3_integrity = self.info['s3_integrity']
2480 oos = self.info['platform'].get('oos', 1)
2481 oos_off = 1 - int(oos)
2483 flags = (int(hvm) << 0) | (int(hap) << 1) | (int(s3_integrity) << 2) | (int(oos_off) << 3)
2485 try:
2486 self.domid = xc.domain_create(
2487 domid = 0,
2488 ssidref = ssidref,
2489 handle = uuid.fromString(self.info['uuid']),
2490 flags = flags,
2491 target = self.info.target())
2492 except Exception, e:
2493 # may get here if due to ACM the operation is not permitted
2494 if security.on() == xsconstants.XS_POLICY_ACM:
2495 raise VmError('Domain in conflict set with running domain?')
2496 log.exception(e)
2498 if not self.domid or self.domid < 0:
2499 failmsg = 'Creating domain failed: name=%s' % self.info['name_label']
2500 if self.domid:
2501 failmsg += ', error=%i' % int(self.domid)
2502 raise VmError(failmsg)
2504 self.dompath = GetDomainPath(self.domid)
2506 self._recreateDom()
2508 # Set TSC mode of domain
2509 tsc_mode = self.info["platform"].get("tsc_mode")
2510 if arch.type == "x86" and tsc_mode is not None:
2511 xc.domain_set_tsc_info(self.domid, int(tsc_mode))
2513 # Set timer configuration of domain
2514 timer_mode = self.info["platform"].get("timer_mode")
2515 if hvm and timer_mode is not None:
2516 xc.hvm_set_param(self.domid, HVM_PARAM_TIMER_MODE,
2517 long(timer_mode))
2519 # Set Viridian interface configuration of domain
2520 viridian = self.info["platform"].get("viridian")
2521 if arch.type == "x86" and hvm and viridian is not None:
2522 xc.hvm_set_param(self.domid, HVM_PARAM_VIRIDIAN, long(viridian))
2524 # If nomigrate is set, disable migration
2525 nomigrate = self.info["platform"].get("nomigrate")
2526 if nomigrate is not None and long(nomigrate) != 0:
2527 xc.domain_disable_migrate(self.domid)
2529 # Optionally enable virtual HPET
2530 hpet = self.info["platform"].get("hpet")
2531 if hvm and hpet is not None:
2532 xc.hvm_set_param(self.domid, HVM_PARAM_HPET_ENABLED,
2533 long(hpet))
2535 # Optionally enable periodic vpt aligning
2536 vpt_align = self.info["platform"].get("vpt_align")
2537 if hvm and vpt_align is not None:
2538 xc.hvm_set_param(self.domid, HVM_PARAM_VPT_ALIGN,
2539 long(vpt_align))
2541 # Set maximum number of vcpus in domain
2542 xc.domain_max_vcpus(self.domid, int(self.info['VCPUs_max']))
2544 # Check for cpu_{cap|weight} validity for credit scheduler
2545 if XendNode.instance().xenschedinfo() == 'credit':
2546 cap = self.getCap()
2547 weight = self.getWeight()
2549 assert type(weight) == int
2550 assert type(cap) == int
2552 if weight < 1 or weight > 65535:
2553 raise VmError("Cpu weight out of range, valid values are 1 to 65535")
2555 if cap < 0 or cap > self.getVCpuCount() * 100:
2556 raise VmError("Cpu cap out of range, valid range is 0 to %s for the specified number of vcpus" %
2557 (self.getVCpuCount() * 100))
2559 # Test whether the devices can be assigned with VT-d
2560 self.info.update_platform_pci()
2561 pci = self.info["platform"].get("pci")
2562 pci_str = ''
2563 if pci and len(pci) > 0:
2564 pci = map(lambda x: x[0:4], pci) # strip options
2565 pci_str = str(pci)
2567 # This test is done for both pv and hvm guests.
2568 for p in pci:
2569 pci_name = '%04x:%02x:%02x.%x' % \
2570 (parse_hex(p[0]), parse_hex(p[1]), parse_hex(p[2]), parse_hex(p[3]))
2571 try:
2572 pci_device = PciDevice(parse_pci_name(pci_name))
2573 except Exception, e:
2574 raise VmError("pci: failed to locate device and "+
2575 "parse its resources - "+str(e))
2576 if pci_device.driver!='pciback' and pci_device.driver!='pci-stub':
2577 raise VmError("pci: neither pciback nor pci-stub owns device %s"
2578 % pci_device.name)
2579 if pci_name in get_all_assigned_pci_devices():
2580 raise VmError("failed to assign device %s: it has"
2581 " already been assigned to another domain." % pci_name)
2583 if hvm and pci_str != '':
2584 bdf = xc.test_assign_device(0, pci_str)
2585 if bdf != 0:
2586 if bdf == -1:
2587 raise VmError("failed to assign device: maybe the platform"
2588 " doesn't support VT-d, or VT-d isn't enabled"
2589 " properly?")
2590 bus = (bdf >> 16) & 0xff
2591 devfn = (bdf >> 8) & 0xff
2592 dev = (devfn >> 3) & 0x1f
2593 func = devfn & 0x7
2594 raise VmError("failed to assign device %02x:%02x.%x: maybe it has"
2595 " already been assigned to another domain, or maybe"
2596 " it doesn't exist." % (bus, dev, func))
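# --- Editor's note: illustrative example, not part of the original file ---
# The error path above unpacks the packed BDF value returned by
# xc.test_assign_device: bits 16-23 hold the bus and bits 8-15 the
# devfn, with the device in devfn bits 3-7 and the function in bits 0-2.
# E.g. for bdf == 0x020800: bus = 0x02, devfn = 0x08, so dev = 1 and
# func = 0 - the failing device is 02:01.0.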
2598 # register the domain in the list
2599 from xen.xend import XendDomain
2600 XendDomain.instance().add_domain(self)
2602 def _introduceDomain(self):
2603 assert self.domid is not None
2604 assert self.store_mfn is not None
2605 assert self.store_port is not None
2607 try:
2608 IntroduceDomain(self.domid, self.store_mfn, self.store_port)
2609 except RuntimeError, exn:
2610 raise XendError(str(exn))
2612 def _setTarget(self, target):
2613 assert self.domid is not None
2615 try:
2616 SetTarget(self.domid, target)
2617 self.storeDom('target', target)
2618 except RuntimeError, exn:
2619 raise XendError(str(exn))
2622 def _setCPUAffinity(self):
2623 """ Repin domain vcpus if a restricted cpus list is provided.
2624 Returns the chosen node number.
2625 """
2627 def has_cpus():
2628 if self.info['cpus'] is not None:
2629 for c in self.info['cpus']:
2630 if c:
2631 return True
2632 return False
2634 def has_cpumap():
2635 if self.info.has_key('vcpus_params'):
2636 for k, v in self.info['vcpus_params'].items():
2637 if k.startswith('cpumap'):
2638 return True
2639 return False
2641 index = 0
2642 if has_cpumap():
2643 for v in range(0, self.info['VCPUs_max']):
2644 if self.info['vcpus_params'].has_key('cpumap%i' % v):
2645 cpumask = map(int, self.info['vcpus_params']['cpumap%i' % v].split(','))
2646 xc.vcpu_setaffinity(self.domid, v, cpumask)
2647 elif has_cpus():
2648 for v in range(0, self.info['VCPUs_max']):
2649 if self.info['cpus'][v]:
2650 xc.vcpu_setaffinity(self.domid, v, self.info['cpus'][v])
2651 else:
2652 def find_relaxed_node(node_list):
2653 import sys
2654 nr_nodes = info['nr_nodes']
2655 if node_list is None:
2656 node_list = range(0, nr_nodes)
2657 nodeload = [0]
2658 nodeload = nodeload * nr_nodes
2659 from xen.xend import XendDomain
2660 doms = XendDomain.instance().list('all')
2661 for dom in filter (lambda d: d.domid != self.domid, doms):
2662 cpuinfo = dom.getVCPUInfo()
2663 for vcpu in sxp.children(cpuinfo, 'vcpu'):
2664 if sxp.child_value(vcpu, 'online') == 0: continue
2665 cpumap = list(sxp.child_value(vcpu,'cpumap'))
2666 for i in range(0, nr_nodes):
2667 node_cpumask = info['node_to_cpu'][i]
2668 for j in node_cpumask:
2669 if j in cpumap:
2670 nodeload[i] += 1
2671 break
2672 for i in range(0, nr_nodes):
2673 nodeload[i] = int(nodeload[i] * 16 / max(1, len(info['node_to_cpu'][i])))
2674 if len(info['node_to_cpu'][i]) == 0 or i not in node_list:
2675 nodeload[i] += 8
2676 return map(lambda x: x[0], sorted(enumerate(nodeload), key=lambda x:x[1]))
2678 info = xc.physinfo()
2679 if info['nr_nodes'] > 1:
2680 node_memory_list = info['node_to_memory']
2681 needmem = self.image.getRequiredAvailableMemory(self.info['memory_dynamic_max']) / 1024
2682 candidate_node_list = []
2683 for i in range(0, info['nr_nodes']):
2684 if node_memory_list[i] >= needmem and len(info['node_to_cpu'][i]) > 0:
2685 candidate_node_list.append(i)
2686 best_node = find_relaxed_node(candidate_node_list)[0]
2687 cpumask = info['node_to_cpu'][best_node]
2688 cores_per_node = info['nr_cpus'] / info['nr_nodes']
2689 nodes_required = (self.info['VCPUs_max'] + cores_per_node - 1) / cores_per_node
2690 if nodes_required > 1:
2691 log.debug("allocating %d NUMA nodes", nodes_required)
2692 best_nodes = find_relaxed_node(filter(lambda x: x != best_node, range(0,info['nr_nodes'])))
2693 for i in best_nodes[:nodes_required - 1]:
2694 cpumask = cpumask + info['node_to_cpu'][i]
2695 for v in range(0, self.info['VCPUs_max']):
2696 xc.vcpu_setaffinity(self.domid, v, cpumask)
2697 return index
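# --- Editor's note: illustrative sketch, not part of the original file ---
# The relative penalty used by find_relaxed_node() above: per-node load
# is scaled to sixteenths of a CPU, and nodes that cannot host the guest
# (no CPUs, or missing from the candidate list for lack of free memory)
# are penalised by 8, i.e. treated as if half loaded. Standalone, with
# hypothetical stand-ins for the xc.physinfo() data:
def _example_relaxed_node_order(nodeload, node_to_cpu, candidates):
    scored = []
    for i in range(len(nodeload)):
        load = 0
        if len(node_to_cpu[i]) > 0:
            load = int(nodeload[i] * 16 / len(node_to_cpu[i]))
        if len(node_to_cpu[i]) == 0 or i not in candidates:
            load += 8
        scored.append((load, i))
    scored.sort()
    return [i for (load, i) in scored]
# Two 4-CPU nodes with loads [2, 1], only node 0 a candidate: scores are
# [8, 12] and node 0 wins; an idle non-candidate scores 8, so it can now
# outrank a heavily loaded candidate instead of being pushed to maxint.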
2699 def _freeDMAmemory(self, node):
2701 # If we are PV and have PCI devices the guest will
2702 # turn on a SWIOTLB. The SWIOTLB _MUST_ be located in the DMA32
2703 # zone (under 4GB). To do so, we need to balloon down Dom0 until
2704 # there is enough (64MB) memory under the 4GB mark. This ballooning
2705 # might take out more memory than just 64MB though :-(
2706 if not self.info.is_pv_and_has_pci():
2707 return
2709 retries = 2000
2710 ask_for_mem = 0
2711 need_mem = 0
2712 try:
2713 while (retries > 0):
2714 physinfo = xc.physinfo()
2715 free_mem = physinfo['free_memory']
2716 nr_nodes = physinfo['nr_nodes']
2717 node_to_dma32_mem = physinfo['node_to_dma32_mem']
2718 if (node > nr_nodes):
2719 return
2720 # Extra 2MB above 64MB seems to do the trick.
2721 need_mem = 64 * 1024 + 2048 - node_to_dma32_mem[node]
2722 # Our starting point. We ask just for the difference needed to
2723 # have an extra 64MB under the 4GB mark.
2724 ask_for_mem = max(need_mem, ask_for_mem);
2725 if (need_mem > 0):
2726 log.debug('_freeDMAmemory (%d) Need %dKiB DMA memory. '
2727 'Asking for %dKiB', retries, need_mem,
2728 ask_for_mem)
2730 balloon.free(ask_for_mem, self)
2731 ask_for_mem = ask_for_mem + 2048
2732 else:
2733 # OK. We got enough DMA memory.
2734 break
2735 retries = retries - 1
2736 except:
2737 # This is best-try after all.
2738 need_mem = max(1, need_mem)
2739 pass
2741 if (need_mem > 0):
2742 log.warn('We tried our best to balloon down DMA memory to '
2743 'accommodate your PV guest. We need %dKiB extra memory.',
2744 need_mem)
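# --- Editor's note: illustrative example, not part of the original file ---
# The shortfall arithmetic above, with hypothetical numbers: the target
# is 64MiB (65536KiB) plus 2MiB of slack below 4GB on the guest's node.
# If that node already has 20480KiB free in the DMA32 zone, then
#   need_mem = 64 * 1024 + 2048 - 20480 = 47104 KiB
# and every retry that still falls short asks for another 2048KiB.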
2746 def _setSchedParams(self):
2747 if XendNode.instance().xenschedinfo() == 'credit':
2748 from xen.xend import XendDomain
2749 XendDomain.instance().domain_sched_credit_set(self.getDomid(),
2750 self.getWeight(),
2751 self.getCap())
2753 def _initDomain(self):
2754 log.debug('XendDomainInfo.initDomain: %s %s',
2755 self.domid,
2756 self.info['vcpus_params']['weight'])
2758 self._configureBootloader()
2760 try:
2761 self.image = image.create(self, self.info)
2763 # repin domain vcpus if a restricted cpus list is provided
2764 # this is done prior to memory allocation to aid in memory
2765 # distribution for NUMA systems.
2766 node = self._setCPUAffinity()
2768 # Set scheduling parameters.
2769 self._setSchedParams()
2771 # Use architecture- and image-specific calculations to determine
2772 # the various headrooms necessary, given the raw configured
2773 # values. maxmem, memory, and shadow are all in KiB.
2774 # but memory_static_max etc are all stored in bytes now.
2775 memory = self.image.getRequiredAvailableMemory(
2776 self.info['memory_dynamic_max'] / 1024)
2777 maxmem = self.image.getRequiredAvailableMemory(
2778 self.info['memory_static_max'] / 1024)
2779 shadow = self.image.getRequiredShadowMemory(
2780 self.info['shadow_memory'] * 1024,
2781 self.info['memory_static_max'] / 1024)
2783 log.debug("_initDomain:shadow_memory=0x%x, memory_static_max=0x%x, memory_static_min=0x%x.", self.info['shadow_memory'], self.info['memory_static_max'], self.info['memory_static_min'],)
2784 # Round shadow up to a multiple of a MiB, as shadow_mem_control
2785 # takes MiB and we must not round down and end up under-providing.
2786 shadow = ((shadow + 1023) / 1024) * 1024
2788 # set memory limit
2789 xc.domain_setmaxmem(self.domid, maxmem)
2791 vtd_mem = 0
2792 info = xc.physinfo()
2793 if 'hvm_directio' in info['virt_caps']:
2794 # Reserve 1 page per MiB of RAM for separate VT-d page table.
2795 vtd_mem = 4 * (self.info['memory_static_max'] / 1024 / 1024)
2796 # Round vtd_mem up to a multiple of a MiB.
2797 vtd_mem = ((vtd_mem + 1023) / 1024) * 1024
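# --- Editor's note: illustrative example, not part of the original file ---
# Worked example of the VT-d reservation above: a guest with a 2GiB
# memory_static_max has 2048 MiB of RAM, so vtd_mem = 4 * 2048 = 8192 KiB
# (one 4KiB page per MiB), already a multiple of 1024 so the MiB
# round-up leaves it unchanged.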
2799 self.guest_bitsize = self.image.getBitSize()
2800 # Make sure there's enough RAM available for the domain
2801 balloon.free(memory + shadow + vtd_mem, self)
2803 # Set up the shadow memory
2804 shadow_cur = xc.shadow_mem_control(self.domid, shadow / 1024)
2805 self.info['shadow_memory'] = shadow_cur
2807 # machine address size
2808 if self.info.has_key('machine_address_size'):
2809 log.debug("_initDomain: setting maximum machine address size %d" % self.info['machine_address_size'])
2810 xc.domain_set_machine_address_size(self.domid, self.info['machine_address_size'])
2812 if self.info.has_key('suppress_spurious_page_faults') and self.info['suppress_spurious_page_faults']:
2813 log.debug("_initDomain: suppressing spurious page faults")
2814 xc.domain_suppress_spurious_page_faults(self.domid)
2816 self._createChannels()
2818 channel_details = self.image.createImage()
2820 self.store_mfn = channel_details['store_mfn']
2821 if 'console_mfn' in channel_details:
2822 self.console_mfn = channel_details['console_mfn']
2823 if 'notes' in channel_details:
2824 self.info.set_notes(channel_details['notes'])
2825 if 'native_protocol' in channel_details:
2826 self.native_protocol = channel_details['native_protocol'];
2828 self._introduceDomain()
2829 if self.info.target():
2830 self._setTarget(self.info.target())
2832 self._freeDMAmemory(node)
2834 self._createDevices()
2836 self.image.cleanupTmpImages()
2838 self.info['start_time'] = time.time()
2840 self._stateSet(DOM_STATE_RUNNING)
2841 except VmError, exn:
2842 log.exception("XendDomainInfo.initDomain: exception occurred")
2843 if self.image:
2844 self.image.cleanupTmpImages()
2845 raise exn
2846 except RuntimeError, exn:
2847 log.exception("XendDomainInfo.initDomain: exception occurred")
2848 if self.image:
2849 self.image.cleanupTmpImages()
2850 raise VmError(str(exn))
2853 def cleanupDomain(self):
2854 """Cleanup domain resources; release devices. Idempotent. Nothrow
2855 guarantee."""
2857 self.refresh_shutdown_lock.acquire()
2858 try:
2859 self.unwatchShutdown()
2860 self._releaseDevices()
2861 bootloader_tidy(self)
2863 if self.image:
2864 self.image = None
2866 try:
2867 self._removeDom()
2868 except:
2869 log.exception("Removing domain path failed.")
2871 self._stateSet(DOM_STATE_HALTED)
2872 self.domid = None # Do not push into _stateSet()!
2873 finally:
2874 self.refresh_shutdown_lock.release()
2877 def unwatchShutdown(self):
2878 """Remove the watch on the domain's control/shutdown node, if any.
2879 Idempotent. Nothrow guarantee. Expects to be protected by the
2880 refresh_shutdown_lock."""
2882 try:
2883 try:
2884 if self.shutdownWatch:
2885 self.shutdownWatch.unwatch()
2886 finally:
2887 self.shutdownWatch = None
2888 except:
2889 log.exception("Unwatching control/shutdown failed.")
2891 def waitForShutdown(self):
2892 self.state_updated.acquire()
2893 try:
2894 while self._stateGet() in (DOM_STATE_RUNNING,DOM_STATE_PAUSED):
2895 self.state_updated.wait(timeout=1.0)
2896 finally:
2897 self.state_updated.release()
2899 def waitForSuspend(self):
2900 """Wait for the guest to respond to a suspend request by
2901 shutting down. If the guest hasn't re-written control/shutdown
2902 after a certain amount of time, it's obviously not listening and
2903 won't suspend, so we give up. HVM guests with no PV drivers
2904 should already be shut down.
2905 """
2906 state = "suspend"
2907 nr_tries = 60
2909 self.state_updated.acquire()
2910 try:
2911 while self._stateGet() in (DOM_STATE_RUNNING,DOM_STATE_PAUSED):
2912 self.state_updated.wait(1.0)
2913 if state == "suspend":
2914 if nr_tries == 0:
2915 msg = ('Timeout waiting for domain %s to suspend'
2916 % self.domid)
2917 self._writeDom('control/shutdown', '')
2918 raise XendError(msg)
2919 state = self.readDom('control/shutdown')
2920 nr_tries -= 1
2921 finally:
2922 self.state_updated.release()
2925 # TODO: recategorise - called from XendCheckpoint
2928 def completeRestore(self, store_mfn, console_mfn):
2930 log.debug("XendDomainInfo.completeRestore")
2932 self.store_mfn = store_mfn
2933 self.console_mfn = console_mfn
2935 self._introduceDomain()
2936 self.image = image.create(self, self.info)
2937 if self.image:
2938 self.image.createDeviceModel(True)
2939 self._storeDomDetails()
2940 self._registerWatches()
2941 self.refreshShutdown()
2943 log.debug("XendDomainInfo.completeRestore done")
2946 def _endRestore(self):
2947 self.setResume(False)
2950 # VM Destroy
2953 def _prepare_phantom_paths(self):
2954 # get associated devices to destroy
2955 # build list of phantom devices to be removed after normal devices
2956 plist = []
2957 if self.domid is not None:
2958 t = xstransact("%s/device/vbd" % GetDomainPath(self.domid))
2959 try:
2960 for dev in t.list():
2961 backend_phantom_vbd = xstransact.Read("%s/device/vbd/%s/phantom_vbd" \
2962 % (self.dompath, dev))
2963 if backend_phantom_vbd is not None:
2964 frontend_phantom_vbd = xstransact.Read("%s/frontend" \
2965 % backend_phantom_vbd)
2966 plist.append(backend_phantom_vbd)
2967 plist.append(frontend_phantom_vbd)
2968 finally:
2969 t.abort()
2970 return plist
2972 def _cleanup_phantom_devs(self, plist):
2973 # remove phantom devices
2974 if not plist == []:
2975 time.sleep(2)
2976 for paths in plist:
2977 if paths.find('backend') != -1:
2978 # Modify online status /before/ updating state (latter is watched by
2979 # drivers, so this ordering avoids a race).
2980 xstransact.Write(paths, 'online', "0")
2981 xstransact.Write(paths, 'state', str(xenbusState['Closing']))
2982 # force
2983 xstransact.Remove(paths)
2985 def destroy(self):
2986 """Cleanup VM and destroy domain. Nothrow guarantee."""
2988 if self.domid is None:
2989 return
2990 from xen.xend import XendDomain
2991 log.debug("XendDomainInfo.destroy: domid=%s", str(self.domid))
2993 paths = self._prepare_phantom_paths()
2995 if self.dompath is not None:
2996 try:
2997 xc.domain_destroy_hook(self.domid)
2998 xc.domain_pause(self.domid)
2999 do_FLR(self.domid, self.info.is_hvm())
3000 xc.domain_destroy(self.domid)
3001 for state in DOM_STATES_OLD:
3002 self.info[state] = 0
3003 self._stateSet(DOM_STATE_HALTED)
3004 except:
3005 log.exception("XendDomainInfo.destroy: domain destruction failed.")
3007 XendDomain.instance().remove_domain(self)
3008 self.cleanupDomain()
3010 if self.info.is_hvm() or self.guest_bitsize != 32:
3011 if self.alloc_mem:
3012 import MemoryPool
3013 log.debug("%s KiB need to be returned to the memory pool" % self.alloc_mem)
3014 MemoryPool.instance().increase_memory(self.alloc_mem)
3016 self._cleanup_phantom_devs(paths)
3017 self._cleanupVm()
3019 if ("transient" in self.info["other_config"] and \
3020 bool(self.info["other_config"]["transient"])) or \
3021 ("change_home_server" in self.info and \
3022 bool(self.info["change_home_server"])):
3023 XendDomain.instance().domain_delete_by_dominfo(self)
3026 def resetDomain(self):
3027 log.debug("XendDomainInfo.resetDomain(%s)", str(self.domid))
3029 old_domid = self.domid
3030 prev_vm_xend = self._listRecursiveVm('xend')
3031 new_dom_info = self.info
3032 try:
3033 self._unwatchVm()
3034 self.destroy()
3036 new_dom = None
3037 try:
3038 from xen.xend import XendDomain
3039 new_dom_info['domid'] = None
3040 new_dom = XendDomain.instance().domain_create_from_dict(
3041 new_dom_info)
3042 for x in prev_vm_xend[0][1]:
3043 new_dom._writeVm('xend/%s' % x[0], x[1])
3044 new_dom.waitForDevices()
3045 new_dom.unpause()
3046 except:
3047 if new_dom:
3048 new_dom.destroy()
3049 raise
3050 except:
3051 log.exception('Failed to reset domain %s.', str(old_domid))
3054 def resumeDomain(self):
3055 log.debug("XendDomainInfo.resumeDomain(%s)", str(self.domid))
3057 # resume a suspended domain (e.g. after live checkpoint, or after
3058 # a later error during save or migrate); checks that the domain
3059 # is currently suspended first, so it is safe to call from anywhere
3061 xeninfo = dom_get(self.domid)
3062 if xeninfo is None:
3063 return
3064 if not xeninfo['shutdown']:
3065 return
3066 reason = shutdown_reason(xeninfo['shutdown_reason'])
3067 if reason != 'suspend':
3068 return
3070 try:
3071 # could also fetch a parsed note from xenstore
3072 fast = self.info.get_notes().get('SUSPEND_CANCEL') and 1 or 0
3073 if not fast:
3074 self._releaseDevices()
3075 self.testDeviceComplete()
3076 self.testvifsComplete()
3077 log.debug("XendDomainInfo.resumeDomain: devices released")
3079 self._resetChannels()
3081 self._removeDom('control/shutdown')
3082 self._removeDom('device-misc/vif/nextDeviceID')
3084 self._createChannels()
3085 self._introduceDomain()
3086 self._storeDomDetails()
3088 self._createDevices()
3089 log.debug("XendDomainInfo.resumeDomain: devices created")
3091 xc.domain_resume(self.domid, fast)
3092 ResumeDomain(self.domid)
3093 except:
3094 log.exception("XendDomainInfo.resume: xc.domain_resume failed on domain %s." % (str(self.domid)))
3095 self.image.resumeDeviceModel()
3096 log.debug("XendDomainInfo.resumeDomain: completed")
3100 # Channels for xenstore and console
3103 def _createChannels(self):
3104 """Create the channels to the domain.
3105 """
3106 self.store_port = self._createChannel()
3107 self.console_port = self._createChannel()
3110 def _createChannel(self):
3111 """Create an event channel to the domain.
3112 """
3113 try:
3114 if self.domid != None:
3115 return xc.evtchn_alloc_unbound(domid = self.domid,
3116 remote_dom = 0)
3117 except:
3118 log.exception("Exception in alloc_unbound(%s)", str(self.domid))
3119 raise
3121 def _resetChannels(self):
3122 """Reset all event channels in the domain.
3123 """
3124 try:
3125 if self.domid != None:
3126 return xc.evtchn_reset(dom = self.domid)
3127 except:
3128 log.exception("Exception in evtchn_reset(%s)", str(self.domid))
3129 raise
3133 # Bootloader configuration
3136 def _configureBootloader(self):
3137 """Run the bootloader if we're configured to do so."""
3139 blexec = self.info['PV_bootloader']
3140 bootloader_args = self.info['PV_bootloader_args']
3141 kernel = self.info['PV_kernel']
3142 ramdisk = self.info['PV_ramdisk']
3143 args = self.info['PV_args']
3144 boot = self.info['HVM_boot_policy']
3146 if boot:
3147 # HVM booting.
3148 pass
3149 elif not blexec and kernel:
3150 # Boot from dom0. Nothing left to do -- the kernel and ramdisk
3151 # will be picked up by image.py.
3152 pass
3153 else:
3154 # Boot using bootloader
3155 if not blexec or blexec == 'pygrub':
3156 blexec = auxbin.pathTo('pygrub')
3158 blcfg = None
3159 disks = [x for x in self.info['vbd_refs']
3160 if self.info['devices'][x][1]['bootable']]
3162 if not disks:
3163 msg = "Had a bootloader specified, but no disks are bootable"
3164 log.error(msg)
3165 raise VmError(msg)
3167 devinfo = self.info['devices'][disks[0]]
3168 devtype = devinfo[0]
3169 disk = devinfo[1]['uname']
3171 fn = blkdev_uname_to_file(disk)
3173 # If this is a drbd volume, check if we need to activate it
3174 if disk.find(":") != -1:
3175 (disktype, diskname) = disk.split(':', 1)
3176 if disktype == 'drbd':
3177 (drbdadmstdin, drbdadmstdout) = os.popen2(["/sbin/drbdadm", "state", diskname])
3178 (state, junk) = drbdadmstdout.readline().split('/', 1)
3179 if state == 'Secondary':
3180 os.system('/sbin/drbdadm primary ' + diskname)
3182 taptype = blkdev_uname_to_taptype(disk)
3183 mounted = devtype in ['tap', 'tap2'] and taptype != 'aio' and taptype != 'sync' and not os.stat(fn).st_rdev
3184 if mounted:
3185 # This is a file, not a device. pygrub can cope with a
3186 # file if it's raw, but if it's QCOW or other such formats
3187 # used through blktap, then we need to mount it first.
3189 log.info("Mounting %s on %s." %
3190 (fn, BOOTLOADER_LOOPBACK_DEVICE))
3192 vbd = {
3193 'mode': 'RO',
3194 'device': BOOTLOADER_LOOPBACK_DEVICE,
3195 }
3197 from xen.xend import XendDomain
3198 dom0 = XendDomain.instance().privilegedDomain()
3199 dom0._waitForDeviceUUID(dom0.create_vbd(vbd, disk))
3200 fn = BOOTLOADER_LOOPBACK_DEVICE
3202 try:
3203 blcfg = bootloader(blexec, fn, self, False,
3204 bootloader_args, kernel, ramdisk, args)
3205 finally:
3206 if mounted:
3207 log.info("Unmounting %s from %s." %
3208 (fn, BOOTLOADER_LOOPBACK_DEVICE))
3210 dom0.destroyDevice('tap', BOOTLOADER_LOOPBACK_DEVICE)
3212 if blcfg is None:
3213 msg = "Had a bootloader specified, but can't find disk"
3214 log.error(msg)
3215 raise VmError(msg)
3217 self.info.update_with_image_sxp(blcfg, True)
3221 # VM Functions
3224 def _readVMDetails(self, params):
3225 """Read the specified parameters from the store.
3226 """
3227 try:
3228 return self._gatherVm(*params)
3229 except ValueError:
3230 # One of the int/float entries in params has a corresponding store
3231 # entry that is invalid. We recover, because older versions of
3232 # Xend may have put the entry there (memory/target, for example),
3233 # but this is in general a bad situation to have reached.
3234 log.exception(
3235 "Store corrupted at %s! Domain %d's configuration may be "
3236 "affected.", self.vmpath, self.domid)
3237 return []
3239 def _cleanupVm(self):
3240 """Cleanup VM resources. Idempotent. Nothrow guarantee."""
3242 self._unwatchVm()
3244 try:
3245 self._removeVm()
3246 except:
3247 log.exception("Removing VM path failed.")
3250 def checkLiveMigrateMemory(self):
3251 """ Make sure there's enough memory to migrate this domain """
3252 overhead_kb = 0
3253 if arch.type == "x86":
3254 # 1MiB per vcpu plus 4KiB per MiB of RAM. This is higher than
3255 # the minimum that Xen would allocate if no value were given.
3256 overhead_kb = self.info['VCPUs_max'] * 1024 + \
3257 (self.info['memory_static_max'] / 1024 / 1024) * 4
3258 overhead_kb = ((overhead_kb + 1023) / 1024) * 1024
3259 # The domain might already have some shadow memory
3260 overhead_kb -= xc.shadow_mem_control(self.domid) * 1024
3261 if overhead_kb > 0:
3262 balloon.free(overhead_kb, self)
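# --- Editor's note: illustrative example, not part of the original file ---
# Worked example of the overhead estimate above: a hypothetical 4-VCPU
# guest with a 2GiB memory_static_max needs 4 * 1024 + 2048 * 4 =
# 12288 KiB, already a multiple of 1024 KiB; whatever shadow memory the
# domain holds is then subtracted before ballooning.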
3264 def _unwatchVm(self):
3265 """Remove the watch on the VM path, if any. Idempotent. Nothrow
3266 guarantee."""
3267 try:
3268 try:
3269 if self.vmWatch:
3270 self.vmWatch.unwatch()
3271 finally:
3272 self.vmWatch = None
3273 except:
3274 log.exception("Unwatching VM path failed.")
3276 def testDeviceComplete(self):
3277 """ For block IO migration safety we must ensure that
3278 the devices have shut down correctly, i.e. all blocks are
3279 flushed to disk
3280 """
3281 start = time.time()
3282 while True:
3283 test = 0
3284 diff = time.time() - start
3285 vbds = self.getDeviceController('vbd').deviceIDs()
3286 taps = self.getDeviceController('tap').deviceIDs()
3287 tap2s = self.getDeviceController('tap2').deviceIDs()
3288 for i in vbds + taps + tap2s:
3289 test = 1
3290 log.info("Dev %s still active, looping...", i)
3291 time.sleep(0.1)
3293 if test == 0:
3294 break
3295 if diff >= MIGRATE_TIMEOUT:
3296 log.info("Dev still active but hit max loop timeout")
3297 break
3299 def testvifsComplete(self):
3300 """ In case vifs are released and then created for the same
3301 domain, we need to wait for the old devices to shut down.
3302 """
3303 start = time.time()
3304 while True:
3305 test = 0
3306 diff = time.time() - start
3307 for i in self.getDeviceController('vif').deviceIDs():
3308 test = 1
3309 log.info("Dev %s still active, looping...", i)
3310 time.sleep(0.1)
3312 if test == 0:
3313 break
3314 if diff >= MIGRATE_TIMEOUT:
3315 log.info("Dev still active but hit max loop timeout")
3316 break
3318 def _storeVmDetails(self):
3319 to_store = {}
3321 for key in XendConfig.LEGACY_XENSTORE_VM_PARAMS:
3322 info_key = XendConfig.LEGACY_CFG_TO_XENAPI_CFG.get(key, key)
3323 if self._infoIsSet(info_key):
3324 to_store[key] = str(self.info[info_key])
3326 if self._infoIsSet("static_memory_min"):
3327 to_store["memory"] = str(self.info["static_memory_min"])
3328 if self._infoIsSet("static_memory_max"):
3329 to_store["maxmem"] = str(self.info["static_memory_max"])
3331 image_sxpr = self.info.image_sxpr()
3332 if image_sxpr:
3333 to_store['image'] = sxp.to_string(image_sxpr)
3335 if not self._readVm('xend/restart_count'):
3336 to_store['xend/restart_count'] = str(0)
3338 log.debug("Storing VM details: %s", scrub_password(to_store))
3340 self._writeVm(to_store)
3341 self._setVmPermissions()
3343 def _setVmPermissions(self):
3344 """Allow the guest domain to read its UUID. We don't allow it to
3345 access any other entry, for security."""
3346 xstransact.SetPermissions('%s/uuid' % self.vmpath,
3347 { 'dom' : self.domid,
3348 'read' : True,
3349 'write' : False })
3352 # Utility functions
3355 def __getattr__(self, name):
3356 if name == "state":
3357 log.warn("Somebody tried to read XendDomainInfo.state... should use _stateGet()!!!")
3358 log.warn("".join(traceback.format_stack()))
3359 return self._stateGet()
3360 else:
3361 raise AttributeError(name)
3363 def __setattr__(self, name, value):
3364 if name == "state":
3365 log.warn("Somebody tried to set XendDomainInfo.state... should use _stateSet()!!!")
3366 log.warn("".join(traceback.format_stack()))
3367 self._stateSet(value)
3368 else:
3369 self.__dict__[name] = value
3371 def _stateSet(self, state):
3372 self.state_updated.acquire()
3373 try:
3374 # TODO Not sure this is correct...
3375 # _stateGet is live now. Why not fire event
3376 # even when it hasn't changed?
3377 if self._stateGet() != state:
3378 self.state_updated.notifyAll()
3379 import XendAPI
3380 XendAPI.event_dispatch('mod', 'VM', self.info['uuid'],
3381 'power_state')
3382 finally:
3383 self.state_updated.release()
3385 def _stateGet(self):
3386 # Let's try to reconstitute the state from xc.
3387 # First get the domain info
3388 # from xc - this will tell us whether the domain
3389 # exists
3390 info = dom_get(self.getDomid())
3391 if info is None or info['shutdown']:
3392 # We are either HALTED or SUSPENDED
3393 # check saved image exists
3394 from xen.xend import XendDomain
3395 managed_config_path = \
3396 XendDomain.instance()._managed_check_point_path( \
3397 self.get_uuid())
3398 if os.path.exists(managed_config_path):
3399 return XEN_API_VM_POWER_STATE_SUSPENDED
3400 else:
3401 return XEN_API_VM_POWER_STATE_HALTED
3402 elif info['crashed']:
3403 # Crashed
3404 return XEN_API_VM_POWER_STATE_CRASHED
3405 else:
3406 # We are either RUNNING or PAUSED
3407 if info['paused']:
3408 return XEN_API_VM_POWER_STATE_PAUSED
3409 else:
3410 return XEN_API_VM_POWER_STATE_RUNNING
3412 def _infoIsSet(self, name):
3413 return name in self.info and self.info[name] is not None
3415 def _checkName(self, name):
3416 """Check if a vm name is valid. Valid names contain alphabetic
3417 characters, digits, or characters in '_-.:/+'.
3418 The same name cannot be used for more than one vm at the same time.
3420 @param name: name
3421 @raise: VmError if invalid
3422 """
3423 from xen.xend import XendDomain
3425 if name is None or name == '':
3426 raise VmError('Missing VM Name')
3428 if not re.search(r'^[A-Za-z0-9_\-\.\:\/\+]+$', name):
3429 raise VmError('Invalid VM Name')
3431 dom = XendDomain.instance().domain_lookup_nr(name)
3432 if dom and dom.info['uuid'] != self.info['uuid']:
3433 raise VmError("VM name '%s' already exists%s" %
3434 (name,
3435 dom.domid is not None and
3436 (" as domain %s" % str(dom.domid)) or ""))
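# --- Editor's note: illustrative example, not part of the original file ---
# The pattern above admits names such as 'web-01.example:vm/2+' while
# rejecting empty names and anything with spaces; the uuid comparison
# lets a domain keep its own name across reconfiguration.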
3439 def update(self, info = None, refresh = True, transaction = None):
3440 """Update with info from xc.domain_getinfo().
3441 """
3442 log.trace("XendDomainInfo.update(%s) on domain %s", info,
3443 str(self.domid))
3445 if not info:
3446 info = dom_get(self.domid)
3447 if not info:
3448 return
3450 if info["maxmem_kb"] < 0:
3451 info["maxmem_kb"] = XendNode.instance() \
3452 .physinfo_dict()['total_memory'] * 1024
3454 # make sure state is reset for info
3455 # TODO: we should eventually get rid of old_dom_states
3457 self.info.update_config(info)
3458 self._update_consoles(transaction)
3460 if refresh:
3461 self.refreshShutdown(info)
3463 log.trace("XendDomainInfo.update done on domain %s: %s",
3464 str(self.domid), self.info)
3466 def sxpr(self, ignore_store = False, legacy_only = True):
3467 result = self.info.to_sxp(domain = self,
3468 ignore_devices = ignore_store,
3469 legacy_only = legacy_only)
3471 return result
3473 # Xen API
3474 # ----------------------------------------------------------------
3476 def get_uuid(self):
3477 dom_uuid = self.info.get('uuid')
3478 if not dom_uuid: # if it doesn't exist, make one up
3479 dom_uuid = uuid.createString()
3480 self.info['uuid'] = dom_uuid
3481 return dom_uuid
3483 def get_memory_static_max(self):
3484 return self.info.get('memory_static_max', 0)
3485 def get_memory_static_min(self):
3486 return self.info.get('memory_static_min', 0)
3487 def get_memory_dynamic_max(self):
3488 return self.info.get('memory_dynamic_max', 0)
3489 def get_memory_dynamic_min(self):
3490 return self.info.get('memory_dynamic_min', 0)
3492 # only update memory-related config values if they maintain sanity
3493 def _safe_set_memory(self, key, newval):
3494 oldval = self.info.get(key, 0)
3495 try:
3496 self.info[key] = newval
3497 self.info._memory_sanity_check()
3498 except Exception, ex:
3499 self.info[key] = oldval
3500 raise
3502 def set_memory_static_max(self, val):
3503 self._safe_set_memory('memory_static_max', val)
3504 def set_memory_static_min(self, val):
3505 self._safe_set_memory('memory_static_min', val)
3506 def set_memory_dynamic_max(self, val):
3507 self._safe_set_memory('memory_dynamic_max', val)
3508 def set_memory_dynamic_min(self, val):
3509 self._safe_set_memory('memory_dynamic_min', val)
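# [Editor's sketch] The save/validate/rollback idiom behind
# _safe_set_memory, reduced to its core; sanity_check stands in for
# XendConfig._memory_sanity_check and is an assumed callable:

def _safe_set(info, key, newval, sanity_check):
    oldval = info.get(key, 0)
    try:
        info[key] = newval
        sanity_check(info)      # may raise on inconsistent limits
    except Exception:
        info[key] = oldval      # restore the last known-good value
        raise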
3511 def get_vcpus_params(self):
3512 if self.getDomid() is None:
3513 return self.info['vcpus_params']
3515 retval = xc.sched_credit_domain_get(self.getDomid())
3516 return retval
3517 def get_power_state(self):
3518 return XEN_API_VM_POWER_STATE[self._stateGet()]
3519 def get_platform(self):
3520 return self.info.get('platform', {})
3521 def get_pci_bus(self):
3522 return self.info.get('pci_bus', '')
3523 def get_tools_version(self):
3524 return self.info.get('tools_version', {})
3525 def get_metrics(self):
3526 return self.metrics.get_uuid()
3529 def get_security_label(self, xspol=None):
3530 import xen.util.xsm.xsm as security
3531 label = security.get_security_label(self, xspol)
3532 return label
3534 def set_security_label(self, seclab, old_seclab, xspol=None,
3535 xspol_old=None):
3536 """
3537 Set the security label of a domain from its old to
3538 a new value.
3539 @param seclab New security label formatted in the form
3540 <policy type>:<policy name>:<vm label>
3541 @param old_seclab The current security label that the
3542 VM must have.
3543 @param xspol An optional policy under which this
3544 update should be done. If not given,
3545 then the current active policy is used.
3546 @param xspol_old The old policy; only to be passed during
3547 the updating of a policy
3548 @return Tuple of (return code, a string with errors from
3549 the hypervisor's operation, old label of the
3550 domain, new ssidref)
3551 """
3552 rc = 0
3553 errors = ""
3554 old_label = ""
3555 new_ssidref = 0
3556 domid = self.getDomid()
3557 res_labels = None
3558 is_policy_update = (xspol_old != None)
3560 from xen.xend.XendXSPolicyAdmin import XSPolicyAdminInstance
3562 state = self._stateGet()
3563 # Relabel only HALTED or RUNNING or PAUSED domains
3564 if domid != 0 and \
3565 state not in \
3566 [ DOM_STATE_HALTED, DOM_STATE_RUNNING, DOM_STATE_PAUSED, \
3567 DOM_STATE_SUSPENDED ]:
3568 log.warn("Relabeling domain not possible in state '%s'" %
3569 DOM_STATES[state])
3570 return (-xsconstants.XSERR_VM_WRONG_STATE, "", "", 0)
3572 # Remove security label. Works only for halted or suspended domains
3573 if not seclab or seclab == "":
3574 if state not in [ DOM_STATE_HALTED, DOM_STATE_SUSPENDED ]:
3575 return (-xsconstants.XSERR_VM_WRONG_STATE, "", "", 0)
3577 if self.info.has_key('security_label'):
3578 old_label = self.info['security_label']
3579 # Check label against expected one.
3580 if old_label != old_seclab:
3581 return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)
3582 del self.info['security_label']
3583 xen.xend.XendDomain.instance().managed_config_save(self)
3584 return (xsconstants.XSERR_SUCCESS, "", "", 0)
3586 tmp = seclab.split(":")
3587 if len(tmp) != 3:
3588 return (-xsconstants.XSERR_BAD_LABEL_FORMAT, "", "", 0)
3589 typ, policy, label = tmp
3591 poladmin = XSPolicyAdminInstance()
3592 if not xspol:
3593 xspol = poladmin.get_policy_by_name(policy)
3595 try:
3596 xen.xend.XendDomain.instance().policy_lock.acquire_writer()
3598 if state in [ DOM_STATE_RUNNING, DOM_STATE_PAUSED ]:
3599 # if domain is running or paused, try to relabel in the hypervisor
3600 if not xspol:
3601 return (-xsconstants.XSERR_POLICY_NOT_LOADED, "", "", 0)
3603 if typ != xspol.get_type_name() or \
3604 policy != xspol.get_name():
3605 return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)
3607 if typ == xsconstants.ACM_POLICY_ID:
3608 new_ssidref = xspol.vmlabel_to_ssidref(label)
3609 if new_ssidref == xsconstants.INVALID_SSIDREF:
3610 return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)
3612 # Check that all used resources are accessible under the
3613 # new label
3614 if not is_policy_update and \
3615 not security.resources_compatible_with_vmlabel(xspol,
3616 self, label):
3617 return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)
3619 # Check label against expected one. Can only do this
3620 # if the policy hasn't changed underneath in the meantime
3621 if xspol_old == None:
3622 old_label = self.get_security_label()
3623 if old_label != old_seclab:
3624 log.info("old_label != old_seclab: %s != %s" %
3625 (old_label, old_seclab))
3626 return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)
3628 # relabel domain in the hypervisor
3629 rc, errors = security.relabel_domains([[domid, new_ssidref]])
3630 log.info("rc from relabeling in HV: %d" % rc)
3631 else:
3632 return (-xsconstants.XSERR_POLICY_TYPE_UNSUPPORTED, "", "", 0)
3634 if rc == 0:
3635 # HALTED, RUNNING or PAUSED
3636 if domid == 0:
3637 if xspol:
3638 self.info['security_label'] = seclab
3639 ssidref = poladmin.set_domain0_bootlabel(xspol, label)
3640 else:
3641 return (-xsconstants.XSERR_POLICY_NOT_LOADED, "", "", 0)
3642 else:
3643 if self.info.has_key('security_label'):
3644 old_label = self.info['security_label']
3645 # Check label against expected one, unless wildcard
3646 if old_label != old_seclab:
3647 return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)
3649 self.info['security_label'] = seclab
3651 try:
3652 xen.xend.XendDomain.instance().managed_config_save(self)
3653 except:
3654 pass
3655 return (rc, errors, old_label, new_ssidref)
3656 finally:
3657 xen.xend.XendDomain.instance().policy_lock.release()
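# [Editor's sketch] Hedged usage of set_security_label, assuming vm is a
# XendDomainInfo instance; the policy and label names are hypothetical,
# only the "<policy type>:<policy name>:<vm label>" format is taken from
# the docstring above:

rc, errors, old_label, new_ssidref = vm.set_security_label(
    'ACM:example_policy:green',     # new label
    'ACM:example_policy:red')       # label the VM is expected to hold
if rc != xsconstants.XSERR_SUCCESS:
    log.warn("relabel failed (rc=%d): %s" % (rc, errors))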
3659 def get_on_shutdown(self):
3660 after_shutdown = self.info.get('actions_after_shutdown')
3661 if not after_shutdown or after_shutdown not in XEN_API_ON_NORMAL_EXIT:
3662 return XEN_API_ON_NORMAL_EXIT[-1]
3663 return after_shutdown
3665 def get_on_reboot(self):
3666 after_reboot = self.info.get('actions_after_reboot')
3667 if not after_reboot or after_reboot not in XEN_API_ON_NORMAL_EXIT:
3668 return XEN_API_ON_NORMAL_EXIT[-1]
3669 return after_reboot
3671 def get_on_suspend(self):
3672 # TODO: not supported
3673 after_suspend = self.info.get('actions_after_suspend')
3674 if not after_suspend or after_suspend not in XEN_API_ON_NORMAL_EXIT:
3675 return XEN_API_ON_NORMAL_EXIT[-1]
3676 return after_suspend
3678 def get_on_crash(self):
3679 after_crash = self.info.get('actions_after_crash')
3680 if not after_crash or after_crash not in \
3681 XEN_API_ON_CRASH_BEHAVIOUR + restart_modes:
3682 return XEN_API_ON_CRASH_BEHAVIOUR[0]
3683 return XEN_API_ON_CRASH_BEHAVIOUR_FILTER[after_crash]
3685 def get_dev_config_by_uuid(self, dev_class, dev_uuid):
3686 """ Get's a device configuration either from XendConfig or
3687 from the DevController.
3689 @param dev_class: device class, either, 'vbd' or 'vif'
3690 @param dev_uuid: device UUID
3692 @rtype: dictionary
3693 """
3694 dev_type, dev_config = self.info['devices'].get(dev_uuid, (None, None))
3696 # shortcut if the domain isn't started because
3697 # the devcontrollers will have no better information
3698 # than XendConfig.
3699 if self._stateGet() in (XEN_API_VM_POWER_STATE_HALTED,
3700 XEN_API_VM_POWER_STATE_SUSPENDED):
3701 if dev_config:
3702 return copy.deepcopy(dev_config)
3703 return None
3705 # instead of using dev_class, we use the dev_type
3706 # that is from XendConfig.
3707 controller = self.getDeviceController(dev_type)
3708 if not controller:
3709 return None
3711 all_configs = controller.getAllDeviceConfigurations()
3712 if not all_configs:
3713 return None
3715 updated_dev_config = copy.deepcopy(dev_config)
3716 for _devid, _devcfg in all_configs.items():
3717 if _devcfg.get('uuid') == dev_uuid:
3718 updated_dev_config.update(_devcfg)
3719 updated_dev_config['id'] = _devid
3720 return updated_dev_config
3722 return updated_dev_config
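# [Editor's sketch] What the lookup above computes, in isolation: the
# static XendConfig entry overlaid with the live DevController view,
# plus the controller's device id. The dicts are made-up sample data:

dev_config  = {'uuid': 'abc', 'mode': 'r'}          # from XendConfig
all_configs = {768: {'uuid': 'abc', 'mode': 'w'}}   # from DevController

merged = dict(dev_config)
for _devid, _devcfg in all_configs.items():
    if _devcfg.get('uuid') == merged.get('uuid'):
        merged.update(_devcfg)
        merged['id'] = _devid
        break
# merged == {'uuid': 'abc', 'mode': 'w', 'id': 768}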
3724 def get_dev_xenapi_config(self, dev_class, dev_uuid):
3725 config = self.get_dev_config_by_uuid(dev_class, dev_uuid)
3726 if not config:
3727 return {}
3729 config['VM'] = self.get_uuid()
3731 if dev_class == 'vif':
3732 if not config.has_key('name'):
3733 config['name'] = config.get('vifname', '')
3734 if not config.has_key('MAC'):
3735 config['MAC'] = config.get('mac', '')
3736 if not config.has_key('type'):
3737 config['type'] = 'paravirtualised'
3738 devid = config.get('id') # hoisted: the stats code below needs devid too
3739 if not config.has_key('device'):
3740 if devid != None:
3741 config['device'] = 'eth%s' % devid
3742 else:
3743 config['device'] = ''
3745 if not config.has_key('network'):
3746 try:
3747 bridge = config.get('bridge', None)
3748 if bridge is None:
3749 from xen.util import Brctl
3750 if_to_br = dict([(i,b)
3751 for (b,ifs) in Brctl.get_state().items()
3752 for i in ifs])
3753 vifname = "vif%s.%s" % (self.getDomid(),
3754 config.get('id'))
3755 bridge = if_to_br.get(vifname, None)
3756 config['network'] = \
3757 XendNode.instance().bridge_to_network(
3758 bridge).get_uuid()
3759 except Exception:
3760 log.exception('bridge_to_network')
3761 # Ignore this for now -- it may happen if the device
3762 # has been specified using the legacy methods, but at
3763 # some point we're going to have to figure out how to
3764 # handle that properly.
3766 config['MTU'] = 1500 # TODO
3768 if self._stateGet() not in (XEN_API_VM_POWER_STATE_HALTED,):
3769 xennode = XendNode.instance()
3770 rx_bps, tx_bps = xennode.get_vif_util(self.domid, devid)
3771 config['io_read_kbs'] = rx_bps/1024
3772 config['io_write_kbs'] = tx_bps/1024
3773 rx, tx = xennode.get_vif_stat(self.domid, devid)
3774 config['io_total_read_kbs'] = rx/1024
3775 config['io_total_write_kbs'] = tx/1024
3776 else:
3777 config['io_read_kbs'] = 0.0
3778 config['io_write_kbs'] = 0.0
3779 config['io_total_read_kbs'] = 0.0
3780 config['io_total_write_kbs'] = 0.0
3782 config['security_label'] = config.get('security_label', '')
3784 if dev_class == 'vbd':
3786 if self._stateGet() not in (XEN_API_VM_POWER_STATE_HALTED,):
3787 controller = self.getDeviceController(dev_class)
3788 devid, _1, _2 = controller.getDeviceDetails(config)
3789 xennode = XendNode.instance()
3790 rd_blkps, wr_blkps = xennode.get_vbd_util(self.domid, devid)
3791 config['io_read_kbs'] = rd_blkps
3792 config['io_write_kbs'] = wr_blkps
3793 else:
3794 config['io_read_kbs'] = 0.0
3795 config['io_write_kbs'] = 0.0
3797 config['VDI'] = config.get('VDI', '')
3798 config['device'] = config.get('dev', '')
3799 if config['device'].startswith('ioemu:'):
3800 _, vbd_device = config['device'].split(':', 1)
3801 config['device'] = vbd_device
3802 if ':' in config['device']:
3803 vbd_name, vbd_type = config['device'].split(':', 1)
3804 config['device'] = vbd_name
3805 if vbd_type == 'cdrom':
3806 config['type'] = XEN_API_VBD_TYPE[0]
3807 else:
3808 config['type'] = XEN_API_VBD_TYPE[1]
3810 config['driver'] = 'paravirtualised' # TODO
3811 config['image'] = config.get('uname', '')
3813 if config.get('mode', 'r') == 'r':
3814 config['mode'] = 'RO'
3815 else:
3816 config['mode'] = 'RW'
3818 if dev_class == 'vtpm':
3819 if not config.has_key('type'):
3820 config['type'] = 'paravirtualised' # TODO
3821 if not config.has_key('backend'):
3822 config['backend'] = "00000000-0000-0000-0000-000000000000"
3824 return config
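# [Editor's sketch] The vbd 'device' normalisation above, extracted into
# a helper: an optional 'ioemu:' prefix is stripped, and a trailing
# ':<type>' suffix (e.g. ':cdrom') selects the VBD type. Sample values
# are illustrative:

def _parse_vbd_device(device):
    if device.startswith('ioemu:'):             # qemu-dm prefix
        _, device = device.split(':', 1)
    vbd_type = None
    if ':' in device:
        device, vbd_type = device.split(':', 1) # e.g. 'cdrom'
    return device, vbd_type

# _parse_vbd_device('ioemu:hdc:cdrom') == ('hdc', 'cdrom')
# _parse_vbd_device('xvda')            == ('xvda', None)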
3826 def get_dev_property(self, dev_class, dev_uuid, field):
3827 config = self.get_dev_xenapi_config(dev_class, dev_uuid)
3828 try:
3829 return config[field]
3830 except KeyError:
3831 raise XendError('Invalid property for device: %s' % field)
3833 def set_dev_property(self, dev_class, dev_uuid, field, value):
3834 self.info['devices'][dev_uuid][1][field] = value
3836 def get_vcpus_util(self):
3837 vcpu_util = {}
3838 xennode = XendNode.instance()
3839 if 'VCPUs_max' in self.info and self.domid != None:
3840 for i in range(0, self.info['VCPUs_max']):
3841 util = xennode.get_vcpu_util(self.domid, i)
3842 vcpu_util[str(i)] = util
3844 return vcpu_util
3846 def get_consoles(self):
3847 return self.info.get('console_refs', [])
3849 def get_vifs(self):
3850 return self.info.get('vif_refs', [])
3852 def get_vbds(self):
3853 return self.info.get('vbd_refs', [])
3855 def get_vtpms(self):
3856 return self.info.get('vtpm_refs', [])
3858 def get_dpcis(self):
3859 return XendDPCI.get_by_VM(self.info.get('uuid'))
3861 def get_dscsis(self):
3862 return XendDSCSI.get_by_VM(self.info.get('uuid'))
3864 def get_dscsi_HBAs(self):
3865 return XendDSCSI_HBA.get_by_VM(self.info.get('uuid'))
3867 def create_vbd(self, xenapi_vbd, vdi_image_path):
3868 """Create a VBD using a VDI from XendStorageRepository.
3870 @param xenapi_vbd: vbd struct from the Xen API
3871 @param vdi_image_path: VDI UUID
3872 @rtype: string
3873 @return: uuid of the device
3874 """
3875 xenapi_vbd['image'] = vdi_image_path
3876 if vdi_image_path.startswith('tap'):
3877 dev_uuid = self.info.device_add('tap2', cfg_xenapi = xenapi_vbd)
3878 else:
3879 dev_uuid = self.info.device_add('vbd', cfg_xenapi = xenapi_vbd)
3881 if not dev_uuid:
3882 raise XendError('Failed to create device')
3884 if self._stateGet() in (XEN_API_VM_POWER_STATE_RUNNING,
3885 XEN_API_VM_POWER_STATE_PAUSED):
3886 _, config = self.info['devices'][dev_uuid]
3888 if vdi_image_path.startswith('tap'):
3889 dev_control = self.getDeviceController('tap2')
3890 else:
3891 dev_control = self.getDeviceController('vbd')
3893 try:
3894 devid = dev_control.createDevice(config)
3895 dev_type = self.getBlockDeviceClass(devid)
3896 self._waitForDevice(dev_type, devid)
3897 self.info.device_update(dev_uuid,
3898 cfg_xenapi = {'devid': devid})
3899 except Exception, exn:
3900 log.exception(exn)
3901 del self.info['devices'][dev_uuid]
3902 self.info['vbd_refs'].remove(dev_uuid)
3903 raise
3905 return dev_uuid
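# [Editor's sketch] Hedged usage of create_vbd, assuming vm is a running
# XendDomainInfo instance; the struct fields and the phy: uname are
# illustrative, not a complete Xen API VBD record:

vbd_struct = {
    'VM':       vm.get_uuid(),
    'device':   'xvdb',
    'bootable': False,
    'mode':     'RW',
}
dev_uuid = vm.create_vbd(vbd_struct, 'phy:/dev/vg0/guest-disk')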
3907 def create_phantom_vbd_with_vdi(self, xenapi_vbd, vdi_image_path):
3908 """Create a VBD using a VDI from XendStorageRepository.
3910 @param xenapi_vbd: vbd struct from the Xen API
3911 @param vdi_image_path: VDI UUID
3912 @rtype: string
3913 @return: devid of the device
3914 """
3915 xenapi_vbd['image'] = vdi_image_path
3916 dev_uuid = self.info.phantom_device_add('tap', cfg_xenapi = xenapi_vbd)
3917 if not dev_uuid:
3918 raise XendError('Failed to create device')
3920 if self._stateGet() == XEN_API_VM_POWER_STATE_RUNNING:
3921 _, config = self.info['devices'][dev_uuid]
3922 config['devid'] = self.getDeviceController('tap').createDevice(config)
3924 return config['devid']
3926 def create_vif(self, xenapi_vif):
3927 """Create VIF device from the passed struct in Xen API format.
3929 @param xenapi_vif: Xen API VIF Struct.
3930 @rtype: string
3931 @return: UUID
3932 """
3933 dev_uuid = self.info.device_add('vif', cfg_xenapi = xenapi_vif)
3934 if not dev_uuid:
3935 raise XendError('Failed to create device')
3937 if self._stateGet() in (XEN_API_VM_POWER_STATE_RUNNING,
3938 XEN_API_VM_POWER_STATE_PAUSED):
3940 _, config = self.info['devices'][dev_uuid]
3941 dev_control = self.getDeviceController('vif')
3943 try:
3944 devid = dev_control.createDevice(config)
3945 dev_control.waitForDevice(devid)
3946 self.info.device_update(dev_uuid,
3947 cfg_xenapi = {'devid': devid})
3948 except Exception, exn:
3949 log.exception(exn)
3950 del self.info['devices'][dev_uuid]
3951 self.info['vif_refs'].remove(dev_uuid)
3952 raise
3954 return dev_uuid
3956 def create_vtpm(self, xenapi_vtpm):
3957 """Create a VTPM device from the passed struct in Xen API format.
3959 @return: uuid of the device
3960 @rtype: string
3961 """
3963 if self._stateGet() not in (DOM_STATE_HALTED,):
3964 raise VmError("Can only add vTPM to a halted domain.")
3965 if self.get_vtpms() != []:
3966 raise VmError('Domain already has a vTPM.')
3967 dev_uuid = self.info.device_add('vtpm', cfg_xenapi = xenapi_vtpm)
3968 if not dev_uuid:
3969 raise XendError('Failed to create device')
3971 return dev_uuid
3973 def create_console(self, xenapi_console):
3974 """ Create a console device from a Xen API struct.
3976 @return: uuid of device
3977 @rtype: string
3978 """
3979 if self._stateGet() not in (DOM_STATE_HALTED,):
3980 raise VmError("Can only add console to a halted domain.")
3982 dev_uuid = self.info.device_add('console', cfg_xenapi = xenapi_console)
3983 if not dev_uuid:
3984 raise XendError('Failed to create device')
3986 return dev_uuid
3988 def set_console_other_config(self, console_uuid, other_config):
3989 self.info.console_update(console_uuid, 'other_config', other_config)
3991 def create_dpci(self, xenapi_pci):
3992 """Create pci device from the passed struct in Xen API format.
3994 @param xenapi_pci: DPCI struct from Xen API
3995 @rtype: string
3997 @return: UUID of the created device
3999 """
4001 dpci_uuid = uuid.createString()
4003 dpci_opts = []
4004 opts_dict = xenapi_pci.get('options')
4005 for k in opts_dict.keys():
4006 dpci_opts.append([k, opts_dict[k]])
4007 opts_sxp = pci_opts_list_to_sxp(dpci_opts)
4009 # Convert xenapi to sxp
4010 ppci = XendAPIStore.get(xenapi_pci.get('PPCI'), 'PPCI')
4012 dev_sxp = ['dev',
4013 ['domain', '0x%02x' % ppci.get_domain()],
4014 ['bus', '0x%02x' % ppci.get_bus()],
4015 ['slot', '0x%02x' % ppci.get_slot()],
4016 ['func', '0x%1x' % ppci.get_func()],
4017 ['vdevfn', '0x%02x' % xenapi_pci.get('hotplug_slot')],
4018 ['key', xenapi_pci['key']],
4019 ['uuid', dpci_uuid]]
4020 dev_sxp = sxp.merge(dev_sxp, opts_sxp)
4022 target_pci_sxp = ['pci', dev_sxp, ['state', 'Initialising'] ]
4024 if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:
4026 old_pci_sxp = self._getDeviceInfo_pci(0)
4028 if old_pci_sxp is None:
4029 dev_uuid = self.info.device_add('pci', cfg_sxp = target_pci_sxp)
4030 if not dev_uuid:
4031 raise XendError('Failed to create device')
4033 else:
4034 new_pci_sxp = ['pci']
4035 for existing_dev in sxp.children(old_pci_sxp, 'dev'):
4036 new_pci_sxp.append(existing_dev)
4037 new_pci_sxp.append(sxp.child0(target_pci_sxp, 'dev'))
4039 dev_uuid = sxp.child_value(old_pci_sxp, 'uuid')
4040 self.info.device_update(dev_uuid, new_pci_sxp)
4042 xen.xend.XendDomain.instance().managed_config_save(self)
4044 else:
4045 try:
4046 self.device_configure(target_pci_sxp)
4048 except Exception, exn:
4049 raise XendError('Failed to create device')
4051 return dpci_uuid
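# [Editor's sketch] The SXP fragment create_dpci builds for a single
# PCI function, shown for a made-up device 0000:03:00.0 placed in
# hotplug slot 0x28 (this mirrors the ['dev', ...] construction above):

example_pci_sxp = \
    ['pci',
        ['dev',
            ['domain', '0x00'],
            ['bus',    '0x03'],
            ['slot',   '0x00'],
            ['func',   '0x0'],
            ['vdevfn', '0x28'],
            ['key',    '0000:03:00.0'],
            ['uuid',   'example-dpci-uuid']],
        ['state', 'Initialising']]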
4053 def create_dscsi(self, xenapi_dscsi):
4054 """Create scsi device from the passed struct in Xen API format.
4056 @param xenapi_dscsi: DSCSI struct from Xen API
4057 @rtype: string
4058 @return: UUID
4059 """
4061 dscsi_uuid = uuid.createString()
4063 # Convert xenapi to sxp
4064 pscsi = XendAPIStore.get(xenapi_dscsi.get('PSCSI'), 'PSCSI')
4065 devid = int(xenapi_dscsi.get('virtual_HCTL').split(':')[0])
4066 target_vscsi_sxp = \
4067 ['vscsi',
4068 ['dev',
4069 ['devid', devid],
4070 ['p-devname', pscsi.get_dev_name()],
4071 ['p-dev', pscsi.get_physical_HCTL()],
4072 ['v-dev', xenapi_dscsi.get('virtual_HCTL')],
4073 ['state', xenbusState['Initialising']],
4074 ['uuid', dscsi_uuid]
4075 ],
4076 ['feature-host', 0]
4077 ]
4079 if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:
4081 cur_vscsi_sxp = self._getDeviceInfo_vscsi(devid)
4083 if cur_vscsi_sxp is None:
4084 dev_uuid = self.info.device_add('vscsi', cfg_sxp = target_vscsi_sxp)
4085 if not dev_uuid:
4086 raise XendError('Failed to create device')
4088 else:
4089 new_vscsi_sxp = ['vscsi', ['feature-host', 0]]
4090 for existing_dev in sxp.children(cur_vscsi_sxp, 'dev'):
4091 new_vscsi_sxp.append(existing_dev)
4092 new_vscsi_sxp.append(sxp.child0(target_vscsi_sxp, 'dev'))
4094 dev_uuid = sxp.child_value(cur_vscsi_sxp, 'uuid')
4095 self.info.device_update(dev_uuid, new_vscsi_sxp)
4097 xen.xend.XendDomain.instance().managed_config_save(self)
4099 else:
4100 try:
4101 self.device_configure(target_vscsi_sxp)
4102 except Exception, exn:
4103 log.exception('create_dscsi: %s', exn)
4104 raise XendError('Failed to create device')
4106 return dscsi_uuid
4108 def create_dscsi_HBA(self, xenapi_dscsi):
4109 """Create scsi devices from the passed struct in Xen API format.
4111 @param xenapi_dscsi: DSCSI_HBA struct from Xen API
4112 @rtype: string
4113 @return: UUID
4114 """
4116 dscsi_HBA_uuid = uuid.createString()
4118 # Convert xenapi to sxp
4119 feature_host = xenapi_dscsi.get('assignment_mode', 'HOST') == 'HOST' and 1 or 0
4120 target_vscsi_sxp = \
4121 ['vscsi',
4122 ['feature-host', feature_host],
4123 ['uuid', dscsi_HBA_uuid],
4124 ]
4125 pscsi_HBA = XendAPIStore.get(xenapi_dscsi.get('PSCSI_HBA'), 'PSCSI_HBA')
4126 devid = pscsi_HBA.get_physical_host()
4127 for pscsi_uuid in pscsi_HBA.get_PSCSIs():
4128 pscsi = XendAPIStore.get(pscsi_uuid, 'PSCSI')
4129 pscsi_HCTL = pscsi.get_physical_HCTL()
4130 dscsi_uuid = uuid.createString()
4131 dev = \
4132 ['dev',
4133 ['devid', devid],
4134 ['p-devname', pscsi.get_dev_name()],
4135 ['p-dev', pscsi_HCTL],
4136 ['v-dev', pscsi_HCTL],
4137 ['state', xenbusState['Initialising']],
4138 ['uuid', dscsi_uuid]
4139 ]
4140 target_vscsi_sxp.append(dev)
4142 if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:
4143 if not self.info.device_add('vscsi', cfg_sxp = target_vscsi_sxp):
4144 raise XendError('Failed to create device')
4145 xen.xend.XendDomain.instance().managed_config_save(self)
4146 else:
4147 try:
4148 self.device_configure(target_vscsi_sxp)
4149 except Exception, exn:
4150 log.exception('create_dscsi_HBA: %s', exn)
4151 raise XendError('Failed to create device')
4153 return dscsi_HBA_uuid
4156 def change_vdi_of_vbd(self, xenapi_vbd, vdi_image_path):
4157 """Change current VDI with the new VDI.
4159 @param xenapi_vbd: vbd struct from the Xen API
4160 @param vdi_image_path: path of VDI
4161 """
4162 dev_uuid = xenapi_vbd['uuid']
4163 if dev_uuid not in self.info['devices']:
4164 raise XendError('Device does not exist')
4166 # Convert xenapi to sxp
4167 if vdi_image_path.startswith('tap'):
4168 dev_class = 'tap'
4169 else:
4170 dev_class = 'vbd'
4171 dev_sxp = [
4172 dev_class,
4173 ['uuid', dev_uuid],
4174 ['uname', vdi_image_path],
4175 ['dev', '%s:cdrom' % xenapi_vbd['device']],
4176 ['mode', 'r'],
4177 ['VDI', xenapi_vbd['VDI']]
4178 ]
4180 if self._stateGet() in (XEN_API_VM_POWER_STATE_RUNNING,
4181 XEN_API_VM_POWER_STATE_PAUSED):
4182 self.device_configure(dev_sxp)
4183 else:
4184 self.info.device_update(dev_uuid, dev_sxp)
4187 def destroy_device_by_uuid(self, dev_type, dev_uuid):
4188 if dev_uuid not in self.info['devices']:
4189 raise XendError('Device does not exist')
4191 try:
4192 if self._stateGet() in (XEN_API_VM_POWER_STATE_RUNNING,
4193 XEN_API_VM_POWER_STATE_PAUSED):
4194 _, config = self.info['devices'][dev_uuid]
4195 devid = config.get('devid')
4196 if devid != None:
4197 self.getDeviceController(dev_type).destroyDevice(devid, force = False)
4198 else:
4199 raise XendError('Unable to get devid for device: %s:%s' %
4200 (dev_type, dev_uuid))
4201 finally:
4202 del self.info['devices'][dev_uuid]
4203 self.info['%s_refs' % dev_type].remove(dev_uuid)
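# [Editor's sketch] Note the try/finally above: the XendConfig
# bookkeeping is dropped even when destroyDevice raises, so a failed
# hot-unplug still removes the device from the managed configuration.
# The shape of that guarantee, in isolation:

def _destroy_with_cleanup(devices, refs, dev_uuid, destroy):
    try:
        destroy(dev_uuid)           # may raise XendError
    finally:
        del devices[dev_uuid]       # cleanup runs on success and failure
        refs.remove(dev_uuid)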
4205 def destroy_vbd(self, dev_uuid):
4206 self.destroy_device_by_uuid('vbd', dev_uuid)
4208 def destroy_vif(self, dev_uuid):
4209 self.destroy_device_by_uuid('vif', dev_uuid)
4211 def destroy_vtpm(self, dev_uuid):
4212 self.destroy_device_by_uuid('vtpm', dev_uuid)
4214 def destroy_dpci(self, dev_uuid):
4216 dpci = XendAPIStore.get(dev_uuid, 'DPCI')
4217 ppci = XendAPIStore.get(dpci.get_PPCI(), 'PPCI')
4219 old_pci_sxp = self._getDeviceInfo_pci(0)
4220 dev_uuid = sxp.child_value(old_pci_sxp, 'uuid')
4221 target_dev = None
4222 new_pci_sxp = ['pci']
4223 for dev in sxp.children(old_pci_sxp, 'dev'):
4224 pci_dev = {}
4225 pci_dev['domain'] = sxp.child_value(dev, 'domain')
4226 pci_dev['bus'] = sxp.child_value(dev, 'bus')
4227 pci_dev['slot'] = sxp.child_value(dev, 'slot')
4228 pci_dev['func'] = sxp.child_value(dev, 'func')
4229 if ppci.get_name() == pci_dict_to_bdf_str(pci_dev):
4230 target_dev = dev
4231 else:
4232 new_pci_sxp.append(dev)
4234 if target_dev is None:
4235 raise XendError('Failed to destroy device')
4237 target_pci_sxp = ['pci', target_dev, ['state', 'Closing']]
4239 if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:
4241 self.info.device_update(dev_uuid, new_pci_sxp)
4242 if len(sxp.children(new_pci_sxp, 'dev')) == 0:
4243 del self.info['devices'][dev_uuid]
4244 xen.xend.XendDomain.instance().managed_config_save(self)
4246 else:
4247 try:
4248 self.device_configure(target_pci_sxp)
4250 except Exception, exn:
4251 raise XendError('Failed to destroy device')
4253 def destroy_dscsi(self, dev_uuid):
4254 dscsi = XendAPIStore.get(dev_uuid, 'DSCSI')
4255 devid = dscsi.get_virtual_host()
4256 vHCTL = dscsi.get_virtual_HCTL()
4257 cur_vscsi_sxp = self._getDeviceInfo_vscsi(devid)
4258 dev_uuid = sxp.child_value(cur_vscsi_sxp, 'uuid')
4260 target_dev = None
4261 new_vscsi_sxp = ['vscsi', ['feature-host', 0]]
4262 for dev in sxp.children(cur_vscsi_sxp, 'dev'):
4263 if vHCTL == sxp.child_value(dev, 'v-dev'):
4264 target_dev = dev
4265 else:
4266 new_vscsi_sxp.append(dev)
4268 if target_dev is None:
4269 raise XendError('Failed to destroy device')
4271 target_dev.append(['state', xenbusState['Closing']])
4272 target_vscsi_sxp = ['vscsi', target_dev, ['feature-host', 0]]
4274 if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:
4276 self.info.device_update(dev_uuid, new_vscsi_sxp)
4277 if len(sxp.children(new_vscsi_sxp, 'dev')) == 0:
4278 del self.info['devices'][dev_uuid]
4279 xen.xend.XendDomain.instance().managed_config_save(self)
4281 else:
4282 try:
4283 self.device_configure(target_vscsi_sxp)
4284 except Exception, exn:
4285 log.exception('destroy_dscsi: %s', exn)
4286 raise XendError('Failed to destroy device')
4288 def destroy_dscsi_HBA(self, dev_uuid):
4289 dscsi_HBA = XendAPIStore.get(dev_uuid, 'DSCSI_HBA')
4290 devid = dscsi_HBA.get_virtual_host()
4291 cur_vscsi_sxp = self._getDeviceInfo_vscsi(devid)
4292 feature_host = sxp.child_value(cur_vscsi_sxp, 'feature-host')
4294 if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:
4295 new_vscsi_sxp = ['vscsi', ['feature-host', feature_host]]
4296 self.info.device_update(dev_uuid, new_vscsi_sxp)
4297 del self.info['devices'][dev_uuid]
4298 xen.xend.XendDomain.instance().managed_config_save(self)
4299 else:
4300 # If feature_host is 1, all devices are destroyed by just
4301 # one reconfiguration.
4302 # If feature_host is 0, we should reconfigure all devices
4303 # one-by-one to destroy all devices.
4304 # See reconfigureDevice@VSCSIController.
4305 for dev in sxp.children(cur_vscsi_sxp, 'dev'):
4306 target_vscsi_sxp = [
4307 'vscsi',
4308 dev + [['state', xenbusState['Closing']]],
4309 ['feature-host', feature_host]
4310 ]
4311 try:
4312 self.device_configure(target_vscsi_sxp)
4313 except Exception, exn:
4314 log.exception('destroy_dscsi_HBA: %s', exn)
4315 raise XendError('Failed to destroy device')
4316 if feature_host:
4317 break
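# [Editor's sketch] The feature-host rule implemented above, in
# isolation: with feature-host=1 a single Closing reconfiguration tears
# down the whole virtual HBA, with feature-host=0 every 'dev' entry
# needs its own Closing request:

def _teardown_requests(devs, feature_host):
    if feature_host:
        return devs[:1]     # one request covers every device
    return devs             # one Closing request per device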
4319 def destroy_xapi_instances(self):
4320 """Destroy Xen-API instances stored in XendAPIStore.
4321 """
4322 # Xen-API classes based on XendBase have their instances stored
4323 # in XendAPIStore. Cleanup these instances here, if they are supposed
4324 # to be destroyed when the parent domain is dead.
4326 # Most of the virtual devices (vif, vbd, vfb, etc) are not based on
4327 # XendBase and there's no need to remove them from XendAPIStore.
4329 from xen.xend import XendDomain
4330 if XendDomain.instance().is_valid_vm(self.info.get('uuid')):
4331 # domain still exists.
4332 return
4334 # Destroy the VMMetrics instance.
4335 if XendAPIStore.get(self.metrics.get_uuid(), self.metrics.getClass()) \
4336 is not None:
4337 self.metrics.destroy()
4339 # Destroy DPCI instances.
4340 for dpci_uuid in XendDPCI.get_by_VM(self.info.get('uuid')):
4341 XendAPIStore.deregister(dpci_uuid, "DPCI")
4343 # Destroy DSCSI instances.
4344 for dscsi_uuid in XendDSCSI.get_by_VM(self.info.get('uuid')):
4345 XendAPIStore.deregister(dscsi_uuid, "DSCSI")
4347 # Destroy DSCSI_HBA instances.
4348 for dscsi_HBA_uuid in XendDSCSI_HBA.get_by_VM(self.info.get('uuid')):
4349 XendAPIStore.deregister(dscsi_HBA_uuid, "DSCSI_HBA")
4351 def has_device(self, dev_class, dev_uuid):
4352 return (dev_uuid in self.info['%s_refs' % dev_class.lower()])
4354 def __str__(self):
4355 return '<domain id=%s name=%s memory=%s state=%s>' % \
4356 (str(self.domid), self.info['name_label'],
4357 str(self.info['memory_dynamic_max']), DOM_STATES[self._stateGet()])
4359 __repr__ = __str__