debuggers.hg

view tools/python/xen/xend/XendDomainInfo.py @ 19821:61ec78692b13

xend: pass-through: Clean up hvm_destroyPCIDevice()

There seems to be little need to use the domain, bus, slot and
function to look up the virtual slot to pass as the argument to
hvm_destroyPCIDevice(), only to have hvm_destroyPCIDevice() use the
virtual slot for the sole purpose of looking up the domain, bus, slot
and function.

Signed-off-by: Simon Horman <horms@verge.net.au>
author Keir Fraser <keir.fraser@citrix.com>
date Wed Jun 17 07:39:27 2009 +0100 (2009-06-17)
parents a4036225c168
children ce01af317923
line source
1 #===========================================================================
2 # This library is free software; you can redistribute it and/or
3 # modify it under the terms of version 2.1 of the GNU Lesser General Public
4 # License as published by the Free Software Foundation.
5 #
6 # This library is distributed in the hope that it will be useful,
7 # but WITHOUT ANY WARRANTY; without even the implied warranty of
8 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
9 # Lesser General Public License for more details.
10 #
11 # You should have received a copy of the GNU Lesser General Public
12 # License along with this library; if not, write to the Free Software
13 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
14 #============================================================================
15 # Copyright (C) 2004, 2005 Mike Wray <mike.wray@hp.com>
16 # Copyright (C) 2005-2007 XenSource Ltd
17 #============================================================================
19 """Representation of a single domain.
20 Includes support for domain construction, using
21 open-ended configurations.
23 Author: Mike Wray <mike.wray@hp.com>
25 """
27 import logging
28 import time
29 import threading
30 import thread
31 import re
32 import copy
33 import os
34 import traceback
35 from types import StringTypes
37 import xen.lowlevel.xc
38 from xen.util import asserts, auxbin
39 from xen.util.blkif import blkdev_uname_to_file, blkdev_uname_to_taptype
40 import xen.util.xsm.xsm as security
41 from xen.util import xsconstants
42 from xen.util.pci import serialise_pci_opts, pci_opts_list_to_sxp, \
43 pci_dict_to_bdf_str, pci_dict_to_xc_str, \
44 pci_convert_sxp_to_dict, pci_convert_dict_to_sxp, \
45 pci_dict_cmp
47 from xen.xend import balloon, sxp, uuid, image, arch
48 from xen.xend import XendOptions, XendNode, XendConfig
50 from xen.xend.XendConfig import scrub_password
51 from xen.xend.XendBootloader import bootloader, bootloader_tidy
52 from xen.xend.XendError import XendError, VmError
53 from xen.xend.XendDevices import XendDevices
54 from xen.xend.XendTask import XendTask
55 from xen.xend.xenstore.xstransact import xstransact, complete
56 from xen.xend.xenstore.xsutil import GetDomainPath, IntroduceDomain, SetTarget, ResumeDomain
57 from xen.xend.xenstore.xswatch import xswatch
58 from xen.xend.XendConstants import *
59 from xen.xend.XendAPIConstants import *
60 from xen.xend.server.DevConstants import xenbusState
62 from xen.xend.XendVMMetrics import XendVMMetrics
64 from xen.xend import XendAPIStore
65 from xen.xend.XendPPCI import XendPPCI
66 from xen.xend.XendDPCI import XendDPCI
67 from xen.xend.XendPSCSI import XendPSCSI
68 from xen.xend.XendDSCSI import XendDSCSI
70 MIGRATE_TIMEOUT = 30.0
71 BOOTLOADER_LOOPBACK_DEVICE = '/dev/xvdp'
73 xc = xen.lowlevel.xc.xc()
74 xoptions = XendOptions.instance()
76 log = logging.getLogger("xend.XendDomainInfo")
77 #log.setLevel(logging.TRACE)
def create(config):
    """Create and start a VM from the supplied SXP configuration.

    @param config: A configuration object involving lists of tuples.
    @type config: list of lists, eg ['vm', ['image', 'xen.gz']]

    @rtype: XendDomainInfo
    @return: An up and running XendDomainInfo instance
    @raise VmError: Invalid configuration or failure to start.
    """
    from xen.xend import XendDomain

    domconfig = XendConfig.XendConfig(sxp_obj = config)
    xd = XendDomain.instance()

    # Refuse to build a second live domain with the same name or UUID.
    existing = xd.domain_lookup_nr(domconfig["name_label"])
    if existing is None or existing.domid is None:
        existing = xd.domain_lookup_nr(domconfig["uuid"])
    if existing is not None and existing.domid is not None:
        raise VmError("Domain '%s' already exists with ID '%d'" % (domconfig["name_label"], existing.domid))

    log.debug("XendDomainInfo.create(%s)", scrub_password(config))
    vm = XendDomainInfo(domconfig)
    try:
        vm.start()
    except:
        # Construction failed part-way: tear down anything already built,
        # then let the original error propagate.
        log.exception('Domain construction failed')
        vm.destroy()
        raise
    return vm
def create_from_dict(config_dict):
    """Create and start a VM from the supplied configuration dictionary.

    @param config_dict: An configuration dictionary.

    @rtype: XendDomainInfo
    @return: An up and running XendDomainInfo instance
    @raise VmError: Invalid configuration or failure to start.
    """
    log.debug("XendDomainInfo.create_from_dict(%s)",
              scrub_password(config_dict))

    domconfig = XendConfig.XendConfig(xapi = config_dict)
    vm = XendDomainInfo(domconfig)
    try:
        vm.start()
    except:
        # Clean up the half-built domain before re-raising.
        log.exception('Domain construction failed')
        vm.destroy()
        raise
    return vm
def recreate(info, priv):
    """Create the VM object for an existing domain.  The domain must not
    be dying, as the paths in the store should already have been removed,
    and asking us to recreate them causes problems.

    @param info: Parsed configuration (raw dominfo from xc)
    @type info: Dictionary
    @param priv: Is a privileged domain (Dom 0)
    @type priv: bool

    @rtype: XendDomainInfo
    @return: An up and running XendDomainInfo instance
    @raise VmError: Invalid configuration.
    @raise XendError: Errors with configuration.
    """

    log.debug("XendDomainInfo.recreate(%s)", scrub_password(info))

    assert not info['dying']

    # Wrap the raw dominfo in a XendConfig and mark it as a live,
    # non-template domain that must not be auto-started again.
    xeninfo = XendConfig.XendConfig(dominfo = info)
    xeninfo['is_control_domain'] = priv
    xeninfo['is_a_template'] = False
    xeninfo['auto_power_on'] = False
    domid = xeninfo['domid']
    uuid1 = uuid.fromString(xeninfo['uuid'])
    needs_reinitialising = False

    dompath = GetDomainPath(domid)
    if not dompath:
        raise XendError('No domain path in store for existing '
                        'domain %d' % domid)

    log.info("Recreating domain %d, UUID %s. at %s" %
             (domid, xeninfo['uuid'], dompath))

    # need to verify the path and uuid if not Domain-0
    # if the required uuid and vm aren't set, then that means
    # we need to recreate the dom with our own values
    #
    # NOTE: this is probably not desirable, really we should just
    # abort or ignore, but there may be cases where xenstore's
    # entry disappears (eg. xenstore-rm /)
    #
    try:
        # Each failed consistency check below sets needs_reinitialising
        # and raises XendError('reinit') to jump straight past the
        # remaining checks (exception used as a forward 'goto').
        vmpath = xstransact.Read(dompath, "vm")
        if not vmpath:
            if not priv:
                log.warn('/local/domain/%d/vm is missing. recreate is '
                         'confused, trying our best to recover' % domid)
            needs_reinitialising = True
            raise XendError('reinit')

        uuid2_str = xstransact.Read(vmpath, "uuid")
        if not uuid2_str:
            log.warn('%s/uuid/ is missing. recreate is confused, '
                     'trying our best to recover' % vmpath)
            needs_reinitialising = True
            raise XendError('reinit')

        uuid2 = uuid.fromString(uuid2_str)
        if uuid1 != uuid2:
            log.warn('UUID in /vm does not match the UUID in /dom/%d.'
                     'Trying out best to recover' % domid)
            needs_reinitialising = True
    except XendError:
        pass # our best shot at 'goto' in python :)

    vm = XendDomainInfo(xeninfo, domid, dompath, augment = True, priv = priv,
                        vmpath = vmpath)

    if needs_reinitialising:
        # Rebuild the xenstore /vm and /local/domain entries from scratch.
        vm._recreateDom()
        vm._removeVm()
        vm._storeVmDetails()
        vm._storeDomDetails()

    # Reattach the image handler to the already-running domain.
    vm.image = image.create(vm, vm.info)
    vm.image.recreate()

    vm._registerWatches()
    vm.refreshShutdown(xeninfo)

    # register the domain in the list
    from xen.xend import XendDomain
    XendDomain.instance().add_domain(vm)

    return vm
def restore(config):
    """Create a domain and a VM object to do a restore.

    @param config: Domain SXP configuration
    @type config: list of lists. (see C{create})

    @rtype: XendDomainInfo
    @return: An up and running XendDomainInfo instance
    @raise VmError: Invalid configuration or failure to start.
    @raise XendError: Errors with configuration.
    """
    log.debug("XendDomainInfo.restore(%s)", scrub_password(config))

    domconfig = XendConfig.XendConfig(sxp_obj = config)
    vm = XendDomainInfo(domconfig, resume = True)
    try:
        vm.resume()
    except:
        # Resume failed: destroy the partial domain, then re-raise.
        vm.destroy()
        raise
    return vm
def createDormant(domconfig):
    """Create a dormant/inactive XendDomainInfo without creating the VM.
    This is for creating instances of persistent domains that are not
    yet started.

    @param domconfig: Parsed configuration
    @type domconfig: XendConfig object

    @rtype: XendDomainInfo
    @return: A dormant XendDomainInfo instance
    @raise XendError: Errors with configuration.
    """
    log.debug("XendDomainInfo.createDormant(%s)", scrub_password(domconfig))

    # domid does not make sense for non-running domains.
    domconfig.pop('domid', None)
    return XendDomainInfo(domconfig)
def domain_by_name(name):
    """Look up a domain by its name.

    @param name: Name of the domain
    @type name: string
    @return: XendDomainInfo or None
    """
    from xen.xend import XendDomain
    xd = XendDomain.instance()
    return xd.domain_lookup_by_name_nr(name)
def shutdown_reason(code):
    """Get a shutdown reason from a code.

    @param code: shutdown code
    @type code: int
    @return: shutdown reason, or "?" for an unknown code
    @rtype: string
    """
    try:
        return DOMAIN_SHUTDOWN_REASONS[code]
    except KeyError:
        return "?"
282 def dom_get(dom):
283 """Get info from xen for an existing domain.
285 @param dom: domain id
286 @type dom: int
287 @return: info or None
288 @rtype: dictionary
289 """
290 try:
291 domlist = xc.domain_getinfo(dom, 1)
292 if domlist and dom == domlist[0]['domid']:
293 return domlist[0]
294 except Exception, err:
295 # ignore missing domain
296 log.trace("domain_getinfo(%d) failed, ignoring: %s", dom, str(err))
297 return None
def get_assigned_pci_devices(domid):
    """Return the device strings of all PCI devices the pciback backend
    lists for the given domain (empty list if none are recorded)."""
    base = '/local/domain/0/backend/pci/%u/0/' % domid
    count = xstransact.Read(base + 'num_devs')
    if count is None or count == "":
        return []
    return [xstransact.Read(base + 'dev-%i' % n) for n in range(int(count))]
311 def do_FLR(domid):
312 from xen.xend.server.pciif import parse_pci_name, PciDevice
313 dev_str_list = get_assigned_pci_devices(domid)
315 for dev_str in dev_str_list:
316 try:
317 dev = PciDevice(parse_pci_name(dev_str))
318 except Exception, e:
319 raise VmError("pci: failed to locate device and "+
320 "parse it's resources - "+str(e))
321 dev.do_FLR()
323 class XendDomainInfo:
324 """An object represents a domain.
326 @TODO: try to unify dom and domid, they mean the same thing, but
327 xc refers to it as dom, and everywhere else, including
328 xenstore it is domid. The best way is to change xc's
329 python interface.
331 @ivar info: Parsed configuration
332 @type info: dictionary
333 @ivar domid: Domain ID (if VM has started)
334 @type domid: int or None
335 @ivar vmpath: XenStore path to this VM.
336 @type vmpath: string
337 @ivar dompath: XenStore path to this Domain.
338 @type dompath: string
339 @ivar image: Reference to the VM Image.
340 @type image: xen.xend.image.ImageHandler
341 @ivar store_port: event channel to xenstored
342 @type store_port: int
343 @ivar console_port: event channel to xenconsoled
344 @type console_port: int
345 @ivar store_mfn: xenstored mfn
346 @type store_mfn: int
347 @ivar console_mfn: xenconsoled mfn
348 @type console_mfn: int
349 @ivar notes: OS image notes
350 @type notes: dictionary
351 @ivar vmWatch: reference to a watch on the xenstored vmpath
352 @type vmWatch: xen.xend.xenstore.xswatch
353 @ivar shutdownWatch: reference to watch on the xenstored domain shutdown
354 @type shutdownWatch: xen.xend.xenstore.xswatch
355 @ivar shutdownStartTime: UNIX Time when domain started shutting down.
356 @type shutdownStartTime: float or None
357 @ivar restart_in_progress: Is a domain restart thread running?
358 @type restart_in_progress: bool
359 # @ivar state: Domain state
360 # @type state: enum(DOM_STATE_HALTED, DOM_STATE_RUNNING, ...)
361 @ivar state_updated: lock for self.state
362 @type state_updated: threading.Condition
363 @ivar refresh_shutdown_lock: lock for polling shutdown state
364 @type refresh_shutdown_lock: threading.Condition
365 @ivar _deviceControllers: device controller cache for this domain
366 @type _deviceControllers: dict 'string' to DevControllers
367 """
    def __init__(self, info, domid = None, dompath = None, augment = False,
                 priv = False, resume = False, vmpath = None):
        """Constructor for a domain

        @param info: parsed configuration
        @type info: dictionary
        @keyword domid: Set initial domain id (if any)
        @type domid: int
        @keyword dompath: Set initial dompath (if any)
        @type dompath: string
        @keyword augment: Augment given info with xenstored VM info
        @type augment: bool
        @keyword priv: Is a privileged domain (Dom 0)
        @type priv: bool
        @keyword resume: Is this domain being resumed?
        @type resume: bool
        @keyword vmpath: Use this xenstore /vm path instead of deriving one
        @type vmpath: string or None
        """

        self.info = info
        # An explicit domid argument wins over any domid in the config.
        if domid == None:
            self.domid = self.info.get('domid')
        else:
            self.domid = domid

        #REMOVE: uuid is now generated in XendConfig
        #if not self._infoIsSet('uuid'):
        #    self.info['uuid'] = uuid.toString(uuid.create())

        # Find a unique /vm/<uuid>/<integer> path if not specified.
        # This avoids conflict between pre-/post-migrate domains when doing
        # localhost relocation.
        self.vmpath = vmpath
        i = 0
        while self.vmpath == None:
            self.vmpath = XS_VMROOT + self.info['uuid']
            if i != 0:
                self.vmpath = self.vmpath + '-' + str(i)
            try:
                # A readable "uuid" key means the candidate path is already
                # taken; reset and retry with the next '-<i>' suffix.
                if self._readVm("uuid"):
                    self.vmpath = None
                    i = i + 1
            except:
                # Read failure means the path is free; keep this vmpath.
                pass

        self.dompath = dompath

        # Runtime handles, filled in when the domain is built/started.
        self.image = None
        self.store_port = None
        self.store_mfn = None
        self.console_port = None
        self.console_mfn = None

        self.native_protocol = None

        # Watch/shutdown bookkeeping (see class docstring).
        self.vmWatch = None
        self.shutdownWatch = None
        self.shutdownStartTime = None
        self._resume = resume
        self.restart_in_progress = False

        self.state_updated = threading.Condition()
        self.refresh_shutdown_lock = threading.Condition()
        self._stateSet(DOM_STATE_HALTED)

        self._deviceControllers = {}

        # Legacy state flags all start cleared.
        for state in DOM_STATES_OLD:
            self.info[state] = 0

        if augment:
            self._augmentInfo(priv)

        # Validates the name; raises VmError on a bad one, so do it before
        # creating the metrics object.
        self._checkName(self.info['name_label'])

        self.metrics = XendVMMetrics(uuid.createString(), self)
446 #
447 # Public functions available through XMLRPC
448 #
    def start(self, is_managed = False):
        """Attempt to start the VM by doing the appropriate
        initialisation if it is not already started.

        @keyword is_managed: save the running config if the domain is managed
        @type is_managed: bool
        @raise XendError: the VM is already running.
        """
        from xen.xend import XendDomain

        if self._stateGet() in (XEN_API_VM_POWER_STATE_HALTED, XEN_API_VM_POWER_STATE_SUSPENDED, XEN_API_VM_POWER_STATE_CRASHED):
            try:
                # Each phase reports progress percentages via XendTask.
                XendTask.log_progress(0, 30, self._constructDomain)
                XendTask.log_progress(31, 60, self._initDomain)

                XendTask.log_progress(61, 70, self._storeVmDetails)
                XendTask.log_progress(71, 80, self._storeDomDetails)
                XendTask.log_progress(81, 90, self._registerWatches)
                XendTask.log_progress(91, 100, self.refreshShutdown)

                xendomains = XendDomain.instance()
                xennode = XendNode.instance()

                # save running configuration if XendDomains believe domain is
                # persistent
                if is_managed:
                    xendomains.managed_config_save(self)

                # Apply weight/cap only when the credit scheduler is active.
                if xennode.xenschedinfo() == 'credit':
                    xendomains.domain_sched_credit_set(self.getDomid(),
                                                      self.getWeight(),
                                                      self.getCap())
            except:
                # Any failure during start tears the domain down again.
                log.exception('VM start failed')
                self.destroy()
                raise
        else:
            raise XendError('VM already running')
    def resume(self):
        """Resumes a domain that has come back from suspension.

        @raise XendError: the domain is neither suspended nor halted.
        """
        state = self._stateGet()
        if state in (DOM_STATE_SUSPENDED, DOM_STATE_HALTED):
            try:
                self._constructDomain()

                try:
                    self._setCPUAffinity()
                except:
                    # usually a CPU we want to set affinity to does not exist
                    # we just ignore it so that the domain can still be restored
                    log.warn("Cannot restore CPU affinity")

                # Rebuild xenstore state, channels and devices, then finish
                # the restore handshake.
                self._storeVmDetails()
                self._createChannels()
                self._createDevices()
                self._storeDomDetails()
                self._endRestore()
            except:
                # Failed part-way: destroy what was built and re-raise.
                log.exception('VM resume failed')
                self.destroy()
                raise
        else:
            raise XendError('VM is not suspended; it is %s'
                            % XEN_API_VM_POWER_STATE[state])
    def shutdown(self, reason):
        """Shutdown a domain by signalling this via xenstored.

        @param reason: one of DOMAIN_SHUTDOWN_REASONS' values
        @type reason: string
        @raise XendError: domain already down, is Domain-0, or bad reason.
        """
        log.debug('XendDomainInfo.shutdown(%s)', reason)
        if self._stateGet() in (DOM_STATE_SHUTDOWN, DOM_STATE_HALTED,):
            raise XendError('Domain cannot be shutdown')

        if self.domid == 0:
            raise XendError('Domain 0 cannot be shutdown')

        if reason not in DOMAIN_SHUTDOWN_REASONS.values():
            raise XendError('Invalid reason: %s' % reason)
        # Ask the guest to shut itself down via the xenstore control node.
        self.storeDom("control/shutdown", reason)

        # HVM domain shuts itself down only if it has PV drivers
        if self.info.is_hvm():
            hvm_pvdrv = xc.hvm_get_param(self.domid, HVM_PARAM_CALLBACK_IRQ)
            hvm_s_state = xc.hvm_get_param(self.domid, HVM_PARAM_ACPI_S_STATE)
            # No PV drivers (no callback IRQ) or guest not in S0: force the
            # shutdown from the hypervisor side instead.
            if not hvm_pvdrv or hvm_s_state != 0:
                code = REVERSE_DOMAIN_SHUTDOWN_REASONS[reason]
                log.info("HVM save:remote shutdown dom %d!", self.domid)
                xc.domain_shutdown(self.domid, code)
535 def pause(self):
536 """Pause domain
538 @raise XendError: Failed pausing a domain
539 """
540 try:
541 bepath="/local/domain/0/backend/"
542 if(self.domid):
544 dev = xstransact.List(bepath + 'vbd' + "/%d" % (self.domid,))
545 for x in dev:
546 path = self.getDeviceController('vbd').readBackend(x, 'params')
547 if path and path.startswith('/dev/xen/blktap-2'):
548 #Figure out the sysfs path.
549 pattern = re.compile('/dev/xen/blktap-2/tapdev(\d+)$')
550 ctrlid = pattern.search(path)
551 ctrl = '/sys/class/blktap2/blktap' + ctrlid.group(1)
552 #pause the disk
553 f = open(ctrl + '/pause', 'w')
554 f.write('pause');
555 f.close()
556 except Exception, ex:
557 log.warn('Could not pause blktap disk.');
559 try:
560 xc.domain_pause(self.domid)
561 self._stateSet(DOM_STATE_PAUSED)
562 except Exception, ex:
563 log.exception(ex)
564 raise XendError("Domain unable to be paused: %s" % str(ex))
    def unpause(self):
        """Unpause domain

        Any blktap2-backed disk is resumed first via its sysfs control
        file; failure to do so is logged but does not abort the unpause.

        @raise XendError: Failed unpausing a domain
        """
        try:
            bepath="/local/domain/0/backend/"
            if(self.domid):
                dev = xstransact.List(bepath + "vbd" + "/%d" % (self.domid,))
                for x in dev:
                    path = self.getDeviceController('vbd').readBackend(x, 'params')
                    if path and path.startswith('/dev/xen/blktap-2'):
                        #Figure out the sysfs path.
                        pattern = re.compile('/dev/xen/blktap-2/tapdev(\d+)$')
                        ctrlid = pattern.search(path)
                        ctrl = '/sys/class/blktap2/blktap' + ctrlid.group(1)
                        #unpause the disk
                        if(os.path.exists(ctrl + '/resume')):
                            f = open(ctrl + '/resume', 'w');
                            f.write('resume');
                            f.close();

        except Exception, ex:
            # Best effort: log and continue to unpause the domain itself.
            log.warn('Could not unpause blktap disk: %s' % str(ex));

        try:
            xc.domain_unpause(self.domid)
            self._stateSet(DOM_STATE_RUNNING)
        except Exception, ex:
            log.exception(ex)
            raise XendError("Domain unable to be unpaused: %s" % str(ex))
598 def send_sysrq(self, key):
599 """ Send a Sysrq equivalent key via xenstored."""
600 if self._stateGet() not in (DOM_STATE_RUNNING, DOM_STATE_PAUSED):
601 raise XendError("Domain '%s' is not started" % self.info['name_label'])
603 asserts.isCharConvertible(key)
604 self.storeDom("control/sysrq", '%c' % key)
    def pci_device_configure_boot(self):
        """Replay the boot-time PCI pass-through configuration for an HVM
        guest by re-issuing a 'Booting' configure request for every PCI
        device recorded in this domain's config. No-op for PV guests or
        when no PCI platform device exists."""

        if not self.info.is_hvm():
            return

        # All PCI devices share the single conf node with devid '0'.
        devid = '0'
        dev_info = self._getDeviceInfo_pci(devid)
        if dev_info is None:
            return

        # get the virtual slot info from xenstore
        dev_uuid = sxp.child_value(dev_info, 'uuid')
        pci_conf = self.info['devices'][dev_uuid][1]
        pci_devs = pci_conf['devs']
        # Build one 'Initialising'/'Booting' SXP request per device.
        request = map(lambda x:
                      pci_convert_dict_to_sxp(x, 'Initialising', 'Booting'),
                      pci_devs)

        for i in request:
            self.pci_device_configure(i)
    def hvm_pci_device_create(self, dev_config):
        """Validate and hot-plug a PCI device into this HVM guest.

        Runs the full set of pass-through safety checks (vslot conflict,
        VT-d assignability, pciback ownership, BAR alignment, co-assignment)
        before delegating the actual insertion to hvm_pci_device_insert_dev().

        @param dev_config: parsed PCI config; only devs[0] is plugged
        @return: the virtual slot used (see hvm_pci_device_insert_dev)
        @raise VmError: guest is not HVM or any check fails.
        """
        log.debug("XendDomainInfo.hvm_pci_device_create: %s"
                  % scrub_password(dev_config))

        if not self.info.is_hvm():
            raise VmError("hvm_pci_device_create called on non-HVM guest")

        #all the PCI devs share one conf node
        devid = '0'

        new_dev = dev_config['devs'][0]
        dev_info = self._getDeviceInfo_pci(devid)#from self.info['devices']

        #check conflict before trigger hotplug event
        if dev_info is not None:
            dev_uuid = sxp.child_value(dev_info, 'uuid')
            pci_conf = self.info['devices'][dev_uuid][1]
            pci_devs = pci_conf['devs']
            for x in pci_devs:
                # A fixed (non-auto) vslot may hold only one device.
                if (int(x['vslot'], 16) == int(new_dev['vslot'], 16) and
                    int(x['vslot'], 16) != AUTO_PHP_SLOT):
                    raise VmError("vslot %s already have a device." % (new_dev['vslot']))

                if (pci_dict_cmp(x, new_dev)):
                    raise VmError("device is already inserted")

        # Test whether the devices can be assigned with VT-d
        bdf = xc.test_assign_device(0, pci_dict_to_xc_str(new_dev))
        if bdf != 0:
            if bdf == -1:
                raise VmError("failed to assign device: maybe the platform"
                              " doesn't support VT-d, or VT-d isn't enabled"
                              " properly?")
            raise VmError("fail to assign device(%s): maybe it has"
                          " already been assigned to other domain, or maybe"
                          " it doesn't exist." % pci_dict_to_bdf_str(new_dev))

        # Here, we duplicate some checkings (in some cases, we mustn't allow
        # a device to be hot-plugged into an HVM guest) that are also done in
        # pci_device_configure()'s self.device_create(dev_sxp) or
        # dev_control.reconfigureDevice(devid, dev_config).
        # We must make the checkings before sending the command 'pci-ins' to
        # ioemu.

        # Test whether the device is owned by pciback. For instance, we can't
        # hotplug a device being used by Dom0 itself to an HVM guest.
        from xen.xend.server.pciif import PciDevice, parse_pci_name
        try:
            pci_device = PciDevice(new_dev)
        except Exception, e:
            raise VmError("pci: failed to locate device and "+
                          "parse it's resources - "+str(e))
        if pci_device.driver!='pciback':
            raise VmError(("pci: PCI Backend does not own device "+ \
                           "%s\n"+ \
                           "See the pciback.hide kernel "+ \
                           "command-line parameter or\n"+ \
                           "bind your slot/device to the PCI backend using sysfs" \
                           )%(pci_device.name))

        # Check non-page-aligned MMIO BAR.
        if pci_device.has_non_page_aligned_bar and arch.type != "ia64":
            raise VmError("pci: %s: non-page-aligned MMIO BAR found." % \
                          pci_device.name)

        # Check the co-assignment.
        # To pci-attach a device D to domN, we should ensure each of D's
        # co-assignment devices hasn't been assigned, or has been assigned to
        # domN.
        coassignment_list = pci_device.find_coassigned_devices()
        pci_device.devs_check_driver(coassignment_list)
        assigned_pci_device_str_list = self._get_assigned_pci_devices()
        for pci_str in coassignment_list:
            pci_dev = parse_pci_name(pci_str)
            # Still assignable means not yet given to any domain: fine.
            if xc.test_assign_device(0, pci_dict_to_xc_str(pci_dev)) == 0:
                continue
            # Already assigned, but not to this domain: refuse.
            if not pci_str in assigned_pci_device_str_list:
                raise VmError(("pci: failed to pci-attach %s to domain %s" + \
                               " because one of its co-assignment device %s has been" + \
                               " assigned to other domain." \
                               )% (pci_device.name, self.info['name_label'], pci_str))

        return self.hvm_pci_device_insert_dev(new_dev)
711 def hvm_pci_device_insert(self, dev_config):
712 log.debug("XendDomainInfo.hvm_pci_device_insert: %s"
713 % scrub_password(dev_config))
715 if not self.info.is_hvm():
716 raise VmError("hvm_pci_device_create called on non-HVM guest")
718 new_dev = dev_config['devs'][0]
720 return self.hvm_pci_device_insert_dev(new_dev)
    def hvm_pci_device_insert_dev(self, new_dev):
        """Signal the device model to hot-insert one PCI device.

        For a running domain, sends 'pci-ins' to ioemu and reads back the
        virtual slot from the device model's xenstore parameter node; for a
        non-running domain, simply returns the configured vslot.

        @param new_dev: PCI device dict (bdf fields, 'vslot', optional 'opts')
        @return: virtual slot (hex string as reported/configured)
        @raise VmError: the device model reported an error instead of a slot.
        """
        log.debug("XendDomainInfo.hvm_pci_device_insert_dev: %s"
                  % scrub_password(new_dev))

        if self.domid is not None:
            opts = ''
            if new_dev.has_key('opts'):
                opts = ',' + serialise_pci_opts(new_dev['opts'])

            # "<dom:bus:slot.func>@<vslot><,opts>" as ioemu expects.
            bdf_str = "%s@%02x%s" % (pci_dict_to_bdf_str(new_dev),
                                     int(new_dev['vslot'], 16), opts)
            log.debug("XendDomainInfo.hvm_pci_device_insert_dev: %s" % bdf_str)
            self.image.signalDeviceModel('pci-ins', 'pci-inserted', bdf_str)

            # The device model writes the chosen vslot (hex) back here; a
            # non-hex value is its way of reporting an error message.
            vslot = xstransact.Read("/local/domain/0/device-model/%i/parameter"
                                    % self.getDomid())
            try:
                vslot_int = int(vslot, 16)
            except ValueError:
                raise VmError(("Cannot pass-through PCI function '%s'. " +
                               "Device model reported an error: %s") %
                              (bdf_str, vslot))
        else:
            vslot = new_dev['vslot']

        return vslot
    def device_create(self, dev_config):
        """Create a new device.

        @param dev_config: device configuration
        @type dev_config: SXP object (parsed config)
        @return: SXP description of the created device
        @raise VmError: invalid MAC address, or device creation failed.
        """
        log.debug("XendDomainInfo.device_create: %s" % scrub_password(dev_config))
        dev_type = sxp.name(dev_config)
        # Record the device in this domain's config first; roll back below
        # if the actual creation fails.
        dev_uuid = self.info.device_add(dev_type, cfg_sxp = dev_config)
        dev_config_dict = self.info['devices'][dev_uuid][1]
        log.debug("XendDomainInfo.device_create: %s" % scrub_password(dev_config_dict))

        if dev_type == 'vif':
            # Validate any user-supplied MAC address before going further.
            for x in dev_config:
                if x != 'vif' and x[0] == 'mac':
                    if not re.match('^([0-9a-f]{2}:){5}[0-9a-f]{2}$', x[1], re.I):
                        log.error("Virtual network interface creation error - invalid MAC Address entered: %s", x[1])
                        raise VmError("Cannot create a new virtual network interface - MAC address is not valid!");

        if self.domid is not None:
            try:
                dev_config_dict['devid'] = devid = \
                    self._createDevice(dev_type, dev_config_dict)
                self._waitForDevice(dev_type, devid)
            except VmError, ex:
                # Creation failed: undo the config record and any
                # per-device API registrations made by device_add().
                del self.info['devices'][dev_uuid]
                if dev_type == 'pci':
                    for dev in dev_config_dict['devs']:
                        XendAPIStore.deregister(dev['uuid'], 'DPCI')
                elif dev_type == 'vscsi':
                    for dev in dev_config_dict['devs']:
                        XendAPIStore.deregister(dev['uuid'], 'DSCSI')
                elif dev_type == 'tap':
                    # tap devices are tracked under the vbd ref list.
                    self.info['vbd_refs'].remove(dev_uuid)
                else:
                    self.info['%s_refs' % dev_type].remove(dev_uuid)
                raise ex
        else:
            # Domain not running: config only, no backend device yet.
            devid = None

        xen.xend.XendDomain.instance().managed_config_save(self)
        return self.getDeviceController(dev_type).sxpr(devid)
794 def pci_device_configure(self, dev_sxp, devid = 0):
795 """Configure an existing pci device.
797 @param dev_sxp: device configuration
798 @type dev_sxp: SXP object (parsed config)
799 @param devid: device id
800 @type devid: int
801 @return: Returns True if successfully updated device
802 @rtype: boolean
803 """
804 log.debug("XendDomainInfo.pci_device_configure: %s"
805 % scrub_password(dev_sxp))
807 dev_class = sxp.name(dev_sxp)
809 if dev_class != 'pci':
810 return False
812 pci_state = sxp.child_value(dev_sxp, 'state')
813 pci_sub_state = sxp.child_value(dev_sxp, 'sub_state')
814 existing_dev_info = self._getDeviceInfo_pci(devid)
816 if existing_dev_info is None and pci_state != 'Initialising':
817 raise XendError("Cannot detach when pci platform does not exist")
819 pci_dev = sxp.children(dev_sxp, 'dev')[0]
820 dev_config = pci_convert_sxp_to_dict(dev_sxp)
821 dev = dev_config['devs'][0]
823 # Do HVM specific processing
824 if self.info.is_hvm():
825 if pci_state == 'Initialising':
826 # HVM PCI device attachment
827 if pci_sub_state == 'Booting':
828 vslot = self.hvm_pci_device_insert(dev_config)
829 else:
830 vslot = self.hvm_pci_device_create(dev_config)
831 # Update vslot
832 dev['vslot'] = vslot
833 for n in sxp.children(pci_dev):
834 if(n[0] == 'vslot'):
835 n[1] = vslot
836 else:
837 # HVM PCI device detachment
838 existing_dev_uuid = sxp.child_value(existing_dev_info, 'uuid')
839 existing_pci_conf = self.info['devices'][existing_dev_uuid][1]
840 existing_pci_devs = existing_pci_conf['devs']
841 new_devs = filter(lambda x: pci_dict_cmp(x, dev),
842 existing_pci_devs)
843 if len(new_devs) < 0:
844 raise VmError("Device %s is not connected" %
845 pci_dict_to_bdf_str(dev))
846 new_dev = new_devs[0]
847 self.hvm_destroyPCIDevice(new_dev)
848 # Update vslot
849 dev['vslot'] = new_dev['vslot']
850 for n in sxp.children(pci_dev):
851 if(n[0] == 'vslot'):
852 n[1] = new_dev['vslot']
854 # If pci platform does not exist, create and exit.
855 if existing_dev_info is None:
856 self.device_create(dev_sxp)
857 return True
859 if self.domid is not None:
860 # use DevController.reconfigureDevice to change device config
861 dev_control = self.getDeviceController(dev_class)
862 dev_uuid = dev_control.reconfigureDevice(devid, dev_config)
863 if not self.info.is_hvm():
864 # in PV case, wait until backend state becomes connected.
865 dev_control.waitForDevice_reconfigure(devid)
866 num_devs = dev_control.cleanupDevice(devid)
868 # update XendConfig with new device info
869 if dev_uuid:
870 new_dev_sxp = dev_control.configuration(devid)
871 self.info.device_update(dev_uuid, new_dev_sxp)
873 # If there is no device left, destroy pci and remove config.
874 if num_devs == 0:
875 if self.info.is_hvm():
876 self.destroyDevice('pci', devid, True)
877 else:
878 self.destroyDevice('pci', devid)
879 del self.info['devices'][dev_uuid]
880 else:
881 new_dev_sxp = ['pci']
882 for cur_dev in sxp.children(existing_dev_info, 'dev'):
883 if pci_state == 'Closing':
884 if int(dev['domain'], 16) == int(sxp.child_value(cur_dev, 'domain'), 16) and \
885 int(dev['bus'], 16) == int(sxp.child_value(cur_dev, 'bus'), 16) and \
886 int(dev['slot'], 16) == int(sxp.child_value(cur_dev, 'slot'), 16) and \
887 int(dev['func'], 16) == int(sxp.child_value(cur_dev, 'func'), 16):
888 continue
889 new_dev_sxp.append(cur_dev)
891 if pci_state == 'Initialising' and pci_sub_state != 'Booting':
892 for new_dev in sxp.children(dev_sxp, 'dev'):
893 new_dev_sxp.append(new_dev)
895 dev_uuid = sxp.child_value(existing_dev_info, 'uuid')
896 self.info.device_update(dev_uuid, new_dev_sxp)
898 # If there is no device left, remove config.
899 if len(sxp.children(new_dev_sxp, 'dev')) == 0:
900 del self.info['devices'][dev_uuid]
902 xen.xend.XendDomain.instance().managed_config_save(self)
904 return True
    def vscsi_device_configure(self, dev_sxp):
        """Configure an existing vscsi device, either attaching new LUNs
        (xenbus state Initialising) or detaching existing ones (Closing).

        @param dev_sxp: vscsi device configuration
        @type dev_sxp: SXP object (parsed config)
        @return: True if the device was successfully configured
        @rtype: boolean
        @raise XendError: on duplicate/missing devices or mode/backend mismatch
        """
        # True if any dev in dev_info matches one of p_devs (physical) or
        # v_devs (virtual).
        def _is_vscsi_defined(dev_info, p_devs = None, v_devs = None):
            if not dev_info:
                return False
            for dev in sxp.children(dev_info, 'dev'):
                if p_devs is not None:
                    if sxp.child_value(dev, 'p-dev') in p_devs:
                        return True
                if v_devs is not None:
                    if sxp.child_value(dev, 'v-dev') in v_devs:
                        return True
            return False

        # Normalise a backend reference (name or domid) to a domid string
        # when the backend domain exists; otherwise return it unchanged.
        def _vscsi_be(be):
            be_xdi = xen.xend.XendDomain.instance().domain_lookup_nr(be)
            if be_xdi is not None:
                be_domid = be_xdi.getDomid()
                if be_domid is not None:
                    return str(be_domid)
            return str(be)

        dev_class = sxp.name(dev_sxp)
        if dev_class != 'vscsi':
            return False

        dev_config = self.info.vscsi_convert_sxp_to_dict(dev_sxp)
        devs = dev_config['devs']
        v_devs = [d['v-dev'] for d in devs]
        # All devs in one request share the same state and host adapter devid.
        state = devs[0]['state']
        req_devid = int(devs[0]['devid'])
        cur_dev_sxp = self._getDeviceInfo_vscsi(req_devid)

        if state == xenbusState['Initialising']:
            # new create
            # If request devid does not exist, create and exit.
            p_devs = [d['p-dev'] for d in devs]
            for dev_type, dev_info in self.info.all_devices_sxpr():
                if dev_type != 'vscsi':
                    continue
                if _is_vscsi_defined(dev_info, p_devs = p_devs):
                    raise XendError('The physical device "%s" is already defined' % \
                                    p_devs[0])
            if cur_dev_sxp is None:
                self.device_create(dev_sxp)
                return True

            if _is_vscsi_defined(cur_dev_sxp, v_devs = v_devs):
                raise XendError('The virtual device "%s" is already defined' % \
                                v_devs[0])

            # New LUNs must use the same feature-host mode as the existing
            # adapter.
            if int(dev_config['feature-host']) != \
               int(sxp.child_value(cur_dev_sxp, 'feature-host')):
                raise XendError('The physical device "%s" cannot define '
                                'because mode is different' % devs[0]['p-dev'])

            # And the same backend domain (defaulting to dom0).
            new_be = dev_config.get('backend', None)
            if new_be is not None:
                cur_be = sxp.child_value(cur_dev_sxp, 'backend', None)
                if cur_be is None:
                    cur_be = xen.xend.XendDomain.DOM0_ID
                new_be_dom = _vscsi_be(new_be)
                cur_be_dom = _vscsi_be(cur_be)
                if new_be_dom != cur_be_dom:
                    raise XendError('The physical device "%s" cannot define '
                                    'because backend is different' % devs[0]['p-dev'])

        elif state == xenbusState['Closing']:
            if not _is_vscsi_defined(cur_dev_sxp, v_devs = v_devs):
                raise XendError("Cannot detach vscsi device does not exist")

        if self.domid is not None:
            # Running domain:
            # use DevController.reconfigureDevice to change device config
            dev_control = self.getDeviceController(dev_class)
            dev_uuid = dev_control.reconfigureDevice(req_devid, dev_config)
            dev_control.waitForDevice_reconfigure(req_devid)
            num_devs = dev_control.cleanupDevice(req_devid)

            # update XendConfig with new device info
            if dev_uuid:
                new_dev_sxp = dev_control.configuration(req_devid)
                self.info.device_update(dev_uuid, new_dev_sxp)

            # If there is no device left, destroy vscsi and remove config.
            if num_devs == 0:
                self.destroyDevice('vscsi', req_devid)
                del self.info['devices'][dev_uuid]

        else:
            # Inactive domain: rebuild the adapter's sxpr by hand, keeping
            # the mode, the backend (if any), and the surviving LUNs.
            new_dev_sxp = ['vscsi']
            cur_mode = sxp.children(cur_dev_sxp, 'feature-host')[0]
            new_dev_sxp.append(cur_mode)
            try:
                cur_be = sxp.children(cur_dev_sxp, 'backend')[0]
                new_dev_sxp.append(cur_be)
            except IndexError:
                pass

            for cur_dev in sxp.children(cur_dev_sxp, 'dev'):
                if state == xenbusState['Closing']:
                    # In feature-host mode the whole adapter is detached.
                    if int(cur_mode[1]) == 1:
                        continue
                    if sxp.child_value(cur_dev, 'v-dev') in v_devs:
                        continue
                new_dev_sxp.append(cur_dev)

            if state == xenbusState['Initialising']:
                for new_dev in sxp.children(dev_sxp, 'dev'):
                    new_dev_sxp.append(new_dev)

            dev_uuid = sxp.child_value(cur_dev_sxp, 'uuid')
            self.info.device_update(dev_uuid, new_dev_sxp)

            # If there is only 'vscsi' in new_dev_sxp, remove the config.
            if len(sxp.children(new_dev_sxp, 'dev')) == 0:
                del self.info['devices'][dev_uuid]

        xen.xend.XendDomain.instance().managed_config_save(self)

        return True
    def device_configure(self, dev_sxp, devid = None):
        """Configure an existing device.

        PCI and vscsi devices are delegated to their specialised handlers;
        everything else is treated as a block device.

        @param dev_config: device configuration
        @type dev_config: SXP object (parsed config)
        @param devid: device id
        @type devid: int
        @return: Returns True if successfully updated device
        @rtype: boolean
        @raise VmError: if the device is missing or may not be reconfigured
        """

        # convert device sxp to a dict
        dev_class = sxp.name(dev_sxp)
        dev_config = {}

        if dev_class == 'pci':
            return self.pci_device_configure(dev_sxp)

        if dev_class == 'vscsi':
            return self.vscsi_device_configure(dev_sxp)

        for opt_val in dev_sxp[1:]:
            try:
                dev_config[opt_val[0]] = opt_val[1]
            except IndexError:
                pass

        dev_control = self.getDeviceController(dev_class)
        if devid is None:
            # Derive the device number from the virtual device name,
            # stripping any "ioemu:" prefix and ":disk"/":cdrom" suffix.
            dev = dev_config.get('dev', '')
            if not dev:
                raise VmError('Block device must have virtual details specified')
            if 'ioemu:' in dev:
                (_, dev) = dev.split(':', 1)
            try:
                (dev, _) = dev.split(':', 1)  # Remove ":disk" or ":cdrom"
            except ValueError:
                pass
            devid = dev_control.convertToDeviceNumber(dev)
        dev_info = self._getDeviceInfo_vbd(devid)
        if dev_info is None:
            raise VmError("Device %s not connected" % devid)
        dev_uuid = sxp.child_value(dev_info, 'uuid')

        if self.domid is not None:
            # use DevController.reconfigureDevice to change device config
            dev_control.reconfigureDevice(devid, dev_config)
        else:
            # For an inactive domain, only a read-only cdrom-to-cdrom
            # media change is permitted.
            (_, new_b, new_f) = dev_control.getDeviceDetails(dev_config)
            if (new_f['device-type'] == 'cdrom' and
                sxp.child_value(dev_info, 'dev').endswith(':cdrom') and
                new_b['mode'] == 'r' and
                sxp.child_value(dev_info, 'mode') == 'r'):
                pass
            else:
                raise VmError('Refusing to reconfigure device %s:%d to %s' %
                              (dev_class, devid, dev_config))

        # update XendConfig with new device info
        self.info.device_update(dev_uuid, dev_sxp)
        xen.xend.XendDomain.instance().managed_config_save(self)

        return True
1093 def waitForDevices(self):
1094 """Wait for this domain's configured devices to connect.
1096 @raise VmError: if any device fails to initialise.
1097 """
1098 for devclass in XendDevices.valid_devices():
1099 self.getDeviceController(devclass).waitForDevices()
    def hvm_destroyPCIDevice(self, pci_dev):
        """Signal the device model to hot-unplug the given PCI device from
        an HVM guest, after verifying its co-assignment constraints.

        @param pci_dev: dict describing the PCI device (domain/bus/slot/func)
        @return: 0 on success
        @raise VmError: if the guest is not HVM, the device cannot be
            located, or a co-assigned device is still assigned to the domain.
        """
        log.debug("hvm_destroyPCIDevice: %s", pci_dev)

        if not self.info.is_hvm():
            raise VmError("hvm_destroyPCIDevice called on non-HVM guest")

        # Check the co-assignment.
        # To pci-detach a device D from domN, we should ensure: for each DD in the
        # list of D's co-assignment devices, DD is not assigned (to domN).
        from xen.xend.server.pciif import PciDevice
        try:
            pci_device = PciDevice(pci_dev)
        except Exception, e:
            raise VmError("pci: failed to locate device and "+
                    "parse it's resources - "+str(e))
        coassignment_list = pci_device.find_coassigned_devices()
        # The device itself is in its own co-assignment list; drop it.
        coassignment_list.remove(pci_device.name)
        assigned_pci_device_str_list = self._get_assigned_pci_devices()
        for pci_str in coassignment_list:
            if pci_str in assigned_pci_device_str_list:
                raise VmError(("pci: failed to pci-detach %s from domain %s" + \
                    " because one of its co-assignment device %s is still " + \
                    " assigned to the domain." \
                    )% (pci_device.name, self.info['name_label'], pci_str))

        bdf_str = pci_dict_to_bdf_str(pci_dev)
        log.info("hvm_destroyPCIDevice:%s:%s!", pci_dev, bdf_str)
        # Only a running domain has a device model to signal.
        if self.domid is not None:
            self.image.signalDeviceModel('pci-rem', 'pci-removed', bdf_str)

        return 0
    def destroyDevice(self, deviceClass, devid, force = False, rm_cfg = False):
        """Destroy a device, optionally removing its stored configuration.

        @param deviceClass: device class name (e.g. 'vif', 'vbd', 'tap')
        @param devid: device id or virtual device name
        @param force: if True, skip waiting for backend teardown
        @param rm_cfg: if True, also remove the device from self.info and
            save the managed configuration
        @return: result of the controller's destroyDevice (None for an
            inactive domain)
        @raise XendError: if rm_cfg is set and the device is not defined
        """
        log.debug("XendDomainInfo.destroyDevice: deviceClass = %s, device = %s",
                  deviceClass, devid)

        if rm_cfg:
            # Convert devid to device number.  A device number is
            # needed to remove its configuration.
            dev = self.getDeviceController(deviceClass).convertToDeviceNumber(devid)

            # Save current sxprs.  A device number and a backend
            # path are needed to remove its configuration but sxprs
            # do not have those after calling destroyDevice.
            sxprs = self.getDeviceSxprs(deviceClass)

        rc = None
        if self.domid is not None:

            #new blktap implementation may need a sysfs write after everything is torn down.
            dev = self.getDeviceController(deviceClass).convertToDeviceNumber(devid)
            path = self.getDeviceController(deviceClass).readBackend(dev, 'params')
            if path and path.startswith('/dev/xen/blktap-2'):
                frontpath = self.getDeviceController(deviceClass).frontendPath(dev)
                backpath = xstransact.Read(frontpath, "backend")
                # Cleanup runs in the background so we do not block on it.
                thread.start_new_thread(self.getDeviceController(deviceClass).finishDeviceCleanup, (backpath, path))

            rc = self.getDeviceController(deviceClass).destroyDevice(devid, force)
            if not force and rm_cfg:
                # The backend path, other than the device itself,
                # has to be passed because its accompanied frontend
                # path may be void until its removal is actually
                # issued.  It is probable because destroyDevice is
                # issued first.
                # NOTE(review): 'backend' is only bound when dev is found in
                # sxprs — if it is not, the call below raises NameError;
                # confirm callers guarantee the device is listed.
                for dev_num, dev_info in sxprs:
                    dev_num = int(dev_num)
                    if dev_num == dev:
                        for x in dev_info:
                            if x[0] == 'backend':
                                backend = x[1]
                                break
                        break
                self._waitForDevice_destroy(deviceClass, devid, backend)

        if rm_cfg:
            if deviceClass == 'vif':
                if self.domid is not None:
                    # Look the vif up again by MAC, since the sxpr from a
                    # live domain lacks the stored-config uuid.
                    # NOTE(review): as above, 'mac' assumes the device
                    # appears in sxprs.
                    for dev_num, dev_info in sxprs:
                        dev_num = int(dev_num)
                        if dev_num == dev:
                            for x in dev_info:
                                if x[0] == 'mac':
                                    mac = x[1]
                                    break
                            break
                    dev_info = self._getDeviceInfo_vif(mac)
                else:
                    _, dev_info = sxprs[dev]
            else:  # 'vbd' or 'tap'
                dev_info = self._getDeviceInfo_vbd(dev)
                # To remove the UUID of the device from refs,
                # deviceClass must be always 'vbd'.
                deviceClass = 'vbd'
            if dev_info is None:
                raise XendError("Device %s is not defined" % devid)

            dev_uuid = sxp.child_value(dev_info, 'uuid')
            del self.info['devices'][dev_uuid]
            self.info['%s_refs' % deviceClass].remove(dev_uuid)
            xen.xend.XendDomain.instance().managed_config_save(self)

        return rc
    def getDeviceSxprs(self, deviceClass):
        """Return the sxprs of all devices of the given class.

        For 'pci' the stored config is always used.  For a live domain the
        controller's sxprs are returned; otherwise the list is rebuilt from
        self.info as [device-number, dev_info] pairs.

        @param deviceClass: device class name
        @return: list of device sxprs (for 'pci': the raw devs list)
        """
        if deviceClass == 'pci':
            dev_info = self._getDeviceInfo_pci('0')#from self.info['devices']
            if dev_info is None:
                return []
            dev_uuid = sxp.child_value(dev_info, 'uuid')
            pci_devs = self.info['devices'][dev_uuid][1]['devs']
            return pci_devs
        if self._stateGet() in (DOM_STATE_RUNNING, DOM_STATE_PAUSED, DOM_STATE_CRASHED):
            return self.getDeviceController(deviceClass).sxprs()
        else:
            sxprs = []
            dev_num = 0
            for dev_type, dev_info in self.info.all_devices_sxpr():
                # 'vbd' requests also match 'tap' devices.
                if (deviceClass == 'vbd' and dev_type not in ['vbd', 'tap']) or \
                   (deviceClass != 'vbd' and dev_type != deviceClass):
                    continue

                if deviceClass == 'vscsi':
                    # Rebuild the adapter entry with a null frontstate on
                    # each LUN; the devid of the LUNs becomes the number.
                    vscsi_devs = ['devs', []]
                    for vscsi_dev in sxp.children(dev_info, 'dev'):
                        vscsi_dev.append(['frontstate', None])
                        vscsi_devs[1].append(vscsi_dev)
                        dev_num = int(sxp.child_value(vscsi_dev, 'devid'))
                    vscsi_mode = sxp.children(dev_info, 'feature-host')[0]
                    sxprs.append([dev_num, [vscsi_devs, vscsi_mode]])
                elif deviceClass == 'vbd':
                    # Number block devices from the virtual device name.
                    dev = sxp.child_value(dev_info, 'dev')
                    if 'ioemu:' in dev:
                        (_, dev) = dev.split(':', 1)
                    try:
                        (dev_name, _) = dev.split(':', 1) # Remove ":disk" or ":cdrom"
                    except ValueError:
                        dev_name = dev
                    dev_num = self.getDeviceController('vbd').convertToDeviceNumber(dev_name)
                    sxprs.append([dev_num, dev_info])
                else:
                    # Other classes are simply numbered in config order.
                    sxprs.append([dev_num, dev_info])
                    dev_num += 1
            return sxprs
1247 def getBlockDeviceClass(self, devid):
1248 # To get a device number from the devid,
1249 # we temporarily use the device controller of VBD.
1250 dev = self.getDeviceController('vbd').convertToDeviceNumber(devid)
1251 dev_info = self._getDeviceInfo_vbd(dev)
1252 if dev_info:
1253 return dev_info[0]
1255 def _getDeviceInfo_vif(self, mac):
1256 for dev_type, dev_info in self.info.all_devices_sxpr():
1257 if dev_type != 'vif':
1258 continue
1259 if mac == sxp.child_value(dev_info, 'mac'):
1260 return dev_info
1262 def _getDeviceInfo_vbd(self, devid):
1263 for dev_type, dev_info in self.info.all_devices_sxpr():
1264 if dev_type != 'vbd' and dev_type != 'tap':
1265 continue
1266 dev = sxp.child_value(dev_info, 'dev')
1267 dev = dev.split(':')[0]
1268 dev = self.getDeviceController(dev_type).convertToDeviceNumber(dev)
1269 if devid == dev:
1270 return dev_info
1272 def _getDeviceInfo_pci(self, devid):
1273 for dev_type, dev_info in self.info.all_devices_sxpr():
1274 if dev_type != 'pci':
1275 continue
1276 return dev_info
1277 return None
1279 def _getDeviceInfo_vscsi(self, devid):
1280 devid = int(devid)
1281 for dev_type, dev_info in self.info.all_devices_sxpr():
1282 if dev_type != 'vscsi':
1283 continue
1284 devs = sxp.children(dev_info, 'dev')
1285 if devid == int(sxp.child_value(devs[0], 'devid')):
1286 return dev_info
1287 return None
1289 def _get_assigned_pci_devices(self, devid = 0):
1290 if self.domid is not None:
1291 return get_assigned_pci_devices(self.domid)
1293 dev_info = self._getDeviceInfo_pci(devid)
1294 if dev_info is None:
1295 return []
1296 dev_uuid = sxp.child_value(dev_info, 'uuid')
1297 pci_conf = self.info['devices'][dev_uuid][1]
1298 return map(pci_dict_to_bdf_str, pci_conf['devs'])
    def setMemoryTarget(self, target):
        """Set the memory target of this domain.
        @param target: In MiB.
        @raise XendError: if shrinking dom0 below its configured minimum.
        """
        log.debug("Setting memory target of domain %s (%s) to %d MiB.",
                  self.info['name_label'], str(self.domid), target)

        MiB = 1024 * 1024
        memory_cur = self.get_memory_dynamic_max() / MiB

        # Never let dom0 be ballooned below the configured dom0 minimum.
        if self.domid == 0:
            dom0_min_mem = xoptions.get_dom0_min_mem()
            if target < memory_cur and dom0_min_mem > target:
                raise XendError("memory_dynamic_max too small")

        self._safe_set_memory('memory_dynamic_min', target * MiB)
        self._safe_set_memory('memory_dynamic_max', target * MiB)

        if self.domid >= 0:
            # Growing the domain may require freeing host memory first.
            if target > memory_cur:
                balloon.free((target - memory_cur) * 1024, self)
            self.storeVm("memory", target)
            # xenstore target is in KiB.
            self.storeDom("memory/target", target << 10)
            xc.domain_set_target_mem(self.domid,
                                     (target * 1024))
        xen.xend.XendDomain.instance().managed_config_save(self)
    def setMemoryMaximum(self, limit):
        """Set the maximum memory limit of this domain
        @param limit: In MiB.
        @raise XendError: if the hypervisor rejects the new limit (the old
            static max is restored first).
        """
        log.debug("Setting memory maximum of domain %s (%s) to %d MiB.",
                  self.info['name_label'], str(self.domid), limit)

        maxmem_cur = self.get_memory_static_max()
        MiB = 1024 * 1024
        self._safe_set_memory('memory_static_max', limit * MiB)

        if self.domid >= 0:
            # Hypervisor call takes KiB.
            maxmem = int(limit) * 1024
            try:
                # NOTE(review): the early return here means the managed
                # config below is only saved for inactive domains — confirm
                # this is intentional.
                return xc.domain_setmaxmem(self.domid, maxmem)
            except Exception, ex:
                # Roll back the in-memory value on hypervisor failure.
                self._safe_set_memory('memory_static_max', maxmem_cur)
                raise XendError(str(ex))
        xen.xend.XendDomain.instance().managed_config_save(self)
    def getVCPUInfo(self):
        """Return an sxpr describing every VCPU of this domain.

        For a running domain the values come from the hypervisor; for an
        inactive one placeholder values are used (offline, cpu -1, and the
        stored affinity or all 64 CPUs).

        @raise XendError: if the hypervisor query fails.
        """
        try:
            # We include the domain name and ID, to help xm.
            sxpr = ['domain',
                    ['domid', self.domid],
                    ['name', self.info['name_label']],
                    ['vcpu_count', self.info['VCPUs_max']]]

            for i in range(0, self.info['VCPUs_max']):
                if self.domid is not None:
                    info = xc.vcpu_getinfo(self.domid, i)

                    sxpr.append(['vcpu',
                                 ['number', i],
                                 ['online', info['online']],
                                 ['blocked', info['blocked']],
                                 ['running', info['running']],
                                 # nanoseconds -> seconds
                                 ['cpu_time', info['cpu_time'] / 1e9],
                                 ['cpu', info['cpu']],
                                 ['cpumap', info['cpumap']]])
                else:
                    sxpr.append(['vcpu',
                                 ['number', i],
                                 ['online', 0],
                                 ['blocked', 0],
                                 ['running', 0],
                                 ['cpu_time', 0.0],
                                 ['cpu', -1],
                                 ['cpumap', self.info['cpus'][i] and \
                                            self.info['cpus'][i] or range(64)]])

            return sxpr

        except RuntimeError, exn:
            raise XendError(str(exn))
    def getDomInfo(self):
        """Return the hypervisor's domain-info record for this domain
        (None when the domain no longer exists)."""
        return dom_get(self.domid)
1389 # internal functions ... TODO: re-categorised
    def _augmentInfo(self, priv):
        """Augment self.info, as given to us through L{recreate}, with
        values taken from the store.  This recovers those values known
        to xend but not to the hypervisor.

        @param priv: True for dom0, whose memory/vcpu fields are taken
            from Xen rather than from the store.
        """
        augment_entries = XendConfig.LEGACY_XENSTORE_VM_PARAMS[:]
        if priv:
            augment_entries.remove('memory')
            augment_entries.remove('maxmem')
            augment_entries.remove('vcpus')
            augment_entries.remove('vcpu_avail')

        vm_config = self._readVMDetails([(k, XendConfig.LEGACY_CFG_TYPES[k])
                                         for k in augment_entries])

        # make returned lists into a dictionary
        vm_config = dict(zip(augment_entries, vm_config))

        for arg in augment_entries:
            val = vm_config[arg]
            if val != None:
                # Legacy keys are mapped onto their XenAPI names where one
                # exists; memory values go to the static_* fields.
                if arg in XendConfig.LEGACY_CFG_TO_XENAPI_CFG:
                    xapiarg = XendConfig.LEGACY_CFG_TO_XENAPI_CFG[arg]
                    self.info[xapiarg] = val
                elif arg == "memory":
                    self.info["static_memory_min"] = val
                elif arg == "maxmem":
                    self.info["static_memory_max"] = val
                else:
                    self.info[arg] = val

        # read CPU Affinity
        self.info['cpus'] = []
        vcpus_info = self.getVCPUInfo()
        for vcpu_info in sxp.children(vcpus_info, 'vcpu'):
            self.info['cpus'].append(sxp.child_value(vcpu_info, 'cpumap'))

        # For dom0, we ignore any stored value for the vcpus fields, and
        # read the current value from Xen instead.  This allows boot-time
        # settings to take precedence over any entries in the store.
        if priv:
            xeninfo = dom_get(self.domid)
            self.info['VCPUs_max'] = xeninfo['online_vcpus']
            self.info['vcpu_avail'] = (1 << xeninfo['online_vcpus']) - 1

        # read image value
        image_sxp = self._readVm('image')
        if image_sxp:
            self.info.update_with_image_sxp(sxp.from_string(image_sxp))

        # read devices
        devices = []
        for devclass in XendDevices.valid_devices():
            devconfig = self.getDeviceController(devclass).configurations()
            if devconfig:
                devices.extend(devconfig)

        # Only populate devices from the store when none are known yet.
        if not self.info['devices'] and devices is not None:
            for device in devices:
                self.info.device_add(device[0], cfg_sxp = device)

        self._update_consoles()
    def _update_consoles(self, transaction = None):
        """Refresh the VT100 and VNC console entries in self.info from the
        ports recorded in xenstore.

        @param transaction: optional xenstore transaction to read through;
            reads go directly to the store when None.
        """
        # Nothing to do for an unbuilt domain or for dom0.
        if self.domid == None or self.domid == 0:
            return

        # Update VT100 port if it exists
        if transaction is None:
            self.console_port = self.readDom('console/port')
        else:
            self.console_port = self.readDomTxn(transaction, 'console/port')
        if self.console_port is not None:
            serial_consoles = self.info.console_get_all('vt100')
            if not serial_consoles:
                # No serial console yet: create one at the discovered port.
                cfg = self.info.console_add('vt100', self.console_port)
                self._createDevice('console', cfg)
            else:
                console_uuid = serial_consoles[0].get('uuid')
                self.info.console_update(console_uuid, 'location',
                                         self.console_port)

        # Update VNC port if it exists and write to xenstore
        if transaction is None:
            vnc_port = self.readDom('console/vnc-port')
        else:
            vnc_port = self.readDomTxn(transaction, 'console/vnc-port')
        if vnc_port is not None:
            for dev_uuid, (dev_type, dev_info) in self.info['devices'].items():
                if dev_type == 'vfb':
                    old_location = dev_info.get('location')
                    listen_host = dev_info.get('vnclisten', \
                                    XendOptions.instance().get_vnclisten_address())
                    new_location = '%s:%s' % (listen_host, str(vnc_port))
                    # Only the first vfb is considered; stop either way.
                    if old_location == new_location:
                        break

                    dev_info['location'] = new_location
                    self.info.device_update(dev_uuid, cfg_xenapi = dev_info)
                    vfb_ctrl = self.getDeviceController('vfb')
                    vfb_ctrl.reconfigureDevice(0, dev_info)
                    break
    #
    # Function to update xenstore /vm/*
    # Thin wrappers around xstransact rooted at this VM's /vm path.
    #

    def _readVm(self, *args):
        """Read value(s) under this VM's /vm subtree."""
        return xstransact.Read(self.vmpath, *args)

    def _writeVm(self, *args):
        """Write key/value pair(s) under this VM's /vm subtree."""
        return xstransact.Write(self.vmpath, *args)

    def _removeVm(self, *args):
        """Remove entries under this VM's /vm subtree."""
        return xstransact.Remove(self.vmpath, *args)

    def _gatherVm(self, *args):
        """Gather typed values from this VM's /vm subtree."""
        return xstransact.Gather(self.vmpath, *args)

    def _listRecursiveVm(self, *args):
        """Recursively list entries under this VM's /vm subtree."""
        return xstransact.ListRecursive(self.vmpath, *args)

    def storeVm(self, *args):
        """Store key/value pair(s) under this VM's /vm subtree."""
        return xstransact.Store(self.vmpath, *args)

    def permissionsVm(self, *args):
        """Set permissions on node(s) under this VM's /vm subtree."""
        return xstransact.SetPermissions(self.vmpath, *args)
    #
    # Function to update xenstore /dom/*
    # Thin wrappers around xstransact rooted at this domain's path.
    #

    def readDom(self, *args):
        """Read value(s) under this domain's xenstore path."""
        return xstransact.Read(self.dompath, *args)

    def gatherDom(self, *args):
        """Gather typed values from this domain's xenstore path."""
        return xstransact.Gather(self.dompath, *args)

    def _writeDom(self, *args):
        """Write key/value pair(s) under this domain's xenstore path."""
        return xstransact.Write(self.dompath, *args)

    def _removeDom(self, *args):
        """Remove entries under this domain's xenstore path."""
        return xstransact.Remove(self.dompath, *args)

    def storeDom(self, *args):
        """Store key/value pair(s) under this domain's xenstore path."""
        return xstransact.Store(self.dompath, *args)
1541 def readDomTxn(self, transaction, *args):
1542 paths = map(lambda x: self.dompath + "/" + x, args)
1543 return transaction.read(*paths)
1545 def gatherDomTxn(self, transaction, *args):
1546 paths = map(lambda x: self.dompath + "/" + x, args)
1547 return transaction.gather(*paths)
1549 def _writeDomTxn(self, transaction, *args):
1550 paths = map(lambda x: self.dompath + "/" + x, args)
1551 return transaction.write(*paths)
1553 def _removeDomTxn(self, transaction, *args):
1554 paths = map(lambda x: self.dompath + "/" + x, args)
1555 return transaction.remove(*paths)
1557 def storeDomTxn(self, transaction, *args):
1558 paths = map(lambda x: self.dompath + "/" + x, args)
1559 return transaction.store(*paths)
    def _recreateDom(self):
        """Rebuild this domain's xenstore subtree inside one transaction."""
        complete(self.dompath, lambda t: self._recreateDomFunc(t))

    def _recreateDomFunc(self, t):
        """Transaction body for L{_recreateDom}: wipe and repopulate the
        domain directory, granting the domain access to its own nodes."""
        t.remove()
        t.mkdir()
        t.set_permissions({'dom' : self.domid, 'read' : True})
        t.write('vm', self.vmpath)
        # NB. Solaris guests use guest/ and hvmpv/ xenstore directories
        for i in [ 'device', 'control', 'error', 'memory', 'guest', 'hvmpv' ]:
            t.mkdir(i)
            t.set_permissions(i, {'dom' : self.domid})
    def _storeDomDetails(self):
        """Write this domain's details (identity, console, store, elfnotes,
        VCPU availability) into its xenstore directory."""
        to_store = {
            'domid':          str(self.domid),
            'vm':             self.vmpath,
            'name':           self.info['name_label'],
            'console/limit':  str(xoptions.get_console_limit() * 1024),
            'memory/target':  str(self.info['memory_dynamic_max'] / 1024),
            }

        # Add n -> v to to_store, skipping None and writing bools as "1"/"0".
        def f(n, v):
            if v is not None:
                if type(v) == bool:
                    to_store[n] = v and "1" or "0"
                else:
                    to_store[n] = str(v)

        # Figure out if we need to tell xenconsoled to ignore this guest's
        # console - device model will handle console if it is running
        constype = "ioemu"
        if 'device_model' not in self.info['platform']:
            constype = "xenconsoled"

        f('console/port',     self.console_port)
        f('console/ring-ref', self.console_mfn)
        f('console/type',     constype)
        f('store/port',       self.store_port)
        f('store/ring-ref',   self.store_mfn)

        if arch.type == "x86":
            f('control/platform-feature-multiprocessor-suspend', True)

        # elfnotes
        for n, v in self.info.get_notes().iteritems():
            n = n.lower().replace('_', '-')
            if n == 'features':
                # Each '|'-separated feature becomes its own node; a '!'
                # prefix marks the feature as disabled.
                for v in v.split('|'):
                    v = v.replace('_', '-')
                    if v.startswith('!'):
                        f('image/%s/%s' % (n, v[1:]), False)
                    else:
                        f('image/%s/%s' % (n, v), True)
            else:
                f('image/%s' % n, v)

        if self.info.has_key('security_label'):
            f('security_label', self.info['security_label'])

        to_store.update(self._vcpuDomDetails())

        log.debug("Storing domain details: %s", scrub_password(to_store))

        self._writeDom(to_store)
1628 def _vcpuDomDetails(self):
1629 def availability(n):
1630 if self.info['vcpu_avail'] & (1 << n):
1631 return 'online'
1632 else:
1633 return 'offline'
1635 result = {}
1636 for v in range(0, self.info['VCPUs_max']):
1637 result["cpu/%d/availability" % v] = availability(v)
1638 return result
    #
    # xenstore watches
    #

    def _registerWatches(self):
        """Register a watch on this VM's entries in the store, and the
        domain's control/shutdown node, so that when they are changed
        externally, we keep up to date.  This should only be called by {@link
        #create}, {@link #recreate}, or {@link #restore}, once the domain's
        details have been written, but before the new instance is returned."""
        self.vmWatch = xswatch(self.vmpath, self._storeChanged)
        self.shutdownWatch = xswatch(self.dompath + '/control/shutdown',
                                     self._handleShutdownWatch)
    def _storeChanged(self, _):
        """xswatch callback for the VM path: fold externally-changed
        xenstore values back into self.info, and rewrite the domain
        section of the store if anything changed.

        @return: 1 (keep the watch registered)
        """
        log.trace("XendDomainInfo.storeChanged");

        changed = False

        # Check whether values in the configuration have
        # changed in Xenstore.

        cfg_vm = ['name', 'on_poweroff', 'on_reboot', 'on_crash',
                  'rtc/timeoffset']

        vm_details = self._readVMDetails([(k,XendConfig.LEGACY_CFG_TYPES[k])
                                           for k in cfg_vm])

        # convert two lists into a python dictionary
        vm_details = dict(zip(cfg_vm, vm_details))

        for arg, val in vm_details.items():
            if arg in XendConfig.LEGACY_CFG_TO_XENAPI_CFG:
                xapiarg = XendConfig.LEGACY_CFG_TO_XENAPI_CFG[arg]
                if val != None and val != self.info[xapiarg]:
                    self.info[xapiarg] = val
                    changed = True
            elif arg == "memory":
                if val != None and val != self.info["static_memory_min"]:
                    self.info["static_memory_min"] = val
                    changed = True
            elif arg == "maxmem":
                if val != None and val != self.info["static_memory_max"]:
                    self.info["static_memory_max"] = val
                    changed = True

        # Check whether image definition has been updated
        image_sxp = self._readVm('image')
        if image_sxp and image_sxp != sxp.to_string(self.info.image_sxpr()):
            self.info.update_with_image_sxp(sxp.from_string(image_sxp))
            changed = True

        # Update the rtc_timeoffset to be preserved across reboot.
        # NB. No need to update xenstore domain section.
        # NOTE(review): the .get default only applies when the key is
        # absent; a stored None value would make int() raise — confirm
        # rtc/timeoffset is always present for these guests.
        val = int(vm_details.get("rtc/timeoffset", 0))
        self.info["platform"]["rtc_timeoffset"] = val

        if changed:
            # Update the domain section of the store, as this contains some
            # parameters derived from the VM configuration.
            self.refresh_shutdown_lock.acquire()
            try:
                state = self._stateGet()
                if state not in (DOM_STATE_SHUTDOWN, DOM_STATE_HALTED,):
                    self._storeDomDetails()
            finally:
                self.refresh_shutdown_lock.release()

        return 1
    def _handleShutdownWatch(self, _):
        """xswatch callback for control/shutdown: when a non-suspend
        shutdown request appears, record its start time and schedule a
        refreshShutdown check at the shutdown timeout.

        @return: True (keep the watch registered)
        """
        log.debug('XendDomainInfo.handleShutdownWatch')

        reason = self.readDom('control/shutdown')

        if reason and reason != 'suspend':
            sst = self.readDom('xend/shutdown_start_time')
            now = time.time()
            if sst:
                # An earlier request already started the clock; time out
                # relative to it.
                self.shutdownStartTime = float(sst)
                timeout = float(sst) + SHUTDOWN_TIMEOUT - now
            else:
                self.shutdownStartTime = now
                self.storeDom('xend/shutdown_start_time', now)
                timeout = SHUTDOWN_TIMEOUT

            log.trace(
                "Scheduling refreshShutdown on domain %d in %ds.",
                self.domid, timeout)
            threading.Timer(timeout, self.refreshShutdown).start()

        return True
    #
    # Public Attributes for the VM
    #

    def getDomid(self):
        """Return this domain's id (None if not built)."""
        return self.domid

    def setName(self, name, to_store = True):
        """Rename the domain, validating the name and optionally
        persisting it to the store."""
        self._checkName(name)
        self.info['name_label'] = name
        if to_store:
            self.storeVm("name", name)

    def getName(self):
        """Return the domain's name label."""
        return self.info['name_label']

    def getDomainPath(self):
        """Return the domain's xenstore path."""
        return self.dompath

    def getShutdownReason(self):
        """Return the pending control/shutdown reason, if any."""
        return self.readDom('control/shutdown')

    def getStorePort(self):
        """For use only by image.py and XendCheckpoint.py."""
        return self.store_port

    def getConsolePort(self):
        """For use only by image.py and XendCheckpoint.py"""
        return self.console_port

    def getFeatures(self):
        """For use only by image.py."""
        return self.info['features']

    def getVCpuCount(self):
        """Return the maximum number of VCPUs."""
        return self.info['VCPUs_max']
    def setVCpuCount(self, vcpus):
        """Set the number of VCPUs.

        For a running domain this changes the availability bitmask (up to
        VCPUs_max); for an inactive one it resizes the stored affinity
        list and VCPUs_max itself.

        @raise XendError: on non-positive counts, or when exceeding
            VCPUs_max on a running domain.
        """
        # NB: validates the enclosing 'vcpus' via closure; its own
        # parameter n is unused.
        def vcpus_valid(n):
            if vcpus <= 0:
                raise XendError('Zero or less VCPUs is invalid')
            if self.domid >= 0 and vcpus > self.info['VCPUs_max']:
                raise XendError('Cannot set vcpus greater than max vcpus on running domain')
        vcpus_valid(vcpus)

        self.info['vcpu_avail'] = (1 << vcpus) - 1
        if self.domid >= 0:
            self.storeVm('vcpu_avail', self.info['vcpu_avail'])
            self._writeDom(self._vcpuDomDetails())
            self.info['VCPUs_live'] = vcpus
        else:
            if self.info['VCPUs_max'] > vcpus:
                # decreasing
                del self.info['cpus'][vcpus:]
            elif self.info['VCPUs_max'] < vcpus:
                # increasing
                for c in range(self.info['VCPUs_max'], vcpus):
                    self.info['cpus'].append(list())
            self.info['VCPUs_max'] = vcpus
        xen.xend.XendDomain.instance().managed_config_save(self)
        log.info("Set VCPU count on domain %s to %d", self.info['name_label'],
                 vcpus)
    def getMemoryTarget(self):
        """Get this domain's target memory size, in KB."""
        return self.info['memory_dynamic_max'] / 1024

    def getMemoryMaximum(self):
        """Get this domain's maximum memory size, in KB."""
        # remember, info now stores memory in bytes
        return self.info['memory_static_max'] / 1024

    def getResume(self):
        """Return the resume flag as a string."""
        return str(self._resume)

    def setResume(self, isresume):
        """Set the resume flag."""
        self._resume = isresume

    def getCpus(self):
        """Return the per-VCPU affinity lists."""
        return self.info['cpus']

    def setCpus(self, cpumap):
        """Set the per-VCPU affinity lists."""
        self.info['cpus'] = cpumap

    def getCap(self):
        """Return the scheduler cap."""
        return self.info['vcpus_params']['cap']

    def setCap(self, cpu_cap):
        """Set the scheduler cap."""
        self.info['vcpus_params']['cap'] = cpu_cap

    def getWeight(self):
        """Return the scheduler weight."""
        return self.info['vcpus_params']['weight']

    def setWeight(self, cpu_weight):
        """Set the scheduler weight."""
        self.info['vcpus_params']['weight'] = cpu_weight

    def getRestartCount(self):
        """Return the stored restart count for this VM."""
        return self._readVm('xend/restart_count')
    def refreshShutdown(self, xeninfo = None):
        """ Checks the domain for whether a shutdown is required.

        Called from XendDomainInfo and also image.py for HVM images.

        @param xeninfo: pre-fetched hypervisor domain info; fetched here
            when None.
        """

        # If set at the end of this method, a restart is required, with the
        # given reason.  This restart has to be done out of the scope of
        # refresh_shutdown_lock.
        restart_reason = None

        self.refresh_shutdown_lock.acquire()
        try:
            if xeninfo is None:
                xeninfo = dom_get(self.domid)
                if xeninfo is None:
                    # The domain no longer exists.  This will occur if we have
                    # scheduled a timer to check for shutdown timeouts and the
                    # shutdown succeeded.  It will also occur if someone
                    # destroys a domain beneath us.  We clean up the domain,
                    # just in case, but we can't clean up the VM, because that
                    # VM may have migrated to a different domain on this
                    # machine.
                    self.cleanupDomain()
                    self._stateSet(DOM_STATE_HALTED)
                    return

            if xeninfo['dying']:
                # Dying means that a domain has been destroyed, but has not
                # yet been cleaned up by Xen.  This state could persist
                # indefinitely if, for example, another domain has some of its
                # pages mapped.  We might like to diagnose this problem in the
                # future, but for now all we do is make sure that it's not us
                # holding the pages, by calling cleanupDomain.  We can't
                # clean up the VM, as above.
                self.cleanupDomain()
                self._stateSet(DOM_STATE_SHUTDOWN)
                return

            elif xeninfo['crashed']:
                if self.readDom('xend/shutdown_completed'):
                    # We've seen this shutdown already, but we are preserving
                    # the domain for debugging.  Leave it alone.
                    return

                log.warn('Domain has crashed: name=%s id=%d.',
                         self.info['name_label'], self.domid)
                self._writeVm(LAST_SHUTDOWN_REASON, 'crash')

                restart_reason = 'crash'
                self._stateSet(DOM_STATE_HALTED)

            elif xeninfo['shutdown']:
                self._stateSet(DOM_STATE_SHUTDOWN)
                if self.readDom('xend/shutdown_completed'):
                    # We've seen this shutdown already, but we are preserving
                    # the domain for debugging.  Leave it alone.
                    return

                else:
                    reason = shutdown_reason(xeninfo['shutdown_reason'])

                    log.info('Domain has shutdown: name=%s id=%d reason=%s.',
                             self.info['name_label'], self.domid, reason)
                    self._writeVm(LAST_SHUTDOWN_REASON, reason)

                    self._clearRestart()

                    if reason == 'suspend':
                        self._stateSet(DOM_STATE_SUSPENDED)
                        # Don't destroy the domain.  XendCheckpoint will do
                        # this once it has finished.  However, stop watching
                        # the VM path now, otherwise we will end up with one
                        # watch for the old domain, and one for the new.
                        self._unwatchVm()
                    elif reason in ('poweroff', 'reboot'):
                        restart_reason = reason
                    else:
                        self.destroy()

            elif self.dompath is None:
                # We have yet to manage to call introduceDomain on this
                # domain.  This can happen if a restore is in progress, or has
                # failed.  Ignore this domain.
                pass
            else:
                # Domain is alive.  If we are shutting it down, log a message
                # if it seems unresponsive.
                if xeninfo['paused']:
                    self._stateSet(DOM_STATE_PAUSED)
                else:
                    self._stateSet(DOM_STATE_RUNNING)

                if self.shutdownStartTime:
                    timeout = (SHUTDOWN_TIMEOUT - time.time() +
                               self.shutdownStartTime)
                    if (timeout < 0 and not self.readDom('xend/unresponsive')):
                        log.info(
                            "Domain shutdown timeout expired: name=%s id=%s",
                            self.info['name_label'], self.domid)
                        self.storeDom('xend/unresponsive', 'True')
        finally:
            self.refresh_shutdown_lock.release()

        # Restart outside the lock, and only if no restart is already
        # under way.
        if restart_reason and not self.restart_in_progress:
            self.restart_in_progress = True
            threading.Thread(target = self._maybeRestart,
                             args = (restart_reason,)).start()
1945 # Restart functions - handling whether we come back up on shutdown.
def _clearRestart(self):
    """Remove the recorded shutdown start time ('xend/shutdown_start_time')
    from the domain's xenstore path."""
    self._removeDom("xend/shutdown_start_time")
def _maybeDumpCore(self, reason):
    """Dump core for a crashed domain, if so configured.

    A dump is taken when the global enable-dump option is set, or when
    the domain's on_crash action requests a coredump.  XendError from
    dumpCore() has already been logged, so it is swallowed here.
    """
    if reason != 'crash':
        return
    wants_dump = (xoptions.get_enable_dump() or
                  self.get_on_crash() in ['coredump_and_destroy',
                                          'coredump_and_restart'])
    if wants_dump:
        try:
            self.dumpCore()
        except XendError:
            # Already logged -- nothing more we can do in this context.
            pass
def _maybeRestart(self, reason):
    """Take the configured after-shutdown action for the given reason."""
    # Before taking the configured action, dump core if configured to do so.
    self._maybeDumpCore(reason)

    # Map the shutdown reason onto the configuration key holding the
    # requested behaviour, then dispatch on that behaviour.
    reason_to_conf = {
        'poweroff': 'actions_after_shutdown',
        'reboot':   'actions_after_reboot',
        'crash':    'actions_after_crash',
        }
    dispatch = {
        "destroy":          self.destroy,
        "restart":          self._restart,
        "preserve":         self._preserve,
        "rename-restart":   self._renameRestart,
        "coredump-destroy": self.destroy,
        "coredump-restart": self._restart,
        }

    handler = dispatch.get(self.info.get(reason_to_conf.get(reason)))
    if callable(handler):
        handler()
    else:
        # Unknown or missing action: destroy by default.
        self.destroy()
def _renameRestart(self):
    """Restart the domain, renaming and preserving the old instance for
    debugging rather than destroying it (see _restart)."""
    self._restart(True)
def _restart(self, rename = False):
    """Restart the domain after it has exited.

    @param rename True if the old domain is to be renamed and preserved,
    False if it is to be destroyed.
    """
    from xen.xend import XendDomain

    # If a previous restart died part-way through (the marker is still in
    # the VM path), refuse to try again rather than loop forever.
    if self._readVm(RESTART_IN_PROGRESS):
        log.error('Xend failed during restart of domain %s. '
                  'Refusing to restart to avoid loops.',
                  str(self.domid))
        self.destroy()
        return

    old_domid = self.domid
    self._writeVm(RESTART_IN_PROGRESS, 'True')

    # Crash-loop protection: a domain that exits too soon after boot is
    # destroyed instead of restarted.
    elapse = time.time() - self.info['start_time']
    if elapse < MINIMUM_RESTART_TIME:
        log.error('VM %s restarting too fast (Elapsed time: %f seconds). '
                  'Refusing to restart to avoid loops.',
                  self.info['name_label'], elapse)
        self.destroy()
        return

    # Snapshot the xend-private subtree so it can be copied into the
    # replacement domain's VM path below.
    prev_vm_xend = self._listRecursiveVm('xend')
    new_dom_info = self.info
    try:
        if rename:
            # Keep the old domain around (renamed) for debugging.
            new_dom_info = self._preserveForRestart()
        else:
            self._unwatchVm()
            self.destroy()

        # new_dom's VM will be the same as this domain's VM, except where
        # the rename flag has instructed us to call preserveForRestart.
        # In that case, it is important that we remove the
        # RESTART_IN_PROGRESS node from the new domain, not the old one,
        # once the new one is available.

        new_dom = None
        try:
            new_dom = XendDomain.instance().domain_create_from_dict(
                new_dom_info)
            for x in prev_vm_xend[0][1]:
                new_dom._writeVm('xend/%s' % x[0], x[1])
            new_dom.waitForDevices()
            new_dom.unpause()
            # Track how many times this VM has been restarted.
            rst_cnt = new_dom._readVm('xend/restart_count')
            rst_cnt = int(rst_cnt) + 1
            new_dom._writeVm('xend/restart_count', str(rst_cnt))
            new_dom._removeVm(RESTART_IN_PROGRESS)
        except:
            if new_dom:
                new_dom._removeVm(RESTART_IN_PROGRESS)
                new_dom.destroy()
            else:
                self._removeVm(RESTART_IN_PROGRESS)
            raise
    except:
        log.exception('Failed to restart domain %s.', str(old_domid))
def _preserveForRestart(self):
    """Preserve a domain that has been shut down, by giving it a new UUID,
    cloning the VM details, and giving it a new name.  This allows us to
    keep this domain for debugging, but restart a new one in its place
    preserving the restart semantics (name and UUID preserved).

    @return: copy of self.info carrying the original name_label and uuid,
        suitable for creating the replacement domain.
    """

    new_uuid = uuid.createString()
    new_name = 'Domain-%s' % new_uuid
    log.info("Renaming dead domain %s (%d, %s) to %s (%s).",
             self.info['name_label'], self.domid, self.info['uuid'],
             new_name, new_uuid)
    self._unwatchVm()
    self._releaseDevices()
    # Remove existing vm node in xenstore
    self._removeVm()
    # The clone keeps the original identity; this (dead) domain takes the
    # freshly generated name and UUID instead.
    new_dom_info = self.info.copy()
    new_dom_info['name_label'] = self.info['name_label']
    new_dom_info['uuid'] = self.info['uuid']
    self.info['name_label'] = new_name
    self.info['uuid'] = new_uuid
    self.vmpath = XS_VMROOT + new_uuid
    # Write out new vm node to xenstore
    self._storeVmDetails()
    self._preserve()
    return new_dom_info
def _preserve(self):
    """Keep this (dead) domain in place for debugging: stop watching its
    VM path, mark the shutdown as handled, and move to HALTED."""
    log.info("Preserving dead domain %s (%d).", self.info['name_label'],
             self.domid)
    self._unwatchVm()
    # Read back by the shutdown-refresh path so the shutdown is not
    # processed a second time.
    self.storeDom('xend/shutdown_completed', 'True')
    self._stateSet(DOM_STATE_HALTED)
2091 # Debugging ..
def dumpCore(self, corefile = None):
    """Create a core dump for this domain.

    @param corefile: optional target path; defaults to a timestamped file
        under /var/xen/dump.
    @raise: XendError if core dumping failed.
    """

    if not corefile:
        # Filename: <YYYY-MMDD-HHMM.SS>-<name>.<domid>.core
        this_time = time.strftime("%Y-%m%d-%H%M.%S", time.localtime())
        corefile = "/var/xen/dump/%s-%s.%s.core" % (this_time,
                          self.info['name_label'], self.domid)

    if os.path.isdir(corefile):
        raise XendError("Cannot dump core in a directory: %s" %
                        corefile)

    try:
        try:
            # Flag the in-progress dump in the VM's xenstore path.
            self._writeVm(DUMPCORE_IN_PROGRESS, 'True')
            xc.domain_dumpcore(self.domid, corefile)
        except RuntimeError, ex:
            # Keep any partial dump, but rename it so it is clearly
            # marked incomplete; ignore a failed rename (best effort).
            corefile_incomp = corefile+'-incomplete'
            try:
                os.rename(corefile, corefile_incomp)
            except:
                pass

            log.error("core dump failed: id = %s name = %s: %s",
                      self.domid, self.info['name_label'], str(ex))
            raise XendError("Failed to dump core: %s" % str(ex))
    finally:
        self._removeVm(DUMPCORE_IN_PROGRESS)
2127 # Device creation/deletion functions
def _createDevice(self, deviceClass, devConfig):
    """Create a single device via the controller for its class.

    @return: the new device id (devid), as returned by the controller."""
    return self.getDeviceController(deviceClass).createDevice(devConfig)
def _waitForDevice(self, deviceClass, devid):
    """Wait for the given device, delegating to its class's controller."""
    return self.getDeviceController(deviceClass).waitForDevice(devid)
def _waitForDeviceUUID(self, dev_uuid):
    """Wait for the device identified by UUID, resolving the UUID to a
    (class, config) pair via self.info['devices']."""
    deviceClass, config = self.info['devices'].get(dev_uuid)
    self._waitForDevice(deviceClass, config['devid'])
def _waitForDevice_destroy(self, deviceClass, devid, backpath):
    """Wait for the given device's destruction to complete, delegating to
    its class's controller."""
    return self.getDeviceController(deviceClass).waitForDevice_destroy(
        devid, backpath)
def _reconfigureDevice(self, deviceClass, devid, devconfig):
    """Reconfigure an existing device, delegating to its controller."""
    return self.getDeviceController(deviceClass).reconfigureDevice(
        devid, devconfig)
def _createDevices(self):
    """Create the devices for a vm.

    vscsi devices are collected first and created in sorted devid order
    at the end; all other classes are created in the order given by
    self.info.ordered_device_refs().

    @raise: VmError for invalid devices
    """
    if self.image:
        self.image.prepareEnvironment()

    # Deferred vscsi devices: devid -> dev_uuid, plus the devids seen.
    vscsi_uuidlist = {}
    vscsi_devidlist = []
    ordered_refs = self.info.ordered_device_refs()
    for dev_uuid in ordered_refs:
        devclass, config = self.info['devices'][dev_uuid]
        if devclass in XendDevices.valid_devices() and devclass != 'vscsi':
            log.info("createDevice: %s : %s" % (devclass, scrub_password(config)))
            dev_uuid = config.get('uuid')
            devid = self._createDevice(devclass, config)

            # store devid in XendConfig for caching reasons
            if dev_uuid in self.info['devices']:
                self.info['devices'][dev_uuid][1]['devid'] = devid

        elif devclass == 'vscsi':
            vscsi_config = config.get('devs', [])[0]
            devid = vscsi_config.get('devid', '')
            dev_uuid = config.get('uuid')
            vscsi_uuidlist[devid] = dev_uuid
            vscsi_devidlist.append(devid)

    # It is necessary to sorted it for /dev/sdxx in guest.
    if len(vscsi_uuidlist) > 0:
        vscsi_devidlist.sort()
        for vscsiid in vscsi_devidlist:
            dev_uuid = vscsi_uuidlist[vscsiid]
            devclass, config = self.info['devices'][dev_uuid]
            log.info("createDevice: %s : %s" % (devclass, scrub_password(config)))
            dev_uuid = config.get('uuid')
            devid = self._createDevice(devclass, config)
            # store devid in XendConfig for caching reasons
            if dev_uuid in self.info['devices']:
                self.info['devices'][dev_uuid][1]['devid'] = devid

    if self.image:
        self.image.createDeviceModel()

    # if have pass-through devs, need the virtual pci slots info from qemu
    self.pci_device_configure_boot()
def _releaseDevices(self, suspend = False):
    """Release all domain's devices.  Nothrow guarantee.

    @param suspend: currently unused here; kept for the caller interface.
    """
    if self.image:
        try:
            log.debug("Destroying device model")
            self.image.destroyDeviceModel()
        except Exception, e:
            # Nothrow: log and continue with device teardown.
            log.exception("Device model destroy failed %s" % str(e))
    else:
        log.debug("No device model")

    log.debug("Releasing devices")
    t = xstransact("%s/device" % self.dompath)
    try:
        for devclass in XendDevices.valid_devices():
            for dev in t.list(devclass):
                try:
                    true_devclass = devclass
                    if devclass == 'vbd':
                        # In the case of "vbd", the true device class
                        # may possibly be "tap". Just in case, verify
                        # device class.
                        devid = dev.split('/')[-1]
                        true_devclass = self.getBlockDeviceClass(devid)
                    log.debug("Removing %s", dev);
                    self.destroyDevice(true_devclass, dev, False);
                except:
                    # Log and swallow any exceptions in removal --
                    # there's nothing more we can do.
                    log.exception("Device release failed: %s; %s; %s",
                                  self.info['name_label'],
                                  true_devclass, dev)
    finally:
        # Read-only transaction: always abort.
        t.abort()
def getDeviceController(self, name):
    """Get the device controller for this domain, creating and caching it
    on first use.

    @param name: device class name
    @type name: string
    @rtype: subclass of DevController
    @raise XendError: if the device class is unknown
    """
    controller = self._deviceControllers.get(name)
    if controller is None:
        controller = XendDevices.make_controller(name, self)
        if not controller:
            raise XendError("Unknown device type: %s" % name)
        self._deviceControllers[name] = controller
    return controller
2249 # Migration functions (public)
2252 def testMigrateDevices(self, network, dst):
2253 """ Notify all device about intention of migration
2254 @raise: XendError for a device that cannot be migrated
2255 """
2256 for (n, c) in self.info.all_devices_sxpr():
2257 rc = self.migrateDevice(n, c, network, dst, DEV_MIGRATE_TEST, self.getName())
2258 if rc != 0:
2259 raise XendError("Device of type '%s' refuses migration." % n)
def migrateDevices(self, network, dst, step, domName=''):
    """Notify the devices about migration, rolling back on failure.

    On an exception every device is recovered: the first 'ctr' devices
    (those already notified in this step) at 'step', the remainder at
    'step - 1'.  The original exception is then re-raised.
    """
    ctr = 0
    try:
        for (dev_type, dev_conf) in self.info.all_devices_sxpr():
            self.migrateDevice(dev_type, dev_conf, network, dst,
                               step, domName)
            ctr = ctr + 1
    except:
        # ctr counts the devices successfully notified above; once it
        # reaches zero, recover the remaining devices at the previous step.
        for dev_type, dev_conf in self.info.all_devices_sxpr():
            if ctr == 0:
                step = step - 1
            ctr = ctr - 1
            self._recoverMigrateDevice(dev_type, dev_conf, network,
                                       dst, step, domName)
        raise
def migrateDevice(self, deviceClass, deviceConfig, network, dst,
                  step, domName=''):
    """Run one migration step for a single device via its controller.

    @return: controller status code (non-zero indicates refusal)."""
    return self.getDeviceController(deviceClass).migrate(deviceConfig,
                    network, dst, step, domName)
def _recoverMigrateDevice(self, deviceClass, deviceConfig, network,
                          dst, step, domName=''):
    """Undo a device's migration notification via its controller."""
    return self.getDeviceController(deviceClass).recover_migrate(
        deviceConfig, network, dst, step, domName)
2290 ## private:
def _constructDomain(self):
    """Construct the domain.

    Creates the domid via xc.domain_create, applies HVM parameters and
    vcpu limits, sanity-checks the credit-scheduler cap/weight, verifies
    VT-d assignability of PCI pass-through devices, and registers the
    domain with XendDomain.

    @raise: VmError on error
    """

    log.debug('XendDomainInfo.constructDomain')

    self.shutdownStartTime = None
    self.restart_in_progress = False

    hap = 0
    hvm = self.info.is_hvm()
    if hvm:
        hap = self.info.is_hap()
        info = xc.xeninfo()
        if 'hvm' not in info['xen_caps']:
            raise VmError("HVM guest support is unavailable: is VT/AMD-V "
                          "supported by your CPU and enabled in your "
                          "BIOS?")

    # Hack to pre-reserve some memory for initial domain creation.
    # There is an implicit memory overhead for any domain creation. This
    # overhead is greater for some types of domain than others. For
    # example, an x86 HVM domain will have a default shadow-pagetable
    # allocation of 1MB. We free up 4MB here to be on the safe side.
    # 2MB memory allocation was not enough in some cases, so it's 4MB now
    balloon.free(4*1024, self) # 4MB should be plenty

    ssidref = 0
    if security.on() == xsconstants.XS_POLICY_USE:
        ssidref = security.calc_dom_ssidref_from_info(self.info)
        if security.has_authorization(ssidref) == False:
            raise VmError("VM is not authorized to run.")

    s3_integrity = 0
    if self.info.has_key('s3_integrity'):
        s3_integrity = self.info['s3_integrity']
    # Creation flags: bit 0 = HVM, bit 1 = HAP, bit 2 = S3 integrity.
    flags = (int(hvm) << 0) | (int(hap) << 1) | (int(s3_integrity) << 2)

    try:
        self.domid = xc.domain_create(
            domid = 0,
            ssidref = ssidref,
            handle = uuid.fromString(self.info['uuid']),
            flags = flags,
            target = self.info.target())
    except Exception, e:
        # may get here if due to ACM the operation is not permitted
        if security.on() == xsconstants.XS_POLICY_ACM:
            raise VmError('Domain in conflict set with running domain?')
        # NOTE(review): non-ACM exceptions are swallowed here and only
        # surface via the generic domid check below -- confirm intended.

    if self.domid < 0:
        raise VmError('Creating domain failed: name=%s' %
                      self.info['name_label'])

    self.dompath = GetDomainPath(self.domid)

    self._recreateDom()

    # Set timer configration of domain
    timer_mode = self.info["platform"].get("timer_mode")
    if hvm and timer_mode is not None:
        xc.hvm_set_param(self.domid, HVM_PARAM_TIMER_MODE,
                         long(timer_mode))

    # Set Viridian interface configuration of domain
    viridian = self.info["platform"].get("viridian")
    if arch.type == "x86" and hvm and viridian is not None:
        xc.hvm_set_param(self.domid, HVM_PARAM_VIRIDIAN, long(viridian))

    # Optionally enable virtual HPET
    hpet = self.info["platform"].get("hpet")
    if hvm and hpet is not None:
        xc.hvm_set_param(self.domid, HVM_PARAM_HPET_ENABLED,
                         long(hpet))

    # Optionally enable periodic vpt aligning
    vpt_align = self.info["platform"].get("vpt_align")
    if hvm and vpt_align is not None:
        xc.hvm_set_param(self.domid, HVM_PARAM_VPT_ALIGN,
                         long(vpt_align))

    # Set maximum number of vcpus in domain
    xc.domain_max_vcpus(self.domid, int(self.info['VCPUs_max']))

    # Check for cpu_{cap|weight} validity for credit scheduler
    if XendNode.instance().xenschedinfo() == 'credit':
        cap = self.getCap()
        weight = self.getWeight()

        assert type(weight) == int
        assert type(cap) == int

        if weight < 1 or weight > 65535:
            raise VmError("Cpu weight out of range, valid values are within range from 1 to 65535")

        if cap < 0 or cap > self.getVCpuCount() * 100:
            raise VmError("Cpu cap out of range, valid range is from 0 to %s for specified number of vcpus" %
                          (self.getVCpuCount() * 100))

    # Test whether the devices can be assigned with VT-d
    self.info.update_platform_pci()
    pci = self.info["platform"].get("pci")
    pci_str = ''
    if pci and len(pci) > 0:
        pci = map(lambda x: x[0:4], pci) # strip options
        pci_str = str(pci)
    if hvm and pci_str:
        bdf = xc.test_assign_device(0, pci_str)
        if bdf != 0:
            if bdf == -1:
                raise VmError("failed to assign device: maybe the platform"
                              " doesn't support VT-d, or VT-d isn't enabled"
                              " properly?")
            # Decode the packed bus/device/function value for the message.
            bus = (bdf >> 16) & 0xff
            devfn = (bdf >> 8) & 0xff
            dev = (devfn >> 3) & 0x1f
            func = devfn & 0x7
            raise VmError("fail to assign device(%x:%x.%x): maybe it has"
                          " already been assigned to other domain, or maybe"
                          " it doesn't exist." % (bus, dev, func))

    # register the domain in the list
    from xen.xend import XendDomain
    XendDomain.instance().add_domain(self)
def _introduceDomain(self):
    """Introduce the domain to xenstored (wraps IntroduceDomain),
    converting RuntimeError into XendError."""
    assert self.domid is not None
    assert self.store_mfn is not None
    assert self.store_port is not None

    try:
        IntroduceDomain(self.domid, self.store_mfn, self.store_port)
    except RuntimeError, exn:
        raise XendError(str(exn))
def _setTarget(self, target):
    """Set the domain's memory target (SetTarget) and record it under
    'target' in the domain path; RuntimeError becomes XendError."""
    assert self.domid is not None

    try:
        SetTarget(self.domid, target)
        self.storeDom('target', target)
    except RuntimeError, exn:
        raise XendError(str(exn))
def _setCPUAffinity(self):
    """ Repin domain vcpus if a restricted cpus list is provided.

    If no explicit cpu list is configured and the host has more than one
    NUMA node, pick the least-loaded node with enough free memory and pin
    all vcpus to that node's cpus.
    """

    def has_cpus():
        # True if any vcpu has a non-empty configured cpu list.
        if self.info['cpus'] is not None:
            for c in self.info['cpus']:
                if c:
                    return True
        return False

    if has_cpus():
        # Honour the explicit per-vcpu affinity from the configuration.
        for v in range(0, self.info['VCPUs_max']):
            if self.info['cpus'][v]:
                xc.vcpu_setaffinity(self.domid, v, self.info['cpus'][v])
    else:
        def find_relaxed_node(node_list):
            # Score each node by how many online vcpus of *other* domains
            # can run on it (normalised by node size); return the index of
            # the least-loaded node from node_list.
            import sys
            nr_nodes = info['nr_nodes']
            if node_list is None:
                node_list = range(0, nr_nodes)
            nodeload = [0]
            nodeload = nodeload * nr_nodes
            from xen.xend import XendDomain
            doms = XendDomain.instance().list('all')
            for dom in filter (lambda d: d.domid != self.domid, doms):
                cpuinfo = dom.getVCPUInfo()
                for vcpu in sxp.children(cpuinfo, 'vcpu'):
                    if sxp.child_value(vcpu, 'online') == 0: continue
                    cpumap = list(sxp.child_value(vcpu,'cpumap'))
                    for i in range(0, nr_nodes):
                        node_cpumask = info['node_to_cpu'][i]
                        for j in node_cpumask:
                            if j in cpumap:
                                nodeload[i] += 1
                                break
            for i in range(0, nr_nodes):
                if len(info['node_to_cpu'][i]) > 0 and i in node_list:
                    # Normalise load by the number of cpus on the node.
                    nodeload[i] = int(nodeload[i] * 16 / len(info['node_to_cpu'][i]))
                else:
                    # Empty or excluded nodes must never win.
                    nodeload[i] = sys.maxint
            index = nodeload.index( min(nodeload) )
            return index

        info = xc.physinfo()
        if info['nr_nodes'] > 1:
            node_memory_list = info['node_to_memory']
            needmem = self.image.getRequiredAvailableMemory(self.info['memory_dynamic_max']) / 1024
            # Candidate nodes: enough free memory and at least one cpu.
            candidate_node_list = []
            for i in range(0, info['nr_nodes']):
                if node_memory_list[i] >= needmem and len(info['node_to_cpu'][i]) > 0:
                    candidate_node_list.append(i)
            index = find_relaxed_node(candidate_node_list)
            cpumask = info['node_to_cpu'][index]
            for v in range(0, self.info['VCPUs_max']):
                xc.vcpu_setaffinity(self.domid, v, cpumask)
def _initDomain(self):
    """Initialise the constructed domain: run the bootloader, create the
    image, pin vcpus, size and reserve memory/shadow, create channels and
    devices, and move to DOM_STATE_RUNNING.

    @raise: VmError on failure (bootloader state is cleaned up first).
    """
    log.debug('XendDomainInfo.initDomain: %s %s',
              self.domid,
              self.info['vcpus_params']['weight'])

    self._configureBootloader()

    try:
        self.image = image.create(self, self.info)

        # repin domain vcpus if a restricted cpus list is provided
        # this is done prior to memory allocation to aide in memory
        # distribution for NUMA systems.
        self._setCPUAffinity()

        # Use architecture- and image-specific calculations to determine
        # the various headrooms necessary, given the raw configured
        # values. maxmem, memory, and shadow are all in KiB.
        # but memory_static_max etc are all stored in bytes now.
        memory = self.image.getRequiredAvailableMemory(
            self.info['memory_dynamic_max'] / 1024)
        maxmem = self.image.getRequiredAvailableMemory(
            self.info['memory_static_max'] / 1024)
        shadow = self.image.getRequiredShadowMemory(
            self.info['shadow_memory'] * 1024,
            self.info['memory_static_max'] / 1024)

        log.debug("_initDomain:shadow_memory=0x%x, memory_static_max=0x%x, memory_static_min=0x%x.", self.info['shadow_memory'], self.info['memory_static_max'], self.info['memory_static_min'],)
        # Round shadow up to a multiple of a MiB, as shadow_mem_control
        # takes MiB and we must not round down and end up under-providing.
        shadow = ((shadow + 1023) / 1024) * 1024

        # set memory limit
        xc.domain_setmaxmem(self.domid, maxmem)

        # Reserve 1 page per MiB of RAM for separate VT-d page table.
        vtd_mem = 4 * (self.info['memory_static_max'] / 1024 / 1024)
        # Round vtd_mem up to a multiple of a MiB.
        vtd_mem = ((vtd_mem + 1023) / 1024) * 1024

        # Make sure there's enough RAM available for the domain
        balloon.free(memory + shadow + vtd_mem, self)

        # Set up the shadow memory
        shadow_cur = xc.shadow_mem_control(self.domid, shadow / 1024)
        self.info['shadow_memory'] = shadow_cur

        # machine address size
        if self.info.has_key('machine_address_size'):
            log.debug("_initDomain: setting maximum machine address size %d" % self.info['machine_address_size'])
            xc.domain_set_machine_address_size(self.domid, self.info['machine_address_size'])

        if self.info.has_key('suppress_spurious_page_faults') and self.info['suppress_spurious_page_faults']:
            log.debug("_initDomain: suppressing spurious page faults")
            xc.domain_suppress_spurious_page_faults(self.domid)

        self._createChannels()

        channel_details = self.image.createImage()

        self.store_mfn = channel_details['store_mfn']
        if 'console_mfn' in channel_details:
            self.console_mfn = channel_details['console_mfn']
        if 'notes' in channel_details:
            self.info.set_notes(channel_details['notes'])
        if 'native_protocol' in channel_details:
            self.native_protocol = channel_details['native_protocol'];

        self._introduceDomain()
        if self.info.target():
            self._setTarget(self.info.target())

        self._createDevices()

        self.image.cleanupBootloading()

        self.info['start_time'] = time.time()

        self._stateSet(DOM_STATE_RUNNING)
    except VmError, exn:
        log.exception("XendDomainInfo.initDomain: exception occurred")
        if self.image:
            self.image.cleanupBootloading()
        raise exn
    except RuntimeError, exn:
        log.exception("XendDomainInfo.initDomain: exception occurred")
        if self.image:
            self.image.cleanupBootloading()
        # Normalise low-level failures to VmError for callers.
        raise VmError(str(exn))
def cleanupDomain(self):
    """Cleanup domain resources; release devices.  Idempotent.  Nothrow
    guarantee."""

    self.refresh_shutdown_lock.acquire()
    try:
        # Stop watching control/shutdown before tearing devices down.
        self.unwatchShutdown()
        self._releaseDevices()
        bootloader_tidy(self)

        if self.image:
            self.image = None

        try:
            self._removeDom()
        except:
            # Nothrow: just log.
            log.exception("Removing domain path failed.")

        self._stateSet(DOM_STATE_HALTED)
        self.domid = None  # Do not push into _stateSet()!
    finally:
        self.refresh_shutdown_lock.release()
def unwatchShutdown(self):
    """Remove the watch on the domain's control/shutdown node, if any.
    Idempotent.  Nothrow guarantee.  Expects to be protected by the
    refresh_shutdown_lock."""
    try:
        try:
            watch = self.shutdownWatch
            if watch:
                watch.unwatch()
        finally:
            # Always drop the reference, even if unwatch() raised.
            self.shutdownWatch = None
    except:
        log.exception("Unwatching control/shutdown failed.")
def waitForShutdown(self):
    """Block until the domain leaves the RUNNING/PAUSED states, polling
    the state_updated condition once a second."""
    self.state_updated.acquire()
    try:
        while self._stateGet() in (DOM_STATE_RUNNING,DOM_STATE_PAUSED):
            self.state_updated.wait(timeout=1.0)
    finally:
        self.state_updated.release()
def waitForSuspend(self):
    """Wait for the guest to respond to a suspend request by
    shutting down. If the guest hasn't re-written control/shutdown
    after a certain amount of time, it's obviously not listening and
    won't suspend, so we give up. HVM guests with no PV drivers
    should already be shutdown.

    @raise: XendError after ~60 one-second polls if control/shutdown
        still reads 'suspend'.
    """
    state = "suspend"
    nr_tries = 60

    self.state_updated.acquire()
    try:
        while self._stateGet() in (DOM_STATE_RUNNING,DOM_STATE_PAUSED):
            self.state_updated.wait(1.0)
            if state == "suspend":
                if nr_tries == 0:
                    msg = ('Timeout waiting for domain %s to suspend'
                        % self.domid)
                    # Clear the pending request before giving up so the
                    # guest does not act on it later.
                    self._writeDom('control/shutdown', '')
                    raise XendError(msg)
                state = self.readDom('control/shutdown')
                nr_tries -= 1
    finally:
        self.state_updated.release()
2660 # TODO: recategorise - called from XendCheckpoint
def completeRestore(self, store_mfn, console_mfn):
    """Finish restoring a domain (called from XendCheckpoint): record the
    store/console MFNs, re-introduce the domain to xenstored, recreate
    the image and device model, and resume watches."""

    log.debug("XendDomainInfo.completeRestore")

    self.store_mfn = store_mfn
    self.console_mfn = console_mfn

    self._introduceDomain()
    self.image = image.create(self, self.info)
    if self.image:
        # True: the device model is restoring, not booting fresh.
        self.image.createDeviceModel(True)
    self._storeDomDetails()
    self._registerWatches()
    self.refreshShutdown()

    log.debug("XendDomainInfo.completeRestore done")
def _endRestore(self):
    """Mark the end of a restore: clear the domain's resume flag."""
    self.setResume(False)
2685 # VM Destroy
def _prepare_phantom_paths(self):
    """Collect the phantom-vbd xenstore paths for this domain.

    For every vbd backend entry that carries a 'phantom_vbd' link, both
    the backend path and its frontend path are collected, so they can be
    removed after the normal devices (see _cleanup_phantom_devs).

    @return: list of backend/frontend phantom paths (possibly empty).
    """
    phantom_paths = []
    if self.domid is not None:
        t = xstransact("%s/device/vbd" % GetDomainPath(self.domid))
        try:
            for dev in t.list():
                backend = xstransact.Read("%s/device/vbd/%s/phantom_vbd" \
                                          % (self.dompath, dev))
                if backend is None:
                    continue
                frontend = xstransact.Read("%s/frontend" % backend)
                phantom_paths.append(backend)
                phantom_paths.append(frontend)
        finally:
            # Read-only transaction: always abort.
            t.abort()
    return phantom_paths
def _cleanup_phantom_devs(self, plist):
    """Remove the given phantom device paths from xenstore.

    Backend paths are first marked offline and moved to the Closing
    state, then every path is forcibly removed.
    """
    if not plist:
        return
    # Give the backends a moment before tearing the nodes down.
    time.sleep(2)
    for path in plist:
        if path.find('backend') != -1:
            # Modify online status /before/ updating state (latter is watched by
            # drivers, so this ordering avoids a race).
            xstransact.Write(path, 'online', "0")
            xstransact.Write(path, 'state', str(xenbusState['Closing']))
        # force
        xstransact.Remove(path)
def destroy(self):
    """Cleanup VM and destroy domain.  Nothrow guarantee."""

    if self.domid is None:
        # Never constructed, or already destroyed: nothing to do.
        return

    from xen.xend import XendDomain
    log.debug("XendDomainInfo.destroy: domid=%s", str(self.domid))

    # Phantom vbd paths must be collected before the domain goes away.
    paths = self._prepare_phantom_paths()

    if self.dompath is not None:
        try:
            xc.domain_destroy_hook(self.domid)
            # Pause, reset pass-through devices (FLR), then destroy.
            xc.domain_pause(self.domid)
            do_FLR(self.domid)
            xc.domain_destroy(self.domid)
            for state in DOM_STATES_OLD:
                self.info[state] = 0
            self._stateSet(DOM_STATE_HALTED)
        except:
            # Nothrow: just log the failure and continue cleaning up.
            log.exception("XendDomainInfo.destroy: domain destruction failed.")

        XendDomain.instance().remove_domain(self)
        self.cleanupDomain()

    self._cleanup_phantom_devs(paths)
    self._cleanupVm()

    # Transient domains are also removed from the managed-domain list.
    if "transient" in self.info["other_config"] \
       and bool(self.info["other_config"]["transient"]):
        XendDomain.instance().domain_delete_by_dominfo(self)
def resetDomain(self):
    """Destroy this domain and recreate it from the same configuration,
    copying the xend-private VM subtree into the new instance.
    Failures are logged, not raised."""
    log.debug("XendDomainInfo.resetDomain(%s)", str(self.domid))

    old_domid = self.domid
    # Snapshot the xend subtree before the old domain is destroyed.
    prev_vm_xend = self._listRecursiveVm('xend')
    new_dom_info = self.info
    try:
        self._unwatchVm()
        self.destroy()

        new_dom = None
        try:
            from xen.xend import XendDomain
            # A fresh domid is allocated on creation.
            new_dom_info['domid'] = None
            new_dom = XendDomain.instance().domain_create_from_dict(
                new_dom_info)
            for x in prev_vm_xend[0][1]:
                new_dom._writeVm('xend/%s' % x[0], x[1])
            new_dom.waitForDevices()
            new_dom.unpause()
        except:
            if new_dom:
                new_dom.destroy()
            raise
    except:
        log.exception('Failed to reset domain %s.', str(old_domid))
def resumeDomain(self):
    """Resume a suspended domain (e.g. after live checkpoint, or after
    a later error during save or migate); checks that the domain
    is currently suspended first so safe to call from anywhere."""
    log.debug("XendDomainInfo.resumeDomain(%s)", str(self.domid))

    xeninfo = dom_get(self.domid)
    if xeninfo is None:
        return
    if not xeninfo['shutdown']:
        return
    reason = shutdown_reason(xeninfo['shutdown_reason'])
    if reason != 'suspend':
        return

    try:
        # could also fetch a parsed note from xenstore
        # Fast resume (SUSPEND_CANCEL note) skips device teardown/rebuild.
        fast = self.info.get_notes().get('SUSPEND_CANCEL') and 1 or 0
        if not fast:
            self._releaseDevices()
            self.testDeviceComplete()
            self.testvifsComplete()
            log.debug("XendDomainInfo.resumeDomain: devices released")

            self._resetChannels()

            self._removeDom('control/shutdown')
            self._removeDom('device-misc/vif/nextDeviceID')

            self._createChannels()
            self._introduceDomain()
            self._storeDomDetails()

            self._createDevices()
            log.debug("XendDomainInfo.resumeDomain: devices created")

        xc.domain_resume(self.domid, fast)
        ResumeDomain(self.domid)
    except:
        log.exception("XendDomainInfo.resume: xc.domain_resume failed on domain %s." % (str(self.domid)))
    self.image.resumeDeviceModel()
    log.debug("XendDomainInfo.resumeDomain: completed")
2828 # Channels for xenstore and console
def _createChannels(self):
    """Create the xenstore and console event channels to the domain.
    """
    self.store_port = self._createChannel()
    self.console_port = self._createChannel()
def _createChannel(self):
    """Allocate an unbound event channel between this domain and dom0.

    Returns the new port, or None when the domain has no id yet.
    Hypercall failures are logged and re-raised.
    """
    if self.domid == None:
        return None
    try:
        return xc.evtchn_alloc_unbound(domid = self.domid,
                                       remote_dom = 0)
    except:
        log.exception("Exception in alloc_unbound(%s)", str(self.domid))
        raise
def _resetChannels(self):
    """Reset all event channels in the domain.

    Delegates to xc.evtchn_reset(); returns its result, or None when the
    domain has no id.  Hypercall failures are logged and re-raised.
    """
    try:
        if self.domid != None:
            return xc.evtchn_reset(dom = self.domid)
    except:
        # Fix: the log message previously misspelt the call as
        # 'evtcnh_reset'.
        log.exception("Exception in evtchn_reset(%s)", str(self.domid))
        raise
2861 # Bootloader configuration
def _configureBootloader(self):
    """Run the bootloader if we're configured to do so."""

    blexec = self.info['PV_bootloader']
    bootloader_args = self.info['PV_bootloader_args']
    kernel = self.info['PV_kernel']
    ramdisk = self.info['PV_ramdisk']
    args = self.info['PV_args']
    boot = self.info['HVM_boot_policy']

    if boot:
        # HVM booting.
        pass
    elif not blexec and kernel:
        # Boot from dom0.  Nothing left to do -- the kernel and ramdisk
        # will be picked up by image.py.
        pass
    else:
        # Boot using bootloader
        if not blexec or blexec == 'pygrub':
            blexec = auxbin.pathTo('pygrub')

        blcfg = None
        # Only disks marked bootable are candidates for the bootloader.
        disks = [x for x in self.info['vbd_refs']
                 if self.info['devices'][x][1]['bootable']]

        if not disks:
            msg = "Had a bootloader specified, but no disks are bootable"
            log.error(msg)
            raise VmError(msg)

        devinfo = self.info['devices'][disks[0]]
        devtype = devinfo[0]
        disk = devinfo[1]['uname']

        fn = blkdev_uname_to_file(disk)
        taptype = blkdev_uname_to_taptype(disk)
        mounted = devtype == 'tap' and taptype != 'aio' and taptype != 'sync' and not os.stat(fn).st_rdev
        if mounted:
            # This is a file, not a device. pygrub can cope with a
            # file if it's raw, but if it's QCOW or other such formats
            # used through blktap, then we need to mount it first.

            log.info("Mounting %s on %s." %
                     (fn, BOOTLOADER_LOOPBACK_DEVICE))

            vbd = {
                'mode': 'RO',
                'device': BOOTLOADER_LOOPBACK_DEVICE,
                }

            from xen.xend import XendDomain
            dom0 = XendDomain.instance().privilegedDomain()
            dom0._waitForDeviceUUID(dom0.create_vbd(vbd, disk))
            fn = BOOTLOADER_LOOPBACK_DEVICE

        try:
            blcfg = bootloader(blexec, fn, self, False,
                               bootloader_args, kernel, ramdisk, args)
        finally:
            if mounted:
                log.info("Unmounting %s from %s." %
                         (fn, BOOTLOADER_LOOPBACK_DEVICE))

                dom0.destroyDevice('tap', BOOTLOADER_LOOPBACK_DEVICE)

        if blcfg is None:
            msg = "Had a bootloader specified, but can't find disk"
            log.error(msg)
            raise VmError(msg)

        # Merge the bootloader's chosen kernel/ramdisk/args into our info.
        self.info.update_with_image_sxp(blcfg, True)
2939 # VM Functions
def _readVMDetails(self, params):
    """Read the specified parameters from the store.

    @return: gathered values, or [] if the store data is corrupt.
    """
    try:
        return self._gatherVm(*params)
    except ValueError:
        # One of the int/float entries in params has a corresponding store
        # entry that is invalid. We recover, because older versions of
        # Xend may have put the entry there (memory/target, for example),
        # but this is in general a bad situation to have reached.
        log.exception(
            "Store corrupted at %s! Domain %d's configuration may be "
            "affected.", self.vmpath, self.domid)
        return []
def _cleanupVm(self):
    """Cleanup VM resources.  Idempotent.  Nothrow guarantee."""

    self._unwatchVm()

    try:
        self._removeVm()
    except:
        # Nothrow: just log.
        log.exception("Removing VM path failed.")
def checkLiveMigrateMemory(self):
    """ Make sure there's enough memory to migrate this domain """
    headroom_kb = 0
    if arch.type == "x86":
        # 1MB per vcpu plus 4Kib/Mib of RAM. This is higher than
        # the minimum that Xen would allocate if no value were given.
        vcpu_kb = self.info['VCPUs_max'] * 1024
        ram_kb = (self.info['memory_static_max'] / 1024 / 1024) * 4
        headroom_kb = vcpu_kb + ram_kb
        # Round up to a whole MiB.
        headroom_kb = ((headroom_kb + 1023) / 1024) * 1024
        # The domain might already have some shadow memory
        headroom_kb -= xc.shadow_mem_control(self.domid) * 1024
    if headroom_kb > 0:
        balloon.free(headroom_kb, self)
2982 def _unwatchVm(self):
2983 """Remove the watch on the VM path, if any. Idempotent. Nothrow
2984 guarantee."""
2985 try:
2986 try:
2987 if self.vmWatch:
2988 self.vmWatch.unwatch()
2989 finally:
2990 self.vmWatch = None
2991 except:
2992 log.exception("Unwatching VM path failed.")
2994 def testDeviceComplete(self):
2995 """ For Block IO migration safety we must ensure that
2996 the device has shutdown correctly, i.e. all blocks are
2997 flushed to disk
2998 """
2999 start = time.time()
3000 while True:
3001 test = 0
3002 diff = time.time() - start
3003 vbds = self.getDeviceController('vbd').deviceIDs()
3004 taps = self.getDeviceController('tap').deviceIDs()
3005 for i in vbds + taps:
3006 test = 1
3007 log.info("Dev %s still active, looping...", i)
3008 time.sleep(0.1)
3010 if test == 0:
3011 break
3012 if diff >= MIGRATE_TIMEOUT:
3013 log.info("Dev still active but hit max loop timeout")
3014 break
3016 def testvifsComplete(self):
3017 """ In case vifs are released and then created for the same
3018 domain, we need to wait the device shut down.
3019 """
3020 start = time.time()
3021 while True:
3022 test = 0
3023 diff = time.time() - start
3024 for i in self.getDeviceController('vif').deviceIDs():
3025 test = 1
3026 log.info("Dev %s still active, looping...", i)
3027 time.sleep(0.1)
3029 if test == 0:
3030 break
3031 if diff >= MIGRATE_TIMEOUT:
3032 log.info("Dev still active but hit max loop timeout")
3033 break
    def _storeVmDetails(self):
        """Write this domain's configuration to its /vm xenstore path and
        grant the guest read access to its UUID entry."""
        to_store = {}

        # Legacy keys first; all values are stored as strings.
        for key in XendConfig.LEGACY_XENSTORE_VM_PARAMS:
            info_key = XendConfig.LEGACY_CFG_TO_XENAPI_CFG.get(key, key)
            if self._infoIsSet(info_key):
                to_store[key] = str(self.info[info_key])

        if self._infoIsSet("static_memory_min"):
            to_store["memory"] = str(self.info["static_memory_min"])
        if self._infoIsSet("static_memory_max"):
            to_store["maxmem"] = str(self.info["static_memory_max"])

        image_sxpr = self.info.image_sxpr()
        if image_sxpr:
            to_store['image'] = sxp.to_string(image_sxpr)

        # Initialise the restart counter only on the first store.
        if not self._readVm('xend/restart_count'):
            to_store['xend/restart_count'] = str(0)

        # scrub_password keeps secrets out of the debug log.
        log.debug("Storing VM details: %s", scrub_password(to_store))

        self._writeVm(to_store)
        self._setVmPermissions()
    def _setVmPermissions(self):
        """Allow the guest domain to read its UUID. We don't allow it to
        access any other entry, for security."""
        # Read-only grant on /vm/<uuid>/uuid for the guest's domid.
        xstransact.SetPermissions('%s/uuid' % self.vmpath,
                                  { 'dom' : self.domid,
                                    'read' : True,
                                    'write' : False })
3069 # Utility functions
3072 def __getattr__(self, name):
3073 if name == "state":
3074 log.warn("Somebody tried to read XendDomainInfo.state... should us _stateGet()!!!")
3075 log.warn("".join(traceback.format_stack()))
3076 return self._stateGet()
3077 else:
3078 raise AttributeError(name)
3080 def __setattr__(self, name, value):
3081 if name == "state":
3082 log.warn("Somebody tried to set XendDomainInfo.state... should us _stateGet()!!!")
3083 log.warn("".join(traceback.format_stack()))
3084 self._stateSet(value)
3085 else:
3086 self.__dict__[name] = value
    def _stateSet(self, state):
        """Publish a new power state: wake anyone waiting on state_updated
        and emit a Xen-API power_state event, but only on actual change."""
        self.state_updated.acquire()
        try:
            # TODO Not sure this is correct...
            # _stateGet is live now. Why not fire event
            # even when it hasn't changed?
            if self._stateGet() != state:
                self.state_updated.notifyAll()
                # Imported lazily to avoid a module-level import cycle.
                import XendAPI
                XendAPI.event_dispatch('mod', 'VM', self.info['uuid'],
                                       'power_state')
        finally:
            self.state_updated.release()
    def _stateGet(self):
        """Derive the Xen-API power state live from the hypervisor."""
        # Lets try and reconsitute the state from xc
        # first lets try and get the domain info
        # from xc - this will tell us if the domain
        # exists
        info = dom_get(self.getDomid())
        if info is None or info['shutdown']:
            # We are either HALTED or SUSPENDED
            # check saved image exists
            from xen.xend import XendDomain
            managed_config_path = \
                XendDomain.instance()._managed_check_point_path( \
                    self.get_uuid())
            # A checkpoint file on disk distinguishes SUSPENDED from HALTED.
            if os.path.exists(managed_config_path):
                return XEN_API_VM_POWER_STATE_SUSPENDED
            else:
                return XEN_API_VM_POWER_STATE_HALTED
        elif info['crashed']:
            # Crashed
            return XEN_API_VM_POWER_STATE_CRASHED
        else:
            # We are either RUNNING or PAUSED
            if info['paused']:
                return XEN_API_VM_POWER_STATE_PAUSED
            else:
                return XEN_API_VM_POWER_STATE_RUNNING
3129 def _infoIsSet(self, name):
3130 return name in self.info and self.info[name] is not None
3132 def _checkName(self, name):
3133 """Check if a vm name is valid. Valid names contain alphabetic
3134 characters, digits, or characters in '_-.:/+'.
3135 The same name cannot be used for more than one vm at the same time.
3137 @param name: name
3138 @raise: VmError if invalid
3139 """
3140 from xen.xend import XendDomain
3142 if name is None or name == '':
3143 raise VmError('Missing VM Name')
3145 if not re.search(r'^[A-Za-z0-9_\-\.\:\/\+]+$', name):
3146 raise VmError('Invalid VM Name')
3148 dom = XendDomain.instance().domain_lookup_nr(name)
3149 if dom and dom.info['uuid'] != self.info['uuid']:
3150 raise VmError("VM name '%s' already exists%s" %
3151 (name,
3152 dom.domid is not None and
3153 (" as domain %s" % str(dom.domid)) or ""))
    def update(self, info = None, refresh = True, transaction = None):
        """Update with info from xc.domain_getinfo().

        @param info: pre-fetched domain info dict; fetched via dom_get when
                     None (no-op if the domain has vanished).
        @param refresh: when True, re-evaluate shutdown state afterwards.
        @param transaction: optional xenstore transaction forwarded to
                            _update_consoles.
        """
        log.trace("XendDomainInfo.update(%s) on domain %s", info,
                  str(self.domid))

        if not info:
            info = dom_get(self.domid)
            if not info:
                return

        # Negative maxmem means "unlimited"; substitute the host's total.
        if info["maxmem_kb"] < 0:
            info["maxmem_kb"] = XendNode.instance() \
                                .physinfo_dict()['total_memory'] * 1024

        # make sure state is reset for info
        # TODO: we should eventually get rid of old_dom_states

        self.info.update_config(info)
        self._update_consoles(transaction)

        if refresh:
            self.refreshShutdown(info)

        log.trace("XendDomainInfo.update done on domain %s: %s",
                  str(self.domid), self.info)
3183 def sxpr(self, ignore_store = False, legacy_only = True):
3184 result = self.info.to_sxp(domain = self,
3185 ignore_devices = ignore_store,
3186 legacy_only = legacy_only)
3188 return result
3190 # Xen API
3191 # ----------------------------------------------------------------
3193 def get_uuid(self):
3194 dom_uuid = self.info.get('uuid')
3195 if not dom_uuid: # if it doesn't exist, make one up
3196 dom_uuid = uuid.createString()
3197 self.info['uuid'] = dom_uuid
3198 return dom_uuid
    def get_memory_static_max(self):
        """Configured static maximum memory; 0 when unset."""
        return self.info.get('memory_static_max', 0)
    def get_memory_static_min(self):
        """Configured static minimum memory; 0 when unset."""
        return self.info.get('memory_static_min', 0)
    def get_memory_dynamic_max(self):
        """Configured dynamic maximum memory; 0 when unset."""
        return self.info.get('memory_dynamic_max', 0)
    def get_memory_dynamic_min(self):
        """Configured dynamic minimum memory; 0 when unset."""
        return self.info.get('memory_dynamic_min', 0)
3209 # only update memory-related config values if they maintain sanity
3210 def _safe_set_memory(self, key, newval):
3211 oldval = self.info.get(key, 0)
3212 try:
3213 self.info[key] = newval
3214 self.info._memory_sanity_check()
3215 except Exception, ex:
3216 self.info[key] = oldval
3217 raise
    def set_memory_static_max(self, val):
        """Set the static maximum memory (validated by _safe_set_memory)."""
        self._safe_set_memory('memory_static_max', val)
    def set_memory_static_min(self, val):
        """Set the static minimum memory (validated by _safe_set_memory)."""
        self._safe_set_memory('memory_static_min', val)
    def set_memory_dynamic_max(self, val):
        """Set the dynamic maximum memory (validated by _safe_set_memory)."""
        self._safe_set_memory('memory_dynamic_max', val)
    def set_memory_dynamic_min(self, val):
        """Set the dynamic minimum memory (validated by _safe_set_memory)."""
        self._safe_set_memory('memory_dynamic_min', val)
3228 def get_vcpus_params(self):
3229 if self.getDomid() is None:
3230 return self.info['vcpus_params']
3232 retval = xc.sched_credit_domain_get(self.getDomid())
3233 return retval
    def get_power_state(self):
        """Return the Xen-API string name for the current power state."""
        return XEN_API_VM_POWER_STATE[self._stateGet()]
3236 def get_platform(self):
3237 return self.info.get('platform', {})
3238 def get_pci_bus(self):
3239 return self.info.get('pci_bus', '')
3240 def get_tools_version(self):
3241 return self.info.get('tools_version', {})
3242 def get_metrics(self):
3243 return self.metrics.get_uuid();
3246 def get_security_label(self, xspol=None):
3247 import xen.util.xsm.xsm as security
3248 label = security.get_security_label(self, xspol)
3249 return label
    def set_security_label(self, seclab, old_seclab, xspol=None,
                           xspol_old=None):
        """
        Set the security label of a domain from its old to
        a new value.
        @param seclab  New security label formatted in the form
                       <policy type>:<policy name>:<vm label>
        @param old_seclab  The current security label that the
                       VM must have.
        @param xspol   An optional policy under which this
                       update should be done. If not given,
                       then the current active policy is used.
        @param xspol_old The old policy; only to be passed during
                         the updating of a policy
        @return Returns return code, a string with errors from
                the hypervisor's operation, old label of the
                domain
        """
        rc = 0
        errors = ""
        old_label = ""
        new_ssidref = 0
        domid = self.getDomid()
        res_labels = None
        is_policy_update = (xspol_old != None)

        from xen.xend.XendXSPolicyAdmin import XSPolicyAdminInstance

        state = self._stateGet()
        # Relabel only HALTED or RUNNING or PAUSED domains
        if domid != 0 and \
           state not in \
              [ DOM_STATE_HALTED, DOM_STATE_RUNNING, DOM_STATE_PAUSED, \
                DOM_STATE_SUSPENDED ]:
            log.warn("Relabeling domain not possible in state '%s'" %
                     DOM_STATES[state])
            return (-xsconstants.XSERR_VM_WRONG_STATE, "", "", 0)

        # Remove security label. Works only for halted or suspended domains
        if not seclab or seclab == "":
            if state not in [ DOM_STATE_HALTED, DOM_STATE_SUSPENDED ]:
                return (-xsconstants.XSERR_VM_WRONG_STATE, "", "", 0)

            if self.info.has_key('security_label'):
                old_label = self.info['security_label']
                # Check label against expected one.
                if old_label != old_seclab:
                    return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)
                del self.info['security_label']
                xen.xend.XendDomain.instance().managed_config_save(self)
            return (xsconstants.XSERR_SUCCESS, "", "", 0)

        # A label has the fixed form <policy type>:<policy name>:<vm label>.
        tmp = seclab.split(":")
        if len(tmp) != 3:
            return (-xsconstants.XSERR_BAD_LABEL_FORMAT, "", "", 0)
        typ, policy, label = tmp

        poladmin = XSPolicyAdminInstance()
        if not xspol:
            xspol = poladmin.get_policy_by_name(policy)

        try:
            # All early returns below still release via the finally clause.
            xen.xend.XendDomain.instance().policy_lock.acquire_writer()

            if state in [ DOM_STATE_RUNNING, DOM_STATE_PAUSED ]:
                #if domain is running or paused try to relabel in hypervisor
                if not xspol:
                    return (-xsconstants.XSERR_POLICY_NOT_LOADED, "", "", 0)

                if typ != xspol.get_type_name() or \
                   policy != xspol.get_name():
                    return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)

                if typ == xsconstants.ACM_POLICY_ID:
                    new_ssidref = xspol.vmlabel_to_ssidref(label)
                    if new_ssidref == xsconstants.INVALID_SSIDREF:
                        return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)

                    # Check that all used resources are accessible under the
                    # new label
                    if not is_policy_update and \
                       not security.resources_compatible_with_vmlabel(xspol,
                              self, label):
                        return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)

                    #Check label against expected one. Can only do this
                    # if the policy hasn't changed underneath in the meantime
                    if xspol_old == None:
                        old_label = self.get_security_label()
                        if old_label != old_seclab:
                            log.info("old_label != old_seclab: %s != %s" %
                                     (old_label, old_seclab))
                            return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)

                    # relabel domain in the hypervisor
                    rc, errors = security.relabel_domains([[domid, new_ssidref]])
                    log.info("rc from relabeling in HV: %d" % rc)
                else:
                    return (-xsconstants.XSERR_POLICY_TYPE_UNSUPPORTED, "", "", 0)

            if rc == 0:
                # HALTED, RUNNING or PAUSED
                if domid == 0:
                    if xspol:
                        self.info['security_label'] = seclab
                        ssidref = poladmin.set_domain0_bootlabel(xspol, label)
                    else:
                        return (-xsconstants.XSERR_POLICY_NOT_LOADED, "", "", 0)
                else:
                    if self.info.has_key('security_label'):
                        old_label = self.info['security_label']
                        # Check label against expected one, unless wildcard
                        if old_label != old_seclab:
                            return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)

                    self.info['security_label'] = seclab

                    try:
                        xen.xend.XendDomain.instance().managed_config_save(self)
                    except:
                        # Best effort: persisting the managed config is
                        # not allowed to fail the relabel itself.
                        pass
            return (rc, errors, old_label, new_ssidref)
        finally:
            xen.xend.XendDomain.instance().policy_lock.release()
3376 def get_on_shutdown(self):
3377 after_shutdown = self.info.get('actions_after_shutdown')
3378 if not after_shutdown or after_shutdown not in XEN_API_ON_NORMAL_EXIT:
3379 return XEN_API_ON_NORMAL_EXIT[-1]
3380 return after_shutdown
3382 def get_on_reboot(self):
3383 after_reboot = self.info.get('actions_after_reboot')
3384 if not after_reboot or after_reboot not in XEN_API_ON_NORMAL_EXIT:
3385 return XEN_API_ON_NORMAL_EXIT[-1]
3386 return after_reboot
3388 def get_on_suspend(self):
3389 # TODO: not supported
3390 after_suspend = self.info.get('actions_after_suspend')
3391 if not after_suspend or after_suspend not in XEN_API_ON_NORMAL_EXIT:
3392 return XEN_API_ON_NORMAL_EXIT[-1]
3393 return after_suspend
3395 def get_on_crash(self):
3396 after_crash = self.info.get('actions_after_crash')
3397 if not after_crash or after_crash not in \
3398 XEN_API_ON_CRASH_BEHAVIOUR + restart_modes:
3399 return XEN_API_ON_CRASH_BEHAVIOUR[0]
3400 return XEN_API_ON_CRASH_BEHAVIOUR_FILTER[after_crash]
    def get_dev_config_by_uuid(self, dev_class, dev_uuid):
        """Get's a device configuration either from XendConfig or
        from the DevController.

        @param dev_class: device class, either, 'vbd' or 'vif'
        @param dev_uuid: device UUID

        @rtype: dictionary
        """
        dev_type, dev_config = self.info['devices'].get(dev_uuid, (None, None))

        # shortcut if the domain isn't started because
        # the devcontrollers will have no better information
        # than XendConfig.
        if self._stateGet() in (XEN_API_VM_POWER_STATE_HALTED,
                                XEN_API_VM_POWER_STATE_SUSPENDED):
            if dev_config:
                return copy.deepcopy(dev_config)
            return None

        # instead of using dev_class, we use the dev_type
        # that is from XendConfig.
        controller = self.getDeviceController(dev_type)
        if not controller:
            return None

        all_configs = controller.getAllDeviceConfigurations()
        if not all_configs:
            return None

        updated_dev_config = copy.deepcopy(dev_config)
        for _devid, _devcfg in all_configs.items():
            if _devcfg.get('uuid') == dev_uuid:
                # Live controller data overrides the stored configuration.
                updated_dev_config.update(_devcfg)
                updated_dev_config['id'] = _devid
                return updated_dev_config

        # No live entry matched: fall back to the stored config copy.
        return updated_dev_config
    def get_dev_xenapi_config(self, dev_class, dev_uuid):
        """Return the device's configuration augmented with the Xen-API
        fields expected for its class ('vif', 'vbd' or 'vtpm').

        @rtype: dictionary ({} when the device is unknown)
        """
        config = self.get_dev_config_by_uuid(dev_class, dev_uuid)
        if not config:
            return {}

        config['VM'] = self.get_uuid()

        if dev_class == 'vif':
            # Fill Xen-API field names from their legacy equivalents.
            if not config.has_key('name'):
                config['name'] = config.get('vifname', '')
            if not config.has_key('MAC'):
                config['MAC'] = config.get('mac', '')
            if not config.has_key('type'):
                config['type'] = 'paravirtualised'
            if not config.has_key('device'):
                devid = config.get('id')
                if devid != None:
                    config['device'] = 'eth%s' % devid
                else:
                    config['device'] = ''

            if not config.has_key('network'):
                try:
                    bridge = config.get('bridge', None)
                    if bridge is None:
                        # Derive the bridge from the live vifN.M interface.
                        from xen.util import Brctl
                        if_to_br = dict([(i,b)
                                         for (b,ifs) in Brctl.get_state().items()
                                         for i in ifs])
                        vifname = "vif%s.%s" % (self.getDomid(),
                                                config.get('id'))
                        bridge = if_to_br.get(vifname, None)
                    # NOTE(review): the freshly derived 'bridge' is not used
                    # below -- config.get('bridge') is passed instead, so when
                    # the bridge had to be looked up via Brctl this call gets
                    # None and the resulting exception is swallowed; confirm
                    # whether 'bridge' was intended here.
                    config['network'] = \
                        XendNode.instance().bridge_to_network(
                        config.get('bridge')).get_uuid()
                except Exception:
                    log.exception('bridge_to_network')
                    # Ignore this for now -- it may happen if the device
                    # has been specified using the legacy methods, but at
                    # some point we're going to have to figure out how to
                    # handle that properly.

            config['MTU'] = 1500 # TODO

            if self._stateGet() not in (XEN_API_VM_POWER_STATE_HALTED,):
                # NOTE(review): 'devid' is only bound in the
                # has_key('device') branch above; if 'device' was already
                # present this path would raise NameError -- confirm.
                xennode = XendNode.instance()
                rx_bps, tx_bps = xennode.get_vif_util(self.domid, devid)
                config['io_read_kbs'] = rx_bps/1024
                config['io_write_kbs'] = tx_bps/1024
                rx, tx = xennode.get_vif_stat(self.domid, devid)
                config['io_total_read_kbs'] = rx/1024
                config['io_total_write_kbs'] = tx/1024
            else:
                config['io_read_kbs'] = 0.0
                config['io_write_kbs'] = 0.0
                config['io_total_read_kbs'] = 0.0
                config['io_total_write_kbs'] = 0.0

            config['security_label'] = config.get('security_label', '')

        if dev_class == 'vbd':

            if self._stateGet() not in (XEN_API_VM_POWER_STATE_HALTED,):
                controller = self.getDeviceController(dev_class)
                devid, _1, _2 = controller.getDeviceDetails(config)
                xennode = XendNode.instance()
                rd_blkps, wr_blkps = xennode.get_vbd_util(self.domid, devid)
                config['io_read_kbs'] = rd_blkps
                config['io_write_kbs'] = wr_blkps
            else:
                config['io_read_kbs'] = 0.0
                config['io_write_kbs'] = 0.0

            config['VDI'] = config.get('VDI', '')
            config['device'] = config.get('dev', '')
            # A 'name:type' device string carries the VBD type suffix.
            if ':' in config['device']:
                vbd_name, vbd_type = config['device'].split(':', 1)
                config['device'] = vbd_name
                if vbd_type == 'cdrom':
                    config['type'] = XEN_API_VBD_TYPE[0]
                else:
                    config['type'] = XEN_API_VBD_TYPE[1]

            config['driver'] = 'paravirtualised' # TODO
            config['image'] = config.get('uname', '')

            if config.get('mode', 'r') == 'r':
                config['mode'] = 'RO'
            else:
                config['mode'] = 'RW'

        if dev_class == 'vtpm':
            if not config.has_key('type'):
                config['type'] = 'paravirtualised' # TODO
            if not config.has_key('backend'):
                config['backend'] = "00000000-0000-0000-0000-000000000000"

        return config
3540 def get_dev_property(self, dev_class, dev_uuid, field):
3541 config = self.get_dev_xenapi_config(dev_class, dev_uuid)
3542 try:
3543 return config[field]
3544 except KeyError:
3545 raise XendError('Invalid property for device: %s' % field)
3547 def set_dev_property(self, dev_class, dev_uuid, field, value):
3548 self.info['devices'][dev_uuid][1][field] = value
3550 def get_vcpus_util(self):
3551 vcpu_util = {}
3552 xennode = XendNode.instance()
3553 if 'VCPUs_max' in self.info and self.domid != None:
3554 for i in range(0, self.info['VCPUs_max']):
3555 util = xennode.get_vcpu_util(self.domid, i)
3556 vcpu_util[str(i)] = util
3558 return vcpu_util
    def get_consoles(self):
        """UUIDs of this VM's console devices."""
        return self.info.get('console_refs', [])

    def get_vifs(self):
        """UUIDs of this VM's virtual network interfaces."""
        return self.info.get('vif_refs', [])

    def get_vbds(self):
        """UUIDs of this VM's virtual block devices."""
        return self.info.get('vbd_refs', [])

    def get_vtpms(self):
        """UUIDs of this VM's virtual TPMs."""
        return self.info.get('vtpm_refs', [])

    def get_dpcis(self):
        """UUIDs of DPCI (PCI pass-through) instances bound to this VM."""
        return XendDPCI.get_by_VM(self.info.get('uuid'))

    def get_dscsis(self):
        """UUIDs of DSCSI (SCSI pass-through) instances bound to this VM."""
        return XendDSCSI.get_by_VM(self.info.get('uuid'))
    def create_vbd(self, xenapi_vbd, vdi_image_path):
        """Create a VBD using a VDI from XendStorageRepository.

        @param xenapi_vbd: vbd struct from the Xen API
        @param vdi_image_path: VDI UUID
        @rtype: string
        @return: uuid of the device
        """
        xenapi_vbd['image'] = vdi_image_path
        # 'tap:...' images are driven by blktap; everything else by blkback.
        if vdi_image_path.startswith('tap'):
            dev_uuid = self.info.device_add('tap', cfg_xenapi = xenapi_vbd)
        else:
            dev_uuid = self.info.device_add('vbd', cfg_xenapi = xenapi_vbd)

        if not dev_uuid:
            raise XendError('Failed to create device')

        # For a live domain, also instantiate the device now.
        if self._stateGet() in (XEN_API_VM_POWER_STATE_RUNNING,
                                XEN_API_VM_POWER_STATE_PAUSED):
            _, config = self.info['devices'][dev_uuid]

            if vdi_image_path.startswith('tap'):
                dev_control = self.getDeviceController('tap')
            else:
                dev_control = self.getDeviceController('vbd')

            try:
                devid = dev_control.createDevice(config)
                dev_control.waitForDevice(devid)
                self.info.device_update(dev_uuid,
                                        cfg_xenapi = {'devid': devid})
            except Exception, exn:
                log.exception(exn)
                # Roll the half-created device back out of the config.
                del self.info['devices'][dev_uuid]
                self.info['vbd_refs'].remove(dev_uuid)
                raise

        return dev_uuid
    def create_phantom_vbd_with_vdi(self, xenapi_vbd, vdi_image_path):
        """Create a VBD using a VDI from XendStorageRepository.

        @param xenapi_vbd: vbd struct from the Xen API
        @param vdi_image_path: VDI UUID
        @rtype: string
        @return: uuid of the device
        """
        xenapi_vbd['image'] = vdi_image_path
        dev_uuid = self.info.phantom_device_add('tap', cfg_xenapi = xenapi_vbd)
        if not dev_uuid:
            raise XendError('Failed to create device')

        if self._stateGet() == XEN_API_VM_POWER_STATE_RUNNING:
            _, config = self.info['devices'][dev_uuid]
            config['devid'] = self.getDeviceController('tap').createDevice(config)

        # NOTE(review): despite the docstring this returns the devid, and
        # 'config' is unbound (NameError) when the domain is not RUNNING --
        # presumably only ever called on running domains; confirm.
        return config['devid']
    def create_vif(self, xenapi_vif):
        """Create VIF device from the passed struct in Xen API format.

        @param xenapi_vif: Xen API VIF Struct.
        @rtype: string
        @return: UUID
        """
        dev_uuid = self.info.device_add('vif', cfg_xenapi = xenapi_vif)
        if not dev_uuid:
            raise XendError('Failed to create device')

        # For a live domain, also instantiate the device now.
        if self._stateGet() in (XEN_API_VM_POWER_STATE_RUNNING,
                                XEN_API_VM_POWER_STATE_PAUSED):

            _, config = self.info['devices'][dev_uuid]
            dev_control = self.getDeviceController('vif')

            try:
                devid = dev_control.createDevice(config)
                dev_control.waitForDevice(devid)
                self.info.device_update(dev_uuid,
                                        cfg_xenapi = {'devid': devid})
            except Exception, exn:
                log.exception(exn)
                # Roll the half-created device back out of the config.
                del self.info['devices'][dev_uuid]
                self.info['vif_refs'].remove(dev_uuid)
                raise

        return dev_uuid
    def create_vtpm(self, xenapi_vtpm):
        """Create a VTPM device from the passed struct in Xen API format.

        @param xenapi_vtpm: Xen API vTPM struct.
        @return: uuid of the device
        @rtype: string
        @raise VmError: domain not halted, or already has a vTPM.
        """

        if self._stateGet() not in (DOM_STATE_HALTED,):
            raise VmError("Can only add vTPM to a halted domain.")
        # At most one vTPM per domain.
        if self.get_vtpms() != []:
            raise VmError('Domain already has a vTPM.')
        dev_uuid = self.info.device_add('vtpm', cfg_xenapi = xenapi_vtpm)
        if not dev_uuid:
            raise XendError('Failed to create device')

        return dev_uuid
    def create_console(self, xenapi_console):
        """ Create a console device from a Xen API struct.

        @param xenapi_console: Xen API console struct.
        @return: uuid of device
        @rtype: string
        @raise VmError: domain is not halted.
        """
        if self._stateGet() not in (DOM_STATE_HALTED,):
            raise VmError("Can only add console to a halted domain.")

        dev_uuid = self.info.device_add('console', cfg_xenapi = xenapi_console)
        if not dev_uuid:
            raise XendError('Failed to create device')

        return dev_uuid
    def set_console_other_config(self, console_uuid, other_config):
        """Replace the other_config map of the given console device."""
        self.info.console_update(console_uuid, 'other_config', other_config)
    def create_dpci(self, xenapi_pci):
        """Create pci device from the passed struct in Xen API format.

        @param xenapi_pci: DPCI struct from Xen API
        @rtype: bool
        #@rtype: string
        @return: True if successfully created device
        #@return: UUID
        """

        dpci_uuid = uuid.createString()

        # Flatten the options dict into [key, value] pairs for SXP.
        dpci_opts = []
        opts_dict = xenapi_pci.get('options')
        for k in opts_dict.keys():
            dpci_opts.append([k, opts_dict[k]])
        opts_sxp = pci_opts_list_to_sxp(dpci_opts)

        # Convert xenapi to sxp
        ppci = XendAPIStore.get(xenapi_pci.get('PPCI'), 'PPCI')

        dev_sxp = ['dev',
                   ['domain', '0x%02x' % ppci.get_domain()],
                   ['bus', '0x%02x' % ppci.get_bus()],
                   ['slot', '0x%02x' % ppci.get_slot()],
                   ['func', '0x%1x' % ppci.get_func()],
                   ['vslot', '0x%02x' % xenapi_pci.get('hotplug_slot')],
                   ['uuid', dpci_uuid]]
        dev_sxp = sxp.merge(dev_sxp, opts_sxp)

        target_pci_sxp = ['pci', dev_sxp, ['state', 'Initialising'] ]

        if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:

            # Not running: merge into the stored config only.
            old_pci_sxp = self._getDeviceInfo_pci(0)

            if old_pci_sxp is None:
                # First PCI device for this domain.
                dev_uuid = self.info.device_add('pci', cfg_sxp = target_pci_sxp)
                if not dev_uuid:
                    raise XendError('Failed to create device')

            else:
                # Append to the existing pci device group.
                new_pci_sxp = ['pci']
                for existing_dev in sxp.children(old_pci_sxp, 'dev'):
                    new_pci_sxp.append(existing_dev)
                new_pci_sxp.append(sxp.child0(target_pci_sxp, 'dev'))

                dev_uuid = sxp.child_value(old_pci_sxp, 'uuid')
                self.info.device_update(dev_uuid, new_pci_sxp)

            xen.xend.XendDomain.instance().managed_config_save(self)

        else:
            # Running: hot-plug the device now.
            try:
                self.device_configure(target_pci_sxp)

            except Exception, exn:
                raise XendError('Failed to create device')

        return dpci_uuid
    def create_dscsi(self, xenapi_dscsi):
        """Create scsi device from the passed struct in Xen API format.

        @param xenapi_dscsi: DSCSI struct from Xen API
        @rtype: string
        @return: UUID
        """

        dscsi_uuid = uuid.createString()

        # Convert xenapi to sxp
        pscsi = XendAPIStore.get(xenapi_dscsi.get('PSCSI'), 'PSCSI')
        # The virtual host number is the first component of virtual_HCTL.
        devid = int(xenapi_dscsi.get('virtual_HCTL').split(':')[0])
        target_vscsi_sxp = \
            ['vscsi',
                ['dev',
                    ['devid', devid],
                    ['p-devname', pscsi.get_dev_name()],
                    ['p-dev', pscsi.get_physical_HCTL()],
                    ['v-dev', xenapi_dscsi.get('virtual_HCTL')],
                    ['state', xenbusState['Initialising']],
                    ['uuid', dscsi_uuid]
                ],
                ['feature-host', 0]
            ]

        if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:

            # Not running: merge into the stored config only.
            cur_vscsi_sxp = self._getDeviceInfo_vscsi(devid)

            if cur_vscsi_sxp is None:
                dev_uuid = self.info.device_add('vscsi', cfg_sxp = target_vscsi_sxp)
                if not dev_uuid:
                    raise XendError('Failed to create device')

            else:
                # Append to the existing vscsi device group.
                new_vscsi_sxp = ['vscsi', ['feature-host', 0]]
                for existing_dev in sxp.children(cur_vscsi_sxp, 'dev'):
                    new_vscsi_sxp.append(existing_dev)
                new_vscsi_sxp.append(sxp.child0(target_vscsi_sxp, 'dev'))

                dev_uuid = sxp.child_value(cur_vscsi_sxp, 'uuid')
                self.info.device_update(dev_uuid, new_vscsi_sxp)

            xen.xend.XendDomain.instance().managed_config_save(self)

        else:
            # Running: hot-plug the device now.
            try:
                self.device_configure(target_vscsi_sxp)

            except Exception, exn:
                raise XendError('Failed to create device')

        return dscsi_uuid
    def destroy_device_by_uuid(self, dev_type, dev_uuid):
        """Tear down the device identified by dev_uuid and remove it from
        this VM's configuration.

        @param dev_type: device class key, e.g. 'vbd', 'vif', 'vtpm'
        @param dev_uuid: UUID of the device in self.info['devices']
        @raise XendError: unknown device, or a live device without a devid
        """
        if dev_uuid not in self.info['devices']:
            raise XendError('Device does not exist')

        try:
            # Only a live domain has an actual device to destroy.
            if self._stateGet() in (XEN_API_VM_POWER_STATE_RUNNING,
                                    XEN_API_VM_POWER_STATE_PAUSED):
                _, config = self.info['devices'][dev_uuid]
                devid = config.get('devid')
                if devid != None:
                    self.getDeviceController(dev_type).destroyDevice(devid, force = False)
                else:
                    raise XendError('Unable to get devid for device: %s:%s' %
                                    (dev_type, dev_uuid))
        finally:
            # NOTE(review): the config entry is dropped even when
            # destroyDevice raised, so a failed teardown still forgets the
            # device -- presumably deliberate; confirm.
            del self.info['devices'][dev_uuid]
            self.info['%s_refs' % dev_type].remove(dev_uuid)
    def destroy_vbd(self, dev_uuid):
        """Destroy the VBD with the given UUID (see destroy_device_by_uuid)."""
        self.destroy_device_by_uuid('vbd', dev_uuid)

    def destroy_vif(self, dev_uuid):
        """Destroy the VIF with the given UUID (see destroy_device_by_uuid)."""
        self.destroy_device_by_uuid('vif', dev_uuid)

    def destroy_vtpm(self, dev_uuid):
        """Destroy the vTPM with the given UUID (see destroy_device_by_uuid)."""
        self.destroy_device_by_uuid('vtpm', dev_uuid)
    def destroy_dpci(self, dev_uuid):
        """Detach the PCI pass-through device backing the given DPCI.

        @param dev_uuid: DPCI instance UUID (rebound below to the pci
                         device-group UUID).
        @raise XendError: device not found or detach failed.
        """

        dpci = XendAPIStore.get(dev_uuid, 'DPCI')
        ppci = XendAPIStore.get(dpci.get_PPCI(), 'PPCI')

        old_pci_sxp = self._getDeviceInfo_pci(0)
        dev_uuid = sxp.child_value(old_pci_sxp, 'uuid')
        target_dev = None
        new_pci_sxp = ['pci']
        # Partition the group: the matching BDF is the target, the rest stay.
        for dev in sxp.children(old_pci_sxp, 'dev'):
            pci_dev = {}
            pci_dev['domain'] = sxp.child_value(dev, 'domain')
            pci_dev['bus'] = sxp.child_value(dev, 'bus')
            pci_dev['slot'] = sxp.child_value(dev, 'slot')
            pci_dev['func'] = sxp.child_value(dev, 'func')
            if ppci.get_name() == pci_dict_to_bdf_str(pci_dev):
                target_dev = dev
            else:
                new_pci_sxp.append(dev)

        if target_dev is None:
            raise XendError('Failed to destroy device')

        target_pci_sxp = ['pci', target_dev, ['state', 'Closing']]

        if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:

            # Not running: just rewrite the stored config.
            self.info.device_update(dev_uuid, new_pci_sxp)
            if len(sxp.children(new_pci_sxp, 'dev')) == 0:
                # Last PCI device gone -- drop the whole group entry.
                del self.info['devices'][dev_uuid]
            xen.xend.XendDomain.instance().managed_config_save(self)

        else:
            # Running: request a live detach via 'Closing'.
            try:
                self.device_configure(target_pci_sxp)

            except Exception, exn:
                raise XendError('Failed to destroy device')
    def destroy_dscsi(self, dev_uuid):
        """Detach the SCSI pass-through device backing the given DSCSI.

        @param dev_uuid: DSCSI instance UUID (rebound below to the vscsi
                         device-group UUID).
        @raise XendError: device not found or detach failed.
        """
        dscsi = XendAPIStore.get(dev_uuid, 'DSCSI')
        devid = dscsi.get_virtual_host()
        vHCTL = dscsi.get_virtual_HCTL()
        cur_vscsi_sxp = self._getDeviceInfo_vscsi(devid)
        dev_uuid = sxp.child_value(cur_vscsi_sxp, 'uuid')

        target_dev = None
        new_vscsi_sxp = ['vscsi', ['feature-host', 0]]
        # Partition the group: the matching v-dev is the target, rest stay.
        for dev in sxp.children(cur_vscsi_sxp, 'dev'):
            if vHCTL == sxp.child_value(dev, 'v-dev'):
                target_dev = dev
            else:
                new_vscsi_sxp.append(dev)

        if target_dev is None:
            raise XendError('Failed to destroy device')

        target_dev.append(['state', xenbusState['Closing']])
        target_vscsi_sxp = ['vscsi', target_dev, ['feature-host', 0]]

        if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:

            # Not running: just rewrite the stored config.
            self.info.device_update(dev_uuid, new_vscsi_sxp)
            if len(sxp.children(new_vscsi_sxp, 'dev')) == 0:
                # Last SCSI device gone -- drop the whole group entry.
                del self.info['devices'][dev_uuid]
            xen.xend.XendDomain.instance().managed_config_save(self)

        else:
            # Running: request a live detach via 'Closing'.
            try:
                self.device_configure(target_vscsi_sxp)

            except Exception, exn:
                raise XendError('Failed to destroy device')
    def destroy_xapi_instances(self):
        """Destroy Xen-API instances stored in XendAPIStore.
        """
        # Xen-API classes based on XendBase have their instances stored
        # in XendAPIStore. Cleanup these instances here, if they are supposed
        # to be destroyed when the parent domain is dead.

        # Most of the virtual devices (vif, vbd, vfb, etc) are not based on
        # XendBase and there's no need to remove them from XendAPIStore.

        from xen.xend import XendDomain
        if XendDomain.instance().is_valid_vm(self.info.get('uuid')):
            # domain still exists.
            return

        # Destroy the VMMetrics instance.
        if XendAPIStore.get(self.metrics.get_uuid(), self.metrics.getClass()) \
                is not None:
            self.metrics.destroy()

        # Destroy DPCI instances.
        for dpci_uuid in XendDPCI.get_by_VM(self.info.get('uuid')):
            XendAPIStore.deregister(dpci_uuid, "DPCI")

        # Destroy DSCSI instances.
        for dscsi_uuid in XendDSCSI.get_by_VM(self.info.get('uuid')):
            XendAPIStore.deregister(dscsi_uuid, "DSCSI")
3947 def has_device(self, dev_class, dev_uuid):
3948 return (dev_uuid in self.info['%s_refs' % dev_class.lower()])
    def __str__(self):
        # Human-readable summary for logs; "memory" is memory_dynamic_max.
        return '<domain id=%s name=%s memory=%s state=%s>' % \
               (str(self.domid), self.info['name_label'],
                str(self.info['memory_dynamic_max']), DOM_STATES[self._stateGet()])

    __repr__ = __str__