debuggers.hg

view tools/python/xen/xend/XendDomainInfo.py @ 19679:caa8c0e2d6f6

xend: Fix xm pci-detach for inactive devices

In the case where a device is attached to an inactive domain
and then removed before the domain is activated, it will not have
a vslot assigned, but it should still be valid to remove it.

I don't think that there are any other cases where vslot can
be invalid.
Signed-off-by: Simon Horman <horms@verge.net.au>
author Keir Fraser <keir.fraser@citrix.com>
date Tue May 26 10:03:09 2009 +0100 (2009-05-26)
parents 23f9857f642f
children dc7de36c94e3
line source
1 #===========================================================================
2 # This library is free software; you can redistribute it and/or
3 # modify it under the terms of version 2.1 of the GNU Lesser General Public
4 # License as published by the Free Software Foundation.
5 #
6 # This library is distributed in the hope that it will be useful,
7 # but WITHOUT ANY WARRANTY; without even the implied warranty of
8 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
9 # Lesser General Public License for more details.
10 #
11 # You should have received a copy of the GNU Lesser General Public
12 # License along with this library; if not, write to the Free Software
13 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
14 #============================================================================
15 # Copyright (C) 2004, 2005 Mike Wray <mike.wray@hp.com>
16 # Copyright (C) 2005-2007 XenSource Ltd
17 #============================================================================
19 """Representation of a single domain.
20 Includes support for domain construction, using
21 open-ended configurations.
23 Author: Mike Wray <mike.wray@hp.com>
25 """
27 import logging
28 import time
29 import threading
30 import re
31 import copy
32 import os
33 import traceback
34 from types import StringTypes
36 import xen.lowlevel.xc
37 from xen.util import asserts, auxbin
38 from xen.util.blkif import blkdev_uname_to_file, blkdev_uname_to_taptype
39 import xen.util.xsm.xsm as security
40 from xen.util import xsconstants
42 from xen.xend import balloon, sxp, uuid, image, arch
43 from xen.xend import XendOptions, XendNode, XendConfig
45 from xen.xend.XendConfig import scrub_password
46 from xen.xend.XendBootloader import bootloader, bootloader_tidy
47 from xen.xend.XendError import XendError, VmError
48 from xen.xend.XendDevices import XendDevices
49 from xen.xend.XendTask import XendTask
50 from xen.xend.xenstore.xstransact import xstransact, complete
51 from xen.xend.xenstore.xsutil import GetDomainPath, IntroduceDomain, SetTarget, ResumeDomain
52 from xen.xend.xenstore.xswatch import xswatch
53 from xen.xend.XendConstants import *
54 from xen.xend.XendAPIConstants import *
55 from xen.xend.server.DevConstants import xenbusState
57 from xen.xend.XendVMMetrics import XendVMMetrics
59 from xen.xend import XendAPIStore
60 from xen.xend.XendPPCI import XendPPCI
61 from xen.xend.XendDPCI import XendDPCI
62 from xen.xend.XendPSCSI import XendPSCSI
63 from xen.xend.XendDSCSI import XendDSCSI
# Timeout for migration operations (presumably seconds; used by the
# migration code outside this view -- TODO confirm).
MIGRATE_TIMEOUT = 30.0
# Loopback block device under which guest images are exposed to dom0
# while the bootloader runs.
BOOTLOADER_LOOPBACK_DEVICE = '/dev/xvdp'

# Module-wide handle on the hypervisor control interface.
xc = xen.lowlevel.xc.xc()
# Global xend configuration options singleton.
xoptions = XendOptions.instance()

log = logging.getLogger("xend.XendDomainInfo")
#log.setLevel(logging.TRACE)
def create(config):
    """Create and start a VM from the supplied SXP configuration.

    @param config: A configuration object involving lists of tuples.
    @type config: list of lists, eg ['vm', ['image', 'xen.gz']]

    @rtype: XendDomainInfo
    @return: An up and running XendDomainInfo instance
    @raise VmError: Invalid configuration or failure to start.
    """
    from xen.xend import XendDomain

    cfg = XendConfig.XendConfig(sxp_obj = config)

    # Refuse to create a second running domain with the same name or UUID.
    xd = XendDomain.instance()
    existing = xd.domain_lookup_nr(cfg["name_label"])
    if existing is None or existing.domid is None:
        existing = xd.domain_lookup_nr(cfg["uuid"])
    if existing is not None and existing.domid is not None:
        raise VmError("Domain '%s' already exists with ID '%d'" % (cfg["name_label"], existing.domid))

    log.debug("XendDomainInfo.create(%s)", scrub_password(config))
    dominfo = XendDomainInfo(cfg)
    try:
        dominfo.start()
    except:
        # Tear down whatever was partially built before re-raising.
        log.exception('Domain construction failed')
        dominfo.destroy()
        raise
    return dominfo
def create_from_dict(config_dict):
    """Create and start a VM from the supplied configuration dictionary.

    @param config_dict: A configuration dictionary.

    @rtype: XendDomainInfo
    @return: An up and running XendDomainInfo instance
    @raise VmError: Invalid configuration or failure to start.
    """

    log.debug("XendDomainInfo.create_from_dict(%s)",
              scrub_password(config_dict))

    cfg = XendConfig.XendConfig(xapi = config_dict)
    dominfo = XendDomainInfo(cfg)
    try:
        dominfo.start()
    except:
        # Undo any partial construction before re-raising.
        log.exception('Domain construction failed')
        dominfo.destroy()
        raise
    return dominfo
def recreate(info, priv):
    """Create the VM object for an existing domain. The domain must not
    be dying, as the paths in the store should already have been removed,
    and asking us to recreate them causes problems.

    @param info: Parsed configuration
    @type info: Dictionary
    @param priv: Is a privileged domain (Dom 0)
    @type priv: bool

    @rtype: XendDomainInfo
    @return: A up and running XendDomainInfo instance
    @raise VmError: Invalid configuration.
    @raise XendError: Errors with configuration.
    """

    log.debug("XendDomainInfo.recreate(%s)", scrub_password(info))

    assert not info['dying']

    # Rebuild a XendConfig from the running domain's info and force the
    # flags that only make sense for an already-existing domain.
    xeninfo = XendConfig.XendConfig(dominfo = info)
    xeninfo['is_control_domain'] = priv
    xeninfo['is_a_template'] = False
    xeninfo['auto_power_on'] = False
    domid = xeninfo['domid']
    uuid1 = uuid.fromString(xeninfo['uuid'])
    needs_reinitialising = False

    dompath = GetDomainPath(domid)
    if not dompath:
        raise XendError('No domain path in store for existing '
                        'domain %d' % domid)

    log.info("Recreating domain %d, UUID %s. at %s" %
             (domid, xeninfo['uuid'], dompath))

    # need to verify the path and uuid if not Domain-0
    # if the required uuid and vm aren't set, then that means
    # we need to recreate the dom with our own values
    #
    # NOTE: this is probably not desirable, really we should just
    # abort or ignore, but there may be cases where xenstore's
    # entry disappears (eg. xenstore-rm /)
    #
    try:
        vmpath = xstransact.Read(dompath, "vm")
        if not vmpath:
            if not priv:
                log.warn('/local/domain/%d/vm is missing. recreate is '
                         'confused, trying our best to recover' % domid)
            needs_reinitialising = True
            raise XendError('reinit')

        uuid2_str = xstransact.Read(vmpath, "uuid")
        if not uuid2_str:
            log.warn('%s/uuid/ is missing. recreate is confused, '
                     'trying our best to recover' % vmpath)
            needs_reinitialising = True
            raise XendError('reinit')

        uuid2 = uuid.fromString(uuid2_str)
        if uuid1 != uuid2:
            log.warn('UUID in /vm does not match the UUID in /dom/%d.'
                     'Trying out best to recover' % domid)
            needs_reinitialising = True
    except XendError:
        pass # our best shot at 'goto' in python :)

    # Build the domain object around the (possibly inconsistent) store
    # state; augment=True pulls in additional details from xenstore.
    vm = XendDomainInfo(xeninfo, domid, dompath, augment = True, priv = priv,
                        vmpath = vmpath)

    if needs_reinitialising:
        # Store entries were missing or inconsistent: rewrite them all.
        vm._recreateDom()
        vm._removeVm()
        vm._storeVmDetails()
        vm._storeDomDetails()

    vm.image = image.create(vm, vm.info)
    vm.image.recreate()

    vm._registerWatches()
    vm.refreshShutdown(xeninfo)

    # register the domain in the list
    from xen.xend import XendDomain
    XendDomain.instance().add_domain(vm)

    return vm
def restore(config):
    """Create a domain and a VM object to do a restore.

    @param config: Domain SXP configuration
    @type config: list of lists. (see C{create})

    @rtype: XendDomainInfo
    @return: A up and running XendDomainInfo instance
    @raise VmError: Invalid configuration or failure to start.
    @raise XendError: Errors with configuration.
    """

    log.debug("XendDomainInfo.restore(%s)", scrub_password(config))

    cfg = XendConfig.XendConfig(sxp_obj = config)
    dominfo = XendDomainInfo(cfg, resume = True)
    try:
        dominfo.resume()
    except:
        # Partial restore: clean up before propagating the error.
        dominfo.destroy()
        raise
    return dominfo
def createDormant(domconfig):
    """Create a dormant/inactive XendDomainInfo without creating a VM.
    Used to instantiate persistent domains that are not yet started.

    @param domconfig: Parsed configuration
    @type domconfig: XendConfig object

    @rtype: XendDomainInfo
    @return: A dormant XendDomainInfo instance
    @raise XendError: Errors with configuration.
    """

    log.debug("XendDomainInfo.createDormant(%s)", scrub_password(domconfig))

    # domid does not make sense for non-running domains.
    domconfig.pop('domid', None)
    return XendDomainInfo(domconfig)
def domain_by_name(name):
    """Look a domain up by name.

    @param name: Name of the domain
    @type name: string
    @return: XendDomainInfo or None
    """
    from xen.xend import XendDomain
    xd = XendDomain.instance()
    return xd.domain_lookup_by_name_nr(name)
def shutdown_reason(code):
    """Get a shutdown reason from a code.

    @param code: shutdown code
    @type code: int
    @return: shutdown reason
    @rtype: string
    """
    try:
        return DOMAIN_SHUTDOWN_REASONS[code]
    except KeyError:
        # Unknown code: report a placeholder rather than failing.
        return "?"
277 def dom_get(dom):
278 """Get info from xen for an existing domain.
280 @param dom: domain id
281 @type dom: int
282 @return: info or None
283 @rtype: dictionary
284 """
285 try:
286 domlist = xc.domain_getinfo(dom, 1)
287 if domlist and dom == domlist[0]['domid']:
288 return domlist[0]
289 except Exception, err:
290 # ignore missing domain
291 log.trace("domain_getinfo(%d) failed, ignoring: %s", dom, str(err))
292 return None
def get_assigned_pci_devices(domid):
    """Return the xenstore 'dev-N' strings for every PCI device assigned
    to the given domain (empty list if none are recorded)."""
    path = '/local/domain/0/backend/pci/%u/0/' % domid
    count = xstransact.Read(path + 'num_devs')
    if count is None or count == "":
        return []
    return [xstransact.Read(path + 'dev-%i' % i) for i in range(int(count))]
306 def do_FLR(domid):
307 from xen.xend.server.pciif import parse_pci_name, PciDevice
308 dev_str_list = get_assigned_pci_devices(domid)
310 for dev_str in dev_str_list:
311 (dom, b, d, f) = parse_pci_name(dev_str)
312 try:
313 dev = PciDevice(dom, b, d, f)
314 except Exception, e:
315 raise VmError("pci: failed to locate device and "+
316 "parse it's resources - "+str(e))
317 dev.do_FLR()
319 class XendDomainInfo:
320 """An object represents a domain.
322 @TODO: try to unify dom and domid, they mean the same thing, but
323 xc refers to it as dom, and everywhere else, including
324 xenstore it is domid. The best way is to change xc's
325 python interface.
327 @ivar info: Parsed configuration
328 @type info: dictionary
329 @ivar domid: Domain ID (if VM has started)
330 @type domid: int or None
331 @ivar vmpath: XenStore path to this VM.
332 @type vmpath: string
333 @ivar dompath: XenStore path to this Domain.
334 @type dompath: string
335 @ivar image: Reference to the VM Image.
336 @type image: xen.xend.image.ImageHandler
337 @ivar store_port: event channel to xenstored
338 @type store_port: int
339 @ivar console_port: event channel to xenconsoled
340 @type console_port: int
341 @ivar store_mfn: xenstored mfn
342 @type store_mfn: int
343 @ivar console_mfn: xenconsoled mfn
344 @type console_mfn: int
345 @ivar notes: OS image notes
346 @type notes: dictionary
347 @ivar vmWatch: reference to a watch on the xenstored vmpath
348 @type vmWatch: xen.xend.xenstore.xswatch
349 @ivar shutdownWatch: reference to watch on the xenstored domain shutdown
350 @type shutdownWatch: xen.xend.xenstore.xswatch
351 @ivar shutdownStartTime: UNIX Time when domain started shutting down.
352 @type shutdownStartTime: float or None
353 @ivar restart_in_progress: Is a domain restart thread running?
354 @type restart_in_progress: bool
355 # @ivar state: Domain state
356 # @type state: enum(DOM_STATE_HALTED, DOM_STATE_RUNNING, ...)
357 @ivar state_updated: lock for self.state
358 @type state_updated: threading.Condition
359 @ivar refresh_shutdown_lock: lock for polling shutdown state
360 @type refresh_shutdown_lock: threading.Condition
361 @ivar _deviceControllers: device controller cache for this domain
362 @type _deviceControllers: dict 'string' to DevControllers
363 """
    def __init__(self, info, domid = None, dompath = None, augment = False,
                 priv = False, resume = False, vmpath = None):
        """Constructor for a domain

        @param info: parsed configuration
        @type info: dictionary
        @keyword domid: Set initial domain id (if any)
        @type domid: int
        @keyword dompath: Set initial dompath (if any)
        @type dompath: string
        @keyword augment: Augment given info with xenstored VM info
        @type augment: bool
        @keyword priv: Is a privileged domain (Dom 0)
        @type priv: bool
        @keyword resume: Is this domain being resumed?
        @type resume: bool
        """

        self.info = info
        # Prefer an explicitly supplied domid; otherwise fall back to the
        # one recorded in the configuration (None for dormant domains).
        if domid == None:
            self.domid = self.info.get('domid')
        else:
            self.domid = domid

        #REMOVE: uuid is now generated in XendConfig
        #if not self._infoIsSet('uuid'):
        #    self.info['uuid'] = uuid.toString(uuid.create())

        # Find a unique /vm/<uuid>/<integer> path if not specified.
        # This avoids conflict between pre-/post-migrate domains when doing
        # localhost relocation.
        self.vmpath = vmpath
        i = 0
        while self.vmpath == None:
            self.vmpath = XS_VMROOT + self.info['uuid']
            if i != 0:
                self.vmpath = self.vmpath + '-' + str(i)
            try:
                # A readable uuid under the candidate path means it is
                # already occupied -- retry with the next '-<i>' suffix.
                if self._readVm("uuid"):
                    self.vmpath = None
                    i = i + 1
            except:
                # Unreadable path: treat the candidate as free.
                pass

        self.dompath = dompath

        # Runtime handles; all are filled in when the domain is built.
        self.image = None
        self.store_port = None
        self.store_mfn = None
        self.console_port = None
        self.console_mfn = None

        self.native_protocol = None

        self.vmWatch = None
        self.shutdownWatch = None
        self.shutdownStartTime = None
        self._resume = resume
        self.restart_in_progress = False

        # Condition variables guarding, respectively, self.state and the
        # shutdown-polling logic.
        self.state_updated = threading.Condition()
        self.refresh_shutdown_lock = threading.Condition()
        self._stateSet(DOM_STATE_HALTED)

        # Cache of device controllers, keyed by device class name.
        self._deviceControllers = {}

        # Legacy state flags, all cleared initially.
        for state in DOM_STATES_OLD:
            self.info[state] = 0

        if augment:
            # Merge in any details already recorded in xenstore.
            self._augmentInfo(priv)

        self._checkName(self.info['name_label'])

        self.metrics = XendVMMetrics(uuid.createString(), self)
442 #
443 # Public functions available through XMLRPC
444 #
447 def start(self, is_managed = False):
448 """Attempts to start the VM by do the appropriate
449 initialisation if it not started.
450 """
451 from xen.xend import XendDomain
453 if self._stateGet() in (XEN_API_VM_POWER_STATE_HALTED, XEN_API_VM_POWER_STATE_SUSPENDED, XEN_API_VM_POWER_STATE_CRASHED):
454 try:
455 XendTask.log_progress(0, 30, self._constructDomain)
456 XendTask.log_progress(31, 60, self._initDomain)
458 XendTask.log_progress(61, 70, self._storeVmDetails)
459 XendTask.log_progress(71, 80, self._storeDomDetails)
460 XendTask.log_progress(81, 90, self._registerWatches)
461 XendTask.log_progress(91, 100, self.refreshShutdown)
463 xendomains = XendDomain.instance()
464 xennode = XendNode.instance()
466 # save running configuration if XendDomains believe domain is
467 # persistent
468 if is_managed:
469 xendomains.managed_config_save(self)
471 if xennode.xenschedinfo() == 'credit':
472 xendomains.domain_sched_credit_set(self.getDomid(),
473 self.getWeight(),
474 self.getCap())
475 except:
476 log.exception('VM start failed')
477 self.destroy()
478 raise
479 else:
480 raise XendError('VM already running')
482 def resume(self):
483 """Resumes a domain that has come back from suspension."""
484 state = self._stateGet()
485 if state in (DOM_STATE_SUSPENDED, DOM_STATE_HALTED):
486 try:
487 self._constructDomain()
489 try:
490 self._setCPUAffinity()
491 except:
492 # usually a CPU we want to set affinity to does not exist
493 # we just ignore it so that the domain can still be restored
494 log.warn("Cannot restore CPU affinity")
496 self._storeVmDetails()
497 self._createChannels()
498 self._createDevices()
499 self._storeDomDetails()
500 self._endRestore()
501 except:
502 log.exception('VM resume failed')
503 self.destroy()
504 raise
505 else:
506 raise XendError('VM is not suspended; it is %s'
507 % XEN_API_VM_POWER_STATE[state])
509 def shutdown(self, reason):
510 """Shutdown a domain by signalling this via xenstored."""
511 log.debug('XendDomainInfo.shutdown(%s)', reason)
512 if self._stateGet() in (DOM_STATE_SHUTDOWN, DOM_STATE_HALTED,):
513 raise XendError('Domain cannot be shutdown')
515 if self.domid == 0:
516 raise XendError('Domain 0 cannot be shutdown')
518 if reason not in DOMAIN_SHUTDOWN_REASONS.values():
519 raise XendError('Invalid reason: %s' % reason)
520 self.storeDom("control/shutdown", reason)
522 # HVM domain shuts itself down only if it has PV drivers
523 if self.info.is_hvm():
524 hvm_pvdrv = xc.hvm_get_param(self.domid, HVM_PARAM_CALLBACK_IRQ)
525 hvm_s_state = xc.hvm_get_param(self.domid, HVM_PARAM_ACPI_S_STATE)
526 if not hvm_pvdrv or hvm_s_state != 0:
527 code = REVERSE_DOMAIN_SHUTDOWN_REASONS[reason]
528 log.info("HVM save:remote shutdown dom %d!", self.domid)
529 xc.domain_shutdown(self.domid, code)
531 def pause(self):
532 """Pause domain
534 @raise XendError: Failed pausing a domain
535 """
536 try:
537 xc.domain_pause(self.domid)
538 self._stateSet(DOM_STATE_PAUSED)
539 except Exception, ex:
540 log.exception(ex)
541 raise XendError("Domain unable to be paused: %s" % str(ex))
543 def unpause(self):
544 """Unpause domain
546 @raise XendError: Failed unpausing a domain
547 """
548 try:
549 xc.domain_unpause(self.domid)
550 self._stateSet(DOM_STATE_RUNNING)
551 except Exception, ex:
552 log.exception(ex)
553 raise XendError("Domain unable to be unpaused: %s" % str(ex))
555 def send_sysrq(self, key):
556 """ Send a Sysrq equivalent key via xenstored."""
557 if self._stateGet() not in (DOM_STATE_RUNNING, DOM_STATE_PAUSED):
558 raise XendError("Domain '%s' is not started" % self.info['name_label'])
560 asserts.isCharConvertible(key)
561 self.storeDom("control/sysrq", '%c' % key)
563 def sync_pcidev_info(self):
565 if not self.info.is_hvm():
566 return
568 devid = '0'
569 dev_info = self._getDeviceInfo_pci(devid)
570 if dev_info is None:
571 return
573 # get the virtual slot info from xenstore
574 dev_uuid = sxp.child_value(dev_info, 'uuid')
575 pci_conf = self.info['devices'][dev_uuid][1]
576 pci_devs = pci_conf['devs']
578 count = 0
579 vslots = None
580 while vslots is None and count < 20:
581 vslots = xstransact.Read("/local/domain/0/backend/pci/%u/%s/vslots"
582 % (self.getDomid(), devid))
583 time.sleep(0.1)
584 count += 1
585 if vslots is None:
586 log.error("Device model didn't tell the vslots for PCI device")
587 return
589 #delete last delim
590 if vslots[-1] == ";":
591 vslots = vslots[:-1]
593 slot_list = vslots.split(';')
594 if len(slot_list) != len(pci_devs):
595 log.error("Device model's pci dev num dismatch")
596 return
598 #update the vslot info
599 count = 0;
600 for x in pci_devs:
601 x['vslot'] = slot_list[count]
602 count += 1
    def hvm_pci_device_create(self, dev_config):
        """Hot-plug one PCI device into this HVM guest, after validating
        that the device may legally be assigned.

        @param dev_config: parsed PCI device configuration; only the
            first entry of dev_config['devs'] is used
        @return: the virtual slot the device ended up in (read back from
            the device model for running domains, or the requested slot
            for inactive ones)
        @raise VmError: non-HVM guest, slot/device conflict, or the
            device cannot be assigned (VT-d, pciback ownership, BAR
            alignment, co-assignment constraints)
        """
        log.debug("XendDomainInfo.hvm_pci_device_create: %s"
                  % scrub_password(dev_config))

        if not self.info.is_hvm():
            raise VmError("hvm_pci_device_create called on non-HVM guest")

        #all the PCI devs share one conf node
        devid = '0'

        new_dev = dev_config['devs'][0]
        dev_info = self._getDeviceInfo_pci(devid)  #from self.info['devices']

        #check conflict before trigger hotplug event
        if dev_info is not None:
            dev_uuid = sxp.child_value(dev_info, 'uuid')
            pci_conf = self.info['devices'][dev_uuid][1]
            pci_devs = pci_conf['devs']
            for x in pci_devs:
                # Devices that already went through the device model have
                # 'vslot'; otherwise only the requested slot is recorded.
                if x.has_key('vslot'):
                    x_vslot = x['vslot']
                else:
                    x_vslot = x['requested_vslot']
                if (int(x_vslot, 16) == int(new_dev['requested_vslot'], 16) and
                    int(x_vslot, 16) != AUTO_PHP_SLOT):
                    raise VmError("vslot %s already have a device." % (new_dev['requested_vslot']))

                if (int(x['domain'], 16) == int(new_dev['domain'], 16) and
                    int(x['bus'], 16) == int(new_dev['bus'], 16) and
                    int(x['slot'], 16) == int(new_dev['slot'], 16) and
                    int(x['func'], 16) == int(new_dev['func'], 16) ):
                    raise VmError("device is already inserted")

        # Test whether the devices can be assigned with VT-d
        pci_str = "%s, %s, %s, %s" % (new_dev['domain'],
                                      new_dev['bus'],
                                      new_dev['slot'],
                                      new_dev['func'])
        bdf = xc.test_assign_device(0, pci_str)
        if bdf != 0:
            if bdf == -1:
                raise VmError("failed to assign device: maybe the platform"
                              " doesn't support VT-d, or VT-d isn't enabled"
                              " properly?")
            # Non-zero, non -1: the hypervisor returned the conflicting BDF.
            bus = (bdf >> 16) & 0xff
            devfn = (bdf >> 8) & 0xff
            dev = (devfn >> 3) & 0x1f
            func = devfn & 0x7
            raise VmError("fail to assign device(%x:%x.%x): maybe it has"
                          " already been assigned to other domain, or maybe"
                          " it doesn't exist." % (bus, dev, func))

        # Here, we duplicate some checkings (in some cases, we mustn't allow
        # a device to be hot-plugged into an HVM guest) that are also done in
        # pci_device_configure()'s self.device_create(dev_sxp) or
        # dev_control.reconfigureDevice(devid, dev_config).
        # We must make the checkings before sending the command 'pci-ins' to
        # ioemu.

        # Test whether the device is owned by pciback. For instance, we can't
        # hotplug a device being used by Dom0 itself to an HVM guest.
        from xen.xend.server.pciif import PciDevice, parse_pci_name
        domain = int(new_dev['domain'],16)
        bus = int(new_dev['bus'],16)
        dev = int(new_dev['slot'],16)
        func = int(new_dev['func'],16)
        try:
            pci_device = PciDevice(domain, bus, dev, func)
        except Exception, e:
            raise VmError("pci: failed to locate device and "+
                          "parse it's resources - "+str(e))
        if pci_device.driver!='pciback':
            raise VmError(("pci: PCI Backend does not own device "+ \
                           "%s\n"+ \
                           "See the pciback.hide kernel "+ \
                           "command-line parameter or\n"+ \
                           "bind your slot/device to the PCI backend using sysfs" \
                           )%(pci_device.name))

        # Check non-page-aligned MMIO BAR.
        if pci_device.has_non_page_aligned_bar and arch.type != "ia64":
            raise VmError("pci: %s: non-page-aligned MMIO BAR found." % \
                          pci_device.name)

        # Check the co-assignment.
        # To pci-attach a device D to domN, we should ensure each of D's
        # co-assignment devices hasn't been assigned, or has been assigned to
        # domN.
        coassignment_list = pci_device.find_coassigned_devices()
        assigned_pci_device_str_list = self._get_assigned_pci_devices()
        for pci_str in coassignment_list:
            (domain, bus, dev, func) = parse_pci_name(pci_str)
            dev_str = '0x%x,0x%x,0x%x,0x%x' % (domain, bus, dev, func)
            if xc.test_assign_device(0, dev_str) == 0:
                # Co-assigned device is still unassigned -- fine.
                continue
            if not pci_str in assigned_pci_device_str_list:
                raise VmError(("pci: failed to pci-attach %s to domain %s" + \
                               " because one of its co-assignment device %s has been" + \
                               " assigned to other domain." \
                               )% (pci_device.name, self.info['name_label'], pci_str))

        if self.domid is not None:
            # Running domain: ask the device model (ioemu) to insert the
            # device, then read back the slot it chose.
            opts = ''
            if 'opts' in new_dev and len(new_dev['opts']) > 0:
                config_opts = new_dev['opts']
                config_opts = map(lambda (x, y): x+'='+y, config_opts)
                opts = ',' + reduce(lambda x, y: x+','+y, config_opts)

            bdf_str = "%s:%s:%s.%s@%s%s" % (new_dev['domain'],
                                            new_dev['bus'],
                                            new_dev['slot'],
                                            new_dev['func'],
                                            new_dev['requested_vslot'],
                                            opts)
            self.image.signalDeviceModel('pci-ins', 'pci-inserted', bdf_str)

            vslot = xstransact.Read("/local/domain/0/device-model/%i/parameter"
                                    % self.getDomid())
        else:
            # Inactive domain: no device model yet, so keep the request.
            vslot = new_dev['requested_vslot']

        return vslot
    def device_create(self, dev_config):
        """Create a new device.

        @param dev_config: device configuration
        @type dev_config: SXP object (parsed config)
        @return: SXP description of the created device
        @raise VmError: invalid MAC address or device creation failure
        """
        log.debug("XendDomainInfo.device_create: %s" % scrub_password(dev_config))
        dev_type = sxp.name(dev_config)
        # Register the device in our configuration first; it is rolled
        # back below if the actual hotplug fails.
        dev_uuid = self.info.device_add(dev_type, cfg_sxp = dev_config)
        dev_config_dict = self.info['devices'][dev_uuid][1]
        log.debug("XendDomainInfo.device_create: %s" % scrub_password(dev_config_dict))

        if dev_type == 'vif':
            # Validate any user-supplied MAC address before touching the
            # backend.
            for x in dev_config:
                if x != 'vif' and x[0] == 'mac':
                    if not re.match('^([0-9a-f]{2}:){5}[0-9a-f]{2}$', x[1], re.I):
                        log.error("Virtual network interface creation error - invalid MAC Address entered: %s", x[1])
                        raise VmError("Cannot create a new virtual network interface - MAC address is not valid!");

        if self.domid is not None:
            # Running domain: create the backend device and wait for it to
            # come up; on failure undo every registration made above.
            try:
                dev_config_dict['devid'] = devid = \
                    self._createDevice(dev_type, dev_config_dict)
                self._waitForDevice(dev_type, devid)
            except VmError, ex:
                del self.info['devices'][dev_uuid]
                if dev_type == 'pci':
                    for dev in dev_config_dict['devs']:
                        XendAPIStore.deregister(dev['uuid'], 'DPCI')
                elif dev_type == 'vscsi':
                    for dev in dev_config_dict['devs']:
                        XendAPIStore.deregister(dev['uuid'], 'DSCSI')
                elif dev_type == 'tap':
                    # tap devices are tracked under the vbd reference list.
                    self.info['vbd_refs'].remove(dev_uuid)
                else:
                    self.info['%s_refs' % dev_type].remove(dev_uuid)
                raise ex
        else:
            # Inactive domain: only the configuration is updated.
            devid = None

        xen.xend.XendDomain.instance().managed_config_save(self)
        return self.getDeviceController(dev_type).sxpr(devid)
    def pci_device_configure(self, dev_sxp, devid = 0):
        """Configure an existing pci device.

        @param dev_sxp: device configuration
        @type dev_sxp: SXP object (parsed config)
        @param devid: device id
        @type devid: int
        @return: Returns True if successfully updated device
        @rtype: boolean
        """
        log.debug("XendDomainInfo.pci_device_configure: %s"
                  % scrub_password(dev_sxp))

        dev_class = sxp.name(dev_sxp)

        if dev_class != 'pci':
            return False

        pci_state = sxp.child_value(dev_sxp, 'state')
        existing_dev_info = self._getDeviceInfo_pci(devid)

        if existing_dev_info is None and pci_state != 'Initialising':
            raise XendError("Cannot detach when pci platform does not exist")

        pci_dev = sxp.children(dev_sxp, 'dev')[0]
        dev_config = self.info.pci_convert_sxp_to_dict(dev_sxp)
        dev = dev_config['devs'][0]

        # Do HVM specific processing
        if self.info.is_hvm():
            if pci_state == 'Initialising':
                # HVM PCI device attachment
                vslot = self.hvm_pci_device_create(dev_config)
                # Update vslot
                dev['vslot'] = vslot
                for n in sxp.children(pci_dev):
                    if(n[0] == 'vslot'):
                        n[1] = vslot
            else:
                # HVM PCI device detachment
                existing_dev_uuid = sxp.child_value(existing_dev_info, 'uuid')
                existing_pci_conf = self.info['devices'][existing_dev_uuid][1]
                existing_pci_devs = existing_pci_conf['devs']
                # Find the recorded slot of the device being detached by
                # matching its full domain/bus/slot/func address.
                vslot = AUTO_PHP_SLOT_STR
                for x in existing_pci_devs:
                    if ( int(x['domain'], 16) == int(dev['domain'], 16) and
                         int(x['bus'], 16) == int(dev['bus'], 16) and
                         int(x['slot'], 16) == int(dev['slot'], 16) and
                         int(x['func'], 16) == int(dev['func'], 16) ):
                        if x.has_key('vslot'):
                            vslot = x['vslot']
                        else:
                            vslot = x['requested_vslot']
                        break
                if vslot == AUTO_PHP_SLOT_STR:
                    raise VmError("Device %04x:%02x:%02x.%01x is not connected"
                                  % (int(dev['domain'],16), int(dev['bus'],16),
                                     int(dev['slot'],16), int(dev['func'],16)))
                self.hvm_destroyPCIDevice(int(vslot, 16))
                # Update vslot
                dev['vslot'] = vslot
                for n in sxp.children(pci_dev):
                    if(n[0] == 'vslot'):
                        n[1] = vslot

        # If pci platform does not exist, create and exit.
        if existing_dev_info is None:
            self.device_create(dev_sxp)
            return True

        if self.domid is not None:
            # use DevController.reconfigureDevice to change device config
            dev_control = self.getDeviceController(dev_class)
            dev_uuid = dev_control.reconfigureDevice(devid, dev_config)
            if not self.info.is_hvm():
                # in PV case, wait until backend state becomes connected.
                dev_control.waitForDevice_reconfigure(devid)
            num_devs = dev_control.cleanupDevice(devid)

            # update XendConfig with new device info
            if dev_uuid:
                new_dev_sxp = dev_control.configuration(devid)
                self.info.device_update(dev_uuid, new_dev_sxp)

            # If there is no device left, destroy pci and remove config.
            if num_devs == 0:
                if self.info.is_hvm():
                    self.destroyDevice('pci', devid, True)
                    del self.info['devices'][dev_uuid]
                    platform = self.info['platform']
                    orig_dev_num = len(platform['pci'])
                    # TODO: can use this to keep some info to ask high level
                    # management tools to hot insert a new passthrough dev
                    # after migration
                    if orig_dev_num != 0:
                        #platform['pci'] = ["%dDEVs" % orig_dev_num]
                        platform['pci'] = []
                else:
                    self.destroyDevice('pci', devid)
                    del self.info['devices'][dev_uuid]
        else:
            # Inactive domain: rebuild the pci SXP node by hand, dropping
            # the detached device and/or appending the new ones.
            new_dev_sxp = ['pci']
            for cur_dev in sxp.children(existing_dev_info, 'dev'):
                if pci_state == 'Closing':
                    if int(dev['domain'], 16) == int(sxp.child_value(cur_dev, 'domain'), 16) and \
                       int(dev['bus'], 16) == int(sxp.child_value(cur_dev, 'bus'), 16) and \
                       int(dev['slot'], 16) == int(sxp.child_value(cur_dev, 'slot'), 16) and \
                       int(dev['func'], 16) == int(sxp.child_value(cur_dev, 'func'), 16):
                        continue
                new_dev_sxp.append(cur_dev)

            if pci_state == 'Initialising':
                for new_dev in sxp.children(dev_sxp, 'dev'):
                    new_dev_sxp.append(new_dev)

            dev_uuid = sxp.child_value(existing_dev_info, 'uuid')
            self.info.device_update(dev_uuid, new_dev_sxp)

            # If no 'dev' entries remain in new_dev_sxp, remove the config.
            if len(sxp.children(new_dev_sxp, 'dev')) == 0:
                del self.info['devices'][dev_uuid]
                if self.info.is_hvm():
                    platform = self.info['platform']
                    orig_dev_num = len(platform['pci'])
                    # TODO: can use this to keep some info to ask high level
                    # management tools to hot insert a new passthrough dev
                    # after migration
                    if orig_dev_num != 0:
                        #platform['pci'] = ["%dDEVs" % orig_dev_num]
                        platform['pci'] = []

        xen.xend.XendDomain.instance().managed_config_save(self)

        return True
def vscsi_device_configure(self, dev_sxp):
    """Configure an existing vscsi device (attach or detach SCSI
    devices on the shared per-domain 'vscsi' configuration node).

    @param dev_sxp: parsed 'vscsi' device configuration
    @type dev_sxp: SXP object
    @return: True on success, False if dev_sxp is not a vscsi config
    @raise XendError: on duplicate/conflicting definitions or when
        detaching a device that does not exist
    """
    # True if any 'dev' child of dev_info matches one of the given
    # physical (p-dev) or virtual (v-dev) device names.
    def _is_vscsi_defined(dev_info, p_devs = None, v_devs = None):
        if not dev_info:
            return False
        for dev in sxp.children(dev_info, 'dev'):
            if p_devs is not None:
                if sxp.child_value(dev, 'p-dev') in p_devs:
                    return True
            if v_devs is not None:
                if sxp.child_value(dev, 'v-dev') in v_devs:
                    return True
        return False

    # Normalise a backend reference (name or id) to a domid string,
    # falling back to the value itself if no such domain is known.
    def _vscsi_be(be):
        be_xdi = xen.xend.XendDomain.instance().domain_lookup_nr(be)
        if be_xdi is not None:
            be_domid = be_xdi.getDomid()
            if be_domid is not None:
                return str(be_domid)
        return str(be)

    dev_class = sxp.name(dev_sxp)
    if dev_class != 'vscsi':
        return False

    dev_config = self.info.vscsi_convert_sxp_to_dict(dev_sxp)
    devs = dev_config['devs']
    v_devs = [d['v-dev'] for d in devs]
    # All requested devs share one state/devid (taken from the first).
    state = devs[0]['state']
    req_devid = int(devs[0]['devid'])
    cur_dev_sxp = self._getDeviceInfo_vscsi(req_devid)

    if state == xenbusState['Initialising']:
        # new create
        # If request devid does not exist, create and exit.
        p_devs = [d['p-dev'] for d in devs]
        # Reject if any requested physical device is already defined
        # on any vscsi node of this domain.
        for dev_type, dev_info in self.info.all_devices_sxpr():
            if dev_type != 'vscsi':
                continue
            if _is_vscsi_defined(dev_info, p_devs = p_devs):
                raise XendError('The physical device "%s" is already defined' % \
                                p_devs[0])
        if cur_dev_sxp is None:
            self.device_create(dev_sxp)
            return True

        if _is_vscsi_defined(cur_dev_sxp, v_devs = v_devs):
            raise XendError('The virtual device "%s" is already defined' % \
                            v_devs[0])

        # A vscsi node's feature-host mode is fixed at creation time;
        # mixed modes on one node are not allowed.
        if int(dev_config['feature-host']) != \
           int(sxp.child_value(cur_dev_sxp, 'feature-host')):
            raise XendError('The physical device "%s" cannot define '
                            'because mode is different' % devs[0]['p-dev'])

        # Likewise the backend domain must match the existing node's.
        new_be = dev_config.get('backend', None)
        if new_be is not None:
            cur_be = sxp.child_value(cur_dev_sxp, 'backend', None)
            if cur_be is None:
                cur_be = xen.xend.XendDomain.DOM0_ID
            new_be_dom = _vscsi_be(new_be)
            cur_be_dom = _vscsi_be(cur_be)
            if new_be_dom != cur_be_dom:
                raise XendError('The physical device "%s" cannot define '
                                'because backend is different' % devs[0]['p-dev'])

    elif state == xenbusState['Closing']:
        if not _is_vscsi_defined(cur_dev_sxp, v_devs = v_devs):
            raise XendError("Cannot detach vscsi device does not exist")

    if self.domid is not None:
        # Active domain: let the device controller reconfigure the
        # live device, then mirror the result into XendConfig.
        # use DevController.reconfigureDevice to change device config
        dev_control = self.getDeviceController(dev_class)
        dev_uuid = dev_control.reconfigureDevice(req_devid, dev_config)
        dev_control.waitForDevice_reconfigure(req_devid)
        num_devs = dev_control.cleanupDevice(req_devid)

        # update XendConfig with new device info
        if dev_uuid:
            new_dev_sxp = dev_control.configuration(req_devid)
            self.info.device_update(dev_uuid, new_dev_sxp)

        # If there is no device left, destroy vscsi and remove config.
        if num_devs == 0:
            self.destroyDevice('vscsi', req_devid)
            del self.info['devices'][dev_uuid]

    else:
        # Inactive domain: rebuild the stored vscsi sxpr by hand.
        new_dev_sxp = ['vscsi']
        cur_mode = sxp.children(cur_dev_sxp, 'feature-host')[0]
        new_dev_sxp.append(cur_mode)
        try:
            cur_be = sxp.children(cur_dev_sxp, 'backend')[0]
            new_dev_sxp.append(cur_be)
        except IndexError:
            pass

        for cur_dev in sxp.children(cur_dev_sxp, 'dev'):
            if state == xenbusState['Closing']:
                # In feature-host mode the whole node goes away at
                # once; otherwise drop only the devices being detached.
                if int(cur_mode[1]) == 1:
                    continue
                if sxp.child_value(cur_dev, 'v-dev') in v_devs:
                    continue
            new_dev_sxp.append(cur_dev)

        if state == xenbusState['Initialising']:
            for new_dev in sxp.children(dev_sxp, 'dev'):
                new_dev_sxp.append(new_dev)

        dev_uuid = sxp.child_value(cur_dev_sxp, 'uuid')
        self.info.device_update(dev_uuid, new_dev_sxp)

        # If there is only 'vscsi' in new_dev_sxp, remove the config.
        if len(sxp.children(new_dev_sxp, 'dev')) == 0:
            del self.info['devices'][dev_uuid]

    xen.xend.XendDomain.instance().managed_config_save(self)

    return True
def device_configure(self, dev_sxp, devid = None):
    """Configure an existing device.

    @param dev_sxp: device configuration
    @type dev_sxp: SXP object (parsed config)
    @param devid: device id (derived from the 'dev' field if None)
    @type devid: int
    @return: Returns True if successfully updated device
    @rtype: boolean
    @raise VmError: if the device cannot be identified or the
        requested reconfiguration is not permitted
    """

    # convert device sxp to a dict
    dev_class = sxp.name(dev_sxp)
    dev_config = {}

    # PCI and vscsi have their own, more involved, configure paths.
    if dev_class == 'pci':
        return self.pci_device_configure(dev_sxp)

    if dev_class == 'vscsi':
        return self.vscsi_device_configure(dev_sxp)

    # Flatten [key, value] pairs; single-element entries are skipped.
    for opt_val in dev_sxp[1:]:
        try:
            dev_config[opt_val[0]] = opt_val[1]
        except IndexError:
            pass

    dev_control = self.getDeviceController(dev_class)
    if devid is None:
        # Derive the device number from the virtual device name,
        # stripping any "ioemu:" prefix and ":disk"/":cdrom" suffix.
        dev = dev_config.get('dev', '')
        if not dev:
            raise VmError('Block device must have virtual details specified')
        if 'ioemu:' in dev:
            (_, dev) = dev.split(':', 1)
        try:
            (dev, _) = dev.split(':', 1) # Remove ":disk" or ":cdrom"
        except ValueError:
            pass
        devid = dev_control.convertToDeviceNumber(dev)
    dev_info = self._getDeviceInfo_vbd(devid)
    if dev_info is None:
        raise VmError("Device %s not connected" % devid)
    dev_uuid = sxp.child_value(dev_info, 'uuid')

    if self.domid is not None:
        # use DevController.reconfigureDevice to change device config
        dev_control.reconfigureDevice(devid, dev_config)
    else:
        # Inactive domain: only a read-only cdrom media change is
        # allowed; anything else is refused.
        (_, new_b, new_f) = dev_control.getDeviceDetails(dev_config)
        if (new_f['device-type'] == 'cdrom' and
            sxp.child_value(dev_info, 'dev').endswith(':cdrom') and
            new_b['mode'] == 'r' and
            sxp.child_value(dev_info, 'mode') == 'r'):
            pass
        else:
            raise VmError('Refusing to reconfigure device %s:%d to %s' %
                          (dev_class, devid, dev_config))

    # update XendConfig with new device info
    self.info.device_update(dev_uuid, dev_sxp)
    xen.xend.XendDomain.instance().managed_config_save(self)

    return True
def waitForDevices(self):
    """Block until all of this domain's configured devices have
    finished connecting.

    @raise VmError: if any device fails to initialise.
    """
    # Delegate to the controller of every known device class in turn.
    for device_class in XendDevices.valid_devices():
        controller = self.getDeviceController(device_class)
        controller.waitForDevices()
def hvm_destroyPCIDevice(self, vslot):
    """Detach the HVM pass-through PCI device at the given virtual
    slot, after checking its co-assignment constraints.

    @param vslot: virtual slot number (int or string)
    @return: 0 on success
    @raise VmError: if no device occupies vslot, the device cannot
        be parsed, or a co-assigned device is still attached
    """
    log.debug("hvm_destroyPCIDevice called %s", vslot)

    if not self.info.is_hvm():
        raise VmError("hvm_destroyPCIDevice called on non-HVM guest")

    #all the PCI devs share one conf node
    devid = '0'
    vslot = int(vslot)
    dev_info = self._getDeviceInfo_pci('0')#from self.info['devices']
    dev_uuid = sxp.child_value(dev_info, 'uuid')

    #delete the pci bdf config under the pci device
    pci_conf = self.info['devices'][dev_uuid][1]
    pci_len = len(pci_conf['devs'])

    #find the pass-through device with the virtual slot
    devnum = 0
    for x in pci_conf['devs']:
        # Devices attached while inactive carry 'requested_vslot'
        # instead of 'vslot'; both are hex strings.
        if x.has_key('vslot'):
            x_vslot = x['vslot']
        else:
            x_vslot = x['requested_vslot']
        if int(x_vslot, 16) == vslot:
            break
        devnum += 1

    if devnum >= pci_len:
        raise VmError("Device @ vslot 0x%x doesn't exist." % (vslot))

    # NOTE(review): the code below relies on the loop variable `x`
    # still being bound to the matching dev dict after the break.

    # Check the co-assignment.
    # To pci-detach a device D from domN, we should ensure: for each DD in the
    # list of D's co-assignment devices, DD is not assigned (to domN).
    from xen.xend.server.pciif import PciDevice
    domain = int(x['domain'],16)
    bus = int(x['bus'],16)
    dev = int(x['slot'],16)
    func = int(x['func'],16)
    try:
        pci_device = PciDevice(domain, bus, dev, func)
    except Exception, e:
        raise VmError("pci: failed to locate device and "+
                "parse it's resources - "+str(e))
    coassignment_list = pci_device.find_coassigned_devices()
    coassignment_list.remove(pci_device.name)
    assigned_pci_device_str_list = self._get_assigned_pci_devices()
    for pci_str in coassignment_list:
        if pci_str in assigned_pci_device_str_list:
            raise VmError(("pci: failed to pci-detach %s from domain %s" + \
                " because one of its co-assignment device %s is still " + \
                " assigned to the domain." \
                )% (pci_device.name, self.info['name_label'], pci_str))

    bdf_str = "%s:%s:%s.%s" % (x['domain'], x['bus'], x['slot'], x['func'])
    log.info("hvm_destroyPCIDevice:%s:%s!", x, bdf_str)

    # For a live domain, ask the device model to hot-remove the device.
    if self.domid is not None:
        self.image.signalDeviceModel('pci-rem', 'pci-removed', bdf_str)

    return 0
def destroyDevice(self, deviceClass, devid, force = False, rm_cfg = False):
    """Destroy a device, optionally removing its stored configuration.

    @param deviceClass: device class name, e.g. 'vif', 'vbd', 'tap'
    @param devid: device id or virtual device name
    @param force: skip waiting for backend teardown
    @param rm_cfg: also delete the device from the managed config
    @return: result of the controller's destroyDevice, or None if
        the domain is inactive
    @raise XendError: if rm_cfg is set and the device is not defined
    """
    log.debug("XendDomainInfo.destroyDevice: deviceClass = %s, device = %s",
              deviceClass, devid)

    if rm_cfg:
        # Convert devid to device number. A device number is
        # needed to remove its configuration.
        dev = self.getDeviceController(deviceClass).convertToDeviceNumber(devid)

        # Save current sxprs. A device number and a backend
        # path are needed to remove its configuration but sxprs
        # do not have those after calling destroyDevice.
        sxprs = self.getDeviceSxprs(deviceClass)

    rc = None
    if self.domid is not None:
        rc = self.getDeviceController(deviceClass).destroyDevice(devid, force)
        if not force and rm_cfg:
            # The backend path, other than the device itself,
            # has to be passed because its accompanied frontend
            # path may be void until its removal is actually
            # issued. It is probable because destroyDevice is
            # issued first.
            # NOTE(review): `backend` stays unbound if no sxpr entry
            # matches `dev` — presumably guaranteed by the caller.
            for dev_num, dev_info in sxprs:
                dev_num = int(dev_num)
                if dev_num == dev:
                    for x in dev_info:
                        if x[0] == 'backend':
                            backend = x[1]
                            break
                    break
            self._waitForDevice_destroy(deviceClass, devid, backend)

    if rm_cfg:
        if deviceClass == 'vif':
            if self.domid is not None:
                # Look the vif up again by MAC, since the saved sxprs
                # lose their identity once the device is destroyed.
                for dev_num, dev_info in sxprs:
                    dev_num = int(dev_num)
                    if dev_num == dev:
                        for x in dev_info:
                            if x[0] == 'mac':
                                mac = x[1]
                                break
                        break
                dev_info = self._getDeviceInfo_vif(mac)
            else:
                _, dev_info = sxprs[dev]
        else: # 'vbd' or 'tap'
            dev_info = self._getDeviceInfo_vbd(dev)
            # To remove the UUID of the device from refs,
            # deviceClass must be always 'vbd'.
            deviceClass = 'vbd'
        if dev_info is None:
            raise XendError("Device %s is not defined" % devid)

        dev_uuid = sxp.child_value(dev_info, 'uuid')
        del self.info['devices'][dev_uuid]
        self.info['%s_refs' % deviceClass].remove(dev_uuid)
        xen.xend.XendDomain.instance().managed_config_save(self)

    return rc
def getDeviceSxprs(self, deviceClass):
    """Return [device_number, sxpr] pairs for every configured device
    of the given class.

    For 'pci' the raw devs list of the shared PCI node is returned.
    For active domains the live controller state is used; otherwise
    the pairs are rebuilt from the stored configuration.
    """
    if deviceClass == 'pci':
        dev_info = self._getDeviceInfo_pci('0')#from self.info['devices']
        if dev_info is None:
            return []
        dev_uuid = sxp.child_value(dev_info, 'uuid')
        pci_devs = self.info['devices'][dev_uuid][1]['devs']
        return pci_devs
    if self._stateGet() in (DOM_STATE_RUNNING, DOM_STATE_PAUSED, DOM_STATE_CRASHED):
        return self.getDeviceController(deviceClass).sxprs()
    else:
        sxprs = []
        dev_num = 0
        for dev_type, dev_info in self.info.all_devices_sxpr():
            # 'vbd' also covers 'tap' devices.
            if (deviceClass == 'vbd' and dev_type not in ['vbd', 'tap']) or \
               (deviceClass != 'vbd' and dev_type != deviceClass):
                continue

            if deviceClass == 'vscsi':
                # Rebuild the ['devs', [...]] list, annotating each
                # device with an (unknown) frontstate.
                vscsi_devs = ['devs', []]
                for vscsi_dev in sxp.children(dev_info, 'dev'):
                    vscsi_dev.append(['frontstate', None])
                    vscsi_devs[1].append(vscsi_dev)
                    dev_num = int(sxp.child_value(vscsi_dev, 'devid'))
                vscsi_mode = sxp.children(dev_info, 'feature-host')[0]
                sxprs.append([dev_num, [vscsi_devs, vscsi_mode]])
            elif deviceClass == 'vbd':
                # Derive the block device number from its name,
                # stripping "ioemu:" and ":disk"/":cdrom" decorations.
                dev = sxp.child_value(dev_info, 'dev')
                if 'ioemu:' in dev:
                    (_, dev) = dev.split(':', 1)
                try:
                    (dev_name, _) = dev.split(':', 1)  # Remove ":disk" or ":cdrom"
                except ValueError:
                    dev_name = dev
                dev_num = self.getDeviceController('vbd').convertToDeviceNumber(dev_name)
                sxprs.append([dev_num, dev_info])
            else:
                # Other classes are simply numbered in order.
                sxprs.append([dev_num, dev_info])
                dev_num += 1
        return sxprs
def getBlockDeviceClass(self, devid):
    """Return the device class ('vbd' or 'tap') of the block device
    identified by devid, or None if it is not configured."""
    # To get a device number from the devid we borrow the VBD
    # controller, which knows how to convert block device names.
    dev_num = self.getDeviceController('vbd').convertToDeviceNumber(devid)
    info = self._getDeviceInfo_vbd(dev_num)
    if info:
        return info[0]
def _getDeviceInfo_vif(self, mac):
    """Return the sxpr of the vif whose MAC address equals mac, or
    None when no such vif is configured."""
    for kind, info in self.info.all_devices_sxpr():
        if kind == 'vif' and sxp.child_value(info, 'mac') == mac:
            return info
def _getDeviceInfo_vbd(self, devid):
    """Return the sxpr of the block device ('vbd' or 'tap') whose
    device number equals devid, or None."""
    for kind, info in self.info.all_devices_sxpr():
        if kind not in ('vbd', 'tap'):
            continue
        # Strip any ":disk"/":cdrom" decoration before converting
        # the virtual device name to a number.
        name = sxp.child_value(info, 'dev').split(':')[0]
        number = self.getDeviceController(kind).convertToDeviceNumber(name)
        if number == devid:
            return info
def _getDeviceInfo_pci(self, devid):
    """Return the sxpr of this domain's PCI configuration node, or
    None if the domain has no PCI devices.

    All pass-through PCI functions share a single 'pci' node, so the
    first 'pci' entry found is returned; the devid argument is
    currently not used in the lookup.
    """
    for dev_type, dev_info in self.info.all_devices_sxpr():
        if dev_type != 'pci':
            continue
        return dev_info
    return None
def _getDeviceInfo_vscsi(self, devid):
    """Return the sxpr of the vscsi node whose devid matches, or
    None when no such node is configured."""
    wanted = int(devid)
    for kind, info in self.info.all_devices_sxpr():
        if kind != 'vscsi':
            continue
        # Every dev on a vscsi node carries the node's devid, so
        # inspecting the first dev is sufficient.
        first_dev = sxp.children(info, 'dev')[0]
        if int(sxp.child_value(first_dev, 'devid')) == wanted:
            return info
    return None
def _get_assigned_pci_devices(self, devid = 0):
    """Return the BDF strings ("dddd:bb:ss.f") of the PCI devices
    assigned to this domain.

    For a running domain the hypervisor is queried directly; for an
    inactive domain the list is derived from the stored PCI
    configuration node.

    @param devid: PCI configuration node id (all PCI functions share
        one node; kept for interface compatibility)
    @return: list of BDF strings, possibly empty
    """
    if self.domid is not None:
        return get_assigned_pci_devices(self.domid)

    dev_info = self._getDeviceInfo_pci(devid)
    if dev_info is None:
        return []
    dev_uuid = sxp.child_value(dev_info, 'uuid')
    pci_conf = self.info['devices'][dev_uuid][1]
    # The stored fields are hex strings; format them into canonical
    # BDF notation.  (Replaces the former quadratic
    # `dev_str_list = dev_str_list + [dev_str]` rebuild.)
    return ["%04x:%02x:%02x.%01x" %
            (int(dev['domain'], 16), int(dev['bus'], 16),
             int(dev['slot'], 16), int(dev['func'], 16))
            for dev in pci_conf['devs']]
def setMemoryTarget(self, target):
    """Set the memory target of this domain.
    @param target: In MiB.
    @raise XendError: if shrinking dom0 below its configured minimum.
    """
    log.debug("Setting memory target of domain %s (%s) to %d MiB.",
              self.info['name_label'], str(self.domid), target)

    MiB = 1024 * 1024
    memory_cur = self.get_memory_dynamic_max() / MiB

    # Never let dom0 be ballooned below its configured minimum.
    if self.domid == 0:
        dom0_min_mem = xoptions.get_dom0_min_mem()
        if target < memory_cur and dom0_min_mem > target:
            raise XendError("memory_dynamic_max too small")

    # self.info stores memory in bytes.
    self._safe_set_memory('memory_dynamic_min', target * MiB)
    self._safe_set_memory('memory_dynamic_max', target * MiB)

    if self.domid >= 0:
        # Growing: make sure enough free memory is available first.
        if target > memory_cur:
            balloon.free((target - memory_cur) * 1024, self)
        self.storeVm("memory", target)
        # xenstore target is in KiB.
        self.storeDom("memory/target", target << 10)
        xc.domain_set_target_mem(self.domid,
                                 (target * 1024))
    xen.xend.XendDomain.instance().managed_config_save(self)
def setMemoryMaximum(self, limit):
    """Set the maximum memory limit of this domain
    @param limit: In MiB.
    @raise XendError: if the hypervisor rejects the new maximum (the
        previous value is restored first).
    """
    log.debug("Setting memory maximum of domain %s (%s) to %d MiB.",
              self.info['name_label'], str(self.domid), limit)

    # Remember the old value (in bytes) so we can roll back on error.
    maxmem_cur = self.get_memory_static_max()
    MiB = 1024 * 1024
    self._safe_set_memory('memory_static_max', limit * MiB)

    if self.domid >= 0:
        # Hypercall takes KiB.
        maxmem = int(limit) * 1024
        try:
            # NOTE(review): this return means managed_config_save
            # below only runs for inactive domains — confirm intended.
            return xc.domain_setmaxmem(self.domid, maxmem)
        except Exception, ex:
            self._safe_set_memory('memory_static_max', maxmem_cur)
            raise XendError(str(ex))
    xen.xend.XendDomain.instance().managed_config_save(self)
def getVCPUInfo(self):
    """Return an sxpr describing every VCPU of this domain.

    For a running domain the data comes from the hypervisor; for an
    inactive one placeholder values are emitted with the stored
    CPU-affinity map (or all 64 CPUs when none is stored).

    @raise XendError: wrapping any RuntimeError from the hypercall.
    """
    try:
        # We include the domain name and ID, to help xm.
        sxpr = ['domain',
                ['domid', self.domid],
                ['name', self.info['name_label']],
                ['vcpu_count', self.info['VCPUs_max']]]

        for i in range(0, self.info['VCPUs_max']):
            if self.domid is not None:
                info = xc.vcpu_getinfo(self.domid, i)

                sxpr.append(['vcpu',
                             ['number', i],
                             ['online', info['online']],
                             ['blocked', info['blocked']],
                             ['running', info['running']],
                             ['cpu_time', info['cpu_time'] / 1e9],
                             ['cpu', info['cpu']],
                             ['cpumap', info['cpumap']]])
            else:
                # NOTE(review): assumes self.info['cpus'] has at
                # least VCPUs_max entries — confirm with callers.
                sxpr.append(['vcpu',
                             ['number', i],
                             ['online', 0],
                             ['blocked', 0],
                             ['running', 0],
                             ['cpu_time', 0.0],
                             ['cpu', -1],
                             ['cpumap', self.info['cpus'][i] and \
                                        self.info['cpus'][i] or range(64)]])

        return sxpr

    except RuntimeError, exn:
        raise XendError(str(exn))
def getDomInfo(self):
    """Return the hypervisor's information record for this domain
    (see dom_get), or whatever dom_get yields for an unknown id."""
    domid = self.domid
    return dom_get(domid)
1420 # internal functions ... TODO: re-categorised
def _augmentInfo(self, priv):
    """Augment self.info, as given to us through L{recreate}, with
    values taken from the store. This recovers those values known
    to xend but not to the hypervisor.

    @param priv: True for the privileged domain (dom0), whose
        memory/vcpu settings are taken from Xen, not the store.
    """
    augment_entries = XendConfig.LEGACY_XENSTORE_VM_PARAMS[:]
    if priv:
        # dom0's boot-time values win over anything in the store.
        augment_entries.remove('memory')
        augment_entries.remove('maxmem')
        augment_entries.remove('vcpus')
        augment_entries.remove('vcpu_avail')

    vm_config = self._readVMDetails([(k, XendConfig.LEGACY_CFG_TYPES[k])
                                     for k in augment_entries])

    # make returned lists into a dictionary
    vm_config = dict(zip(augment_entries, vm_config))

    for arg in augment_entries:
        val = vm_config[arg]
        if val != None:
            # Map legacy names onto their Xen-API equivalents where
            # one exists; memory values have dedicated keys.
            if arg in XendConfig.LEGACY_CFG_TO_XENAPI_CFG:
                xapiarg = XendConfig.LEGACY_CFG_TO_XENAPI_CFG[arg]
                self.info[xapiarg] = val
            elif arg == "memory":
                self.info["static_memory_min"] = val
            elif arg == "maxmem":
                self.info["static_memory_max"] = val
            else:
                self.info[arg] = val

    # read CPU Affinity
    self.info['cpus'] = []
    vcpus_info = self.getVCPUInfo()
    for vcpu_info in sxp.children(vcpus_info, 'vcpu'):
        self.info['cpus'].append(sxp.child_value(vcpu_info, 'cpumap'))

    # For dom0, we ignore any stored value for the vcpus fields, and
    # read the current value from Xen instead. This allows boot-time
    # settings to take precedence over any entries in the store.
    if priv:
        xeninfo = dom_get(self.domid)
        self.info['VCPUs_max'] = xeninfo['online_vcpus']
        self.info['vcpu_avail'] = (1 << xeninfo['online_vcpus']) - 1

    # read image value
    image_sxp = self._readVm('image')
    if image_sxp:
        self.info.update_with_image_sxp(sxp.from_string(image_sxp))

    # read devices
    devices = []
    for devclass in XendDevices.valid_devices():
        devconfig = self.getDeviceController(devclass).configurations()
        if devconfig:
            devices.extend(devconfig)

    # Only populate devices when none are recorded yet, so we never
    # clobber an existing managed configuration.
    if not self.info['devices'] and devices is not None:
        for device in devices:
            self.info.device_add(device[0], cfg_sxp = device)

    self._update_consoles()
def _update_consoles(self, transaction = None):
    """Refresh this domain's console configuration (serial VT100 port
    and VNC port) from xenstore, optionally inside an existing
    transaction."""
    # Nothing to do for dom0 or a domain without an id yet.
    if self.domid == None or self.domid == 0:
        return

    # Update VT100 port if it exists
    if transaction is None:
        self.console_port = self.readDom('console/port')
    else:
        self.console_port = self.readDomTxn(transaction, 'console/port')
    if self.console_port is not None:
        serial_consoles = self.info.console_get_all('vt100')
        if not serial_consoles:
            # No serial console recorded yet: create one.
            cfg = self.info.console_add('vt100', self.console_port)
            self._createDevice('console', cfg)
        else:
            # Otherwise just refresh its location.
            console_uuid = serial_consoles[0].get('uuid')
            self.info.console_update(console_uuid, 'location',
                                     self.console_port)

    # Update VNC port if it exists and write to xenstore
    if transaction is None:
        vnc_port = self.readDom('console/vnc-port')
    else:
        vnc_port = self.readDomTxn(transaction, 'console/vnc-port')
    if vnc_port is not None:
        # Only the first vfb device is considered.
        for dev_uuid, (dev_type, dev_info) in self.info['devices'].items():
            if dev_type == 'vfb':
                old_location = dev_info.get('location')
                listen_host = dev_info.get('vnclisten', \
                                XendOptions.instance().get_vnclisten_address())
                new_location = '%s:%s' % (listen_host, str(vnc_port))
                if old_location == new_location:
                    break

                dev_info['location'] = new_location
                self.info.device_update(dev_uuid, cfg_xenapi = dev_info)
                vfb_ctrl = self.getDeviceController('vfb')
                vfb_ctrl.reconfigureDevice(0, dev_info)
                break
# Function to update xenstore /vm/*

def _readVm(self, *args):
    # Read node(s) under this VM's /vm/<uuid> path.
    return xstransact.Read(self.vmpath, *args)

def _writeVm(self, *args):
    # Write node(s) under /vm/<uuid>.
    return xstransact.Write(self.vmpath, *args)

def _removeVm(self, *args):
    # Remove node(s) under /vm/<uuid>.
    return xstransact.Remove(self.vmpath, *args)

def _gatherVm(self, *args):
    # Typed multi-read under /vm/<uuid> (see xstransact.Gather).
    return xstransact.Gather(self.vmpath, *args)

def _listRecursiveVm(self, *args):
    # Recursive listing under /vm/<uuid>.
    return xstransact.ListRecursive(self.vmpath, *args)

def storeVm(self, *args):
    # Store (write, creating as needed) node(s) under /vm/<uuid>.
    return xstransact.Store(self.vmpath, *args)

def permissionsVm(self, *args):
    # Set xenstore permissions on node(s) under /vm/<uuid>.
    return xstransact.SetPermissions(self.vmpath, *args)
# Function to update xenstore /dom/*

def readDom(self, *args):
    # Read node(s) under this domain's /local/domain/<domid> path.
    return xstransact.Read(self.dompath, *args)

def gatherDom(self, *args):
    # Typed multi-read under the domain path.
    return xstransact.Gather(self.dompath, *args)

def _writeDom(self, *args):
    # Write node(s) under the domain path.
    return xstransact.Write(self.dompath, *args)

def _removeDom(self, *args):
    # Remove node(s) under the domain path.
    return xstransact.Remove(self.dompath, *args)

def storeDom(self, *args):
    # Store (write, creating as needed) node(s) under the domain path.
    return xstransact.Store(self.dompath, *args)

# The *Txn variants perform the same operations inside a
# caller-supplied xenstore transaction.

def readDomTxn(self, transaction, *args):
    paths = map(lambda x: self.dompath + "/" + x, args)
    return transaction.read(*paths)

def gatherDomTxn(self, transaction, *args):
    paths = map(lambda x: self.dompath + "/" + x, args)
    return transaction.gather(*paths)

def _writeDomTxn(self, transaction, *args):
    paths = map(lambda x: self.dompath + "/" + x, args)
    return transaction.write(*paths)

def _removeDomTxn(self, transaction, *args):
    paths = map(lambda x: self.dompath + "/" + x, args)
    return transaction.remove(*paths)

def storeDomTxn(self, transaction, *args):
    paths = map(lambda x: self.dompath + "/" + x, args)
    return transaction.store(*paths)
def _recreateDom(self):
    """Rebuild this domain's xenstore tree from scratch, running
    _recreateDomFunc inside a single completed transaction."""
    complete(self.dompath, self._recreateDomFunc)
def _recreateDomFunc(self, t):
    """Wipe and recreate this domain's xenstore subtree inside
    transaction t, restoring the standard nodes and permissions."""
    t.remove()
    t.mkdir()
    # The domain may read its own tree.
    t.set_permissions({'dom' : self.domid, 'read' : True})
    t.write('vm', self.vmpath)
    # NB. Solaris guests use guest/ and hvmpv/ xenstore directories
    for i in [ 'device', 'control', 'error', 'memory', 'guest', 'hvmpv' ]:
        t.mkdir(i)
        t.set_permissions(i, {'dom' : self.domid})
def _storeDomDetails(self):
    """Write this domain's details (console, store ring, elfnotes,
    per-VCPU availability, ...) into its xenstore domain tree."""
    to_store = {
        'domid': str(self.domid),
        'vm': self.vmpath,
        'name': self.info['name_label'],
        'console/limit': str(xoptions.get_console_limit() * 1024),
        'memory/target': str(self.info['memory_dynamic_max'] / 1024),

    # Helper: record a value only when set; booleans become "1"/"0".
    def f(n, v):
        if v is not None:
            if type(v) == bool:
                to_store[n] = v and "1" or "0"
            else:
                to_store[n] = str(v)

    # Figure out if we need to tell xenconsoled to ignore this guest's
    # console - device model will handle console if it is running
    constype = "ioemu"
    if 'device_model' not in self.info['platform']:
        constype = "xenconsoled"

    f('console/port', self.console_port)
    f('console/ring-ref', self.console_mfn)
    f('console/type', constype)
    f('store/port', self.store_port)
    f('store/ring-ref', self.store_mfn)

    if arch.type == "x86":
        f('control/platform-feature-multiprocessor-suspend', True)

    # elfnotes
    for n, v in self.info.get_notes().iteritems():
        n = n.lower().replace('_', '-')
        if n == 'features':
            # 'features' is a |-separated list; '!' negates a feature.
            for v in v.split('|'):
                v = v.replace('_', '-')
                if v.startswith('!'):
                    f('image/%s/%s' % (n, v[1:]), False)
                else:
                    f('image/%s/%s' % (n, v), True)
        else:
            f('image/%s' % n, v)

    if self.info.has_key('security_label'):
        f('security_label', self.info['security_label'])

    to_store.update(self._vcpuDomDetails())

    log.debug("Storing domain details: %s", scrub_password(to_store))

    self._writeDom(to_store)
1659 def _vcpuDomDetails(self):
1660 def availability(n):
1661 if self.info['vcpu_avail'] & (1 << n):
1662 return 'online'
1663 else:
1664 return 'offline'
1666 result = {}
1667 for v in range(0, self.info['VCPUs_max']):
1668 result["cpu/%d/availability" % v] = availability(v)
1669 return result
1672 # xenstore watches
def _registerWatches(self):
    """Register a watch on this VM's entries in the store, and the
    domain's control/shutdown node, so that when they are changed
    externally, we keep up to date. This should only be called by {@link
    #create}, {@link #recreate}, or {@link #restore}, once the domain's
    details have been written, but before the new instance is returned."""
    # _storeChanged re-reads the VM config; _handleShutdownWatch
    # schedules shutdown-timeout handling.
    self.vmWatch = xswatch(self.vmpath, self._storeChanged)
    self.shutdownWatch = xswatch(self.dompath + '/control/shutdown',
                                 self._handleShutdownWatch)
def _storeChanged(self, _):
    """xswatch callback: the VM's /vm tree changed externally.  Pull
    the values back into self.info and, if anything differed, rewrite
    the domain's xenstore details.  Always returns 1 to keep the
    watch registered."""
    log.trace("XendDomainInfo.storeChanged");

    changed = False

    # Check whether values in the configuration have
    # changed in Xenstore.
    cfg_vm = ['name', 'on_poweroff', 'on_reboot', 'on_crash',
              'rtc/timeoffset']

    vm_details = self._readVMDetails([(k,XendConfig.LEGACY_CFG_TYPES[k])
                                      for k in cfg_vm])

    # convert two lists into a python dictionary
    vm_details = dict(zip(cfg_vm, vm_details))

    for arg, val in vm_details.items():
        # Mirror each changed legacy value into its Xen-API slot.
        if arg in XendConfig.LEGACY_CFG_TO_XENAPI_CFG:
            xapiarg = XendConfig.LEGACY_CFG_TO_XENAPI_CFG[arg]
            if val != None and val != self.info[xapiarg]:
                self.info[xapiarg] = val
                changed = True
        elif arg == "memory":
            if val != None and val != self.info["static_memory_min"]:
                self.info["static_memory_min"] = val
                changed = True
        elif arg == "maxmem":
            if val != None and val != self.info["static_memory_max"]:
                self.info["static_memory_max"] = val
                changed = True

    # Check whether image definition has been updated
    image_sxp = self._readVm('image')
    if image_sxp and image_sxp != sxp.to_string(self.info.image_sxpr()):
        self.info.update_with_image_sxp(sxp.from_string(image_sxp))
        changed = True

    # Update the rtc_timeoffset to be preserved across reboot.
    # NB. No need to update xenstore domain section.
    val = int(vm_details.get("rtc/timeoffset", 0))
    self.info["platform"]["rtc_timeoffset"] = val

    if changed:
        # Update the domain section of the store, as this contains some
        # parameters derived from the VM configuration.
        self.refresh_shutdown_lock.acquire()
        try:
            state = self._stateGet()
            if state not in (DOM_STATE_SHUTDOWN, DOM_STATE_HALTED,):
                self._storeDomDetails()
        finally:
            self.refresh_shutdown_lock.release()

    return 1
def _handleShutdownWatch(self, _):
    """xswatch callback on control/shutdown: when a non-suspend
    shutdown is requested, record/restore the shutdown start time and
    schedule refreshShutdown to run when the grace period expires.
    Always returns True to keep the watch registered."""
    log.debug('XendDomainInfo.handleShutdownWatch')

    reason = self.readDom('control/shutdown')

    if reason and reason != 'suspend':
        sst = self.readDom('xend/shutdown_start_time')
        now = time.time()
        if sst:
            # A shutdown is already in progress; compute remaining time.
            self.shutdownStartTime = float(sst)
            timeout = float(sst) + SHUTDOWN_TIMEOUT - now
        else:
            # First notification: persist the start time for later
            # watch firings (e.g. after a xend restart).
            self.shutdownStartTime = now
            self.storeDom('xend/shutdown_start_time', now)
            timeout = SHUTDOWN_TIMEOUT

        log.trace(
            "Scheduling refreshShutdown on domain %d in %ds.",
            self.domid, timeout)
        threading.Timer(timeout, self.refreshShutdown).start()

    return True
1766 # Public Attributes for the VM
def getDomid(self):
    # None while the domain is inactive (managed but not running).
    return self.domid

def setName(self, name, to_store = True):
    # Validate the new name, record it, and optionally persist it
    # to the /vm xenstore tree.
    self._checkName(name)
    self.info['name_label'] = name
    if to_store:
        self.storeVm("name", name)

def getName(self):
    return self.info['name_label']

def getDomainPath(self):
    # /local/domain/<domid> xenstore path.
    return self.dompath

def getShutdownReason(self):
    # Current value of control/shutdown, or None.
    return self.readDom('control/shutdown')

def getStorePort(self):
    """For use only by image.py and XendCheckpoint.py."""
    return self.store_port

def getConsolePort(self):
    """For use only by image.py and XendCheckpoint.py"""
    return self.console_port

def getFeatures(self):
    """For use only by image.py."""
    return self.info['features']

def getVCpuCount(self):
    return self.info['VCPUs_max']
def setVCpuCount(self, vcpus):
    """Set the number of VCPUs for this domain.

    For a running domain (domid >= 0) the new availability mask is
    published to xenstore so VCPUs come on/offline; for an inactive
    domain the stored affinity list and VCPUs_max are resized.

    @param vcpus: requested VCPU count; must be > 0 and, for a
        running domain, no greater than VCPUs_max
    @raise XendError: if the requested count is invalid
    """
    # Validate the argument directly.  (The previous version's
    # helper declared a parameter it never used and validated the
    # closed-over name instead — same behaviour, misleading code.)
    def vcpus_valid(n):
        if n <= 0:
            raise XendError('Zero or less VCPUs is invalid')
        if self.domid >= 0 and n > self.info['VCPUs_max']:
            raise XendError('Cannot set vcpus greater than max vcpus on running domain')
    vcpus_valid(vcpus)

    # Bitmask with one bit set per available VCPU.
    self.info['vcpu_avail'] = (1 << vcpus) - 1
    if self.domid >= 0:
        # Live domain: publish the new availability map.
        self.storeVm('vcpu_avail', self.info['vcpu_avail'])
        self._writeDom(self._vcpuDomDetails())
        self.info['VCPUs_live'] = vcpus
    else:
        # Inactive domain: keep the per-VCPU affinity list in step
        # with the new count before updating VCPUs_max.
        if self.info['VCPUs_max'] > vcpus:
            # decreasing
            del self.info['cpus'][vcpus:]
        elif self.info['VCPUs_max'] < vcpus:
            # increasing
            for c in range(self.info['VCPUs_max'], vcpus):
                self.info['cpus'].append(list())
        self.info['VCPUs_max'] = vcpus
    xen.xend.XendDomain.instance().managed_config_save(self)
    log.info("Set VCPU count on domain %s to %d", self.info['name_label'],
             vcpus)
def getMemoryTarget(self):
    """Get this domain's target memory size, in KB."""
    return self.info['memory_dynamic_max'] / 1024

def getMemoryMaximum(self):
    """Get this domain's maximum memory size, in KB."""
    # remember, info now stores memory in bytes
    return self.info['memory_static_max'] / 1024

def getResume(self):
    # Stringified resume flag.
    return str(self._resume)

def setResume(self, isresume):
    self._resume = isresume

def getCpus(self):
    # Per-VCPU CPU-affinity lists.
    return self.info['cpus']

def setCpus(self, cpumap):
    self.info['cpus'] = cpumap

def getCap(self):
    # Scheduler cap parameter.
    return self.info['vcpus_params']['cap']

def setCap(self, cpu_cap):
    self.info['vcpus_params']['cap'] = cpu_cap

def getWeight(self):
    # Scheduler weight parameter.
    return self.info['vcpus_params']['weight']

def setWeight(self, cpu_weight):
    self.info['vcpus_params']['weight'] = cpu_weight

def getRestartCount(self):
    return self._readVm('xend/restart_count')
def refreshShutdown(self, xeninfo = None):
    """ Checks the domain for whether a shutdown is required.

    Called from XendDomainInfo and also image.py for HVM images.

    @param xeninfo: pre-fetched hypervisor record for this domain;
        fetched via dom_get when None.
    """

    # If set at the end of this method, a restart is required, with the
    # given reason.  This restart has to be done out of the scope of
    # refresh_shutdown_lock.
    restart_reason = None

    self.refresh_shutdown_lock.acquire()
    try:
        if xeninfo is None:
            xeninfo = dom_get(self.domid)
            if xeninfo is None:
                # The domain no longer exists.  This will occur if we have
                # scheduled a timer to check for shutdown timeouts and the
                # shutdown succeeded.  It will also occur if someone
                # destroys a domain beneath us.  We clean up the domain,
                # just in case, but we can't clean up the VM, because that
                # VM may have migrated to a different domain on this
                # machine.
                self.cleanupDomain()
                self._stateSet(DOM_STATE_HALTED)
                return

        if xeninfo['dying']:
            # Dying means that a domain has been destroyed, but has not
            # yet been cleaned up by Xen.  This state could persist
            # indefinitely if, for example, another domain has some of its
            # pages mapped.  We might like to diagnose this problem in the
            # future, but for now all we do is make sure that it's not us
            # holding the pages, by calling cleanupDomain.  We can't
            # clean up the VM, as above.
            self.cleanupDomain()
            self._stateSet(DOM_STATE_SHUTDOWN)
            return

        elif xeninfo['crashed']:
            if self.readDom('xend/shutdown_completed'):
                # We've seen this shutdown already, but we are preserving
                # the domain for debugging.  Leave it alone.
                return

            log.warn('Domain has crashed: name=%s id=%d.',
                     self.info['name_label'], self.domid)
            self._writeVm(LAST_SHUTDOWN_REASON, 'crash')

            restart_reason = 'crash'
            self._stateSet(DOM_STATE_HALTED)

        elif xeninfo['shutdown']:
            self._stateSet(DOM_STATE_SHUTDOWN)
            if self.readDom('xend/shutdown_completed'):
                # We've seen this shutdown already, but we are preserving
                # the domain for debugging.  Leave it alone.
                return

            else:
                reason = shutdown_reason(xeninfo['shutdown_reason'])

                log.info('Domain has shutdown: name=%s id=%d reason=%s.',
                         self.info['name_label'], self.domid, reason)
                self._writeVm(LAST_SHUTDOWN_REASON, reason)

                self._clearRestart()

                if reason == 'suspend':
                    self._stateSet(DOM_STATE_SUSPENDED)
                    # Don't destroy the domain.  XendCheckpoint will do
                    # this once it has finished.  However, stop watching
                    # the VM path now, otherwise we will end up with one
                    # watch for the old domain, and one for the new.
                    self._unwatchVm()
                elif reason in ('poweroff', 'reboot'):
                    restart_reason = reason
                else:
                    self.destroy()

        elif self.dompath is None:
            # We have yet to manage to call introduceDomain on this
            # domain.  This can happen if a restore is in progress, or has
            # failed.  Ignore this domain.
            pass
        else:
            # Domain is alive.  If we are shutting it down, log a message
            # if it seems unresponsive.
            if xeninfo['paused']:
                self._stateSet(DOM_STATE_PAUSED)
            else:
                self._stateSet(DOM_STATE_RUNNING)

            if self.shutdownStartTime:
                timeout = (SHUTDOWN_TIMEOUT - time.time() +
                           self.shutdownStartTime)
                if (timeout < 0 and not self.readDom('xend/unresponsive')):
                    log.info(
                        "Domain shutdown timeout expired: name=%s id=%s",
                        self.info['name_label'], self.domid)
                    self.storeDom('xend/unresponsive', 'True')
    finally:
        self.refresh_shutdown_lock.release()

    # Restart outside the lock, and only once at a time.
    if restart_reason and not self.restart_in_progress:
        self.restart_in_progress = True
        threading.Thread(target = self._maybeRestart,
                         args = (restart_reason,)).start()
1976 # Restart functions - handling whether we come back up on shutdown.
    def _clearRestart(self):
        """Remove the recorded shutdown start time from this domain's
        xenstore tree, so no restart handling remains pending."""
        self._removeDom("xend/shutdown_start_time")
1982 def _maybeDumpCore(self, reason):
1983 if reason == 'crash':
1984 if xoptions.get_enable_dump() or self.get_on_crash() \
1985 in ['coredump_and_destroy', 'coredump_and_restart']:
1986 try:
1987 self.dumpCore()
1988 except XendError:
1989 # This error has been logged -- there's nothing more
1990 # we can do in this context.
1991 pass
1993 def _maybeRestart(self, reason):
1994 # Before taking configured action, dump core if configured to do so.
1996 self._maybeDumpCore(reason)
1998 # Dispatch to the correct method based upon the configured on_{reason}
1999 # behaviour.
2000 actions = {"destroy" : self.destroy,
2001 "restart" : self._restart,
2002 "preserve" : self._preserve,
2003 "rename-restart" : self._renameRestart,
2004 "coredump-destroy" : self.destroy,
2005 "coredump-restart" : self._restart}
2007 action_conf = {
2008 'poweroff': 'actions_after_shutdown',
2009 'reboot': 'actions_after_reboot',
2010 'crash': 'actions_after_crash',
2013 action_target = self.info.get(action_conf.get(reason))
2014 func = actions.get(action_target, None)
2015 if func and callable(func):
2016 func()
2017 else:
2018 self.destroy() # default to destroy
    def _renameRestart(self):
        """Restart the domain, renaming and preserving the old, dead
        domain for debugging instead of destroying it (see _restart)."""
        self._restart(True)
    def _restart(self, rename = False):
        """Restart the domain after it has exited.

        Guards against restart loops in two ways: it refuses to run if a
        previous restart left RESTART_IN_PROGRESS behind in the VM path,
        and it refuses if the domain ran for less than
        MINIMUM_RESTART_TIME seconds.

        @param rename True if the old domain is to be renamed and preserved,
        False if it is to be destroyed.
        """
        from xen.xend import XendDomain

        if self._readVm(RESTART_IN_PROGRESS):
            log.error('Xend failed during restart of domain %s. '
                      'Refusing to restart to avoid loops.',
                      str(self.domid))
            self.destroy()
            return

        old_domid = self.domid
        # Mark the restart as in progress; removed again below (from the
        # new domain where appropriate) once the restart has completed.
        self._writeVm(RESTART_IN_PROGRESS, 'True')

        elapse = time.time() - self.info['start_time']
        if elapse < MINIMUM_RESTART_TIME:
            log.error('VM %s restarting too fast (Elapsed time: %f seconds). '
                      'Refusing to restart to avoid loops.',
                      self.info['name_label'], elapse)
            self.destroy()
            return

        prev_vm_xend = self._listRecursiveVm('xend')
        new_dom_info = self.info
        try:
            if rename:
                # Keep the old domain around (renamed) for debugging.
                new_dom_info = self._preserveForRestart()
            else:
                self._unwatchVm()
                self.destroy()

            # new_dom's VM will be the same as this domain's VM, except where
            # the rename flag has instructed us to call preserveForRestart.
            # In that case, it is important that we remove the
            # RESTART_IN_PROGRESS node from the new domain, not the old one,
            # once the new one is available.

            new_dom = None
            try:
                new_dom = XendDomain.instance().domain_create_from_dict(
                    new_dom_info)
                # Carry the xend-private VM entries over to the new domain.
                for x in prev_vm_xend[0][1]:
                    new_dom._writeVm('xend/%s' % x[0], x[1])
                new_dom.waitForDevices()
                new_dom.unpause()
                # Bump the per-VM restart counter.
                rst_cnt = new_dom._readVm('xend/restart_count')
                rst_cnt = int(rst_cnt) + 1
                new_dom._writeVm('xend/restart_count', str(rst_cnt))
                new_dom._removeVm(RESTART_IN_PROGRESS)
            except:
                if new_dom:
                    new_dom._removeVm(RESTART_IN_PROGRESS)
                    new_dom.destroy()
                else:
                    self._removeVm(RESTART_IN_PROGRESS)
                raise
        except:
            log.exception('Failed to restart domain %s.', str(old_domid))
    def _preserveForRestart(self):
        """Preserve a domain that has been shut down, by giving it a new UUID,
        cloning the VM details, and giving it a new name.  This allows us to
        keep this domain for debugging, but restart a new one in its place
        preserving the restart semantics (name and UUID preserved).

        @return: an info dict (carrying the original name and UUID) to use
            for the replacement domain.
        """

        new_uuid = uuid.createString()
        new_name = 'Domain-%s' % new_uuid
        log.info("Renaming dead domain %s (%d, %s) to %s (%s).",
                 self.info['name_label'], self.domid, self.info['uuid'],
                 new_name, new_uuid)
        self._unwatchVm()
        self._releaseDevices()
        # Remove existing vm node in xenstore
        self._removeVm()
        # The copy keeps the original name/UUID for the replacement domain;
        # this (preserved) domain takes the freshly generated ones instead.
        new_dom_info = self.info.copy()
        new_dom_info['name_label'] = self.info['name_label']
        new_dom_info['uuid'] = self.info['uuid']
        self.info['name_label'] = new_name
        self.info['uuid'] = new_uuid
        self.vmpath = XS_VMROOT + new_uuid
        # Write out new vm node to xenstore
        self._storeVmDetails()
        self._preserve()
        return new_dom_info
    def _preserve(self):
        """Keep this dead domain around for debugging: stop watching the VM
        path, record in xenstore that the shutdown has been handled, and
        mark the domain halted."""
        log.info("Preserving dead domain %s (%d).", self.info['name_label'],
                 self.domid)
        self._unwatchVm()
        # Flag the shutdown as already seen, so later shutdown processing
        # leaves this preserved domain alone.
        self.storeDom('xend/shutdown_completed', 'True')
        self._stateSet(DOM_STATE_HALTED)
2122 # Debugging ..
2125 def dumpCore(self, corefile = None):
2126 """Create a core dump for this domain.
2128 @raise: XendError if core dumping failed.
2129 """
2131 if not corefile:
2132 this_time = time.strftime("%Y-%m%d-%H%M.%S", time.localtime())
2133 corefile = "/var/xen/dump/%s-%s.%s.core" % (this_time,
2134 self.info['name_label'], self.domid)
2136 if os.path.isdir(corefile):
2137 raise XendError("Cannot dump core in a directory: %s" %
2138 corefile)
2140 try:
2141 try:
2142 self._writeVm(DUMPCORE_IN_PROGRESS, 'True')
2143 xc.domain_dumpcore(self.domid, corefile)
2144 except RuntimeError, ex:
2145 corefile_incomp = corefile+'-incomplete'
2146 try:
2147 os.rename(corefile, corefile_incomp)
2148 except:
2149 pass
2151 log.error("core dump failed: id = %s name = %s: %s",
2152 self.domid, self.info['name_label'], str(ex))
2153 raise XendError("Failed to dump core: %s" % str(ex))
2154 finally:
2155 self._removeVm(DUMPCORE_IN_PROGRESS)
2158 # Device creation/deletion functions
2161 def _createDevice(self, deviceClass, devConfig):
2162 return self.getDeviceController(deviceClass).createDevice(devConfig)
2164 def _waitForDevice(self, deviceClass, devid):
2165 return self.getDeviceController(deviceClass).waitForDevice(devid)
2167 def _waitForDeviceUUID(self, dev_uuid):
2168 deviceClass, config = self.info['devices'].get(dev_uuid)
2169 self._waitForDevice(deviceClass, config['devid'])
2171 def _waitForDevice_destroy(self, deviceClass, devid, backpath):
2172 return self.getDeviceController(deviceClass).waitForDevice_destroy(
2173 devid, backpath)
2175 def _reconfigureDevice(self, deviceClass, devid, devconfig):
2176 return self.getDeviceController(deviceClass).reconfigureDevice(
2177 devid, devconfig)
2179 def _createDevices(self):
2180 """Create the devices for a vm.
2182 @raise: VmError for invalid devices
2183 """
2184 if self.image:
2185 self.image.prepareEnvironment()
2187 vscsi_uuidlist = {}
2188 vscsi_devidlist = []
2189 ordered_refs = self.info.ordered_device_refs()
2190 for dev_uuid in ordered_refs:
2191 devclass, config = self.info['devices'][dev_uuid]
2192 if devclass in XendDevices.valid_devices() and devclass != 'vscsi':
2193 log.info("createDevice: %s : %s" % (devclass, scrub_password(config)))
2194 dev_uuid = config.get('uuid')
2195 devid = self._createDevice(devclass, config)
2197 # store devid in XendConfig for caching reasons
2198 if dev_uuid in self.info['devices']:
2199 self.info['devices'][dev_uuid][1]['devid'] = devid
2201 elif devclass == 'vscsi':
2202 vscsi_config = config.get('devs', [])[0]
2203 devid = vscsi_config.get('devid', '')
2204 dev_uuid = config.get('uuid')
2205 vscsi_uuidlist[devid] = dev_uuid
2206 vscsi_devidlist.append(devid)
2208 #It is necessary to sorted it for /dev/sdxx in guest.
2209 if len(vscsi_uuidlist) > 0:
2210 vscsi_devidlist.sort()
2211 for vscsiid in vscsi_devidlist:
2212 dev_uuid = vscsi_uuidlist[vscsiid]
2213 devclass, config = self.info['devices'][dev_uuid]
2214 log.info("createDevice: %s : %s" % (devclass, scrub_password(config)))
2215 dev_uuid = config.get('uuid')
2216 devid = self._createDevice(devclass, config)
2217 # store devid in XendConfig for caching reasons
2218 if dev_uuid in self.info['devices']:
2219 self.info['devices'][dev_uuid][1]['devid'] = devid
2222 if self.image:
2223 self.image.createDeviceModel()
2225 #if have pass-through devs, need the virtual pci slots info from qemu
2226 self.sync_pcidev_info()
    def _releaseDevices(self, suspend = False):
        """Release all domain's devices.  Nothrow guarantee.

        @param suspend: not referenced by this implementation; accepted
            for interface compatibility.
        """
        if self.image:
            try:
                log.debug("Destroying device model")
                self.image.destroyDeviceModel()
            except Exception, e:
                log.exception("Device model destroy failed %s" % str(e))
        else:
            log.debug("No device model")

        log.debug("Releasing devices")
        # Transaction is used only to list the device tree, hence the
        # abort (rather than commit) in the finally clause.
        t = xstransact("%s/device" % self.dompath)
        try:
            for devclass in XendDevices.valid_devices():
                for dev in t.list(devclass):
                    try:
                        true_devclass = devclass
                        if devclass == 'vbd':
                            # In the case of "vbd", the true device class
                            # may possibly be "tap". Just in case, verify
                            # device class.
                            devid = dev.split('/')[-1]
                            true_devclass = self.getBlockDeviceClass(devid)
                        log.debug("Removing %s", dev);
                        self.destroyDevice(true_devclass, dev, False);
                    except:
                        # Log and swallow any exceptions in removal --
                        # there's nothing more we can do.
                        log.exception("Device release failed: %s; %s; %s",
                                      self.info['name_label'],
                                      true_devclass, dev)
        finally:
            t.abort()
2263 def getDeviceController(self, name):
2264 """Get the device controller for this domain, and if it
2265 doesn't exist, create it.
2267 @param name: device class name
2268 @type name: string
2269 @rtype: subclass of DevController
2270 """
2271 if name not in self._deviceControllers:
2272 devController = XendDevices.make_controller(name, self)
2273 if not devController:
2274 raise XendError("Unknown device type: %s" % name)
2275 self._deviceControllers[name] = devController
2277 return self._deviceControllers[name]
2280 # Migration functions (public)
2283 def testMigrateDevices(self, network, dst):
2284 """ Notify all device about intention of migration
2285 @raise: XendError for a device that cannot be migrated
2286 """
2287 for (n, c) in self.info.all_devices_sxpr():
2288 rc = self.migrateDevice(n, c, network, dst, DEV_MIGRATE_TEST, self.getName())
2289 if rc != 0:
2290 raise XendError("Device of type '%s' refuses migration." % n)
    def migrateDevices(self, network, dst, step, domName=''):
        """Notify the devices about migration

        On failure, every device is driven through recovery: the first
        ctr devices (those that completed the current step) are recovered
        at this step, the rest at the previous step, then the exception
        is re-raised.
        """
        ctr = 0
        try:
            for (dev_type, dev_conf) in self.info.all_devices_sxpr():
                self.migrateDevice(dev_type, dev_conf, network, dst,
                                   step, domName)
                # Count devices successfully notified at this step, so the
                # rollback below knows where the failure happened.
                ctr = ctr + 1
        except:
            for dev_type, dev_conf in self.info.all_devices_sxpr():
                # Once the counter runs out, the remaining devices never
                # saw the current step -- recover them at step - 1.
                if ctr == 0:
                    step = step - 1
                ctr = ctr - 1
                self._recoverMigrateDevice(dev_type, dev_conf, network,
                                           dst, step, domName)
            raise
2310 def migrateDevice(self, deviceClass, deviceConfig, network, dst,
2311 step, domName=''):
2312 return self.getDeviceController(deviceClass).migrate(deviceConfig,
2313 network, dst, step, domName)
2315 def _recoverMigrateDevice(self, deviceClass, deviceConfig, network,
2316 dst, step, domName=''):
2317 return self.getDeviceController(deviceClass).recover_migrate(
2318 deviceConfig, network, dst, step, domName)
2321 ## private:
    def _constructDomain(self):
        """Construct the domain.

        Creates the domain via xc.domain_create, applies HVM platform
        parameters, validates credit-scheduler and VT-d settings, and
        registers the domain with XendDomain.

        @raise: VmError on error
        """

        log.debug('XendDomainInfo.constructDomain')

        self.shutdownStartTime = None
        self.restart_in_progress = False

        hap = 0
        hvm = self.info.is_hvm()
        if hvm:
            hap = self.info.is_hap()
            info = xc.xeninfo()
            if 'hvm' not in info['xen_caps']:
                raise VmError("HVM guest support is unavailable: is VT/AMD-V "
                              "supported by your CPU and enabled in your "
                              "BIOS?")

        # Hack to pre-reserve some memory for initial domain creation.
        # There is an implicit memory overhead for any domain creation. This
        # overhead is greater for some types of domain than others. For
        # example, an x86 HVM domain will have a default shadow-pagetable
        # allocation of 1MB. We free up 4MB here to be on the safe side.
        # 2MB memory allocation was not enough in some cases, so it's 4MB now
        balloon.free(4*1024, self) # 4MB should be plenty

        ssidref = 0
        if security.on() == xsconstants.XS_POLICY_USE:
            ssidref = security.calc_dom_ssidref_from_info(self.info)
            if security.has_authorization(ssidref) == False:
                raise VmError("VM is not authorized to run.")

        s3_integrity = 0
        if self.info.has_key('s3_integrity'):
            s3_integrity = self.info['s3_integrity']
        # Pack creation flags: bit 0 = HVM, bit 1 = HAP, bit 2 = S3 integrity.
        flags = (int(hvm) << 0) | (int(hap) << 1) | (int(s3_integrity) << 2)

        try:
            self.domid = xc.domain_create(
                domid = 0,
                ssidref = ssidref,
                handle = uuid.fromString(self.info['uuid']),
                flags = flags,
                target = self.info.target())
        except Exception, e:
            # may get here if due to ACM the operation is not permitted
            if security.on() == xsconstants.XS_POLICY_ACM:
                raise VmError('Domain in conflict set with running domain?')
            # NOTE(review): non-ACM exceptions are swallowed here and only
            # surface via the domid check below.

        if self.domid < 0:
            raise VmError('Creating domain failed: name=%s' %
                          self.info['name_label'])

        self.dompath = GetDomainPath(self.domid)

        self._recreateDom()

        # Set timer configration of domain
        timer_mode = self.info["platform"].get("timer_mode")
        if hvm and timer_mode is not None:
            xc.hvm_set_param(self.domid, HVM_PARAM_TIMER_MODE,
                             long(timer_mode))

        # Set Viridian interface configuration of domain
        viridian = self.info["platform"].get("viridian")
        if arch.type == "x86" and hvm and viridian is not None:
            xc.hvm_set_param(self.domid, HVM_PARAM_VIRIDIAN, long(viridian))

        # Optionally enable virtual HPET
        hpet = self.info["platform"].get("hpet")
        if hvm and hpet is not None:
            xc.hvm_set_param(self.domid, HVM_PARAM_HPET_ENABLED,
                             long(hpet))

        # Optionally enable periodic vpt aligning
        vpt_align = self.info["platform"].get("vpt_align")
        if hvm and vpt_align is not None:
            xc.hvm_set_param(self.domid, HVM_PARAM_VPT_ALIGN,
                             long(vpt_align))

        # Set maximum number of vcpus in domain
        xc.domain_max_vcpus(self.domid, int(self.info['VCPUs_max']))

        # Check for cpu_{cap|weight} validity for credit scheduler
        if XendNode.instance().xenschedinfo() == 'credit':
            cap = self.getCap()
            weight = self.getWeight()

            assert type(weight) == int
            assert type(cap) == int

            if weight < 1 or weight > 65535:
                raise VmError("Cpu weight out of range, valid values are within range from 1 to 65535")

            if cap < 0 or cap > self.getVCpuCount() * 100:
                raise VmError("Cpu cap out of range, valid range is from 0 to %s for specified number of vcpus" %
                              (self.getVCpuCount() * 100))

        # Test whether the devices can be assigned with VT-d
        pci = self.info["platform"].get("pci")
        pci_str = ''
        if pci and len(pci) > 0:
            pci = map(lambda x: x[0:4], pci) # strip options
            pci_str = str(pci)
        if hvm and pci_str:
            bdf = xc.test_assign_device(0, pci_str)
            if bdf != 0:
                if bdf == -1:
                    raise VmError("failed to assign device: maybe the platform"
                                  " doesn't support VT-d, or VT-d isn't enabled"
                                  " properly?")
                # Decode the offending device's bus/dev/func from the
                # packed bdf value for the error message.
                bus = (bdf >> 16) & 0xff
                devfn = (bdf >> 8) & 0xff
                dev = (devfn >> 3) & 0x1f
                func = devfn & 0x7
                raise VmError("fail to assign device(%x:%x.%x): maybe it has"
                              " already been assigned to other domain, or maybe"
                              " it doesn't exist." % (bus, dev, func))

        # register the domain in the list
        from xen.xend import XendDomain
        XendDomain.instance().add_domain(self)
    def _introduceDomain(self):
        """Introduce the domain to xenstore via IntroduceDomain, using the
        previously recorded store MFN and event-channel port.

        @raise XendError: (wrapping RuntimeError) on failure
        """
        assert self.domid is not None
        assert self.store_mfn is not None
        assert self.store_port is not None

        try:
            IntroduceDomain(self.domid, self.store_mfn, self.store_port)
        except RuntimeError, exn:
            raise XendError(str(exn))
    def _setTarget(self, target):
        """Call SetTarget for this domain and record the value under
        'target' in the domain's xenstore tree.

        @raise XendError: (wrapping RuntimeError) on failure
        """
        assert self.domid is not None

        try:
            SetTarget(self.domid, target)
            self.storeDom('target', target)
        except RuntimeError, exn:
            raise XendError(str(exn))
    def _setCPUAffinity(self):
        """ Repin domain vcpus if a restricted cpus list is provided.

        If no explicit 'cpus' restriction is configured and the host has
        more than one NUMA node, pick the least-loaded candidate node with
        enough free memory and pin all vcpus to that node's cpus.
        """

        def has_cpus():
            # True if any vcpu has a non-empty restricted cpu list.
            if self.info['cpus'] is not None:
                for c in self.info['cpus']:
                    if c:
                        return True
            return False

        if has_cpus():
            for v in range(0, self.info['VCPUs_max']):
                if self.info['cpus'][v]:
                    xc.vcpu_setaffinity(self.domid, v, self.info['cpus'][v])
        else:
            def find_relaxed_node(node_list):
                # Return the index of the least-loaded node from node_list
                # (all nodes when node_list is None).  Load counts, per
                # node, the online vcpus of other domains whose cpumap
                # intersects that node's cpus.
                import sys
                nr_nodes = info['nr_nodes']
                if node_list is None:
                    node_list = range(0, nr_nodes)
                nodeload = [0]
                nodeload = nodeload * nr_nodes
                from xen.xend import XendDomain
                doms = XendDomain.instance().list('all')
                for dom in filter (lambda d: d.domid != self.domid, doms):
                    cpuinfo = dom.getVCPUInfo()
                    for vcpu in sxp.children(cpuinfo, 'vcpu'):
                        if sxp.child_value(vcpu, 'online') == 0: continue
                        cpumap = list(sxp.child_value(vcpu,'cpumap'))
                        for i in range(0, nr_nodes):
                            node_cpumask = info['node_to_cpu'][i]
                            for j in node_cpumask:
                                if j in cpumap:
                                    nodeload[i] += 1
                                    break
                for i in range(0, nr_nodes):
                    if len(info['node_to_cpu'][i]) > 0 and i in node_list:
                        # Normalise the load by the node's cpu count.
                        nodeload[i] = int(nodeload[i] * 16 / len(info['node_to_cpu'][i]))
                    else:
                        # Exclude cpu-less and non-candidate nodes.
                        nodeload[i] = sys.maxint
                index = nodeload.index( min(nodeload) )
                return index

            info = xc.physinfo()
            if info['nr_nodes'] > 1:
                node_memory_list = info['node_to_memory']
                needmem = self.image.getRequiredAvailableMemory(self.info['memory_dynamic_max']) / 1024
                candidate_node_list = []
                for i in range(0, info['nr_nodes']):
                    # A candidate node needs enough free memory and at
                    # least one cpu.
                    if node_memory_list[i] >= needmem and len(info['node_to_cpu'][i]) > 0:
                        candidate_node_list.append(i)
                index = find_relaxed_node(candidate_node_list)
                cpumask = info['node_to_cpu'][index]
                for v in range(0, self.info['VCPUs_max']):
                    xc.vcpu_setaffinity(self.domid, v, cpumask)
    def _initDomain(self):
        """Initialise the constructed domain: run the bootloader, create
        the image, pin vcpus, size and reserve memory, create channels and
        devices, and move the domain to the running state.

        @raise VmError: on failure (RuntimeError from hypercalls is
            wrapped into VmError); bootloader state is cleaned up first.
        """
        log.debug('XendDomainInfo.initDomain: %s %s',
                  self.domid,
                  self.info['vcpus_params']['weight'])

        self._configureBootloader()

        try:
            self.image = image.create(self, self.info)

            # repin domain vcpus if a restricted cpus list is provided
            # this is done prior to memory allocation to aide in memory
            # distribution for NUMA systems.
            self._setCPUAffinity()

            # Use architecture- and image-specific calculations to determine
            # the various headrooms necessary, given the raw configured
            # values. maxmem, memory, and shadow are all in KiB.
            # but memory_static_max etc are all stored in bytes now.
            memory = self.image.getRequiredAvailableMemory(
                self.info['memory_dynamic_max'] / 1024)
            maxmem = self.image.getRequiredAvailableMemory(
                self.info['memory_static_max'] / 1024)
            shadow = self.image.getRequiredShadowMemory(
                self.info['shadow_memory'] * 1024,
                self.info['memory_static_max'] / 1024)

            log.debug("_initDomain:shadow_memory=0x%x, memory_static_max=0x%x, memory_static_min=0x%x.", self.info['shadow_memory'], self.info['memory_static_max'], self.info['memory_static_min'],)
            # Round shadow up to a multiple of a MiB, as shadow_mem_control
            # takes MiB and we must not round down and end up under-providing.
            shadow = ((shadow + 1023) / 1024) * 1024

            # set memory limit
            xc.domain_setmaxmem(self.domid, maxmem)

            # Reserve 1 page per MiB of RAM for separate VT-d page table.
            vtd_mem = 4 * (self.info['memory_static_max'] / 1024 / 1024)
            # Round vtd_mem up to a multiple of a MiB.
            vtd_mem = ((vtd_mem + 1023) / 1024) * 1024

            # Make sure there's enough RAM available for the domain
            balloon.free(memory + shadow + vtd_mem, self)

            # Set up the shadow memory
            shadow_cur = xc.shadow_mem_control(self.domid, shadow / 1024)
            self.info['shadow_memory'] = shadow_cur

            # machine address size
            if self.info.has_key('machine_address_size'):
                log.debug("_initDomain: setting maximum machine address size %d" % self.info['machine_address_size'])
                xc.domain_set_machine_address_size(self.domid, self.info['machine_address_size'])

            if self.info.has_key('suppress_spurious_page_faults') and self.info['suppress_spurious_page_faults']:
                log.debug("_initDomain: suppressing spurious page faults")
                xc.domain_suppress_spurious_page_faults(self.domid)

            self._createChannels()

            channel_details = self.image.createImage()

            # Record the ring details the image builder handed back.
            self.store_mfn = channel_details['store_mfn']
            if 'console_mfn' in channel_details:
                self.console_mfn = channel_details['console_mfn']
            if 'notes' in channel_details:
                self.info.set_notes(channel_details['notes'])
            if 'native_protocol' in channel_details:
                self.native_protocol = channel_details['native_protocol'];

            self._introduceDomain()
            if self.info.target():
                self._setTarget(self.info.target())

            self._createDevices()

            self.image.cleanupBootloading()

            self.info['start_time'] = time.time()

            self._stateSet(DOM_STATE_RUNNING)
        except VmError, exn:
            log.exception("XendDomainInfo.initDomain: exception occurred")
            if self.image:
                self.image.cleanupBootloading()
            raise exn
        except RuntimeError, exn:
            log.exception("XendDomainInfo.initDomain: exception occurred")
            if self.image:
                self.image.cleanupBootloading()
            raise VmError(str(exn))
    def cleanupDomain(self):
        """Cleanup domain resources; release devices.  Idempotent.  Nothrow
        guarantee."""

        # Serialise against shutdown processing.
        self.refresh_shutdown_lock.acquire()
        try:
            self.unwatchShutdown()
            self._releaseDevices()
            bootloader_tidy(self)

            if self.image:
                self.image = None

            try:
                self._removeDom()
            except:
                log.exception("Removing domain path failed.")

            self._stateSet(DOM_STATE_HALTED)
            self.domid = None # Do not push into _stateSet()!
        finally:
            self.refresh_shutdown_lock.release()
2642 def unwatchShutdown(self):
2643 """Remove the watch on the domain's control/shutdown node, if any.
2644 Idempotent. Nothrow guarantee. Expects to be protected by the
2645 refresh_shutdown_lock."""
2647 try:
2648 try:
2649 if self.shutdownWatch:
2650 self.shutdownWatch.unwatch()
2651 finally:
2652 self.shutdownWatch = None
2653 except:
2654 log.exception("Unwatching control/shutdown failed.")
2656 def waitForShutdown(self):
2657 self.state_updated.acquire()
2658 try:
2659 while self._stateGet() in (DOM_STATE_RUNNING,DOM_STATE_PAUSED):
2660 self.state_updated.wait(timeout=1.0)
2661 finally:
2662 self.state_updated.release()
    def waitForSuspend(self):
        """Wait for the guest to respond to a suspend request by
        shutting down. If the guest hasn't re-written control/shutdown
        after a certain amount of time, it's obviously not listening and
        won't suspend, so we give up. HVM guests with no PV drivers
        should already be shutdown.

        @raise XendError: if the guest never acknowledges the request.
        """
        state = "suspend"
        nr_tries = 60

        self.state_updated.acquire()
        try:
            while self._stateGet() in (DOM_STATE_RUNNING,DOM_STATE_PAUSED):
                self.state_updated.wait(1.0)
                # Re-read control/shutdown once per second; while it still
                # reads "suspend" the guest has not acted on the request.
                if state == "suspend":
                    if nr_tries == 0:
                        msg = ('Timeout waiting for domain %s to suspend'
                               % self.domid)
                        # Clear the request so the guest doesn't act on it
                        # later, then give up.
                        self._writeDom('control/shutdown', '')
                        raise XendError(msg)
                    state = self.readDom('control/shutdown')
                    nr_tries -= 1
        finally:
            self.state_updated.release()
2690 # TODO: recategorise - called from XendCheckpoint
    def completeRestore(self, store_mfn, console_mfn):
        """Complete a restore (called from XendCheckpoint): record the
        store and console MFNs, re-introduce the domain to xenstore,
        recreate the image and device model, and restart the watches and
        shutdown processing.

        @param store_mfn: frame number of the restored xenstore ring
        @param console_mfn: frame number of the restored console ring
        """

        log.debug("XendDomainInfo.completeRestore")

        self.store_mfn = store_mfn
        self.console_mfn = console_mfn

        self._introduceDomain()
        self.image = image.create(self, self.info)
        if self.image:
            # True flag: presumably "restoring" mode for the device model
            # -- confirm against image.createDeviceModel.
            self.image.createDeviceModel(True)
        self._storeDomDetails()
        self._registerWatches()
        self.refreshShutdown()

        log.debug("XendDomainInfo.completeRestore done")
    def _endRestore(self):
        """Clear the resume flag once a restore has finished."""
        self.setResume(False)
2715 # VM Destroy
2718 def _prepare_phantom_paths(self):
2719 # get associated devices to destroy
2720 # build list of phantom devices to be removed after normal devices
2721 plist = []
2722 if self.domid is not None:
2723 t = xstransact("%s/device/vbd" % GetDomainPath(self.domid))
2724 try:
2725 for dev in t.list():
2726 backend_phantom_vbd = xstransact.Read("%s/device/vbd/%s/phantom_vbd" \
2727 % (self.dompath, dev))
2728 if backend_phantom_vbd is not None:
2729 frontend_phantom_vbd = xstransact.Read("%s/frontend" \
2730 % backend_phantom_vbd)
2731 plist.append(backend_phantom_vbd)
2732 plist.append(frontend_phantom_vbd)
2733 finally:
2734 t.abort()
2735 return plist
2737 def _cleanup_phantom_devs(self, plist):
2738 # remove phantom devices
2739 if not plist == []:
2740 time.sleep(2)
2741 for paths in plist:
2742 if paths.find('backend') != -1:
2743 # Modify online status /before/ updating state (latter is watched by
2744 # drivers, so this ordering avoids a race).
2745 xstransact.Write(paths, 'online', "0")
2746 xstransact.Write(paths, 'state', str(xenbusState['Closing']))
2747 # force
2748 xstransact.Remove(paths)
    def destroy(self):
        """Cleanup VM and destroy domain.  Nothrow guarantee."""

        if self.domid is None:
            return

        from xen.xend import XendDomain
        log.debug("XendDomainInfo.destroy: domid=%s", str(self.domid))

        # Phantom vbd paths must be collected before destruction and
        # removed after the normal devices.
        paths = self._prepare_phantom_paths()

        if self.dompath is not None:
            try:
                xc.domain_destroy_hook(self.domid)
                # Pause, run do_FLR (presumably function-level reset of
                # passed-through PCI devices -- confirm), then destroy.
                xc.domain_pause(self.domid)
                do_FLR(self.domid)
                xc.domain_destroy(self.domid)
                for state in DOM_STATES_OLD:
                    self.info[state] = 0
                self._stateSet(DOM_STATE_HALTED)
            except:
                log.exception("XendDomainInfo.destroy: domain destruction failed.")

            XendDomain.instance().remove_domain(self)
            self.cleanupDomain()

        self._cleanup_phantom_devs(paths)
        self._cleanupVm()

        # Transient domains are deleted entirely once destroyed.
        if "transient" in self.info["other_config"] \
           and bool(self.info["other_config"]["transient"]):
            XendDomain.instance().domain_delete_by_dominfo(self)
    def resetDomain(self):
        """Destroy this domain and create a replacement from the same
        configuration.  Failures are logged, not raised."""
        log.debug("XendDomainInfo.resetDomain(%s)", str(self.domid))

        old_domid = self.domid
        # Keep the xend-private VM entries so they can be copied into the
        # replacement domain.
        prev_vm_xend = self._listRecursiveVm('xend')
        new_dom_info = self.info
        try:
            self._unwatchVm()
            self.destroy()

            new_dom = None
            try:
                from xen.xend import XendDomain
                # Clear the stale domid so the new domain gets a fresh one.
                new_dom_info['domid'] = None
                new_dom = XendDomain.instance().domain_create_from_dict(
                    new_dom_info)
                for x in prev_vm_xend[0][1]:
                    new_dom._writeVm('xend/%s' % x[0], x[1])
                new_dom.waitForDevices()
                new_dom.unpause()
            except:
                if new_dom:
                    new_dom.destroy()
                raise
        except:
            log.exception('Failed to reset domain %s.', str(old_domid))
    def resumeDomain(self):
        """Resume a suspended domain (e.g. after live checkpoint, or after
        a later error during save or migrate).  Checks first that the
        domain really is suspended, so it is safe to call from anywhere;
        otherwise it returns silently."""
        log.debug("XendDomainInfo.resumeDomain(%s)", str(self.domid))

        xeninfo = dom_get(self.domid)
        if xeninfo is None:
            return
        if not xeninfo['shutdown']:
            return
        reason = shutdown_reason(xeninfo['shutdown_reason'])
        if reason != 'suspend':
            return

        try:
            # could also fetch a parsed note from xenstore
            fast = self.info.get_notes().get('SUSPEND_CANCEL') and 1 or 0
            if not fast:
                # Full resume: tear down and recreate devices and channels.
                self._releaseDevices()
                self.testDeviceComplete()
                self.testvifsComplete()
                log.debug("XendDomainInfo.resumeDomain: devices released")

                self._resetChannels()

                self._removeDom('control/shutdown')
                self._removeDom('device-misc/vif/nextDeviceID')

                self._createChannels()
                self._introduceDomain()
                self._storeDomDetails()

                self._createDevices()
                log.debug("XendDomainInfo.resumeDomain: devices created")

            xc.domain_resume(self.domid, fast)
            ResumeDomain(self.domid)
        except:
            log.exception("XendDomainInfo.resume: xc.domain_resume failed on domain %s." % (str(self.domid)))
        # NOTE(review): assumes self.image is set here -- confirm.
        self.image.resumeDeviceModel()
        log.debug("XendDomainInfo.resumeDomain: completed")
2858 # Channels for xenstore and console
    def _createChannels(self):
        """Create the channels to the domain.

        Allocates the xenstore channel first, then the console channel.
        """
        self.store_port = self._createChannel()
        self.console_port = self._createChannel()
2868 def _createChannel(self):
2869 """Create an event channel to the domain.
2870 """
2871 try:
2872 if self.domid != None:
2873 return xc.evtchn_alloc_unbound(domid = self.domid,
2874 remote_dom = 0)
2875 except:
2876 log.exception("Exception in alloc_unbound(%s)", str(self.domid))
2877 raise
2879 def _resetChannels(self):
2880 """Reset all event channels in the domain.
2881 """
2882 try:
2883 if self.domid != None:
2884 return xc.evtchn_reset(dom = self.domid)
2885 except:
2886 log.exception("Exception in evtcnh_reset(%s)", str(self.domid))
2887 raise
2891 # Bootloader configuration
    def _configureBootloader(self):
        """Run the bootloader if we're configured to do so.

        For HVM boot or a dom0-supplied kernel there is nothing to do.
        Otherwise the (default pygrub) bootloader is run against the first
        bootable disk, and its output is merged into self.info.

        @raise VmError: if no disk is bootable or the bootloader produced
            no configuration.
        """

        blexec = self.info['PV_bootloader']
        bootloader_args = self.info['PV_bootloader_args']
        kernel = self.info['PV_kernel']
        ramdisk = self.info['PV_ramdisk']
        args = self.info['PV_args']
        boot = self.info['HVM_boot_policy']

        if boot:
            # HVM booting.
            pass
        elif not blexec and kernel:
            # Boot from dom0.  Nothing left to do -- the kernel and ramdisk
            # will be picked up by image.py.
            pass
        else:
            # Boot using bootloader
            if not blexec or blexec == 'pygrub':
                blexec = auxbin.pathTo('pygrub')

            blcfg = None
            disks = [x for x in self.info['vbd_refs']
                     if self.info['devices'][x][1]['bootable']]

            if not disks:
                msg = "Had a bootloader specified, but no disks are bootable"
                log.error(msg)
                raise VmError(msg)

            devinfo = self.info['devices'][disks[0]]
            devtype = devinfo[0]
            disk = devinfo[1]['uname']

            fn = blkdev_uname_to_file(disk)
            taptype = blkdev_uname_to_taptype(disk)
            # A tap disk backed by a plain file (st_rdev == 0) in a format
            # other than raw must be mounted before the bootloader can
            # read it.
            mounted = devtype == 'tap' and taptype != 'aio' and taptype != 'sync' and not os.stat(fn).st_rdev
            if mounted:
                # This is a file, not a device.  pygrub can cope with a
                # file if it's raw, but if it's QCOW or other such formats
                # used through blktap, then we need to mount it first.

                log.info("Mounting %s on %s." %
                         (fn, BOOTLOADER_LOOPBACK_DEVICE))

                vbd = {
                    'mode': 'RO',
                    'device': BOOTLOADER_LOOPBACK_DEVICE,
                    }

                from xen.xend import XendDomain
                dom0 = XendDomain.instance().privilegedDomain()
                dom0._waitForDeviceUUID(dom0.create_vbd(vbd, disk))
                fn = BOOTLOADER_LOOPBACK_DEVICE

            try:
                blcfg = bootloader(blexec, fn, self, False,
                                   bootloader_args, kernel, ramdisk, args)
            finally:
                # Always undo the loopback mount, even if the bootloader
                # failed.
                if mounted:
                    log.info("Unmounting %s from %s." %
                             (fn, BOOTLOADER_LOOPBACK_DEVICE))

                    dom0.destroyDevice('tap', BOOTLOADER_LOOPBACK_DEVICE)

            if blcfg is None:
                msg = "Had a bootloader specified, but can't find disk"
                log.error(msg)
                raise VmError(msg)

            self.info.update_with_image_sxp(blcfg, True)
2969 # VM Functions
    def _readVMDetails(self, params):
        """Read the specified parameters from the store.

        @param params: parameter specifications understood by _gatherVm
        @return: the gathered values, or [] if the stored data is corrupt
        """
        try:
            return self._gatherVm(*params)
        except ValueError:
            # One of the int/float entries in params has a corresponding store
            # entry that is invalid. We recover, because older versions of
            # Xend may have put the entry there (memory/target, for example),
            # but this is in general a bad situation to have reached.
            log.exception(
                "Store corrupted at %s! Domain %d's configuration may be "
                "affected.", self.vmpath, self.domid)
            return []
2987 def _cleanupVm(self):
2988 """Cleanup VM resources. Idempotent. Nothrow guarantee."""
2990 self._unwatchVm()
2992 try:
2993 self._removeVm()
2994 except:
2995 log.exception("Removing VM path failed.")
    def checkLiveMigrateMemory(self):
        """ Make sure there's enough memory to migrate this domain """
        overhead_kb = 0
        if arch.type == "x86":
            # 1MB per vcpu plus 4Kib/Mib of RAM. This is higher than
            # the minimum that Xen would allocate if no value were given.
            overhead_kb = self.info['VCPUs_max'] * 1024 + \
                          (self.info['memory_static_max'] / 1024 / 1024) * 4
            # Round the overhead up to a whole MiB.
            overhead_kb = ((overhead_kb + 1023) / 1024) * 1024
            # The domain might already have some shadow memory
            overhead_kb -= xc.shadow_mem_control(self.domid) * 1024
        if overhead_kb > 0:
            # Ask the balloon driver to make the remaining headroom free.
            balloon.free(overhead_kb, self)
3012 def _unwatchVm(self):
3013 """Remove the watch on the VM path, if any. Idempotent. Nothrow
3014 guarantee."""
3015 try:
3016 try:
3017 if self.vmWatch:
3018 self.vmWatch.unwatch()
3019 finally:
3020 self.vmWatch = None
3021 except:
3022 log.exception("Unwatching VM path failed.")
3024 def testDeviceComplete(self):
3025 """ For Block IO migration safety we must ensure that
3026 the device has shutdown correctly, i.e. all blocks are
3027 flushed to disk
3028 """
3029 start = time.time()
3030 while True:
3031 test = 0
3032 diff = time.time() - start
3033 vbds = self.getDeviceController('vbd').deviceIDs()
3034 taps = self.getDeviceController('tap').deviceIDs()
3035 for i in vbds + taps:
3036 test = 1
3037 log.info("Dev %s still active, looping...", i)
3038 time.sleep(0.1)
3040 if test == 0:
3041 break
3042 if diff >= MIGRATE_TIMEOUT:
3043 log.info("Dev still active but hit max loop timeout")
3044 break
3046 def testvifsComplete(self):
3047 """ In case vifs are released and then created for the same
3048 domain, we need to wait the device shut down.
3049 """
3050 start = time.time()
3051 while True:
3052 test = 0
3053 diff = time.time() - start
3054 for i in self.getDeviceController('vif').deviceIDs():
3055 test = 1
3056 log.info("Dev %s still active, looping...", i)
3057 time.sleep(0.1)
3059 if test == 0:
3060 break
3061 if diff >= MIGRATE_TIMEOUT:
3062 log.info("Dev still active but hit max loop timeout")
3063 break
3065 def _storeVmDetails(self):
3066 to_store = {}
3068 for key in XendConfig.LEGACY_XENSTORE_VM_PARAMS:
3069 info_key = XendConfig.LEGACY_CFG_TO_XENAPI_CFG.get(key, key)
3070 if self._infoIsSet(info_key):
3071 to_store[key] = str(self.info[info_key])
3073 if self._infoIsSet("static_memory_min"):
3074 to_store["memory"] = str(self.info["static_memory_min"])
3075 if self._infoIsSet("static_memory_max"):
3076 to_store["maxmem"] = str(self.info["static_memory_max"])
3078 image_sxpr = self.info.image_sxpr()
3079 if image_sxpr:
3080 to_store['image'] = sxp.to_string(image_sxpr)
3082 if not self._readVm('xend/restart_count'):
3083 to_store['xend/restart_count'] = str(0)
3085 log.debug("Storing VM details: %s", scrub_password(to_store))
3087 self._writeVm(to_store)
3088 self._setVmPermissions()
3090 def _setVmPermissions(self):
3091 """Allow the guest domain to read its UUID. We don't allow it to
3092 access any other entry, for security."""
3093 xstransact.SetPermissions('%s/uuid' % self.vmpath,
3094 { 'dom' : self.domid,
3095 'read' : True,
3096 'write' : False })
3099 # Utility functions
3102 def __getattr__(self, name):
3103 if name == "state":
3104 log.warn("Somebody tried to read XendDomainInfo.state... should us _stateGet()!!!")
3105 log.warn("".join(traceback.format_stack()))
3106 return self._stateGet()
3107 else:
3108 raise AttributeError(name)
3110 def __setattr__(self, name, value):
3111 if name == "state":
3112 log.warn("Somebody tried to set XendDomainInfo.state... should us _stateGet()!!!")
3113 log.warn("".join(traceback.format_stack()))
3114 self._stateSet(value)
3115 else:
3116 self.__dict__[name] = value
    def _stateSet(self, state):
        """Publish a power-state change: wake waiters on state_updated and
        fire a Xen-API 'power_state' event, but only when the live state
        (per _stateGet) actually differs from *state*."""
        self.state_updated.acquire()
        try:
            # TODO Not sure this is correct...
            # _stateGet is live now. Why not fire event
            # even when it hasn't changed?
            if self._stateGet() != state:
                self.state_updated.notifyAll()
                # Imported here rather than at module level -- presumably
                # to avoid an import cycle; TODO confirm.
                import XendAPI
                XendAPI.event_dispatch('mod', 'VM', self.info['uuid'],
                                       'power_state')
        finally:
            self.state_updated.release()
    def _stateGet(self):
        """Derive the Xen-API power state of this domain live from xc.

        A domain absent from xc (or marked shut down) is SUSPENDED if a
        managed checkpoint file exists for it, otherwise HALTED.
        """
        # Lets try and reconsitute the state from xc
        # first lets try and get the domain info
        # from xc - this will tell us if the domain
        # exists
        info = dom_get(self.getDomid())
        if info is None or info['shutdown']:
            # We are either HALTED or SUSPENDED
            # check saved image exists
            from xen.xend import XendDomain
            managed_config_path = \
                XendDomain.instance()._managed_check_point_path( \
                    self.get_uuid())
            if os.path.exists(managed_config_path):
                return XEN_API_VM_POWER_STATE_SUSPENDED
            else:
                return XEN_API_VM_POWER_STATE_HALTED
        elif info['crashed']:
            # Crashed
            return XEN_API_VM_POWER_STATE_CRASHED
        else:
            # We are either RUNNING or PAUSED
            if info['paused']:
                return XEN_API_VM_POWER_STATE_PAUSED
            else:
                return XEN_API_VM_POWER_STATE_RUNNING
3159 def _infoIsSet(self, name):
3160 return name in self.info and self.info[name] is not None
3162 def _checkName(self, name):
3163 """Check if a vm name is valid. Valid names contain alphabetic
3164 characters, digits, or characters in '_-.:/+'.
3165 The same name cannot be used for more than one vm at the same time.
3167 @param name: name
3168 @raise: VmError if invalid
3169 """
3170 from xen.xend import XendDomain
3172 if name is None or name == '':
3173 raise VmError('Missing VM Name')
3175 if not re.search(r'^[A-Za-z0-9_\-\.\:\/\+]+$', name):
3176 raise VmError('Invalid VM Name')
3178 dom = XendDomain.instance().domain_lookup_nr(name)
3179 if dom and dom.info['uuid'] != self.info['uuid']:
3180 raise VmError("VM name '%s' already exists%s" %
3181 (name,
3182 dom.domid is not None and
3183 (" as domain %s" % str(dom.domid)) or ""))
    def update(self, info = None, refresh = True, transaction = None):
        """Update with info from xc.domain_getinfo().

        @param info: pre-fetched domain info dict; when not supplied it is
                     fetched via dom_get (and the call is a no-op if the
                     domain no longer exists)
        @param refresh: when True, also re-evaluate shutdown status
        @param transaction: xenstore transaction used for console updates
        """
        log.trace("XendDomainInfo.update(%s) on domain %s", info,
                  str(self.domid))

        if not info:
            info = dom_get(self.domid)
            if not info:
                return

        if info["maxmem_kb"] < 0:
            # Negative means unlimited; substitute the host's total memory.
            info["maxmem_kb"] = XendNode.instance() \
                                .physinfo_dict()['total_memory'] * 1024

        # make sure state is reset for info
        # TODO: we should eventually get rid of old_dom_states

        self.info.update_config(info)
        self._update_consoles(transaction)

        if refresh:
            self.refreshShutdown(info)

        log.trace("XendDomainInfo.update done on domain %s: %s",
                  str(self.domid), self.info)
3213 def sxpr(self, ignore_store = False, legacy_only = True):
3214 result = self.info.to_sxp(domain = self,
3215 ignore_devices = ignore_store,
3216 legacy_only = legacy_only)
3218 return result
3220 # Xen API
3221 # ----------------------------------------------------------------
3223 def get_uuid(self):
3224 dom_uuid = self.info.get('uuid')
3225 if not dom_uuid: # if it doesn't exist, make one up
3226 dom_uuid = uuid.createString()
3227 self.info['uuid'] = dom_uuid
3228 return dom_uuid
    # Xen-API memory accessors; all default to 0 when the field is unset.
    def get_memory_static_max(self):
        return self.info.get('memory_static_max', 0)
    def get_memory_static_min(self):
        return self.info.get('memory_static_min', 0)
    def get_memory_dynamic_max(self):
        return self.info.get('memory_dynamic_max', 0)
    def get_memory_dynamic_min(self):
        return self.info.get('memory_dynamic_min', 0)
3239 # only update memory-related config values if they maintain sanity
3240 def _safe_set_memory(self, key, newval):
3241 oldval = self.info.get(key, 0)
3242 try:
3243 self.info[key] = newval
3244 self.info._memory_sanity_check()
3245 except Exception, ex:
3246 self.info[key] = oldval
3247 raise
    # Xen-API memory mutators; each delegates to _safe_set_memory, which
    # rolls back and re-raises if the new value breaks memory sanity.
    def set_memory_static_max(self, val):
        self._safe_set_memory('memory_static_max', val)
    def set_memory_static_min(self, val):
        self._safe_set_memory('memory_static_min', val)
    def set_memory_dynamic_max(self, val):
        self._safe_set_memory('memory_dynamic_max', val)
    def set_memory_dynamic_min(self, val):
        self._safe_set_memory('memory_dynamic_min', val)
3258 def get_vcpus_params(self):
3259 if self.getDomid() is None:
3260 return self.info['vcpus_params']
3262 retval = xc.sched_credit_domain_get(self.getDomid())
3263 return retval
    # Simple Xen-API accessors over self.info / metrics.
    def get_power_state(self):
        # Map the live state (from _stateGet) to its Xen-API string.
        return XEN_API_VM_POWER_STATE[self._stateGet()]
    def get_platform(self):
        return self.info.get('platform', {})
    def get_pci_bus(self):
        return self.info.get('pci_bus', '')
    def get_tools_version(self):
        return self.info.get('tools_version', {})
    def get_metrics(self):
        return self.metrics.get_uuid();
3276 def get_security_label(self, xspol=None):
3277 import xen.util.xsm.xsm as security
3278 label = security.get_security_label(self, xspol)
3279 return label
    def set_security_label(self, seclab, old_seclab, xspol=None,
                           xspol_old=None):
        """
        Set the security label of a domain from its old to
        a new value.
        @param seclab  New security label formatted in the form
                       <policy type>:<policy name>:<vm label>
        @param old_seclab  The current security label that the
                           VM must have.
        @param xspol   An optional policy under which this
                       update should be done. If not given,
                       then the current active policy is used.
        @param xspol_old The old policy; only to be passed during
                         the updating of a policy
        @return Returns return code, a string with errors from
                the hypervisor's operation, old label of the
                domain
        """
        rc = 0
        errors = ""
        old_label = ""
        new_ssidref = 0
        domid = self.getDomid()
        res_labels = None
        is_policy_update = (xspol_old != None)

        from xen.xend.XendXSPolicyAdmin import XSPolicyAdminInstance

        state = self._stateGet()
        # Relabel only HALTED or RUNNING or PAUSED domains
        if domid != 0 and \
           state not in \
              [ DOM_STATE_HALTED, DOM_STATE_RUNNING, DOM_STATE_PAUSED, \
                DOM_STATE_SUSPENDED ]:
            log.warn("Relabeling domain not possible in state '%s'" %
                     DOM_STATES[state])
            return (-xsconstants.XSERR_VM_WRONG_STATE, "", "", 0)

        # Remove security label. Works only for halted or suspended domains
        if not seclab or seclab == "":
            if state not in [ DOM_STATE_HALTED, DOM_STATE_SUSPENDED ]:
                return (-xsconstants.XSERR_VM_WRONG_STATE, "", "", 0)

            if self.info.has_key('security_label'):
                old_label = self.info['security_label']
                # Check label against expected one.
                if old_label != old_seclab:
                    return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)
                del self.info['security_label']
                xen.xend.XendDomain.instance().managed_config_save(self)
            return (xsconstants.XSERR_SUCCESS, "", "", 0)

        # Labels are "<policy type>:<policy name>:<vm label>".
        tmp = seclab.split(":")
        if len(tmp) != 3:
            return (-xsconstants.XSERR_BAD_LABEL_FORMAT, "", "", 0)
        typ, policy, label = tmp

        poladmin = XSPolicyAdminInstance()
        if not xspol:
            xspol = poladmin.get_policy_by_name(policy)

        try:
            # All relabeling below happens under the xend policy lock.
            xen.xend.XendDomain.instance().policy_lock.acquire_writer()

            if state in [ DOM_STATE_RUNNING, DOM_STATE_PAUSED ]:
                #if domain is running or paused try to relabel in hypervisor
                if not xspol:
                    return (-xsconstants.XSERR_POLICY_NOT_LOADED, "", "", 0)

                if typ != xspol.get_type_name() or \
                   policy != xspol.get_name():
                    return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)

                if typ == xsconstants.ACM_POLICY_ID:
                    new_ssidref = xspol.vmlabel_to_ssidref(label)
                    if new_ssidref == xsconstants.INVALID_SSIDREF:
                        return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)

                    # Check that all used resources are accessible under the
                    # new label
                    if not is_policy_update and \
                       not security.resources_compatible_with_vmlabel(xspol,
                              self, label):
                        return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)

                    #Check label against expected one. Can only do this
                    # if the policy hasn't changed underneath in the meantime
                    if xspol_old == None:
                        old_label = self.get_security_label()
                        if old_label != old_seclab:
                            log.info("old_label != old_seclab: %s != %s" %
                                     (old_label, old_seclab))
                            return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)

                    # relabel domain in the hypervisor
                    rc, errors = security.relabel_domains([[domid, new_ssidref]])
                    log.info("rc from relabeling in HV: %d" % rc)
                else:
                    return (-xsconstants.XSERR_POLICY_TYPE_UNSUPPORTED, "", "", 0)

            if rc == 0:
                # HALTED, RUNNING or PAUSED
                if domid == 0:
                    if xspol:
                        self.info['security_label'] = seclab
                        ssidref = poladmin.set_domain0_bootlabel(xspol, label)
                    else:
                        return (-xsconstants.XSERR_POLICY_NOT_LOADED, "", "", 0)
                else:
                    if self.info.has_key('security_label'):
                        old_label = self.info['security_label']
                        # Check label against expected one, unless wildcard
                        if old_label != old_seclab:
                            return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)

                    self.info['security_label'] = seclab

                    try:
                        xen.xend.XendDomain.instance().managed_config_save(self)
                    except:
                        # Best-effort persistence; the in-memory label is
                        # already updated.
                        pass
            return (rc, errors, old_label, new_ssidref)
        finally:
            xen.xend.XendDomain.instance().policy_lock.release()
3406 def get_on_shutdown(self):
3407 after_shutdown = self.info.get('actions_after_shutdown')
3408 if not after_shutdown or after_shutdown not in XEN_API_ON_NORMAL_EXIT:
3409 return XEN_API_ON_NORMAL_EXIT[-1]
3410 return after_shutdown
3412 def get_on_reboot(self):
3413 after_reboot = self.info.get('actions_after_reboot')
3414 if not after_reboot or after_reboot not in XEN_API_ON_NORMAL_EXIT:
3415 return XEN_API_ON_NORMAL_EXIT[-1]
3416 return after_reboot
3418 def get_on_suspend(self):
3419 # TODO: not supported
3420 after_suspend = self.info.get('actions_after_suspend')
3421 if not after_suspend or after_suspend not in XEN_API_ON_NORMAL_EXIT:
3422 return XEN_API_ON_NORMAL_EXIT[-1]
3423 return after_suspend
3425 def get_on_crash(self):
3426 after_crash = self.info.get('actions_after_crash')
3427 if not after_crash or after_crash not in \
3428 XEN_API_ON_CRASH_BEHAVIOUR + restart_modes:
3429 return XEN_API_ON_CRASH_BEHAVIOUR[0]
3430 return XEN_API_ON_CRASH_BEHAVIOUR_FILTER[after_crash]
    def get_dev_config_by_uuid(self, dev_class, dev_uuid):
        """ Get's a device configuration either from XendConfig or
        from the DevController.

        @param dev_class: device class, either, 'vbd' or 'vif'
        @param dev_uuid: device UUID

        @rtype: dictionary
        """
        dev_type, dev_config = self.info['devices'].get(dev_uuid, (None, None))

        # shortcut if the domain isn't started because
        # the devcontrollers will have no better information
        # than XendConfig.
        if self._stateGet() in (XEN_API_VM_POWER_STATE_HALTED,
                                XEN_API_VM_POWER_STATE_SUSPENDED):
            if dev_config:
                # Deep-copy so callers cannot mutate the stored config.
                return copy.deepcopy(dev_config)
            return None

        # instead of using dev_class, we use the dev_type
        # that is from XendConfig.
        controller = self.getDeviceController(dev_type)
        if not controller:
            return None

        all_configs = controller.getAllDeviceConfigurations()
        if not all_configs:
            return None

        updated_dev_config = copy.deepcopy(dev_config)
        for _devid, _devcfg in all_configs.items():
            if _devcfg.get('uuid') == dev_uuid:
                # Merge live controller info (plus the device id) into
                # the stored config; first match wins.
                updated_dev_config.update(_devcfg)
                updated_dev_config['id'] = _devid
                return updated_dev_config

        # No live match found: return the stored config copy unchanged.
        return updated_dev_config
3471 def get_dev_xenapi_config(self, dev_class, dev_uuid):
3472 config = self.get_dev_config_by_uuid(dev_class, dev_uuid)
3473 if not config:
3474 return {}
3476 config['VM'] = self.get_uuid()
3478 if dev_class == 'vif':
3479 if not config.has_key('name'):
3480 config['name'] = config.get('vifname', '')
3481 if not config.has_key('MAC'):
3482 config['MAC'] = config.get('mac', '')
3483 if not config.has_key('type'):
3484 config['type'] = 'paravirtualised'
3485 if not config.has_key('device'):
3486 devid = config.get('id')
3487 if devid != None:
3488 config['device'] = 'eth%s' % devid
3489 else:
3490 config['device'] = ''
3492 if not config.has_key('network'):
3493 try:
3494 bridge = config.get('bridge', None)
3495 if bridge is None:
3496 from xen.util import Brctl
3497 if_to_br = dict([(i,b)
3498 for (b,ifs) in Brctl.get_state().items()
3499 for i in ifs])
3500 vifname = "vif%s.%s" % (self.getDomid(),
3501 config.get('id'))
3502 bridge = if_to_br.get(vifname, None)
3503 config['network'] = \
3504 XendNode.instance().bridge_to_network(
3505 config.get('bridge')).get_uuid()
3506 except Exception:
3507 log.exception('bridge_to_network')
3508 # Ignore this for now -- it may happen if the device
3509 # has been specified using the legacy methods, but at
3510 # some point we're going to have to figure out how to
3511 # handle that properly.
3513 config['MTU'] = 1500 # TODO
3515 if self._stateGet() not in (XEN_API_VM_POWER_STATE_HALTED,):
3516 xennode = XendNode.instance()
3517 rx_bps, tx_bps = xennode.get_vif_util(self.domid, devid)
3518 config['io_read_kbs'] = rx_bps/1024
3519 config['io_write_kbs'] = tx_bps/1024
3520 rx, tx = xennode.get_vif_stat(self.domid, devid)
3521 config['io_total_read_kbs'] = rx/1024
3522 config['io_total_write_kbs'] = tx/1024
3523 else:
3524 config['io_read_kbs'] = 0.0
3525 config['io_write_kbs'] = 0.0
3526 config['io_total_read_kbs'] = 0.0
3527 config['io_total_write_kbs'] = 0.0
3529 config['security_label'] = config.get('security_label', '')
3531 if dev_class == 'vbd':
3533 if self._stateGet() not in (XEN_API_VM_POWER_STATE_HALTED,):
3534 controller = self.getDeviceController(dev_class)
3535 devid, _1, _2 = controller.getDeviceDetails(config)
3536 xennode = XendNode.instance()
3537 rd_blkps, wr_blkps = xennode.get_vbd_util(self.domid, devid)
3538 config['io_read_kbs'] = rd_blkps
3539 config['io_write_kbs'] = wr_blkps
3540 else:
3541 config['io_read_kbs'] = 0.0
3542 config['io_write_kbs'] = 0.0
3544 config['VDI'] = config.get('VDI', '')
3545 config['device'] = config.get('dev', '')
3546 if ':' in config['device']:
3547 vbd_name, vbd_type = config['device'].split(':', 1)
3548 config['device'] = vbd_name
3549 if vbd_type == 'cdrom':
3550 config['type'] = XEN_API_VBD_TYPE[0]
3551 else:
3552 config['type'] = XEN_API_VBD_TYPE[1]
3554 config['driver'] = 'paravirtualised' # TODO
3555 config['image'] = config.get('uname', '')
3557 if config.get('mode', 'r') == 'r':
3558 config['mode'] = 'RO'
3559 else:
3560 config['mode'] = 'RW'
3562 if dev_class == 'vtpm':
3563 if not config.has_key('type'):
3564 config['type'] = 'paravirtualised' # TODO
3565 if not config.has_key('backend'):
3566 config['backend'] = "00000000-0000-0000-0000-000000000000"
3568 return config
3570 def get_dev_property(self, dev_class, dev_uuid, field):
3571 config = self.get_dev_xenapi_config(dev_class, dev_uuid)
3572 try:
3573 return config[field]
3574 except KeyError:
3575 raise XendError('Invalid property for device: %s' % field)
3577 def set_dev_property(self, dev_class, dev_uuid, field, value):
3578 self.info['devices'][dev_uuid][1][field] = value
3580 def get_vcpus_util(self):
3581 vcpu_util = {}
3582 xennode = XendNode.instance()
3583 if 'VCPUs_max' in self.info and self.domid != None:
3584 for i in range(0, self.info['VCPUs_max']):
3585 util = xennode.get_vcpu_util(self.domid, i)
3586 vcpu_util[str(i)] = util
3588 return vcpu_util
    # Accessors for the per-class device reference lists / instances.
    def get_consoles(self):
        return self.info.get('console_refs', [])

    def get_vifs(self):
        return self.info.get('vif_refs', [])

    def get_vbds(self):
        return self.info.get('vbd_refs', [])

    def get_vtpms(self):
        return self.info.get('vtpm_refs', [])

    def get_dpcis(self):
        # DPCI instances live in XendAPIStore, keyed by owning VM.
        return XendDPCI.get_by_VM(self.info.get('uuid'))

    def get_dscsis(self):
        # DSCSI instances live in XendAPIStore, keyed by owning VM.
        return XendDSCSI.get_by_VM(self.info.get('uuid'))
    def create_vbd(self, xenapi_vbd, vdi_image_path):
        """Create a VBD using a VDI from XendStorageRepository.

        @param xenapi_vbd: vbd struct from the Xen API
        @param vdi_image_path: VDI UUID
        @rtype: string
        @return: uuid of the device
        """
        xenapi_vbd['image'] = vdi_image_path
        # 'tap:...' images go through the blktap controller, everything
        # else through the plain vbd controller.
        if vdi_image_path.startswith('tap'):
            dev_uuid = self.info.device_add('tap', cfg_xenapi = xenapi_vbd)
        else:
            dev_uuid = self.info.device_add('vbd', cfg_xenapi = xenapi_vbd)

        if not dev_uuid:
            raise XendError('Failed to create device')

        # For a live domain, instantiate the backend device right away.
        if self._stateGet() in (XEN_API_VM_POWER_STATE_RUNNING,
                                XEN_API_VM_POWER_STATE_PAUSED):
            _, config = self.info['devices'][dev_uuid]

            if vdi_image_path.startswith('tap'):
                dev_control = self.getDeviceController('tap')
            else:
                dev_control = self.getDeviceController('vbd')

            try:
                devid = dev_control.createDevice(config)
                dev_control.waitForDevice(devid)
                self.info.device_update(dev_uuid,
                                        cfg_xenapi = {'devid': devid})
            except Exception, exn:
                # Roll the config entry back so a failed hotplug does not
                # leave a phantom device behind.
                log.exception(exn)
                del self.info['devices'][dev_uuid]
                self.info['vbd_refs'].remove(dev_uuid)
                raise

        return dev_uuid
    def create_phantom_vbd_with_vdi(self, xenapi_vbd, vdi_image_path):
        """Create a VBD using a VDI from XendStorageRepository.

        @param xenapi_vbd: vbd struct from the Xen API
        @param vdi_image_path: VDI UUID
        @rtype: string
        @return: uuid of the device
        """
        xenapi_vbd['image'] = vdi_image_path
        dev_uuid = self.info.phantom_device_add('tap', cfg_xenapi = xenapi_vbd)
        if not dev_uuid:
            raise XendError('Failed to create device')

        if self._stateGet() == XEN_API_VM_POWER_STATE_RUNNING:
            _, config = self.info['devices'][dev_uuid]
            config['devid'] = self.getDeviceController('tap').createDevice(config)

        # NOTE(review): contrary to the docstring this returns the devid,
        # and 'config' is only bound in the RUNNING branch above -- for
        # any other state this raises UnboundLocalError. Confirm the
        # intended behaviour before relying on this for halted domains.
        return config['devid']
    def create_vif(self, xenapi_vif):
        """Create VIF device from the passed struct in Xen API format.

        @param xenapi_vif: Xen API VIF Struct.
        @rtype: string
        @return: UUID
        """
        dev_uuid = self.info.device_add('vif', cfg_xenapi = xenapi_vif)
        if not dev_uuid:
            raise XendError('Failed to create device')

        # For a live domain, instantiate the backend device right away.
        if self._stateGet() in (XEN_API_VM_POWER_STATE_RUNNING,
                                XEN_API_VM_POWER_STATE_PAUSED):

            _, config = self.info['devices'][dev_uuid]
            dev_control = self.getDeviceController('vif')

            try:
                devid = dev_control.createDevice(config)
                dev_control.waitForDevice(devid)
                self.info.device_update(dev_uuid,
                                        cfg_xenapi = {'devid': devid})
            except Exception, exn:
                # Roll the config entry back so a failed hotplug does not
                # leave a phantom device behind.
                log.exception(exn)
                del self.info['devices'][dev_uuid]
                self.info['vif_refs'].remove(dev_uuid)
                raise

        return dev_uuid
3696 def create_vtpm(self, xenapi_vtpm):
3697 """Create a VTPM device from the passed struct in Xen API format.
3699 @return: uuid of the device
3700 @rtype: string
3701 """
3703 if self._stateGet() not in (DOM_STATE_HALTED,):
3704 raise VmError("Can only add vTPM to a halted domain.")
3705 if self.get_vtpms() != []:
3706 raise VmError('Domain already has a vTPM.')
3707 dev_uuid = self.info.device_add('vtpm', cfg_xenapi = xenapi_vtpm)
3708 if not dev_uuid:
3709 raise XendError('Failed to create device')
3711 return dev_uuid
3713 def create_console(self, xenapi_console):
3714 """ Create a console device from a Xen API struct.
3716 @return: uuid of device
3717 @rtype: string
3718 """
3719 if self._stateGet() not in (DOM_STATE_HALTED,):
3720 raise VmError("Can only add console to a halted domain.")
3722 dev_uuid = self.info.device_add('console', cfg_xenapi = xenapi_console)
3723 if not dev_uuid:
3724 raise XendError('Failed to create device')
3726 return dev_uuid
    def set_console_other_config(self, console_uuid, other_config):
        # Thin pass-through updating the 'other_config' field of the
        # given console in XendConfig.
        self.info.console_update(console_uuid, 'other_config', other_config)
    def create_dpci(self, xenapi_pci):
        """Create pci device from the passed struct in Xen API format.

        @param xenapi_pci: DPCI struct from Xen API
        @rtype: bool
        #@rtype: string
        @return: True if successfully created device
        #@return: UUID
        """
        dpci_uuid = uuid.createString()

        # Flatten the options dict into [key, value] pairs for sxp.
        dpci_opts = []
        opts_dict = xenapi_pci.get('options')
        for k in opts_dict.keys():
            dpci_opts.append([k, opts_dict[k]])

        # Convert xenapi to sxp
        ppci = XendAPIStore.get(xenapi_pci.get('PPCI'), 'PPCI')

        target_pci_sxp = \
            ['pci',
                ['dev',
                    ['domain', '0x%02x' % ppci.get_domain()],
                    ['bus', '0x%02x' % ppci.get_bus()],
                    ['slot', '0x%02x' % ppci.get_slot()],
                    ['func', '0x%1x' % ppci.get_func()],
                    ['vslot', '0x%02x' % xenapi_pci.get('hotplug_slot')],
                    ['opts', dpci_opts],
                    ['uuid', dpci_uuid]
                ],
                ['state', 'Initialising']
            ]

        if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:
            # Inactive domain: merge the new function into the (single)
            # stored 'pci' device entry, creating it if necessary.
            old_pci_sxp = self._getDeviceInfo_pci(0)

            if old_pci_sxp is None:
                dev_uuid = self.info.device_add('pci', cfg_sxp = target_pci_sxp)
                if not dev_uuid:
                    raise XendError('Failed to create device')

            else:
                new_pci_sxp = ['pci']
                for existing_dev in sxp.children(old_pci_sxp, 'dev'):
                    new_pci_sxp.append(existing_dev)
                new_pci_sxp.append(sxp.child0(target_pci_sxp, 'dev'))

                dev_uuid = sxp.child_value(old_pci_sxp, 'uuid')
                self.info.device_update(dev_uuid, new_pci_sxp)

            xen.xend.XendDomain.instance().managed_config_save(self)

        else:
            # Live domain: hotplug the function.
            try:
                self.device_configure(target_pci_sxp)

            except Exception, exn:
                raise XendError('Failed to create device')

        return dpci_uuid
    def create_dscsi(self, xenapi_dscsi):
        """Create scsi device from the passed struct in Xen API format.

        @param xenapi_dscsi: DSCSI struct from Xen API
        @rtype: string
        @return: UUID
        """
        dscsi_uuid = uuid.createString()

        # Convert xenapi to sxp
        pscsi = XendAPIStore.get(xenapi_dscsi.get('PSCSI'), 'PSCSI')
        # Virtual host number is the first component of the virtual HCTL.
        devid = int(xenapi_dscsi.get('virtual_HCTL').split(':')[0])
        target_vscsi_sxp = \
            ['vscsi',
                ['dev',
                    ['devid', devid],
                    ['p-devname', pscsi.get_dev_name()],
                    ['p-dev', pscsi.get_physical_HCTL()],
                    ['v-dev', xenapi_dscsi.get('virtual_HCTL')],
                    ['state', xenbusState['Initialising']],
                    ['uuid', dscsi_uuid]
                ],
                ['feature-host', 0]
            ]

        if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:
            # Inactive domain: merge into the stored 'vscsi' entry for
            # this virtual host, creating it if necessary.
            cur_vscsi_sxp = self._getDeviceInfo_vscsi(devid)

            if cur_vscsi_sxp is None:
                dev_uuid = self.info.device_add('vscsi', cfg_sxp = target_vscsi_sxp)
                if not dev_uuid:
                    raise XendError('Failed to create device')

            else:
                new_vscsi_sxp = ['vscsi', ['feature-host', 0]]
                for existing_dev in sxp.children(cur_vscsi_sxp, 'dev'):
                    new_vscsi_sxp.append(existing_dev)
                new_vscsi_sxp.append(sxp.child0(target_vscsi_sxp, 'dev'))

                dev_uuid = sxp.child_value(cur_vscsi_sxp, 'uuid')
                self.info.device_update(dev_uuid, new_vscsi_sxp)

            xen.xend.XendDomain.instance().managed_config_save(self)

        else:
            # Live domain: hotplug the device.
            try:
                self.device_configure(target_vscsi_sxp)

            except Exception, exn:
                raise XendError('Failed to create device')

        return dscsi_uuid
    def destroy_device_by_uuid(self, dev_type, dev_uuid):
        """Remove device *dev_uuid* of class *dev_type* from this domain.

        For a live domain the backend device is torn down first; the
        config entry and its class ref list entry are removed
        unconditionally by the finally block, even if teardown fails.

        @raise XendError: if the device is unknown, or has no devid.
        """
        if dev_uuid not in self.info['devices']:
            raise XendError('Device does not exist')

        try:
            if self._stateGet() in (XEN_API_VM_POWER_STATE_RUNNING,
                                    XEN_API_VM_POWER_STATE_PAUSED):
                _, config = self.info['devices'][dev_uuid]
                devid = config.get('devid')
                if devid != None:
                    self.getDeviceController(dev_type).destroyDevice(devid, force = False)
                else:
                    raise XendError('Unable to get devid for device: %s:%s' %
                                    (dev_type, dev_uuid))
        finally:
            # Always drop the bookkeeping, even on teardown failure.
            del self.info['devices'][dev_uuid]
            self.info['%s_refs' % dev_type].remove(dev_uuid)
    # Convenience wrappers around destroy_device_by_uuid, one per
    # device class tracked in self.info.
    def destroy_vbd(self, dev_uuid):
        self.destroy_device_by_uuid('vbd', dev_uuid)

    def destroy_vif(self, dev_uuid):
        self.destroy_device_by_uuid('vif', dev_uuid)

    def destroy_vtpm(self, dev_uuid):
        self.destroy_device_by_uuid('vtpm', dev_uuid)
    def destroy_dpci(self, dev_uuid):
        """Detach a passed-through PCI function from this domain.

        *dev_uuid* names the DPCI instance; the matching entry inside the
        aggregate 'pci' device is located by its domain:bus:slot.func
        name and removed (or put into 'Closing' for a live domain).
        """
        dpci = XendAPIStore.get(dev_uuid, 'DPCI')
        ppci = XendAPIStore.get(dpci.get_PPCI(), 'PPCI')

        old_pci_sxp = self._getDeviceInfo_pci(0)
        # Rebind dev_uuid to the aggregate 'pci' device entry's UUID.
        dev_uuid = sxp.child_value(old_pci_sxp, 'uuid')
        target_dev = None
        new_pci_sxp = ['pci']
        for dev in sxp.children(old_pci_sxp, 'dev'):
            domain = int(sxp.child_value(dev, 'domain'), 16)
            bus = int(sxp.child_value(dev, 'bus'), 16)
            slot = int(sxp.child_value(dev, 'slot'), 16)
            func = int(sxp.child_value(dev, 'func'), 16)
            name = "%04x:%02x:%02x.%01x" % (domain, bus, slot, func)
            if ppci.get_name() == name:
                target_dev = dev
            else:
                new_pci_sxp.append(dev)

        if target_dev is None:
            raise XendError('Failed to destroy device')

        target_pci_sxp = ['pci', target_dev, ['state', 'Closing']]

        if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:
            # Inactive domain: rewrite the stored config, dropping the
            # aggregate entry entirely when its last function goes away.
            self.info.device_update(dev_uuid, new_pci_sxp)
            if len(sxp.children(new_pci_sxp, 'dev')) == 0:
                del self.info['devices'][dev_uuid]
            xen.xend.XendDomain.instance().managed_config_save(self)

        else:
            # Live domain: request teardown via the device controller.
            try:
                self.device_configure(target_pci_sxp)

            except Exception, exn:
                raise XendError('Failed to destroy device')
    def destroy_dscsi(self, dev_uuid):
        """Detach a virtual SCSI device from this domain.

        *dev_uuid* names the DSCSI instance; the matching entry inside
        the aggregate 'vscsi' device is located by its virtual HCTL and
        removed (or put into 'Closing' for a live domain).
        """
        dscsi = XendAPIStore.get(dev_uuid, 'DSCSI')
        devid = dscsi.get_virtual_host()
        vHCTL = dscsi.get_virtual_HCTL()
        cur_vscsi_sxp = self._getDeviceInfo_vscsi(devid)
        # Rebind dev_uuid to the aggregate 'vscsi' device entry's UUID.
        dev_uuid = sxp.child_value(cur_vscsi_sxp, 'uuid')

        target_dev = None
        new_vscsi_sxp = ['vscsi', ['feature-host', 0]]
        for dev in sxp.children(cur_vscsi_sxp, 'dev'):
            if vHCTL == sxp.child_value(dev, 'v-dev'):
                target_dev = dev
            else:
                new_vscsi_sxp.append(dev)

        if target_dev is None:
            raise XendError('Failed to destroy device')

        target_dev.append(['state', xenbusState['Closing']])
        target_vscsi_sxp = ['vscsi', target_dev, ['feature-host', 0]]

        if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:
            # Inactive domain: rewrite the stored config, dropping the
            # aggregate entry entirely when its last device goes away.
            self.info.device_update(dev_uuid, new_vscsi_sxp)
            if len(sxp.children(new_vscsi_sxp, 'dev')) == 0:
                del self.info['devices'][dev_uuid]
            xen.xend.XendDomain.instance().managed_config_save(self)

        else:
            # Live domain: request teardown via the device controller.
            try:
                self.device_configure(target_vscsi_sxp)

            except Exception, exn:
                raise XendError('Failed to destroy device')
3951 def destroy_xapi_instances(self):
3952 """Destroy Xen-API instances stored in XendAPIStore.
3953 """
3954 # Xen-API classes based on XendBase have their instances stored
3955 # in XendAPIStore. Cleanup these instances here, if they are supposed
3956 # to be destroyed when the parent domain is dead.
3958 # Most of the virtual devices (vif, vbd, vfb, etc) are not based on
3959 # XendBase and there's no need to remove them from XendAPIStore.
3961 from xen.xend import XendDomain
3962 if XendDomain.instance().is_valid_vm(self.info.get('uuid')):
3963 # domain still exists.
3964 return
3966 # Destroy the VMMetrics instance.
3967 if XendAPIStore.get(self.metrics.get_uuid(), self.metrics.getClass()) \
3968 is not None:
3969 self.metrics.destroy()
3971 # Destroy DPCI instances.
3972 for dpci_uuid in XendDPCI.get_by_VM(self.info.get('uuid')):
3973 XendAPIStore.deregister(dpci_uuid, "DPCI")
3975 # Destroy DSCSI instances.
3976 for dscsi_uuid in XendDSCSI.get_by_VM(self.info.get('uuid')):
3977 XendAPIStore.deregister(dscsi_uuid, "DSCSI")
3979 def has_device(self, dev_class, dev_uuid):
3980 return (dev_uuid in self.info['%s_refs' % dev_class.lower()])
3982 def __str__(self):
3983 return '<domain id=%s name=%s memory=%s state=%s>' % \
3984 (str(self.domid), self.info['name_label'],
3985 str(self.info['memory_dynamic_max']), DOM_STATES[self._stateGet()])
3987 __repr__ = __str__