debuggers.hg

view tools/python/xen/xend/XendDomainInfo.py @ 19656:780041c4a96d

xend: Fix xm pci commands for inactive managed domains.

Signed-off-by: Masaki Kanno <kanno.masaki@jp.fujitsu.com>
author Keir Fraser <keir.fraser@citrix.com>
date Tue May 19 02:23:32 2009 +0100 (2009-05-19)
parents a89e83f2d43e
children 070f456143d3
line source
1 #===========================================================================
2 # This library is free software; you can redistribute it and/or
3 # modify it under the terms of version 2.1 of the GNU Lesser General Public
4 # License as published by the Free Software Foundation.
5 #
6 # This library is distributed in the hope that it will be useful,
7 # but WITHOUT ANY WARRANTY; without even the implied warranty of
8 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
9 # Lesser General Public License for more details.
10 #
11 # You should have received a copy of the GNU Lesser General Public
12 # License along with this library; if not, write to the Free Software
13 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
14 #============================================================================
15 # Copyright (C) 2004, 2005 Mike Wray <mike.wray@hp.com>
16 # Copyright (C) 2005-2007 XenSource Ltd
17 #============================================================================
19 """Representation of a single domain.
20 Includes support for domain construction, using
21 open-ended configurations.
23 Author: Mike Wray <mike.wray@hp.com>
25 """
27 import logging
28 import time
29 import threading
30 import re
31 import copy
32 import os
33 import traceback
34 from types import StringTypes
36 import xen.lowlevel.xc
37 from xen.util import asserts
38 from xen.util.blkif import blkdev_uname_to_file, blkdev_uname_to_taptype
39 import xen.util.xsm.xsm as security
40 from xen.util import xsconstants
42 from xen.xend import balloon, sxp, uuid, image, arch, osdep
43 from xen.xend import XendOptions, XendNode, XendConfig
45 from xen.xend.XendConfig import scrub_password
46 from xen.xend.XendBootloader import bootloader, bootloader_tidy
47 from xen.xend.XendError import XendError, VmError
48 from xen.xend.XendDevices import XendDevices
49 from xen.xend.XendTask import XendTask
50 from xen.xend.xenstore.xstransact import xstransact, complete
51 from xen.xend.xenstore.xsutil import GetDomainPath, IntroduceDomain, SetTarget, ResumeDomain
52 from xen.xend.xenstore.xswatch import xswatch
53 from xen.xend.XendConstants import *
54 from xen.xend.XendAPIConstants import *
55 from xen.xend.server.DevConstants import xenbusState
57 from xen.xend.XendVMMetrics import XendVMMetrics
59 from xen.xend import XendAPIStore
60 from xen.xend.XendPPCI import XendPPCI
61 from xen.xend.XendDPCI import XendDPCI
62 from xen.xend.XendPSCSI import XendPSCSI
63 from xen.xend.XendDSCSI import XendDSCSI
65 MIGRATE_TIMEOUT = 30.0
66 BOOTLOADER_LOOPBACK_DEVICE = '/dev/xvdp'
68 xc = xen.lowlevel.xc.xc()
69 xoptions = XendOptions.instance()
71 log = logging.getLogger("xend.XendDomainInfo")
72 #log.setLevel(logging.TRACE)
def create(config):
    """Create and start a VM from an SXP configuration.

    @param config: A configuration object involving lists of tuples.
    @type  config: list of lists, eg ['vm', ['image', 'xen.gz']]

    @rtype:  XendDomainInfo
    @return: An up and running XendDomainInfo instance
    @raise VmError: Invalid configuration or failure to start.
    """
    from xen.xend import XendDomain

    domconfig = XendConfig.XendConfig(sxp_obj = config)

    # Refuse to create a second running instance of the same domain:
    # look the name up first, fall back to the UUID.
    existing = XendDomain.instance().domain_lookup_nr(domconfig["name_label"])
    if existing is None or existing.domid is None:
        existing = XendDomain.instance().domain_lookup_nr(domconfig["uuid"])
    if existing is not None and existing.domid is not None:
        raise VmError("Domain '%s' already exists with ID '%d'" % (domconfig["name_label"], existing.domid))

    log.debug("XendDomainInfo.create(%s)", scrub_password(config))

    dominfo = XendDomainInfo(domconfig)
    try:
        dominfo.start()
    except:
        # Tear the half-built domain down before re-raising.
        log.exception('Domain construction failed')
        dominfo.destroy()
        raise
    return dominfo
def create_from_dict(config_dict):
    """Create and start a VM from a configuration dictionary.

    @param config_dict: An configuration dictionary.

    @rtype:  XendDomainInfo
    @return: An up and running XendDomainInfo instance
    @raise VmError: Invalid configuration or failure to start.
    """
    log.debug("XendDomainInfo.create_from_dict(%s)",
              scrub_password(config_dict))

    dominfo = XendDomainInfo(XendConfig.XendConfig(xapi = config_dict))
    try:
        dominfo.start()
    except:
        # Tear the half-built domain down before re-raising.
        log.exception('Domain construction failed')
        dominfo.destroy()
        raise
    return dominfo
def recreate(info, priv):
    """Create the VM object for an existing domain. The domain must not
    be dying, as the paths in the store should already have been removed,
    and asking us to recreate them causes problems.

    @param info: Parsed configuration
    @type  info: dictionary
    @param priv: Is a privileged domain (Dom 0)
    @type  priv: bool

    @rtype:  XendDomainInfo
    @return: A up and running XendDomainInfo instance
    @raise VmError: Invalid configuration.
    @raise XendError: Errors with configuration.
    """
    log.debug("XendDomainInfo.recreate(%s)", scrub_password(info))

    assert not info['dying']

    xeninfo = XendConfig.XendConfig(dominfo = info)
    xeninfo['is_control_domain'] = priv
    xeninfo['is_a_template'] = False
    xeninfo['auto_power_on'] = False
    domid = xeninfo['domid']
    uuid1 = uuid.fromString(xeninfo['uuid'])
    needs_reinitialising = False

    dompath = GetDomainPath(domid)
    if not dompath:
        raise XendError('No domain path in store for existing '
                        'domain %d' % domid)

    log.info("Recreating domain %d, UUID %s. at %s" %
             (domid, xeninfo['uuid'], dompath))

    # Verify the /vm path and uuid held in xenstore (skipped for Dom0);
    # when either is missing or stale we must recreate the entries with
    # our own values.
    #
    # NOTE: this is probably not desirable, really we should just
    # abort or ignore, but there may be cases where xenstore's
    # entry disappears (eg. xenstore-rm /)
    #
    try:
        vmpath = xstransact.Read(dompath, "vm")
        if not vmpath:
            if not priv:
                log.warn('/local/domain/%d/vm is missing. recreate is '
                         'confused, trying our best to recover' % domid)
            needs_reinitialising = True
            raise XendError('reinit')

        uuid2_str = xstransact.Read(vmpath, "uuid")
        if not uuid2_str:
            log.warn('%s/uuid/ is missing. recreate is confused, '
                     'trying our best to recover' % vmpath)
            needs_reinitialising = True
            raise XendError('reinit')

        uuid2 = uuid.fromString(uuid2_str)
        if uuid1 != uuid2:
            log.warn('UUID in /vm does not match the UUID in /dom/%d.'
                     'Trying out best to recover' % domid)
            needs_reinitialising = True
    except XendError:
        pass # our best shot at 'goto' in python :)

    vm = XendDomainInfo(xeninfo, domid, dompath, augment = True, priv = priv,
                        vmpath = vmpath)

    if needs_reinitialising:
        vm._recreateDom()
        vm._removeVm()
        vm._storeVmDetails()
        vm._storeDomDetails()

    vm.image = image.create(vm, vm.info)
    vm.image.recreate()

    vm._registerWatches()
    vm.refreshShutdown(xeninfo)

    # register the domain in the list
    from xen.xend import XendDomain
    XendDomain.instance().add_domain(vm)

    return vm
def restore(config):
    """Create a domain and a VM object to do a restore.

    @param config: Domain SXP configuration
    @type  config: list of lists. (see C{create})

    @rtype:  XendDomainInfo
    @return: A up and running XendDomainInfo instance
    @raise VmError: Invalid configuration or failure to start.
    @raise XendError: Errors with configuration.
    """
    log.debug("XendDomainInfo.restore(%s)", scrub_password(config))

    dominfo = XendDomainInfo(XendConfig.XendConfig(sxp_obj = config),
                             resume = True)
    try:
        dominfo.resume()
    except:
        # Clean up the partially restored domain before propagating.
        dominfo.destroy()
        raise
    return dominfo
def createDormant(domconfig):
    """Create a dormant/inactive XenDomainInfo without creating VM.
    This is for creating instances of persistent domains that are not
    yet start.

    @param domconfig: Parsed configuration
    @type  domconfig: XendConfig object

    @rtype:  XendDomainInfo
    @return: A up and running XendDomainInfo instance
    @raise XendError: Errors with configuration.
    """
    log.debug("XendDomainInfo.createDormant(%s)", scrub_password(domconfig))

    # domid does not make sense for non-running domains.
    domconfig.pop('domid', None)
    return XendDomainInfo(domconfig)
def domain_by_name(name):
    """Look a domain up by name.

    @params name: Name of the domain
    @type   name: string
    @return: XendDomainInfo or None
    """
    from xen.xend import XendDomain
    return XendDomain.instance().domain_lookup_by_name_nr(name)
def shutdown_reason(code):
    """Map a shutdown code onto its human-readable reason string.

    @param code: shutdown code
    @type  code: int
    @return: shutdown reason ("?" when the code is unknown)
    @rtype: string
    """
    return DOMAIN_SHUTDOWN_REASONS.get(code, "?")
def dom_get(dom):
    """Get info from xen for an existing domain.

    @param dom: domain id
    @type  dom: int
    @return: info or None
    @rtype: dictionary
    """
    try:
        domains = xc.domain_getinfo(dom, 1)
        # domain_getinfo returns the first domain >= dom; make sure we
        # actually got the one that was asked for.
        if domains and domains[0]['domid'] == dom:
            return domains[0]
    except Exception as err:
        # ignore missing domain
        log.trace("domain_getinfo(%d) failed, ignoring: %s", dom, str(err))
    return None
def get_assigned_pci_devices(domid):
    """Read the PCI device strings assigned to a domain from xenstore.

    @param domid: domain id
    @type  domid: int
    @return: 'dev-N' entries from the domain's PCI backend node, in order;
             empty list when the domain has no PCI backend node.
    @rtype: list of strings
    """
    path = '/local/domain/0/backend/pci/%u/0/' % domid
    num_devs = xstransact.Read(path + 'num_devs')
    if num_devs is None or num_devs == "":
        # No PCI backend node exists for this domain.
        return []
    # Build the list in one pass instead of repeated list concatenation.
    return [xstransact.Read(path + 'dev-%i' % i)
            for i in range(int(num_devs))]
def do_FLR(domid):
    """Perform a Function Level Reset on every PCI device assigned to
    the given domain."""
    from xen.xend.server.pciif import parse_pci_name, PciDevice

    for dev_str in get_assigned_pci_devices(domid):
        (dom, b, d, f) = parse_pci_name(dev_str)
        try:
            dev = PciDevice(dom, b, d, f)
        except Exception as e:
            raise VmError("pci: failed to locate device and "+
                          "parse it's resources - "+str(e))
        dev.do_FLR()
319 class XendDomainInfo:
320 """An object represents a domain.
322 @TODO: try to unify dom and domid, they mean the same thing, but
323 xc refers to it as dom, and everywhere else, including
324 xenstore it is domid. The best way is to change xc's
325 python interface.
327 @ivar info: Parsed configuration
328 @type info: dictionary
329 @ivar domid: Domain ID (if VM has started)
330 @type domid: int or None
331 @ivar vmpath: XenStore path to this VM.
332 @type vmpath: string
333 @ivar dompath: XenStore path to this Domain.
334 @type dompath: string
335 @ivar image: Reference to the VM Image.
336 @type image: xen.xend.image.ImageHandler
337 @ivar store_port: event channel to xenstored
338 @type store_port: int
339 @ivar console_port: event channel to xenconsoled
340 @type console_port: int
341 @ivar store_mfn: xenstored mfn
342 @type store_mfn: int
343 @ivar console_mfn: xenconsoled mfn
344 @type console_mfn: int
345 @ivar notes: OS image notes
346 @type notes: dictionary
347 @ivar vmWatch: reference to a watch on the xenstored vmpath
348 @type vmWatch: xen.xend.xenstore.xswatch
349 @ivar shutdownWatch: reference to watch on the xenstored domain shutdown
350 @type shutdownWatch: xen.xend.xenstore.xswatch
351 @ivar shutdownStartTime: UNIX Time when domain started shutting down.
352 @type shutdownStartTime: float or None
353 @ivar restart_in_progress: Is a domain restart thread running?
354 @type restart_in_progress: bool
355 # @ivar state: Domain state
356 # @type state: enum(DOM_STATE_HALTED, DOM_STATE_RUNNING, ...)
357 @ivar state_updated: lock for self.state
358 @type state_updated: threading.Condition
359 @ivar refresh_shutdown_lock: lock for polling shutdown state
360 @type refresh_shutdown_lock: threading.Condition
361 @ivar _deviceControllers: device controller cache for this domain
362 @type _deviceControllers: dict 'string' to DevControllers
363 """
365 def __init__(self, info, domid = None, dompath = None, augment = False,
366 priv = False, resume = False, vmpath = None):
367 """Constructor for a domain
369 @param info: parsed configuration
370 @type info: dictionary
371 @keyword domid: Set initial domain id (if any)
372 @type domid: int
373 @keyword dompath: Set initial dompath (if any)
374 @type dompath: string
375 @keyword augment: Augment given info with xenstored VM info
376 @type augment: bool
377 @keyword priv: Is a privileged domain (Dom 0)
378 @type priv: bool
379 @keyword resume: Is this domain being resumed?
380 @type resume: bool
381 """
383 self.info = info
384 if domid == None:
385 self.domid = self.info.get('domid')
386 else:
387 self.domid = domid
389 #REMOVE: uuid is now generated in XendConfig
390 #if not self._infoIsSet('uuid'):
391 # self.info['uuid'] = uuid.toString(uuid.create())
393 # Find a unique /vm/<uuid>/<integer> path if not specified.
394 # This avoids conflict between pre-/post-migrate domains when doing
395 # localhost relocation.
396 self.vmpath = vmpath
397 i = 0
398 while self.vmpath == None:
399 self.vmpath = XS_VMROOT + self.info['uuid']
400 if i != 0:
401 self.vmpath = self.vmpath + '-' + str(i)
402 try:
403 if self._readVm("uuid"):
404 self.vmpath = None
405 i = i + 1
406 except:
407 pass
409 self.dompath = dompath
411 self.image = None
412 self.store_port = None
413 self.store_mfn = None
414 self.console_port = None
415 self.console_mfn = None
417 self.native_protocol = None
419 self.vmWatch = None
420 self.shutdownWatch = None
421 self.shutdownStartTime = None
422 self._resume = resume
423 self.restart_in_progress = False
425 self.state_updated = threading.Condition()
426 self.refresh_shutdown_lock = threading.Condition()
427 self._stateSet(DOM_STATE_HALTED)
429 self._deviceControllers = {}
431 for state in DOM_STATES_OLD:
432 self.info[state] = 0
434 if augment:
435 self._augmentInfo(priv)
437 self._checkName(self.info['name_label'])
439 self.metrics = XendVMMetrics(uuid.createString(), self)
442 #
443 # Public functions available through XMLRPC
444 #
447 def start(self, is_managed = False):
448 """Attempts to start the VM by do the appropriate
449 initialisation if it not started.
450 """
451 from xen.xend import XendDomain
453 if self._stateGet() in (XEN_API_VM_POWER_STATE_HALTED, XEN_API_VM_POWER_STATE_SUSPENDED, XEN_API_VM_POWER_STATE_CRASHED):
454 try:
455 XendTask.log_progress(0, 30, self._constructDomain)
456 XendTask.log_progress(31, 60, self._initDomain)
458 XendTask.log_progress(61, 70, self._storeVmDetails)
459 XendTask.log_progress(71, 80, self._storeDomDetails)
460 XendTask.log_progress(81, 90, self._registerWatches)
461 XendTask.log_progress(91, 100, self.refreshShutdown)
463 xendomains = XendDomain.instance()
464 xennode = XendNode.instance()
466 # save running configuration if XendDomains believe domain is
467 # persistent
468 if is_managed:
469 xendomains.managed_config_save(self)
471 if xennode.xenschedinfo() == 'credit':
472 xendomains.domain_sched_credit_set(self.getDomid(),
473 self.getWeight(),
474 self.getCap())
475 except:
476 log.exception('VM start failed')
477 self.destroy()
478 raise
479 else:
480 raise XendError('VM already running')
482 def resume(self):
483 """Resumes a domain that has come back from suspension."""
484 state = self._stateGet()
485 if state in (DOM_STATE_SUSPENDED, DOM_STATE_HALTED):
486 try:
487 self._constructDomain()
489 try:
490 self._setCPUAffinity()
491 except:
492 # usually a CPU we want to set affinity to does not exist
493 # we just ignore it so that the domain can still be restored
494 log.warn("Cannot restore CPU affinity")
496 self._storeVmDetails()
497 self._createChannels()
498 self._createDevices()
499 self._storeDomDetails()
500 self._endRestore()
501 except:
502 log.exception('VM resume failed')
503 self.destroy()
504 raise
505 else:
506 raise XendError('VM is not suspended; it is %s'
507 % XEN_API_VM_POWER_STATE[state])
509 def shutdown(self, reason):
510 """Shutdown a domain by signalling this via xenstored."""
511 log.debug('XendDomainInfo.shutdown(%s)', reason)
512 if self._stateGet() in (DOM_STATE_SHUTDOWN, DOM_STATE_HALTED,):
513 raise XendError('Domain cannot be shutdown')
515 if self.domid == 0:
516 raise XendError('Domain 0 cannot be shutdown')
518 if reason not in DOMAIN_SHUTDOWN_REASONS.values():
519 raise XendError('Invalid reason: %s' % reason)
520 self.storeDom("control/shutdown", reason)
522 # HVM domain shuts itself down only if it has PV drivers
523 if self.info.is_hvm():
524 hvm_pvdrv = xc.hvm_get_param(self.domid, HVM_PARAM_CALLBACK_IRQ)
525 hvm_s_state = xc.hvm_get_param(self.domid, HVM_PARAM_ACPI_S_STATE)
526 if not hvm_pvdrv or hvm_s_state != 0:
527 code = REVERSE_DOMAIN_SHUTDOWN_REASONS[reason]
528 log.info("HVM save:remote shutdown dom %d!", self.domid)
529 xc.domain_shutdown(self.domid, code)
531 def pause(self):
532 """Pause domain
534 @raise XendError: Failed pausing a domain
535 """
536 try:
537 xc.domain_pause(self.domid)
538 self._stateSet(DOM_STATE_PAUSED)
539 except Exception, ex:
540 log.exception(ex)
541 raise XendError("Domain unable to be paused: %s" % str(ex))
543 def unpause(self):
544 """Unpause domain
546 @raise XendError: Failed unpausing a domain
547 """
548 try:
549 xc.domain_unpause(self.domid)
550 self._stateSet(DOM_STATE_RUNNING)
551 except Exception, ex:
552 log.exception(ex)
553 raise XendError("Domain unable to be unpaused: %s" % str(ex))
555 def send_sysrq(self, key):
556 """ Send a Sysrq equivalent key via xenstored."""
557 if self._stateGet() not in (DOM_STATE_RUNNING, DOM_STATE_PAUSED):
558 raise XendError("Domain '%s' is not started" % self.info['name_label'])
560 asserts.isCharConvertible(key)
561 self.storeDom("control/sysrq", '%c' % key)
563 def sync_pcidev_info(self):
565 if not self.info.is_hvm():
566 return
568 devid = '0'
569 dev_info = self._getDeviceInfo_pci(devid)
570 if dev_info is None:
571 return
573 # get the virtual slot info from xenstore
574 dev_uuid = sxp.child_value(dev_info, 'uuid')
575 pci_conf = self.info['devices'][dev_uuid][1]
576 pci_devs = pci_conf['devs']
578 count = 0
579 vslots = None
580 while vslots is None and count < 20:
581 vslots = xstransact.Read("/local/domain/0/backend/pci/%u/%s/vslots"
582 % (self.getDomid(), devid))
583 time.sleep(0.1)
584 count += 1
585 if vslots is None:
586 log.error("Device model didn't tell the vslots for PCI device")
587 return
589 #delete last delim
590 if vslots[-1] == ";":
591 vslots = vslots[:-1]
593 slot_list = vslots.split(';')
594 if len(slot_list) != len(pci_devs):
595 log.error("Device model's pci dev num dismatch")
596 return
598 #update the vslot info
599 count = 0;
600 for x in pci_devs:
601 x['vslot'] = slot_list[count]
602 count += 1
605 def hvm_pci_device_create(self, dev_config):
606 log.debug("XendDomainInfo.hvm_pci_device_create: %s"
607 % scrub_password(dev_config))
609 if not self.info.is_hvm():
610 raise VmError("hvm_pci_device_create called on non-HVM guest")
612 #all the PCI devs share one conf node
613 devid = '0'
615 new_dev = dev_config['devs'][0]
616 dev_info = self._getDeviceInfo_pci(devid)#from self.info['devices']
618 #check conflict before trigger hotplug event
619 if dev_info is not None:
620 dev_uuid = sxp.child_value(dev_info, 'uuid')
621 pci_conf = self.info['devices'][dev_uuid][1]
622 pci_devs = pci_conf['devs']
623 for x in pci_devs:
624 if x.has_key('vslot'):
625 x_vslot = x['vslot']
626 else:
627 x_vslot = x['requested_vslot']
628 if (int(x_vslot, 16) == int(new_dev['requested_vslot'], 16) and
629 int(x_vslot, 16) != AUTO_PHP_SLOT):
630 raise VmError("vslot %s already have a device." % (new_dev['requested_vslot']))
632 if (int(x['domain'], 16) == int(new_dev['domain'], 16) and
633 int(x['bus'], 16) == int(new_dev['bus'], 16) and
634 int(x['slot'], 16) == int(new_dev['slot'], 16) and
635 int(x['func'], 16) == int(new_dev['func'], 16) ):
636 raise VmError("device is already inserted")
638 # Test whether the devices can be assigned with VT-d
639 pci_str = "%s, %s, %s, %s" % (new_dev['domain'],
640 new_dev['bus'],
641 new_dev['slot'],
642 new_dev['func'])
643 bdf = xc.test_assign_device(0, pci_str)
644 if bdf != 0:
645 if bdf == -1:
646 raise VmError("failed to assign device: maybe the platform"
647 " doesn't support VT-d, or VT-d isn't enabled"
648 " properly?")
649 bus = (bdf >> 16) & 0xff
650 devfn = (bdf >> 8) & 0xff
651 dev = (devfn >> 3) & 0x1f
652 func = devfn & 0x7
653 raise VmError("fail to assign device(%x:%x.%x): maybe it has"
654 " already been assigned to other domain, or maybe"
655 " it doesn't exist." % (bus, dev, func))
657 # Here, we duplicate some checkings (in some cases, we mustn't allow
658 # a device to be hot-plugged into an HVM guest) that are also done in
659 # pci_device_configure()'s self.device_create(dev_sxp) or
660 # dev_control.reconfigureDevice(devid, dev_config).
661 # We must make the checkings before sending the command 'pci-ins' to
662 # ioemu.
664 # Test whether the device is owned by pciback. For instance, we can't
665 # hotplug a device being used by Dom0 itself to an HVM guest.
666 from xen.xend.server.pciif import PciDevice, parse_pci_name
667 domain = int(new_dev['domain'],16)
668 bus = int(new_dev['bus'],16)
669 dev = int(new_dev['slot'],16)
670 func = int(new_dev['func'],16)
671 try:
672 pci_device = PciDevice(domain, bus, dev, func)
673 except Exception, e:
674 raise VmError("pci: failed to locate device and "+
675 "parse it's resources - "+str(e))
676 if pci_device.driver!='pciback':
677 raise VmError(("pci: PCI Backend does not own device "+ \
678 "%s\n"+ \
679 "See the pciback.hide kernel "+ \
680 "command-line parameter or\n"+ \
681 "bind your slot/device to the PCI backend using sysfs" \
682 )%(pci_device.name))
684 # Check non-page-aligned MMIO BAR.
685 if pci_device.has_non_page_aligned_bar and arch.type != "ia64":
686 raise VmError("pci: %s: non-page-aligned MMIO BAR found." % \
687 pci_device.name)
689 # Check the co-assignment.
690 # To pci-attach a device D to domN, we should ensure each of D's
691 # co-assignment devices hasn't been assigned, or has been assigned to
692 # domN.
693 coassignment_list = pci_device.find_coassigned_devices()
694 assigned_pci_device_str_list = self._get_assigned_pci_devices()
695 for pci_str in coassignment_list:
696 (domain, bus, dev, func) = parse_pci_name(pci_str)
697 dev_str = '0x%x,0x%x,0x%x,0x%x' % (domain, bus, dev, func)
698 if xc.test_assign_device(0, dev_str) == 0:
699 continue
700 if not pci_str in assigned_pci_device_str_list:
701 raise VmError(("pci: failed to pci-attach %s to domain %s" + \
702 " because one of its co-assignment device %s has been" + \
703 " assigned to other domain." \
704 )% (pci_device.name, self.info['name_label'], pci_str))
706 if self.domid is not None:
707 opts = ''
708 if 'opts' in new_dev and len(new_dev['opts']) > 0:
709 config_opts = new_dev['opts']
710 config_opts = map(lambda (x, y): x+'='+y, config_opts)
711 opts = ',' + reduce(lambda x, y: x+','+y, config_opts)
713 bdf_str = "%s:%s:%s.%s@%s%s" % (new_dev['domain'],
714 new_dev['bus'],
715 new_dev['slot'],
716 new_dev['func'],
717 new_dev['requested_vslot'],
718 opts)
719 self.image.signalDeviceModel('pci-ins', 'pci-inserted', bdf_str)
721 vslot = xstransact.Read("/local/domain/0/device-model/%i/parameter"
722 % self.getDomid())
723 else:
724 vslot = new_dev['requested_vslot']
726 return vslot
729 def device_create(self, dev_config):
730 """Create a new device.
732 @param dev_config: device configuration
733 @type dev_config: SXP object (parsed config)
734 """
735 log.debug("XendDomainInfo.device_create: %s" % scrub_password(dev_config))
736 dev_type = sxp.name(dev_config)
737 dev_uuid = self.info.device_add(dev_type, cfg_sxp = dev_config)
738 dev_config_dict = self.info['devices'][dev_uuid][1]
739 log.debug("XendDomainInfo.device_create: %s" % scrub_password(dev_config_dict))
741 if dev_type == 'vif':
742 for x in dev_config:
743 if x != 'vif' and x[0] == 'mac':
744 if not re.match('^([0-9a-f]{2}:){5}[0-9a-f]{2}$', x[1], re.I):
745 log.error("Virtual network interface creation error - invalid MAC Address entered: %s", x[1])
746 raise VmError("Cannot create a new virtual network interface - MAC address is not valid!");
748 if self.domid is not None:
749 try:
750 dev_config_dict['devid'] = devid = \
751 self._createDevice(dev_type, dev_config_dict)
752 self._waitForDevice(dev_type, devid)
753 except VmError, ex:
754 del self.info['devices'][dev_uuid]
755 if dev_type == 'pci':
756 for dev in dev_config_dict['devs']:
757 XendAPIStore.deregister(dev['uuid'], 'DPCI')
758 elif dev_type == 'vscsi':
759 for dev in dev_config_dict['devs']:
760 XendAPIStore.deregister(dev['uuid'], 'DSCSI')
761 elif dev_type == 'tap':
762 self.info['vbd_refs'].remove(dev_uuid)
763 else:
764 self.info['%s_refs' % dev_type].remove(dev_uuid)
765 raise ex
766 else:
767 devid = None
769 xen.xend.XendDomain.instance().managed_config_save(self)
770 return self.getDeviceController(dev_type).sxpr(devid)
773 def pci_device_configure(self, dev_sxp, devid = 0):
774 """Configure an existing pci device.
776 @param dev_sxp: device configuration
777 @type dev_sxp: SXP object (parsed config)
778 @param devid: device id
779 @type devid: int
780 @return: Returns True if successfully updated device
781 @rtype: boolean
782 """
783 log.debug("XendDomainInfo.pci_device_configure: %s"
784 % scrub_password(dev_sxp))
786 dev_class = sxp.name(dev_sxp)
788 if dev_class != 'pci':
789 return False
791 pci_state = sxp.child_value(dev_sxp, 'state')
792 existing_dev_info = self._getDeviceInfo_pci(devid)
794 if existing_dev_info is None and pci_state != 'Initialising':
795 raise XendError("Cannot detach when pci platform does not exist")
797 pci_dev = sxp.children(dev_sxp, 'dev')[0]
798 dev_config = self.info.pci_convert_sxp_to_dict(dev_sxp)
799 dev = dev_config['devs'][0]
801 # Do HVM specific processing
802 if self.info.is_hvm():
803 if pci_state == 'Initialising':
804 # HVM PCI device attachment
805 vslot = self.hvm_pci_device_create(dev_config)
806 # Update vslot
807 dev['vslot'] = vslot
808 for n in sxp.children(pci_dev):
809 if(n[0] == 'vslot'):
810 n[1] = vslot
811 else:
812 # HVM PCI device detachment
813 existing_dev_uuid = sxp.child_value(existing_dev_info, 'uuid')
814 existing_pci_conf = self.info['devices'][existing_dev_uuid][1]
815 existing_pci_devs = existing_pci_conf['devs']
816 vslot = AUTO_PHP_SLOT_STR
817 for x in existing_pci_devs:
818 if ( int(x['domain'], 16) == int(dev['domain'], 16) and
819 int(x['bus'], 16) == int(dev['bus'], 16) and
820 int(x['slot'], 16) == int(dev['slot'], 16) and
821 int(x['func'], 16) == int(dev['func'], 16) ):
822 if x.has_key('vslot'):
823 vslot = x['vslot']
824 else:
825 vslot = x['requested_vslot']
826 break
827 if vslot == AUTO_PHP_SLOT_STR:
828 raise VmError("Device %04x:%02x:%02x.%01x is not connected"
829 % (int(dev['domain'],16), int(dev['bus'],16),
830 int(dev['slot'],16), int(dev['func'],16)))
831 self.hvm_destroyPCIDevice(int(vslot, 16))
832 # Update vslot
833 dev['vslot'] = vslot
834 for n in sxp.children(pci_dev):
835 if(n[0] == 'vslot'):
836 n[1] = vslot
838 # If pci platform does not exist, create and exit.
839 if existing_dev_info is None:
840 self.device_create(dev_sxp)
841 return True
843 if self.domid is not None:
844 # use DevController.reconfigureDevice to change device config
845 dev_control = self.getDeviceController(dev_class)
846 dev_uuid = dev_control.reconfigureDevice(devid, dev_config)
847 if not self.info.is_hvm():
848 # in PV case, wait until backend state becomes connected.
849 dev_control.waitForDevice_reconfigure(devid)
850 num_devs = dev_control.cleanupDevice(devid)
852 # update XendConfig with new device info
853 if dev_uuid:
854 new_dev_sxp = dev_control.configuration(devid)
855 self.info.device_update(dev_uuid, new_dev_sxp)
857 # If there is no device left, destroy pci and remove config.
858 if num_devs == 0:
859 if self.info.is_hvm():
860 self.destroyDevice('pci', devid, True)
861 del self.info['devices'][dev_uuid]
862 platform = self.info['platform']
863 orig_dev_num = len(platform['pci'])
864 # TODO: can use this to keep some info to ask high level
865 # management tools to hot insert a new passthrough dev
866 # after migration
867 if orig_dev_num != 0:
868 #platform['pci'] = ["%dDEVs" % orig_dev_num]
869 platform['pci'] = []
870 else:
871 self.destroyDevice('pci', devid)
872 del self.info['devices'][dev_uuid]
873 else:
874 new_dev_sxp = ['pci']
875 for cur_dev in sxp.children(existing_dev_info, 'dev'):
876 if pci_state == 'Closing':
877 if int(dev['domain'], 16) == int(sxp.child_value(cur_dev, 'domain'), 16) and \
878 int(dev['bus'], 16) == int(sxp.child_value(cur_dev, 'bus'), 16) and \
879 int(dev['slot'], 16) == int(sxp.child_value(cur_dev, 'slot'), 16) and \
880 int(dev['func'], 16) == int(sxp.child_value(cur_dev, 'func'), 16):
881 continue
882 new_dev_sxp.append(cur_dev)
884 if pci_state == 'Initialising':
885 for new_dev in sxp.children(dev_sxp, 'dev'):
886 new_dev_sxp.append(new_dev)
888 dev_uuid = sxp.child_value(existing_dev_info, 'uuid')
889 self.info.device_update(dev_uuid, new_dev_sxp)
891 # If there is only 'vscsi' in new_dev_sxp, remove the config.
892 if len(sxp.children(new_dev_sxp, 'dev')) == 0:
893 del self.info['devices'][dev_uuid]
894 if self.info.is_hvm():
895 platform = self.info['platform']
896 orig_dev_num = len(platform['pci'])
897 # TODO: can use this to keep some info to ask high level
898 # management tools to hot insert a new passthrough dev
899 # after migration
900 if orig_dev_num != 0:
901 #platform['pci'] = ["%dDEVs" % orig_dev_num]
902 platform['pci'] = []
904 xen.xend.XendDomain.instance().managed_config_save(self)
906 return True
    def vscsi_device_configure(self, dev_sxp):
        """Configure an existing vscsi device node, or create it.

        @param dev_sxp: vscsi device configuration, as a parsed sxpr
        @return: True on success; False if dev_sxp is not a vscsi sxpr
        @raise XendError: if the request conflicts with the existing
            configuration (duplicate device, mode/backend mismatch, or
            detaching a device that does not exist)
        """
        def _is_vscsi_defined(dev_info, p_devs = None, v_devs = None):
            # True if any 'dev' child of dev_info matches one of the given
            # physical (p-dev) or virtual (v-dev) device names.
            if not dev_info:
                return False
            for dev in sxp.children(dev_info, 'dev'):
                if p_devs is not None:
                    if sxp.child_value(dev, 'p-dev') in p_devs:
                        return True
                if v_devs is not None:
                    if sxp.child_value(dev, 'v-dev') in v_devs:
                        return True
            return False

        def _vscsi_be(be):
            # Normalise a backend reference (name or domid) to a domid string.
            be_xdi = xen.xend.XendDomain.instance().domain_lookup_nr(be)
            if be_xdi is not None:
                be_domid = be_xdi.getDomid()
                if be_domid is not None:
                    return str(be_domid)
            return str(be)

        dev_class = sxp.name(dev_sxp)
        if dev_class != 'vscsi':
            return False

        dev_config = self.info.vscsi_convert_sxp_to_dict(dev_sxp)
        devs = dev_config['devs']
        v_devs = [d['v-dev'] for d in devs]
        state = devs[0]['state']
        req_devid = int(devs[0]['devid'])
        # Existing vscsi node with the requested devid, or None.
        cur_dev_sxp = self._getDeviceInfo_vscsi(req_devid)

        if state == xenbusState['Initialising']:
            # new create
            # If request devid does not exist, create and exit.
            p_devs = [d['p-dev'] for d in devs]
            for dev_type, dev_info in self.info.all_devices_sxpr():
                if dev_type != 'vscsi':
                    continue
                if _is_vscsi_defined(dev_info, p_devs = p_devs):
                    raise XendError('The physical device "%s" is already defined' % \
                                    p_devs[0])
            if cur_dev_sxp is None:
                self.device_create(dev_sxp)
                return True

            if _is_vscsi_defined(cur_dev_sxp, v_devs = v_devs):
                raise XendError('The virtual device "%s" is already defined' % \
                                v_devs[0])

            # feature-host mode must match the existing node.
            if int(dev_config['feature-host']) != \
               int(sxp.child_value(cur_dev_sxp, 'feature-host')):
                raise XendError('The physical device "%s" cannot define '
                                'because mode is different' % devs[0]['p-dev'])

            # The backend domain must match the existing node as well;
            # an absent backend defaults to dom0.
            new_be = dev_config.get('backend', None)
            if new_be is not None:
                cur_be = sxp.child_value(cur_dev_sxp, 'backend', None)
                if cur_be is None:
                    cur_be = xen.xend.XendDomain.DOM0_ID
                new_be_dom = _vscsi_be(new_be)
                cur_be_dom = _vscsi_be(cur_be)
                if new_be_dom != cur_be_dom:
                    raise XendError('The physical device "%s" cannot define '
                                    'because backend is different' % devs[0]['p-dev'])

        elif state == xenbusState['Closing']:
            if not _is_vscsi_defined(cur_dev_sxp, v_devs = v_devs):
                raise XendError("Cannot detach vscsi device does not exist")

        if self.domid is not None:
            # use DevController.reconfigureDevice to change device config
            dev_control = self.getDeviceController(dev_class)
            dev_uuid = dev_control.reconfigureDevice(req_devid, dev_config)
            dev_control.waitForDevice_reconfigure(req_devid)
            num_devs = dev_control.cleanupDevice(req_devid)

            # update XendConfig with new device info
            if dev_uuid:
                new_dev_sxp = dev_control.configuration(req_devid)
                self.info.device_update(dev_uuid, new_dev_sxp)

            # If there is no device left, destroy vscsi and remove config.
            if num_devs == 0:
                self.destroyDevice('vscsi', req_devid)
                del self.info['devices'][dev_uuid]

        else:
            # Inactive managed domain: rebuild the stored sxpr by hand.
            new_dev_sxp = ['vscsi']
            cur_mode = sxp.children(cur_dev_sxp, 'feature-host')[0]
            new_dev_sxp.append(cur_mode)
            try:
                cur_be = sxp.children(cur_dev_sxp, 'backend')[0]
                new_dev_sxp.append(cur_be)
            except IndexError:
                # No backend recorded on the current node; keep it absent.
                pass

            for cur_dev in sxp.children(cur_dev_sxp, 'dev'):
                if state == xenbusState['Closing']:
                    # In feature-host mode (cur_mode[1] == 1) the whole host
                    # is detached; otherwise drop only the requested v-devs.
                    if int(cur_mode[1]) == 1:
                        continue
                    if sxp.child_value(cur_dev, 'v-dev') in v_devs:
                        continue
                new_dev_sxp.append(cur_dev)

            if state == xenbusState['Initialising']:
                for new_dev in sxp.children(dev_sxp, 'dev'):
                    new_dev_sxp.append(new_dev)

            dev_uuid = sxp.child_value(cur_dev_sxp, 'uuid')
            self.info.device_update(dev_uuid, new_dev_sxp)

            # If there is only 'vscsi' in new_dev_sxp, remove the config.
            if len(sxp.children(new_dev_sxp, 'dev')) == 0:
                del self.info['devices'][dev_uuid]

            xen.xend.XendDomain.instance().managed_config_save(self)

        return True
    def device_configure(self, dev_sxp, devid = None):
        """Configure an existing device.

        @param dev_sxp: device configuration
        @type dev_sxp: SXP object (parsed config)
        @param devid: device id
        @type devid: int
        @return: Returns True if successfully updated device
        @rtype: boolean
        """

        # convert device sxp to a dict
        dev_class = sxp.name(dev_sxp)
        dev_config = {}

        # PCI and vscsi devices have their own specialised configure paths.
        if dev_class == 'pci':
            return self.pci_device_configure(dev_sxp)

        if dev_class == 'vscsi':
            return self.vscsi_device_configure(dev_sxp)

        for opt_val in dev_sxp[1:]:
            try:
                dev_config[opt_val[0]] = opt_val[1]
            except IndexError:
                # Value-less (flag style) options are simply skipped.
                pass

        dev_control = self.getDeviceController(dev_class)
        if devid is None:
            # Derive the device number from the virtual device name,
            # stripping any "ioemu:" prefix and ":disk"/":cdrom" suffix.
            dev = dev_config.get('dev', '')
            if not dev:
                raise VmError('Block device must have virtual details specified')
            if 'ioemu:' in dev:
                (_, dev) = dev.split(':', 1)
            try:
                (dev, _) = dev.split(':', 1) # Remove ":disk" or ":cdrom"
            except ValueError:
                pass
            devid = dev_control.convertToDeviceNumber(dev)
        dev_info = self._getDeviceInfo_vbd(devid)
        if dev_info is None:
            raise VmError("Device %s not connected" % devid)
        dev_uuid = sxp.child_value(dev_info, 'uuid')

        if self.domid is not None:
            # use DevController.reconfigureDevice to change device config
            dev_control.reconfigureDevice(devid, dev_config)
        else:
            (_, new_b, new_f) = dev_control.getDeviceDetails(dev_config)
            # For an inactive domain, only a media change on a read-only
            # cdrom device is accepted; anything else is refused.
            if (new_f['device-type'] == 'cdrom' and
                sxp.child_value(dev_info, 'dev').endswith(':cdrom') and
                new_b['mode'] == 'r' and
                sxp.child_value(dev_info, 'mode') == 'r'):
                pass
            else:
                raise VmError('Refusing to reconfigure device %s:%d to %s' %
                              (dev_class, devid, dev_config))

        # update XendConfig with new device info
        self.info.device_update(dev_uuid, dev_sxp)
        xen.xend.XendDomain.instance().managed_config_save(self)

        return True
1095 def waitForDevices(self):
1096 """Wait for this domain's configured devices to connect.
1098 @raise VmError: if any device fails to initialise.
1099 """
1100 for devclass in XendDevices.valid_devices():
1101 self.getDeviceController(devclass).waitForDevices()
1103 def hvm_destroyPCIDevice(self, vslot):
1104 log.debug("hvm_destroyPCIDevice called %s", vslot)
1106 if not self.info.is_hvm():
1107 raise VmError("hvm_destroyPCIDevice called on non-HVM guest")
1109 #all the PCI devs share one conf node
1110 devid = '0'
1111 vslot = int(vslot)
1112 dev_info = self._getDeviceInfo_pci('0')#from self.info['devices']
1113 dev_uuid = sxp.child_value(dev_info, 'uuid')
1115 #delete the pci bdf config under the pci device
1116 pci_conf = self.info['devices'][dev_uuid][1]
1117 pci_len = len(pci_conf['devs'])
1119 #find the pass-through device with the virtual slot
1120 devnum = 0
1121 for x in pci_conf['devs']:
1122 if x.has_key('vslot'):
1123 x_vslot = x['vslot']
1124 else:
1125 x_vslot = x['requested_vslot']
1126 if int(x_vslot, 16) == vslot:
1127 break
1128 devnum += 1
1130 if devnum >= pci_len:
1131 raise VmError("Device @ vslot 0x%x doesn't exist." % (vslot))
1133 if vslot == AUTO_PHP_SLOT:
1134 raise VmError("Device @ vslot 0x%x doesn't support hotplug." % (vslot))
1136 # Check the co-assignment.
1137 # To pci-detach a device D from domN, we should ensure: for each DD in the
1138 # list of D's co-assignment devices, DD is not assigned (to domN).
1140 from xen.xend.server.pciif import PciDevice
1141 domain = int(x['domain'],16)
1142 bus = int(x['bus'],16)
1143 dev = int(x['slot'],16)
1144 func = int(x['func'],16)
1145 try:
1146 pci_device = PciDevice(domain, bus, dev, func)
1147 except Exception, e:
1148 raise VmError("pci: failed to locate device and "+
1149 "parse it's resources - "+str(e))
1150 coassignment_list = pci_device.find_coassigned_devices()
1151 coassignment_list.remove(pci_device.name)
1152 assigned_pci_device_str_list = self._get_assigned_pci_devices()
1153 for pci_str in coassignment_list:
1154 if pci_str in assigned_pci_device_str_list:
1155 raise VmError(("pci: failed to pci-detach %s from domain %s" + \
1156 " because one of its co-assignment device %s is still " + \
1157 " assigned to the domain." \
1158 )% (pci_device.name, self.info['name_label'], pci_str))
1161 bdf_str = "%s:%s:%s.%s" % (x['domain'], x['bus'], x['slot'], x['func'])
1162 log.info("hvm_destroyPCIDevice:%s:%s!", x, bdf_str)
1164 if self.domid is not None:
1165 self.image.signalDeviceModel('pci-rem', 'pci-removed', bdf_str)
1167 return 0
    def destroyDevice(self, deviceClass, devid, force = False, rm_cfg = False):
        """Destroy a device, optionally removing its stored configuration.

        @param deviceClass: device class, e.g. 'vif', 'vbd' or 'tap'
        @param devid: device id or virtual device name
        @param force: if True, do not wait for a clean backend teardown
        @param rm_cfg: if True, also delete the device from the managed config
        @return: result of DevController.destroyDevice for a running domain,
            otherwise None
        @raise XendError: if rm_cfg is set and the device is not defined
        """
        log.debug("XendDomainInfo.destroyDevice: deviceClass = %s, device = %s",
                  deviceClass, devid)

        if rm_cfg:
            # Convert devid to device number. A device number is
            # needed to remove its configuration.
            dev = self.getDeviceController(deviceClass).convertToDeviceNumber(devid)

            # Save current sxprs. A device number and a backend
            # path are needed to remove its configuration but sxprs
            # do not have those after calling destroyDevice.
            sxprs = self.getDeviceSxprs(deviceClass)

        rc = None
        if self.domid is not None:
            rc = self.getDeviceController(deviceClass).destroyDevice(devid, force)
            if not force and rm_cfg:
                # The backend path, other than the device itself,
                # has to be passed because its accompanied frontend
                # path may be void until its removal is actually
                # issued. It is probable because destroyDevice is
                # issued first.
                # NOTE(review): 'backend' is only bound when sxprs contains
                # a matching dev_num with a 'backend' entry -- confirm that
                # callers guarantee this before relying on it.
                for dev_num, dev_info in sxprs:
                    dev_num = int(dev_num)
                    if dev_num == dev:
                        for x in dev_info:
                            if x[0] == 'backend':
                                backend = x[1]
                                break
                        break
                self._waitForDevice_destroy(deviceClass, devid, backend)

        if rm_cfg:
            if deviceClass == 'vif':
                if self.domid is not None:
                    # Running domain: look the vif up again by its MAC,
                    # taken from the sxprs saved above.
                    for dev_num, dev_info in sxprs:
                        dev_num = int(dev_num)
                        if dev_num == dev:
                            for x in dev_info:
                                if x[0] == 'mac':
                                    mac = x[1]
                                    break
                            break
                    dev_info = self._getDeviceInfo_vif(mac)
                else:
                    _, dev_info = sxprs[dev]
            else: # 'vbd' or 'tap'
                dev_info = self._getDeviceInfo_vbd(dev)
                # To remove the UUID of the device from refs,
                # deviceClass must be always 'vbd'.
                deviceClass = 'vbd'
            if dev_info is None:
                raise XendError("Device %s is not defined" % devid)

            # Drop the device and its reference from the managed config.
            dev_uuid = sxp.child_value(dev_info, 'uuid')
            del self.info['devices'][dev_uuid]
            self.info['%s_refs' % deviceClass].remove(dev_uuid)
            xen.xend.XendDomain.instance().managed_config_save(self)

        return rc
1231 def getDeviceSxprs(self, deviceClass):
1232 if deviceClass == 'pci':
1233 dev_info = self._getDeviceInfo_pci('0')#from self.info['devices']
1234 if dev_info is None:
1235 return []
1236 dev_uuid = sxp.child_value(dev_info, 'uuid')
1237 pci_devs = self.info['devices'][dev_uuid][1]['devs']
1238 pci_len = len(pci_devs)
1239 return pci_devs
1240 if self._stateGet() in (DOM_STATE_RUNNING, DOM_STATE_PAUSED, DOM_STATE_CRASHED):
1241 return self.getDeviceController(deviceClass).sxprs()
1242 else:
1243 sxprs = []
1244 dev_num = 0
1245 for dev_type, dev_info in self.info.all_devices_sxpr():
1246 if (deviceClass == 'vbd' and dev_type not in ['vbd', 'tap']) or \
1247 (deviceClass != 'vbd' and dev_type != deviceClass):
1248 continue
1250 if deviceClass == 'vscsi':
1251 vscsi_devs = ['devs', []]
1252 for vscsi_dev in sxp.children(dev_info, 'dev'):
1253 vscsi_dev.append(['frontstate', None])
1254 vscsi_devs[1].append(vscsi_dev)
1255 dev_num = int(sxp.child_value(vscsi_dev, 'devid'))
1256 vscsi_mode = sxp.children(dev_info, 'feature-host')[0]
1257 sxprs.append([dev_num, [vscsi_devs, vscsi_mode]])
1258 elif deviceClass == 'vbd':
1259 dev = sxp.child_value(dev_info, 'dev')
1260 if 'ioemu:' in dev:
1261 (_, dev) = dev.split(':', 1)
1262 try:
1263 (dev_name, _) = dev.split(':', 1) # Remove ":disk" or ":cdrom"
1264 except ValueError:
1265 dev_name = dev
1266 dev_num = self.getDeviceController('vbd').convertToDeviceNumber(dev_name)
1267 sxprs.append([dev_num, dev_info])
1268 else:
1269 sxprs.append([dev_num, dev_info])
1270 dev_num += 1
1271 return sxprs
1273 def getBlockDeviceClass(self, devid):
1274 # To get a device number from the devid,
1275 # we temporarily use the device controller of VBD.
1276 dev = self.getDeviceController('vbd').convertToDeviceNumber(devid)
1277 dev_info = self._getDeviceInfo_vbd(dev)
1278 if dev_info:
1279 return dev_info[0]
1281 def _getDeviceInfo_vif(self, mac):
1282 for dev_type, dev_info in self.info.all_devices_sxpr():
1283 if dev_type != 'vif':
1284 continue
1285 if mac == sxp.child_value(dev_info, 'mac'):
1286 return dev_info
1288 def _getDeviceInfo_vbd(self, devid):
1289 for dev_type, dev_info in self.info.all_devices_sxpr():
1290 if dev_type != 'vbd' and dev_type != 'tap':
1291 continue
1292 dev = sxp.child_value(dev_info, 'dev')
1293 dev = dev.split(':')[0]
1294 dev = self.getDeviceController(dev_type).convertToDeviceNumber(dev)
1295 if devid == dev:
1296 return dev_info
1298 def _getDeviceInfo_pci(self, devid):
1299 for dev_type, dev_info in self.info.all_devices_sxpr():
1300 if dev_type != 'pci':
1301 continue
1302 return dev_info
1303 return None
1305 def _getDeviceInfo_vscsi(self, devid):
1306 devid = int(devid)
1307 for dev_type, dev_info in self.info.all_devices_sxpr():
1308 if dev_type != 'vscsi':
1309 continue
1310 devs = sxp.children(dev_info, 'dev')
1311 if devid == int(sxp.child_value(devs[0], 'devid')):
1312 return dev_info
1313 return None
1315 def _get_assigned_pci_devices(self, devid = 0):
1316 if self.domid is not None:
1317 return get_assigned_pci_devices(self.domid)
1319 dev_str_list = []
1320 dev_info = self._getDeviceInfo_pci(devid)
1321 if dev_info is None:
1322 return dev_str_list
1323 dev_uuid = sxp.child_value(dev_info, 'uuid')
1324 pci_conf = self.info['devices'][dev_uuid][1]
1325 pci_devs = pci_conf['devs']
1326 for pci_dev in pci_devs:
1327 domain = int(pci_dev['domain'], 16)
1328 bus = int(pci_dev['bus'], 16)
1329 slot = int(pci_dev['slot'], 16)
1330 func = int(pci_dev['func'], 16)
1331 dev_str = "%04x:%02x:%02x.%01x" % (domain, bus, slot, func)
1332 dev_str_list = dev_str_list + [dev_str]
1333 return dev_str_list
    def setMemoryTarget(self, target):
        """Set the memory target of this domain.
        @param target: In MiB.
        @raise XendError: if dom0 would be ballooned below its minimum.
        """
        log.debug("Setting memory target of domain %s (%s) to %d MiB.",
                  self.info['name_label'], str(self.domid), target)

        MiB = 1024 * 1024
        memory_cur = self.get_memory_dynamic_max() / MiB

        if self.domid == 0:
            # Never let dom0 shrink below the configured dom0-min-mem.
            dom0_min_mem = xoptions.get_dom0_min_mem()
            if target < memory_cur and dom0_min_mem > target:
                raise XendError("memory_dynamic_max too small")

        self._safe_set_memory('memory_dynamic_min', target * MiB)
        self._safe_set_memory('memory_dynamic_max', target * MiB)

        if self.domid >= 0:
            if target > memory_cur:
                # Growing: make sure enough free memory is available first.
                balloon.free((target - memory_cur) * 1024, self)
            self.storeVm("memory", target)
            # xenstore memory/target is in KiB, hence the << 10.
            self.storeDom("memory/target", target << 10)
            xc.domain_set_target_mem(self.domid,
                                     (target * 1024))
        xen.xend.XendDomain.instance().managed_config_save(self)
    def setMemoryMaximum(self, limit):
        """Set the maximum memory limit of this domain
        @param limit: In MiB.
        @raise XendError: if the hypervisor rejects the new maximum (the
            previous in-memory value is restored first).
        """
        log.debug("Setting memory maximum of domain %s (%s) to %d MiB.",
                  self.info['name_label'], str(self.domid), limit)

        maxmem_cur = self.get_memory_static_max()
        MiB = 1024 * 1024
        self._safe_set_memory('memory_static_max', limit * MiB)

        if self.domid >= 0:
            maxmem = int(limit) * 1024  # hypervisor expects KiB
            try:
                # NOTE(review): returning here means the managed_config_save
                # below only runs for inactive domains -- confirm intended.
                return xc.domain_setmaxmem(self.domid, maxmem)
            except Exception, ex:
                # Roll back the in-memory value on hypervisor failure.
                self._safe_set_memory('memory_static_max', maxmem_cur)
                raise XendError(str(ex))
        xen.xend.XendDomain.instance().managed_config_save(self)
    def getVCPUInfo(self):
        """Return an sxpr describing every VCPU of this domain.

        For a running domain the per-VCPU data comes from the hypervisor;
        for an inactive one, placeholder values are reported.

        @raise XendError: wrapping any RuntimeError from the hypercall.
        """
        try:
            # We include the domain name and ID, to help xm.
            sxpr = ['domain',
                    ['domid', self.domid],
                    ['name', self.info['name_label']],
                    ['vcpu_count', self.info['VCPUs_max']]]

            for i in range(0, self.info['VCPUs_max']):
                if self.domid is not None:
                    info = xc.vcpu_getinfo(self.domid, i)

                    sxpr.append(['vcpu',
                                 ['number', i],
                                 ['online', info['online']],
                                 ['blocked', info['blocked']],
                                 ['running', info['running']],
                                 ['cpu_time', info['cpu_time'] / 1e9],
                                 ['cpu', info['cpu']],
                                 ['cpumap', info['cpumap']]])
                else:
                    # Inactive domain: report offline placeholders; an empty
                    # stored cpumap means "any of 64 CPUs".
                    sxpr.append(['vcpu',
                                 ['number', i],
                                 ['online', 0],
                                 ['blocked', 0],
                                 ['running', 0],
                                 ['cpu_time', 0.0],
                                 ['cpu', -1],
                                 ['cpumap', self.info['cpus'][i] and \
                                  self.info['cpus'][i] or range(64)]])

            return sxpr

        except RuntimeError, exn:
            raise XendError(str(exn))
1420 def getDomInfo(self):
1421 return dom_get(self.domid)
    # internal functions ... TODO: re-categorise
    def _augmentInfo(self, priv):
        """Augment self.info, as given to us through L{recreate}, with
        values taken from the store. This recovers those values known
        to xend but not to the hypervisor.

        @param priv: True when this is the privileged domain (dom0).
        """
        augment_entries = XendConfig.LEGACY_XENSTORE_VM_PARAMS[:]
        if priv:
            # dom0's memory/vcpu settings come from Xen, not the store.
            augment_entries.remove('memory')
            augment_entries.remove('maxmem')
            augment_entries.remove('vcpus')
            augment_entries.remove('vcpu_avail')

        vm_config = self._readVMDetails([(k, XendConfig.LEGACY_CFG_TYPES[k])
                                         for k in augment_entries])

        # make returned lists into a dictionary
        vm_config = dict(zip(augment_entries, vm_config))

        for arg in augment_entries:
            val = vm_config[arg]
            if val != None:
                if arg in XendConfig.LEGACY_CFG_TO_XENAPI_CFG:
                    # Translate legacy names to their Xen-API equivalents.
                    xapiarg = XendConfig.LEGACY_CFG_TO_XENAPI_CFG[arg]
                    self.info[xapiarg] = val
                elif arg == "memory":
                    self.info["static_memory_min"] = val
                elif arg == "maxmem":
                    self.info["static_memory_max"] = val
                else:
                    self.info[arg] = val

        # read CPU Affinity
        self.info['cpus'] = []
        vcpus_info = self.getVCPUInfo()
        for vcpu_info in sxp.children(vcpus_info, 'vcpu'):
            self.info['cpus'].append(sxp.child_value(vcpu_info, 'cpumap'))

        # For dom0, we ignore any stored value for the vcpus fields, and
        # read the current value from Xen instead. This allows boot-time
        # settings to take precedence over any entries in the store.
        if priv:
            xeninfo = dom_get(self.domid)
            self.info['VCPUs_max'] = xeninfo['online_vcpus']
            self.info['vcpu_avail'] = (1 << xeninfo['online_vcpus']) - 1

        # read image value
        image_sxp = self._readVm('image')
        if image_sxp:
            self.info.update_with_image_sxp(sxp.from_string(image_sxp))

        # read devices
        devices = []
        for devclass in XendDevices.valid_devices():
            devconfig = self.getDeviceController(devclass).configurations()
            if devconfig:
                devices.extend(devconfig)

        # Only populate devices if none are recorded yet.
        if not self.info['devices'] and devices is not None:
            for device in devices:
                self.info.device_add(device[0], cfg_sxp = device)

        self._update_consoles()
    def _update_consoles(self, transaction = None):
        """Refresh serial-console and VNC information from xenstore into
        self.info.  Skipped for dom0 and for domains without a domid.

        @param transaction: optional open xenstore transaction to read
            through instead of individual reads.
        """
        if self.domid == None or self.domid == 0:
            return

        # Update VT100 port if it exists
        if transaction is None:
            self.console_port = self.readDom('console/port')
        else:
            self.console_port = self.readDomTxn(transaction, 'console/port')
        if self.console_port is not None:
            serial_consoles = self.info.console_get_all('vt100')
            if not serial_consoles:
                # No console recorded yet: create one at this port.
                cfg = self.info.console_add('vt100', self.console_port)
                self._createDevice('console', cfg)
            else:
                console_uuid = serial_consoles[0].get('uuid')
                self.info.console_update(console_uuid, 'location',
                                         self.console_port)

        # Update VNC port if it exists and write to xenstore
        if transaction is None:
            vnc_port = self.readDom('console/vnc-port')
        else:
            vnc_port = self.readDomTxn(transaction, 'console/vnc-port')
        if vnc_port is not None:
            for dev_uuid, (dev_type, dev_info) in self.info['devices'].items():
                if dev_type == 'vfb':
                    old_location = dev_info.get('location')
                    listen_host = dev_info.get('vnclisten', \
                                  XendOptions.instance().get_vnclisten_address())
                    new_location = '%s:%s' % (listen_host, str(vnc_port))
                    if old_location == new_location:
                        # Already current; only one vfb is handled.
                        break

                    dev_info['location'] = new_location
                    self.info.device_update(dev_uuid, cfg_xenapi = dev_info)
                    vfb_ctrl = self.getDeviceController('vfb')
                    vfb_ctrl.reconfigureDevice(0, dev_info)
                    break
    # Function to update xenstore /vm/*

    def _readVm(self, *args):
        """Read the given keys under this VM's xenstore path (self.vmpath)."""
        return xstransact.Read(self.vmpath, *args)

    def _writeVm(self, *args):
        """Write key/value pairs under self.vmpath."""
        return xstransact.Write(self.vmpath, *args)

    def _removeVm(self, *args):
        """Remove the given entries under self.vmpath."""
        return xstransact.Remove(self.vmpath, *args)

    def _gatherVm(self, *args):
        """Gather typed values under self.vmpath."""
        return xstransact.Gather(self.vmpath, *args)

    def _listRecursiveVm(self, *args):
        """Recursively list entries under self.vmpath."""
        return xstransact.ListRecursive(self.vmpath, *args)

    def storeVm(self, *args):
        """Store key/value pairs under self.vmpath."""
        return xstransact.Store(self.vmpath, *args)

    def permissionsVm(self, *args):
        """Set permissions on entries under self.vmpath."""
        return xstransact.SetPermissions(self.vmpath, *args)
    # Function to update xenstore /dom/*

    def readDom(self, *args):
        """Read the given keys under this domain's xenstore path (self.dompath)."""
        return xstransact.Read(self.dompath, *args)

    def gatherDom(self, *args):
        """Gather typed values under self.dompath."""
        return xstransact.Gather(self.dompath, *args)

    def _writeDom(self, *args):
        """Write key/value pairs under self.dompath."""
        return xstransact.Write(self.dompath, *args)

    def _removeDom(self, *args):
        """Remove the given entries under self.dompath."""
        return xstransact.Remove(self.dompath, *args)

    def storeDom(self, *args):
        """Store key/value pairs under self.dompath."""
        return xstransact.Store(self.dompath, *args)
1576 def readDomTxn(self, transaction, *args):
1577 paths = map(lambda x: self.dompath + "/" + x, args)
1578 return transaction.read(*paths)
1580 def gatherDomTxn(self, transaction, *args):
1581 paths = map(lambda x: self.dompath + "/" + x, args)
1582 return transaction.gather(*paths)
1584 def _writeDomTxn(self, transaction, *args):
1585 paths = map(lambda x: self.dompath + "/" + x, args)
1586 return transaction.write(*paths)
1588 def _removeDomTxn(self, transaction, *args):
1589 paths = map(lambda x: self.dompath + "/" + x, args)
1590 return transaction.remove(*paths)
1592 def storeDomTxn(self, transaction, *args):
1593 paths = map(lambda x: self.dompath + "/" + x, args)
1594 return transaction.store(*paths)
1597 def _recreateDom(self):
1598 complete(self.dompath, lambda t: self._recreateDomFunc(t))
1600 def _recreateDomFunc(self, t):
1601 t.remove()
1602 t.mkdir()
1603 t.set_permissions({'dom' : self.domid, 'read' : True})
1604 t.write('vm', self.vmpath)
1605 # NB. Solaris guests use guest/ and hvmpv/ xenstore directories
1606 for i in [ 'device', 'control', 'error', 'memory', 'guest', 'hvmpv' ]:
1607 t.mkdir(i)
1608 t.set_permissions(i, {'dom' : self.domid})
    def _storeDomDetails(self):
        """Write this domain's details into its xenstore domain tree."""
        to_store = {
            'domid': str(self.domid),
            'vm': self.vmpath,
            'name': self.info['name_label'],
            'console/limit': str(xoptions.get_console_limit() * 1024),
            'memory/target': str(self.info['memory_dynamic_max'] / 1024),
            }

        def f(n, v):
            # Store v under key n, rendering booleans as "1"/"0";
            # None values are skipped.
            if v is not None:
                if type(v) == bool:
                    to_store[n] = v and "1" or "0"
                else:
                    to_store[n] = str(v)

        # Figure out if we need to tell xenconsoled to ignore this guest's
        # console - device model will handle console if it is running
        constype = "ioemu"
        if 'device_model' not in self.info['platform']:
            constype = "xenconsoled"

        f('console/port', self.console_port)
        f('console/ring-ref', self.console_mfn)
        f('console/type', constype)
        f('store/port', self.store_port)
        f('store/ring-ref', self.store_mfn)

        if arch.type == "x86":
            f('control/platform-feature-multiprocessor-suspend', True)

        # elfnotes
        for n, v in self.info.get_notes().iteritems():
            n = n.lower().replace('_', '-')
            if n == 'features':
                # A '!'-prefixed feature is stored as disabled.
                for v in v.split('|'):
                    v = v.replace('_', '-')
                    if v.startswith('!'):
                        f('image/%s/%s' % (n, v[1:]), False)
                    else:
                        f('image/%s/%s' % (n, v), True)
            else:
                f('image/%s' % n, v)

        if self.info.has_key('security_label'):
            f('security_label', self.info['security_label'])

        to_store.update(self._vcpuDomDetails())

        log.debug("Storing domain details: %s", scrub_password(to_store))

        self._writeDom(to_store)
1663 def _vcpuDomDetails(self):
1664 def availability(n):
1665 if self.info['vcpu_avail'] & (1 << n):
1666 return 'online'
1667 else:
1668 return 'offline'
1670 result = {}
1671 for v in range(0, self.info['VCPUs_max']):
1672 result["cpu/%d/availability" % v] = availability(v)
1673 return result
1676 # xenstore watches
1679 def _registerWatches(self):
1680 """Register a watch on this VM's entries in the store, and the
1681 domain's control/shutdown node, so that when they are changed
1682 externally, we keep up to date. This should only be called by {@link
1683 #create}, {@link #recreate}, or {@link #restore}, once the domain's
1684 details have been written, but before the new instance is returned."""
1685 self.vmWatch = xswatch(self.vmpath, self._storeChanged)
1686 self.shutdownWatch = xswatch(self.dompath + '/control/shutdown',
1687 self._handleShutdownWatch)
    def _storeChanged(self, _):
        """xswatch callback: re-read our /vm entries and resync self.info.

        @return: 1 to keep the watch registered.
        """
        log.trace("XendDomainInfo.storeChanged");

        changed = False

        # Check whether values in the configuration have
        # changed in Xenstore.

        cfg_vm = ['name', 'on_poweroff', 'on_reboot', 'on_crash',
                  'rtc/timeoffset']

        vm_details = self._readVMDetails([(k,XendConfig.LEGACY_CFG_TYPES[k])
                                          for k in cfg_vm])

        # convert two lists into a python dictionary
        vm_details = dict(zip(cfg_vm, vm_details))

        for arg, val in vm_details.items():
            if arg in XendConfig.LEGACY_CFG_TO_XENAPI_CFG:
                xapiarg = XendConfig.LEGACY_CFG_TO_XENAPI_CFG[arg]
                if val != None and val != self.info[xapiarg]:
                    self.info[xapiarg] = val
                    changed = True
            elif arg == "memory":
                if val != None and val != self.info["static_memory_min"]:
                    self.info["static_memory_min"] = val
                    changed = True
            elif arg == "maxmem":
                if val != None and val != self.info["static_memory_max"]:
                    self.info["static_memory_max"] = val
                    changed = True

        # Check whether image definition has been updated
        image_sxp = self._readVm('image')
        if image_sxp and image_sxp != sxp.to_string(self.info.image_sxpr()):
            self.info.update_with_image_sxp(sxp.from_string(image_sxp))
            changed = True

        # Update the rtc_timeoffset to be preserved across reboot.
        # NB. No need to update xenstore domain section.
        val = int(vm_details.get("rtc/timeoffset", 0))
        self.info["platform"]["rtc_timeoffset"] = val

        if changed:
            # Update the domain section of the store, as this contains some
            # parameters derived from the VM configuration.
            self.refresh_shutdown_lock.acquire()
            try:
                state = self._stateGet()
                if state not in (DOM_STATE_SHUTDOWN, DOM_STATE_HALTED,):
                    self._storeDomDetails()
            finally:
                self.refresh_shutdown_lock.release()

        return 1
    def _handleShutdownWatch(self, _):
        """xswatch callback on control/shutdown: schedule shutdown-timeout
        handling once the guest has been asked to shut down.

        @return: True to keep the watch registered.
        """
        log.debug('XendDomainInfo.handleShutdownWatch')

        reason = self.readDom('control/shutdown')

        # 'suspend' is managed by XendCheckpoint, not here.
        if reason and reason != 'suspend':
            sst = self.readDom('xend/shutdown_start_time')
            now = time.time()
            if sst:
                # Shutdown already in progress; compute remaining timeout.
                self.shutdownStartTime = float(sst)
                timeout = float(sst) + SHUTDOWN_TIMEOUT - now
            else:
                self.shutdownStartTime = now
                self.storeDom('xend/shutdown_start_time', now)
                timeout = SHUTDOWN_TIMEOUT

            log.trace(
                "Scheduling refreshShutdown on domain %d in %ds.",
                self.domid, timeout)
            threading.Timer(timeout, self.refreshShutdown).start()

        return True
1770 # Public Attributes for the VM
1774 def getDomid(self):
1775 return self.domid
1777 def setName(self, name, to_store = True):
1778 self._checkName(name)
1779 self.info['name_label'] = name
1780 if to_store:
1781 self.storeVm("name", name)
    def getName(self):
        """Return the VM's name label."""
        return self.info['name_label']

    def getDomainPath(self):
        """Return this domain's xenstore path (self.dompath)."""
        return self.dompath

    def getShutdownReason(self):
        """Return the value of control/shutdown from xenstore, if any."""
        return self.readDom('control/shutdown')

    def getStorePort(self):
        """For use only by image.py and XendCheckpoint.py."""
        return self.store_port

    def getConsolePort(self):
        """For use only by image.py and XendCheckpoint.py"""
        return self.console_port

    def getFeatures(self):
        """For use only by image.py."""
        return self.info['features']

    def getVCpuCount(self):
        """Return the maximum number of VCPUs configured for this VM."""
        return self.info['VCPUs_max']
    def setVCpuCount(self, vcpus):
        """Set the number of VCPUs available to this domain.

        @param vcpus: requested VCPU count; must be > 0 and, for a running
            domain, must not exceed VCPUs_max.
        @raise XendError: if the requested count is invalid.
        """
        def vcpus_valid(n):
            # NOTE(review): validates the closed-over 'vcpus', not 'n'.
            if vcpus <= 0:
                raise XendError('Zero or less VCPUs is invalid')
            if self.domid >= 0 and vcpus > self.info['VCPUs_max']:
                raise XendError('Cannot set vcpus greater than max vcpus on running domain')
        vcpus_valid(vcpus)

        # Availability bitmask: lowest 'vcpus' bits set.
        self.info['vcpu_avail'] = (1 << vcpus) - 1
        if self.domid >= 0:
            # Running domain: publish the new mask so the guest can bring
            # CPUs on/offline.
            self.storeVm('vcpu_avail', self.info['vcpu_avail'])
            self._writeDom(self._vcpuDomDetails())
            self.info['VCPUs_live'] = vcpus
        else:
            # Inactive domain: resize the stored per-VCPU affinity list.
            if self.info['VCPUs_max'] > vcpus:
                # decreasing
                del self.info['cpus'][vcpus:]
            elif self.info['VCPUs_max'] < vcpus:
                # increasing
                for c in range(self.info['VCPUs_max'], vcpus):
                    self.info['cpus'].append(list())
            self.info['VCPUs_max'] = vcpus
        xen.xend.XendDomain.instance().managed_config_save(self)
        log.info("Set VCPU count on domain %s to %d", self.info['name_label'],
                 vcpus)
1833 def getMemoryTarget(self):
1834 """Get this domain's target memory size, in KB."""
1835 return self.info['memory_dynamic_max'] / 1024
1837 def getMemoryMaximum(self):
1838 """Get this domain's maximum memory size, in KB."""
1839 # remember, info now stores memory in bytes
1840 return self.info['memory_static_max'] / 1024
1842 def getResume(self):
1843 return str(self._resume)
    def setResume(self, isresume):
        # Record whether the next start is a resume rather than a fresh boot.
        self._resume = isresume

    def getCpus(self):
        """Return the per-VCPU CPU affinity lists."""
        return self.info['cpus']

    def setCpus(self, cpumap):
        """Replace the per-VCPU CPU affinity lists."""
        self.info['cpus'] = cpumap

    def getCap(self):
        """Return the 'cap' entry of this VM's vcpus_params."""
        return self.info['vcpus_params']['cap']

    def setCap(self, cpu_cap):
        """Set the 'cap' entry of this VM's vcpus_params."""
        self.info['vcpus_params']['cap'] = cpu_cap

    def getWeight(self):
        """Return the 'weight' entry of this VM's vcpus_params."""
        return self.info['vcpus_params']['weight']

    def setWeight(self, cpu_weight):
        """Set the 'weight' entry of this VM's vcpus_params."""
        self.info['vcpus_params']['weight'] = cpu_weight

    def getRestartCount(self):
        """Return the stored xend/restart_count value from xenstore."""
        return self._readVm('xend/restart_count')
    def refreshShutdown(self, xeninfo = None):
        """ Checks the domain for whether a shutdown is required.

        Called from XendDomainInfo and also image.py for HVM images.

        @param xeninfo: optional pre-fetched dominfo dict; when None, the
            current state is read from the hypervisor.
        """

        # If set at the end of this method, a restart is required, with the
        # given reason. This restart has to be done out of the scope of
        # refresh_shutdown_lock.
        restart_reason = None

        self.refresh_shutdown_lock.acquire()
        try:
            if xeninfo is None:
                xeninfo = dom_get(self.domid)
                if xeninfo is None:
                    # The domain no longer exists. This will occur if we have
                    # scheduled a timer to check for shutdown timeouts and the
                    # shutdown succeeded. It will also occur if someone
                    # destroys a domain beneath us. We clean up the domain,
                    # just in case, but we can't clean up the VM, because that
                    # VM may have migrated to a different domain on this
                    # machine.
                    self.cleanupDomain()
                    self._stateSet(DOM_STATE_HALTED)
                    return

            if xeninfo['dying']:
                # Dying means that a domain has been destroyed, but has not
                # yet been cleaned up by Xen. This state could persist
                # indefinitely if, for example, another domain has some of its
                # pages mapped. We might like to diagnose this problem in the
                # future, but for now all we do is make sure that it's not us
                # holding the pages, by calling cleanupDomain. We can't
                # clean up the VM, as above.
                self.cleanupDomain()
                self._stateSet(DOM_STATE_SHUTDOWN)
                return

            elif xeninfo['crashed']:
                if self.readDom('xend/shutdown_completed'):
                    # We've seen this shutdown already, but we are preserving
                    # the domain for debugging. Leave it alone.
                    return

                log.warn('Domain has crashed: name=%s id=%d.',
                         self.info['name_label'], self.domid)
                self._writeVm(LAST_SHUTDOWN_REASON, 'crash')

                restart_reason = 'crash'
                self._stateSet(DOM_STATE_HALTED)

            elif xeninfo['shutdown']:
                self._stateSet(DOM_STATE_SHUTDOWN)
                if self.readDom('xend/shutdown_completed'):
                    # We've seen this shutdown already, but we are preserving
                    # the domain for debugging. Leave it alone.
                    return

                else:
                    reason = shutdown_reason(xeninfo['shutdown_reason'])

                    log.info('Domain has shutdown: name=%s id=%d reason=%s.',
                             self.info['name_label'], self.domid, reason)
                    self._writeVm(LAST_SHUTDOWN_REASON, reason)

                    self._clearRestart()

                    if reason == 'suspend':
                        self._stateSet(DOM_STATE_SUSPENDED)
                        # Don't destroy the domain. XendCheckpoint will do
                        # this once it has finished. However, stop watching
                        # the VM path now, otherwise we will end up with one
                        # watch for the old domain, and one for the new.
                        self._unwatchVm()
                    elif reason in ('poweroff', 'reboot'):
                        restart_reason = reason
                    else:
                        self.destroy()

            elif self.dompath is None:
                # We have yet to manage to call introduceDomain on this
                # domain. This can happen if a restore is in progress, or has
                # failed. Ignore this domain.
                pass
            else:
                # Domain is alive. If we are shutting it down, log a message
                # if it seems unresponsive.
                if xeninfo['paused']:
                    self._stateSet(DOM_STATE_PAUSED)
                else:
                    self._stateSet(DOM_STATE_RUNNING)

                if self.shutdownStartTime:
                    timeout = (SHUTDOWN_TIMEOUT - time.time() +
                               self.shutdownStartTime)
                    if (timeout < 0 and not self.readDom('xend/unresponsive')):
                        log.info(
                            "Domain shutdown timeout expired: name=%s id=%s",
                            self.info['name_label'], self.domid)
                        self.storeDom('xend/unresponsive', 'True')
        finally:
            self.refresh_shutdown_lock.release()

        # Restart outside the lock, on a separate thread, to avoid
        # deadlocking against ourselves.
        if restart_reason and not self.restart_in_progress:
            self.restart_in_progress = True
            threading.Thread(target = self._maybeRestart,
                             args = (restart_reason,)).start()
1980 # Restart functions - handling whether we come back up on shutdown.
1983 def _clearRestart(self):
1984 self._removeDom("xend/shutdown_start_time")
1986 def _maybeDumpCore(self, reason):
1987 if reason == 'crash':
1988 if xoptions.get_enable_dump() or self.get_on_crash() \
1989 in ['coredump_and_destroy', 'coredump_and_restart']:
1990 try:
1991 self.dumpCore()
1992 except XendError:
1993 # This error has been logged -- there's nothing more
1994 # we can do in this context.
1995 pass
1997 def _maybeRestart(self, reason):
1998 # Before taking configured action, dump core if configured to do so.
2000 self._maybeDumpCore(reason)
2002 # Dispatch to the correct method based upon the configured on_{reason}
2003 # behaviour.
2004 actions = {"destroy" : self.destroy,
2005 "restart" : self._restart,
2006 "preserve" : self._preserve,
2007 "rename-restart" : self._renameRestart,
2008 "coredump-destroy" : self.destroy,
2009 "coredump-restart" : self._restart}
2011 action_conf = {
2012 'poweroff': 'actions_after_shutdown',
2013 'reboot': 'actions_after_reboot',
2014 'crash': 'actions_after_crash',
2017 action_target = self.info.get(action_conf.get(reason))
2018 func = actions.get(action_target, None)
2019 if func and callable(func):
2020 func()
2021 else:
2022 self.destroy() # default to destroy
2024 def _renameRestart(self):
2025 self._restart(True)
    def _restart(self, rename = False):
        """Restart the domain after it has exited.

        @param rename True if the old domain is to be renamed and preserved,
        False if it is to be destroyed.
        """
        from xen.xend import XendDomain

        # RESTART_IN_PROGRESS guards against restart loops if xend itself
        # died mid-restart last time.
        if self._readVm(RESTART_IN_PROGRESS):
            log.error('Xend failed during restart of domain %s. '
                      'Refusing to restart to avoid loops.',
                      str(self.domid))
            self.destroy()
            return

        old_domid = self.domid
        self._writeVm(RESTART_IN_PROGRESS, 'True')

        # Refuse to restart domains that exit immediately after boot,
        # again to avoid a tight restart loop.
        elapse = time.time() - self.info['start_time']
        if elapse < MINIMUM_RESTART_TIME:
            log.error('VM %s restarting too fast (Elapsed time: %f seconds). '
                      'Refusing to restart to avoid loops.',
                      self.info['name_label'], elapse)
            self.destroy()
            return

        prev_vm_xend = self._listRecursiveVm('xend')
        new_dom_info = self.info
        try:
            if rename:
                new_dom_info = self._preserveForRestart()
            else:
                self._unwatchVm()
                self.destroy()

            # new_dom's VM will be the same as this domain's VM, except where
            # the rename flag has instructed us to call preserveForRestart.
            # In that case, it is important that we remove the
            # RESTART_IN_PROGRESS node from the new domain, not the old one,
            # once the new one is available.

            new_dom = None
            try:
                new_dom = XendDomain.instance().domain_create_from_dict(
                    new_dom_info)
                # Carry the xend/* bookkeeping entries over to the new VM.
                for x in prev_vm_xend[0][1]:
                    new_dom._writeVm('xend/%s' % x[0], x[1])
                new_dom.waitForDevices()
                new_dom.unpause()
                # NOTE(review): if xend/restart_count is absent, _readVm
                # presumably returns None and int(None) raises, triggering
                # the cleanup path below -- confirm against callers.
                rst_cnt = new_dom._readVm('xend/restart_count')
                rst_cnt = int(rst_cnt) + 1
                new_dom._writeVm('xend/restart_count', str(rst_cnt))
                new_dom._removeVm(RESTART_IN_PROGRESS)
            except:
                if new_dom:
                    new_dom._removeVm(RESTART_IN_PROGRESS)
                    new_dom.destroy()
                else:
                    self._removeVm(RESTART_IN_PROGRESS)
                raise
        except:
            log.exception('Failed to restart domain %s.', str(old_domid))
    def _preserveForRestart(self):
        """Preserve a domain that has been shut down, by giving it a new UUID,
        cloning the VM details, and giving it a new name. This allows us to
        keep this domain for debugging, but restart a new one in its place
        preserving the restart semantics (name and UUID preserved).

        @return a copy of self.info carrying the original name and UUID,
            suitable for creating the replacement domain.
        """

        new_uuid = uuid.createString()
        new_name = 'Domain-%s' % new_uuid
        log.info("Renaming dead domain %s (%d, %s) to %s (%s).",
                 self.info['name_label'], self.domid, self.info['uuid'],
                 new_name, new_uuid)
        self._unwatchVm()
        self._releaseDevices()
        # Remove existing vm node in xenstore
        self._removeVm()
        # The copy keeps the original identity; this (dead) domain then
        # takes over the freshly generated name and UUID.
        new_dom_info = self.info.copy()
        new_dom_info['name_label'] = self.info['name_label']
        new_dom_info['uuid'] = self.info['uuid']
        self.info['name_label'] = new_name
        self.info['uuid'] = new_uuid
        self.vmpath = XS_VMROOT + new_uuid
        # Write out new vm node to xenstore
        self._storeVmDetails()
        self._preserve()
        return new_dom_info
2118 def _preserve(self):
2119 log.info("Preserving dead domain %s (%d).", self.info['name_label'],
2120 self.domid)
2121 self._unwatchVm()
2122 self.storeDom('xend/shutdown_completed', 'True')
2123 self._stateSet(DOM_STATE_HALTED)
2126 # Debugging ..
    def dumpCore(self, corefile = None):
        """Create a core dump for this domain.

        @param corefile: optional path for the dump file; defaults to a
            timestamped file under /var/xen/dump.
        @raise: XendError if core dumping failed.
        """

        if not corefile:
            this_time = time.strftime("%Y-%m%d-%H%M.%S", time.localtime())
            corefile = "/var/xen/dump/%s-%s.%s.core" % (this_time,
                              self.info['name_label'], self.domid)

        if os.path.isdir(corefile):
            raise XendError("Cannot dump core in a directory: %s" %
                            corefile)

        try:
            try:
                # Flag the dump in the VM path so other tools can see it.
                self._writeVm(DUMPCORE_IN_PROGRESS, 'True')
                xc.domain_dumpcore(self.domid, corefile)
            except RuntimeError, ex:
                # Keep whatever was written, but mark it as incomplete.
                corefile_incomp = corefile+'-incomplete'
                try:
                    os.rename(corefile, corefile_incomp)
                except:
                    pass

                log.error("core dump failed: id = %s name = %s: %s",
                          self.domid, self.info['name_label'], str(ex))
                raise XendError("Failed to dump core: %s" % str(ex))
        finally:
            # Always clear the in-progress flag, success or failure.
            self._removeVm(DUMPCORE_IN_PROGRESS)
2162 # Device creation/deletion functions
2165 def _createDevice(self, deviceClass, devConfig):
2166 return self.getDeviceController(deviceClass).createDevice(devConfig)
2168 def _waitForDevice(self, deviceClass, devid):
2169 return self.getDeviceController(deviceClass).waitForDevice(devid)
2171 def _waitForDeviceUUID(self, dev_uuid):
2172 deviceClass, config = self.info['devices'].get(dev_uuid)
2173 self._waitForDevice(deviceClass, config['devid'])
2175 def _waitForDevice_destroy(self, deviceClass, devid, backpath):
2176 return self.getDeviceController(deviceClass).waitForDevice_destroy(
2177 devid, backpath)
2179 def _reconfigureDevice(self, deviceClass, devid, devconfig):
2180 return self.getDeviceController(deviceClass).reconfigureDevice(
2181 devid, devconfig)
    def _createDevices(self):
        """Create the devices for a vm.

        Non-vscsi devices are created in configuration order; vscsi
        devices are collected first and then created in sorted devid
        order so the guest sees stable /dev/sdxx naming.

        @raise: VmError for invalid devices
        """
        if self.image:
            self.image.prepareEnvironment()

        vscsi_uuidlist = {}
        vscsi_devidlist = []
        ordered_refs = self.info.ordered_device_refs()
        for dev_uuid in ordered_refs:
            devclass, config = self.info['devices'][dev_uuid]
            if devclass in XendDevices.valid_devices() and devclass != 'vscsi':
                log.info("createDevice: %s : %s" % (devclass, scrub_password(config)))
                dev_uuid = config.get('uuid')
                devid = self._createDevice(devclass, config)

                # store devid in XendConfig for caching reasons
                if dev_uuid in self.info['devices']:
                    self.info['devices'][dev_uuid][1]['devid'] = devid

            elif devclass == 'vscsi':
                # Defer vscsi creation until all devids are known.
                vscsi_config = config.get('devs', [])[0]
                devid = vscsi_config.get('devid', '')
                dev_uuid = config.get('uuid')
                vscsi_uuidlist[devid] = dev_uuid
                vscsi_devidlist.append(devid)

        #It is necessary to sorted it for /dev/sdxx in guest.
        if len(vscsi_uuidlist) > 0:
            vscsi_devidlist.sort()
            for vscsiid in vscsi_devidlist:
                dev_uuid = vscsi_uuidlist[vscsiid]
                devclass, config = self.info['devices'][dev_uuid]
                log.info("createDevice: %s : %s" % (devclass, scrub_password(config)))
                dev_uuid = config.get('uuid')
                devid = self._createDevice(devclass, config)
                # store devid in XendConfig for caching reasons
                if dev_uuid in self.info['devices']:
                    self.info['devices'][dev_uuid][1]['devid'] = devid

        if self.image:
            self.image.createDeviceModel()

        #if have pass-through devs, need the virtual pci slots info from qemu
        self.sync_pcidev_info()
    def _releaseDevices(self, suspend = False):
        """Release all domain's devices. Nothrow guarantee.

        Destroys the device model (if any) first, then walks every
        device class under the domain's xenstore device tree and
        destroys each device, logging and swallowing per-device
        failures.
        """
        if self.image:
            try:
                log.debug("Destroying device model")
                self.image.destroyDeviceModel()
            except Exception, e:
                log.exception("Device model destroy failed %s" % str(e))
        else:
            log.debug("No device model")

        log.debug("Releasing devices")
        t = xstransact("%s/device" % self.dompath)
        try:
            for devclass in XendDevices.valid_devices():
                for dev in t.list(devclass):
                    try:
                        true_devclass = devclass
                        if devclass == 'vbd':
                            # In the case of "vbd", the true device class
                            # may possibly be "tap". Just in case, verify
                            # device class.
                            devid = dev.split('/')[-1]
                            true_devclass = self.getBlockDeviceClass(devid)
                        log.debug("Removing %s", dev);
                        self.destroyDevice(true_devclass, dev, False);
                    except:
                        # Log and swallow any exceptions in removal --
                        # there's nothing more we can do.
                        log.exception("Device release failed: %s; %s; %s",
                                      self.info['name_label'],
                                      true_devclass, dev)
        finally:
            # The transaction was only used for listing; never commit.
            t.abort()
2267 def getDeviceController(self, name):
2268 """Get the device controller for this domain, and if it
2269 doesn't exist, create it.
2271 @param name: device class name
2272 @type name: string
2273 @rtype: subclass of DevController
2274 """
2275 if name not in self._deviceControllers:
2276 devController = XendDevices.make_controller(name, self)
2277 if not devController:
2278 raise XendError("Unknown device type: %s" % name)
2279 self._deviceControllers[name] = devController
2281 return self._deviceControllers[name]
2284 # Migration functions (public)
2287 def testMigrateDevices(self, network, dst):
2288 """ Notify all device about intention of migration
2289 @raise: XendError for a device that cannot be migrated
2290 """
2291 for (n, c) in self.info.all_devices_sxpr():
2292 rc = self.migrateDevice(n, c, network, dst, DEV_MIGRATE_TEST, self.getName())
2293 if rc != 0:
2294 raise XendError("Device of type '%s' refuses migration." % n)
    def migrateDevices(self, network, dst, step, domName=''):
        """Notify the devices about migration

        On failure, every device is asked to recover: the ctr devices
        already notified at this step are recovered at this step, and
        the remainder at the previous step, before re-raising.
        """
        ctr = 0
        try:
            for (dev_type, dev_conf) in self.info.all_devices_sxpr():
                self.migrateDevice(dev_type, dev_conf, network, dst,
                                   step, domName)
                ctr = ctr + 1
        except:
            # Roll back in the same order; once ctr reaches zero the
            # remaining devices were never notified at this step, so
            # recover them at the previous one.
            for dev_type, dev_conf in self.info.all_devices_sxpr():
                if ctr == 0:
                    step = step - 1
                ctr = ctr - 1
                self._recoverMigrateDevice(dev_type, dev_conf, network,
                                           dst, step, domName)
            raise
2314 def migrateDevice(self, deviceClass, deviceConfig, network, dst,
2315 step, domName=''):
2316 return self.getDeviceController(deviceClass).migrate(deviceConfig,
2317 network, dst, step, domName)
2319 def _recoverMigrateDevice(self, deviceClass, deviceConfig, network,
2320 dst, step, domName=''):
2321 return self.getDeviceController(deviceClass).recover_migrate(
2322 deviceConfig, network, dst, step, domName)
2325 ## private:
    def _constructDomain(self):
        """Construct the domain.

        Creates the domain via the hypervisor, sets HVM parameters,
        vcpu limits and scheduler sanity checks, verifies VT-d device
        assignability, and registers the domain with XendDomain.

        @raise: VmError on error
        """

        log.debug('XendDomainInfo.constructDomain')

        self.shutdownStartTime = None
        self.restart_in_progress = False

        hap = 0
        hvm = self.info.is_hvm()
        if hvm:
            hap = self.info.is_hap()
            info = xc.xeninfo()
            if 'hvm' not in info['xen_caps']:
                raise VmError("HVM guest support is unavailable: is VT/AMD-V "
                              "supported by your CPU and enabled in your "
                              "BIOS?")

        # Hack to pre-reserve some memory for initial domain creation.
        # There is an implicit memory overhead for any domain creation. This
        # overhead is greater for some types of domain than others. For
        # example, an x86 HVM domain will have a default shadow-pagetable
        # allocation of 1MB. We free up 4MB here to be on the safe side.
        # 2MB memory allocation was not enough in some cases, so it's 4MB now
        balloon.free(4*1024, self) # 4MB should be plenty

        ssidref = 0
        if security.on() == xsconstants.XS_POLICY_USE:
            ssidref = security.calc_dom_ssidref_from_info(self.info)
            if security.has_authorization(ssidref) == False:
                raise VmError("VM is not authorized to run.")

        s3_integrity = 0
        if self.info.has_key('s3_integrity'):
            s3_integrity = self.info['s3_integrity']
        # Creation flags: bit 0 = HVM, bit 1 = HAP, bit 2 = S3 integrity.
        flags = (int(hvm) << 0) | (int(hap) << 1) | (int(s3_integrity) << 2)

        try:
            self.domid = xc.domain_create(
                domid = 0,
                ssidref = ssidref,
                handle = uuid.fromString(self.info['uuid']),
                flags = flags,
                target = self.info.target())
        except Exception, e:
            # may get here if due to ACM the operation is not permitted
            if security.on() == xsconstants.XS_POLICY_ACM:
                raise VmError('Domain in conflict set with running domain?')

        # NOTE(review): a non-ACM creation failure is swallowed above and
        # only caught here via the domid check -- confirm intended.
        if self.domid < 0:
            raise VmError('Creating domain failed: name=%s' %
                          self.info['name_label'])

        self.dompath = GetDomainPath(self.domid)

        self._recreateDom()

        # Set timer configration of domain
        timer_mode = self.info["platform"].get("timer_mode")
        if hvm and timer_mode is not None:
            xc.hvm_set_param(self.domid, HVM_PARAM_TIMER_MODE,
                             long(timer_mode))

        # Set Viridian interface configuration of domain
        viridian = self.info["platform"].get("viridian")
        if arch.type == "x86" and hvm and viridian is not None:
            xc.hvm_set_param(self.domid, HVM_PARAM_VIRIDIAN, long(viridian))

        # Optionally enable virtual HPET
        hpet = self.info["platform"].get("hpet")
        if hvm and hpet is not None:
            xc.hvm_set_param(self.domid, HVM_PARAM_HPET_ENABLED,
                             long(hpet))

        # Optionally enable periodic vpt aligning
        vpt_align = self.info["platform"].get("vpt_align")
        if hvm and vpt_align is not None:
            xc.hvm_set_param(self.domid, HVM_PARAM_VPT_ALIGN,
                             long(vpt_align))

        # Set maximum number of vcpus in domain
        xc.domain_max_vcpus(self.domid, int(self.info['VCPUs_max']))

        # Check for cpu_{cap|weight} validity for credit scheduler
        if XendNode.instance().xenschedinfo() == 'credit':
            cap = self.getCap()
            weight = self.getWeight()

            assert type(weight) == int
            assert type(cap) == int

            if weight < 1 or weight > 65535:
                raise VmError("Cpu weight out of range, valid values are within range from 1 to 65535")

            if cap < 0 or cap > self.getVCpuCount() * 100:
                raise VmError("Cpu cap out of range, valid range is from 0 to %s for specified number of vcpus" %
                              (self.getVCpuCount() * 100))

        # Test whether the devices can be assigned with VT-d
        pci = self.info["platform"].get("pci")
        pci_str = ''
        if pci and len(pci) > 0:
            pci = map(lambda x: x[0:4], pci) # strip options
            pci_str = str(pci)
        if hvm and pci_str:
            bdf = xc.test_assign_device(0, pci_str)
            if bdf != 0:
                if bdf == -1:
                    raise VmError("failed to assign device: maybe the platform"
                                  " doesn't support VT-d, or VT-d isn't enabled"
                                  " properly?")
                # Decode the failing device's bus/dev/func from the BDF.
                bus = (bdf >> 16) & 0xff
                devfn = (bdf >> 8) & 0xff
                dev = (devfn >> 3) & 0x1f
                func = devfn & 0x7
                raise VmError("fail to assign device(%x:%x.%x): maybe it has"
                              " already been assigned to other domain, or maybe"
                              " it doesn't exist." % (bus, dev, func))

        # register the domain in the list
        from xen.xend import XendDomain
        XendDomain.instance().add_domain(self)
    def _introduceDomain(self):
        """Introduce the domain to xenstored, connecting its store ring
        (store_mfn/store_port) so the guest can use xenstore.

        @raise: XendError wrapping any RuntimeError from xenstored.
        """
        assert self.domid is not None
        assert self.store_mfn is not None
        assert self.store_port is not None

        try:
            IntroduceDomain(self.domid, self.store_mfn, self.store_port)
        except RuntimeError, exn:
            raise XendError(str(exn))
    def _setTarget(self, target):
        """Set this domain's target domain (stub-domain support) and
        record it under the domain's xenstore 'target' node.

        @raise: XendError wrapping any RuntimeError from the hypercall.
        """
        assert self.domid is not None

        try:
            SetTarget(self.domid, target)
            self.storeDom('target', target)
        except RuntimeError, exn:
            raise XendError(str(exn))
    def _setCPUAffinity(self):
        """ Repin domain vcpus if a restricted cpus list is provided.

        If no explicit cpus list is configured, on multi-node hosts the
        least-loaded NUMA node with enough free memory is picked and all
        vcpus are pinned to its cpus.
        """

        def has_cpus():
            # True iff any vcpu has a non-empty cpu restriction.
            if self.info['cpus'] is not None:
                for c in self.info['cpus']:
                    if c:
                        return True
            return False

        if has_cpus():
            for v in range(0, self.info['VCPUs_max']):
                if self.info['cpus'][v]:
                    xc.vcpu_setaffinity(self.domid, v, self.info['cpus'][v])
        else:
            def find_relaxed_node(node_list):
                # Pick the candidate node with the lowest vcpu load,
                # normalised by the number of cpus on the node.
                import sys
                nr_nodes = info['nr_nodes']
                if node_list is None:
                    node_list = range(0, nr_nodes)
                nodeload = [0]
                nodeload = nodeload * nr_nodes
                from xen.xend import XendDomain
                doms = XendDomain.instance().list('all')
                for dom in filter (lambda d: d.domid != self.domid, doms):
                    cpuinfo = dom.getVCPUInfo()
                    for vcpu in sxp.children(cpuinfo, 'vcpu'):
                        if sxp.child_value(vcpu, 'online') == 0: continue
                        cpumap = list(sxp.child_value(vcpu,'cpumap'))
                        for i in range(0, nr_nodes):
                            node_cpumask = info['node_to_cpu'][i]
                            for j in node_cpumask:
                                if j in cpumap:
                                    nodeload[i] += 1
                                    break
                for i in range(0, nr_nodes):
                    if len(info['node_to_cpu'][i]) > 0 and i in node_list:
                        nodeload[i] = int(nodeload[i] * 16 / len(info['node_to_cpu'][i]))
                    else:
                        # Nodes with no cpus, or outside the candidate
                        # list, must never be selected.
                        nodeload[i] = sys.maxint
                index = nodeload.index( min(nodeload) )
                return index

            info = xc.physinfo()
            if info['nr_nodes'] > 1:
                node_memory_list = info['node_to_memory']
                # Candidate nodes must have cpus and enough free memory
                # for the domain's dynamic maximum.
                needmem = self.image.getRequiredAvailableMemory(self.info['memory_dynamic_max']) / 1024
                candidate_node_list = []
                for i in range(0, info['nr_nodes']):
                    if node_memory_list[i] >= needmem and len(info['node_to_cpu'][i]) > 0:
                        candidate_node_list.append(i)
                index = find_relaxed_node(candidate_node_list)
                cpumask = info['node_to_cpu'][index]
                for v in range(0, self.info['VCPUs_max']):
                    xc.vcpu_setaffinity(self.domid, v, cpumask)
    def _initDomain(self):
        """Build the domain: run the bootloader, create the image,
        compute and allocate memory/shadow headrooms, create channels
        and devices, and move the domain to the RUNNING state.

        @raise: VmError on any build failure (bootloading is cleaned up).
        """
        log.debug('XendDomainInfo.initDomain: %s %s',
                  self.domid,
                  self.info['vcpus_params']['weight'])

        self._configureBootloader()

        try:
            self.image = image.create(self, self.info)

            # repin domain vcpus if a restricted cpus list is provided
            # this is done prior to memory allocation to aide in memory
            # distribution for NUMA systems.
            self._setCPUAffinity()

            # Use architecture- and image-specific calculations to determine
            # the various headrooms necessary, given the raw configured
            # values. maxmem, memory, and shadow are all in KiB.
            # but memory_static_max etc are all stored in bytes now.
            memory = self.image.getRequiredAvailableMemory(
                self.info['memory_dynamic_max'] / 1024)
            maxmem = self.image.getRequiredAvailableMemory(
                self.info['memory_static_max'] / 1024)
            shadow = self.image.getRequiredShadowMemory(
                self.info['shadow_memory'] * 1024,
                self.info['memory_static_max'] / 1024)

            log.debug("_initDomain:shadow_memory=0x%x, memory_static_max=0x%x, memory_static_min=0x%x.", self.info['shadow_memory'], self.info['memory_static_max'], self.info['memory_static_min'],)
            # Round shadow up to a multiple of a MiB, as shadow_mem_control
            # takes MiB and we must not round down and end up under-providing.
            shadow = ((shadow + 1023) / 1024) * 1024

            # set memory limit
            xc.domain_setmaxmem(self.domid, maxmem)

            # Reserve 1 page per MiB of RAM for separate VT-d page table.
            vtd_mem = 4 * (self.info['memory_static_max'] / 1024 / 1024)
            # Round vtd_mem up to a multiple of a MiB.
            vtd_mem = ((vtd_mem + 1023) / 1024) * 1024

            # Make sure there's enough RAM available for the domain
            balloon.free(memory + shadow + vtd_mem, self)

            # Set up the shadow memory
            shadow_cur = xc.shadow_mem_control(self.domid, shadow / 1024)
            self.info['shadow_memory'] = shadow_cur

            # machine address size
            if self.info.has_key('machine_address_size'):
                log.debug("_initDomain: setting maximum machine address size %d" % self.info['machine_address_size'])
                xc.domain_set_machine_address_size(self.domid, self.info['machine_address_size'])

            if self.info.has_key('suppress_spurious_page_faults') and self.info['suppress_spurious_page_faults']:
                log.debug("_initDomain: suppressing spurious page faults")
                xc.domain_suppress_spurious_page_faults(self.domid)

            self._createChannels()

            channel_details = self.image.createImage()

            # Record the ring pages and any image metadata reported back.
            self.store_mfn = channel_details['store_mfn']
            if 'console_mfn' in channel_details:
                self.console_mfn = channel_details['console_mfn']
            if 'notes' in channel_details:
                self.info.set_notes(channel_details['notes'])
            if 'native_protocol' in channel_details:
                self.native_protocol = channel_details['native_protocol'];

            self._introduceDomain()
            if self.info.target():
                self._setTarget(self.info.target())

            self._createDevices()

            self.image.cleanupBootloading()

            self.info['start_time'] = time.time()

            self._stateSet(DOM_STATE_RUNNING)
        except VmError, exn:
            log.exception("XendDomainInfo.initDomain: exception occurred")
            if self.image:
                self.image.cleanupBootloading()
            raise exn
        except RuntimeError, exn:
            log.exception("XendDomainInfo.initDomain: exception occurred")
            if self.image:
                self.image.cleanupBootloading()
            raise VmError(str(exn))
    def cleanupDomain(self):
        """Cleanup domain resources; release devices. Idempotent. Nothrow
        guarantee."""

        # Serialise against refreshShutdown's use of the same state.
        self.refresh_shutdown_lock.acquire()
        try:
            self.unwatchShutdown()
            self._releaseDevices()
            bootloader_tidy(self)

            if self.image:
                self.image = None

            try:
                self._removeDom()
            except:
                log.exception("Removing domain path failed.")

            self._stateSet(DOM_STATE_HALTED)
            self.domid = None # Do not push into _stateSet()!
        finally:
            self.refresh_shutdown_lock.release()
    def unwatchShutdown(self):
        """Remove the watch on the domain's control/shutdown node, if any.
        Idempotent. Nothrow guarantee. Expects to be protected by the
        refresh_shutdown_lock."""

        try:
            try:
                if self.shutdownWatch:
                    self.shutdownWatch.unwatch()
            finally:
                # Drop the reference even if unwatch() raised.
                self.shutdownWatch = None
        except:
            log.exception("Unwatching control/shutdown failed.")
2660 def waitForShutdown(self):
2661 self.state_updated.acquire()
2662 try:
2663 while self._stateGet() in (DOM_STATE_RUNNING,DOM_STATE_PAUSED):
2664 self.state_updated.wait(timeout=1.0)
2665 finally:
2666 self.state_updated.release()
    def waitForSuspend(self):
        """Wait for the guest to respond to a suspend request by
        shutting down. If the guest hasn't re-written control/shutdown
        after a certain amount of time, it's obviously not listening and
        won't suspend, so we give up. HVM guests with no PV drivers
        should already be shutdown.
        """
        state = "suspend"
        # ~60 seconds at one poll per second.
        nr_tries = 60

        self.state_updated.acquire()
        try:
            while self._stateGet() in (DOM_STATE_RUNNING,DOM_STATE_PAUSED):
                self.state_updated.wait(1.0)
                if state == "suspend":
                    if nr_tries == 0:
                        msg = ('Timeout waiting for domain %s to suspend'
                               % self.domid)
                        # Clear the request before giving up.
                        self._writeDom('control/shutdown', '')
                        raise XendError(msg)
                    # The guest clears control/shutdown when it acks.
                    state = self.readDom('control/shutdown')
                    nr_tries -= 1
        finally:
            self.state_updated.release()
2694 # TODO: recategorise - called from XendCheckpoint
    def completeRestore(self, store_mfn, console_mfn):
        """Finish restoring a domain (called from XendCheckpoint):
        record the restored ring pages, reintroduce the domain to
        xenstore, and recreate its device model and watches."""

        log.debug("XendDomainInfo.completeRestore")

        self.store_mfn = store_mfn
        self.console_mfn = console_mfn

        self._introduceDomain()
        self.image = image.create(self, self.info)
        if self.image:
            # True => this is a restore, not a fresh boot.
            self.image.createDeviceModel(True)
        self._storeDomDetails()
        self._registerWatches()
        self.refreshShutdown()

        log.debug("XendDomainInfo.completeRestore done")
2715 def _endRestore(self):
2716 self.setResume(False)
2719 # VM Destroy
    def _prepare_phantom_paths(self):
        """Collect the phantom vbd xenstore paths for this domain.

        Phantom devices must be removed after the normal devices, so
        their backend and frontend paths are gathered here before
        destruction starts.

        @return list of xenstore paths to clean up later.
        """
        # get associated devices to destroy
        # build list of phantom devices to be removed after normal devices
        plist = []
        if self.domid is not None:
            t = xstransact("%s/device/vbd" % GetDomainPath(self.domid))
            try:
                for dev in t.list():
                    backend_phantom_vbd = xstransact.Read("%s/device/vbd/%s/phantom_vbd" \
                                          % (self.dompath, dev))
                    if backend_phantom_vbd is not None:
                        frontend_phantom_vbd =  xstransact.Read("%s/frontend" \
                                          % backend_phantom_vbd)
                        plist.append(backend_phantom_vbd)
                        plist.append(frontend_phantom_vbd)
            finally:
                # Read-only scan: abort rather than commit.
                t.abort()
        return plist
2741 def _cleanup_phantom_devs(self, plist):
2742 # remove phantom devices
2743 if not plist == []:
2744 time.sleep(2)
2745 for paths in plist:
2746 if paths.find('backend') != -1:
2747 # Modify online status /before/ updating state (latter is watched by
2748 # drivers, so this ordering avoids a race).
2749 xstransact.Write(paths, 'online', "0")
2750 xstransact.Write(paths, 'state', str(xenbusState['Closing']))
2751 # force
2752 xstransact.Remove(paths)
    def destroy(self):
        """Cleanup VM and destroy domain. Nothrow guarantee."""

        # Nothing to do for a domain that was never (or is no longer)
        # instantiated in the hypervisor.
        if self.domid is None:
            return

        from xen.xend import XendDomain
        log.debug("XendDomainInfo.destroy: domid=%s", str(self.domid))

        # Gather phantom vbd paths first; they are removed last.
        paths = self._prepare_phantom_paths()

        if self.dompath is not None:
            try:
                xc.domain_destroy_hook(self.domid)
                # Pause before teardown; do_FLR resets any passed-through
                # PCI devices before the domain itself is destroyed.
                xc.domain_pause(self.domid)
                do_FLR(self.domid)
                xc.domain_destroy(self.domid)
                for state in DOM_STATES_OLD:
                    self.info[state] = 0
                self._stateSet(DOM_STATE_HALTED)
            except:
                log.exception("XendDomainInfo.destroy: domain destruction failed.")

            XendDomain.instance().remove_domain(self)
            self.cleanupDomain()

        self._cleanup_phantom_devs(paths)
        self._cleanupVm()

        # Transient domains disappear entirely once destroyed.
        if "transient" in self.info["other_config"] \
           and bool(self.info["other_config"]["transient"]):
            XendDomain.instance().domain_delete_by_dominfo(self)
    def resetDomain(self):
        """Destroy this domain and recreate it from the same
        configuration, preserving its xend/* VM bookkeeping entries."""
        log.debug("XendDomainInfo.resetDomain(%s)", str(self.domid))

        old_domid = self.domid
        # Snapshot the xend/* entries before the VM path is removed.
        prev_vm_xend = self._listRecursiveVm('xend')
        new_dom_info = self.info
        try:
            self._unwatchVm()
            self.destroy()

            new_dom = None
            try:
                from xen.xend import XendDomain
                new_dom_info['domid'] = None
                new_dom = XendDomain.instance().domain_create_from_dict(
                    new_dom_info)
                # Restore the preserved xend/* entries on the new VM.
                for x in prev_vm_xend[0][1]:
                    new_dom._writeVm('xend/%s' % x[0], x[1])
                new_dom.waitForDevices()
                new_dom.unpause()
            except:
                if new_dom:
                    new_dom.destroy()
                raise
        except:
            log.exception('Failed to reset domain %s.', str(old_domid))
    def resumeDomain(self):
        """Resume a suspended domain (e.g. after live checkpoint, or after
        a later error during save or migrate); checks that the domain
        is currently suspended first so safe to call from anywhere."""
        log.debug("XendDomainInfo.resumeDomain(%s)", str(self.domid))

        xeninfo = dom_get(self.domid)
        if xeninfo is None:
            return
        if not xeninfo['shutdown']:
            return
        reason = shutdown_reason(xeninfo['shutdown_reason'])
        if reason != 'suspend':
            return

        try:
            # could also fetch a parsed note from xenstore
            # fast == 1 means the guest supports SUSPEND_CANCEL and can
            # resume in place without rebuilding its devices.
            fast = self.info.get_notes().get('SUSPEND_CANCEL') and 1 or 0
            if not fast:
                self._releaseDevices()
                self.testDeviceComplete()
                self.testvifsComplete()
                log.debug("XendDomainInfo.resumeDomain: devices released")

                self._resetChannels()

                self._removeDom('control/shutdown')
                self._removeDom('device-misc/vif/nextDeviceID')

                self._createChannels()
                self._introduceDomain()
                self._storeDomDetails()

                self._createDevices()
                log.debug("XendDomainInfo.resumeDomain: devices created")

            xc.domain_resume(self.domid, fast)
            ResumeDomain(self.domid)
        except:
            log.exception("XendDomainInfo.resume: xc.domain_resume failed on domain %s." % (str(self.domid)))
        # Restart the device model even if the resume above failed.
        self.image.resumeDeviceModel()
        log.debug("XendDomainInfo.resumeDomain: completed")
2862 # Channels for xenstore and console
2865 def _createChannels(self):
2866 """Create the channels to the domain.
2867 """
2868 self.store_port = self._createChannel()
2869 self.console_port = self._createChannel()
    def _createChannel(self):
        """Create an event channel to the domain.

        @return the allocated local port, or None when the domain id is
            unset.
        """
        try:
            if self.domid != None:
                return xc.evtchn_alloc_unbound(domid = self.domid,
                                               remote_dom = 0)
        except:
            log.exception("Exception in alloc_unbound(%s)", str(self.domid))
            raise
2883 def _resetChannels(self):
2884 """Reset all event channels in the domain.
2885 """
2886 try:
2887 if self.domid != None:
2888 return xc.evtchn_reset(dom = self.domid)
2889 except:
2890 log.exception("Exception in evtcnh_reset(%s)", str(self.domid))
2891 raise
2895 # Bootloader configuration
2898 def _configureBootloader(self):
2899 """Run the bootloader if we're configured to do so."""
2901 blexec = self.info['PV_bootloader']
2902 bootloader_args = self.info['PV_bootloader_args']
2903 kernel = self.info['PV_kernel']
2904 ramdisk = self.info['PV_ramdisk']
2905 args = self.info['PV_args']
2906 boot = self.info['HVM_boot_policy']
2908 if boot:
2909 # HVM booting.
2910 pass
2911 elif not blexec and kernel:
2912 # Boot from dom0. Nothing left to do -- the kernel and ramdisk
2913 # will be picked up by image.py.
2914 pass
2915 else:
2916 # Boot using bootloader
2917 if not blexec or blexec == 'pygrub':
2918 blexec = osdep.pygrub_path
2920 blcfg = None
2921 disks = [x for x in self.info['vbd_refs']
2922 if self.info['devices'][x][1]['bootable']]
2924 if not disks:
2925 msg = "Had a bootloader specified, but no disks are bootable"
2926 log.error(msg)
2927 raise VmError(msg)
2929 devinfo = self.info['devices'][disks[0]]
2930 devtype = devinfo[0]
2931 disk = devinfo[1]['uname']
2933 fn = blkdev_uname_to_file(disk)
2934 taptype = blkdev_uname_to_taptype(disk)
2935 mounted = devtype == 'tap' and taptype != 'aio' and taptype != 'sync' and not os.stat(fn).st_rdev
2936 if mounted:
2937 # This is a file, not a device. pygrub can cope with a
2938 # file if it's raw, but if it's QCOW or other such formats
2939 # used through blktap, then we need to mount it first.
2941 log.info("Mounting %s on %s." %
2942 (fn, BOOTLOADER_LOOPBACK_DEVICE))
2944 vbd = {
2945 'mode': 'RO',
2946 'device': BOOTLOADER_LOOPBACK_DEVICE,
2949 from xen.xend import XendDomain
2950 dom0 = XendDomain.instance().privilegedDomain()
2951 dom0._waitForDeviceUUID(dom0.create_vbd(vbd, disk))
2952 fn = BOOTLOADER_LOOPBACK_DEVICE
2954 try:
2955 blcfg = bootloader(blexec, fn, self, False,
2956 bootloader_args, kernel, ramdisk, args)
2957 finally:
2958 if mounted:
2959 log.info("Unmounting %s from %s." %
2960 (fn, BOOTLOADER_LOOPBACK_DEVICE))
2962 dom0.destroyDevice('tap', BOOTLOADER_LOOPBACK_DEVICE)
2964 if blcfg is None:
2965 msg = "Had a bootloader specified, but can't find disk"
2966 log.error(msg)
2967 raise VmError(msg)
2969 self.info.update_with_image_sxp(blcfg, True)
2973 # VM Functions
    def _readVMDetails(self, params):
        """Read the specified parameters from the store.

        @return gathered values, or [] if the store data is corrupt.
        """
        try:
            return self._gatherVm(*params)
        except ValueError:
            # One of the int/float entries in params has a corresponding store
            # entry that is invalid. We recover, because older versions of
            # Xend may have put the entry there (memory/target, for example),
            # but this is in general a bad situation to have reached.
            log.exception(
                "Store corrupted at %s! Domain %d's configuration may be "
                "affected.", self.vmpath, self.domid)
            return []
2991 def _cleanupVm(self):
2992 """Cleanup VM resources. Idempotent. Nothrow guarantee."""
2994 self._unwatchVm()
2996 try:
2997 self._removeVm()
2998 except:
2999 log.exception("Removing VM path failed.")
    def checkLiveMigrateMemory(self):
        """ Make sure there's enough memory to migrate this domain """
        # Note: all arithmetic below relies on Python 2 integer division.
        overhead_kb = 0
        if arch.type == "x86":
            # 1MB per vcpu plus 4Kib/Mib of RAM.  This is higher than
            # the minimum that Xen would allocate if no value were given.
            # (memory_static_max is divided down twice by 1024, so it is
            # presumably held in bytes here -- TODO confirm.)
            overhead_kb = self.info['VCPUs_max'] * 1024 + \
                          (self.info['memory_static_max'] / 1024 / 1024) * 4
            # Round up to a whole number of MiB (expressed in KiB).
            overhead_kb = ((overhead_kb + 1023) / 1024) * 1024
            # The domain might already have some shadow memory
            overhead_kb -= xc.shadow_mem_control(self.domid) * 1024
        if overhead_kb > 0:
            # Balloon out enough host memory to cover the shortfall.
            balloon.free(overhead_kb, self)
3016 def _unwatchVm(self):
3017 """Remove the watch on the VM path, if any. Idempotent. Nothrow
3018 guarantee."""
3019 try:
3020 try:
3021 if self.vmWatch:
3022 self.vmWatch.unwatch()
3023 finally:
3024 self.vmWatch = None
3025 except:
3026 log.exception("Unwatching VM path failed.")
3028 def testDeviceComplete(self):
3029 """ For Block IO migration safety we must ensure that
3030 the device has shutdown correctly, i.e. all blocks are
3031 flushed to disk
3032 """
3033 start = time.time()
3034 while True:
3035 test = 0
3036 diff = time.time() - start
3037 vbds = self.getDeviceController('vbd').deviceIDs()
3038 taps = self.getDeviceController('tap').deviceIDs()
3039 for i in vbds + taps:
3040 test = 1
3041 log.info("Dev %s still active, looping...", i)
3042 time.sleep(0.1)
3044 if test == 0:
3045 break
3046 if diff >= MIGRATE_TIMEOUT:
3047 log.info("Dev still active but hit max loop timeout")
3048 break
3050 def testvifsComplete(self):
3051 """ In case vifs are released and then created for the same
3052 domain, we need to wait the device shut down.
3053 """
3054 start = time.time()
3055 while True:
3056 test = 0
3057 diff = time.time() - start
3058 for i in self.getDeviceController('vif').deviceIDs():
3059 test = 1
3060 log.info("Dev %s still active, looping...", i)
3061 time.sleep(0.1)
3063 if test == 0:
3064 break
3065 if diff >= MIGRATE_TIMEOUT:
3066 log.info("Dev still active but hit max loop timeout")
3067 break
    def _storeVmDetails(self):
        """Write this VM's legacy configuration entries under the
        xenstore /vm path, then restrict their permissions."""
        to_store = {}

        # Legacy xenstore keys, translated back from their Xen-API
        # names where a mapping exists.
        for key in XendConfig.LEGACY_XENSTORE_VM_PARAMS:
            info_key = XendConfig.LEGACY_CFG_TO_XENAPI_CFG.get(key, key)
            if self._infoIsSet(info_key):
                to_store[key] = str(self.info[info_key])

        if self._infoIsSet("static_memory_min"):
            to_store["memory"] = str(self.info["static_memory_min"])
        if self._infoIsSet("static_memory_max"):
            to_store["maxmem"] = str(self.info["static_memory_max"])

        # Serialised image configuration, if any.
        image_sxpr = self.info.image_sxpr()
        if image_sxpr:
            to_store['image'] = sxp.to_string(image_sxpr)

        # Initialise the restart counter on first store only.
        if not self._readVm('xend/restart_count'):
            to_store['xend/restart_count'] = str(0)

        # scrub_password keeps secrets out of the debug log.
        log.debug("Storing VM details: %s", scrub_password(to_store))

        self._writeVm(to_store)
        self._setVmPermissions()
3094 def _setVmPermissions(self):
3095 """Allow the guest domain to read its UUID. We don't allow it to
3096 access any other entry, for security."""
3097 xstransact.SetPermissions('%s/uuid' % self.vmpath,
3098 { 'dom' : self.domid,
3099 'read' : True,
3100 'write' : False })
3103 # Utility functions
3106 def __getattr__(self, name):
3107 if name == "state":
3108 log.warn("Somebody tried to read XendDomainInfo.state... should us _stateGet()!!!")
3109 log.warn("".join(traceback.format_stack()))
3110 return self._stateGet()
3111 else:
3112 raise AttributeError(name)
3114 def __setattr__(self, name, value):
3115 if name == "state":
3116 log.warn("Somebody tried to set XendDomainInfo.state... should us _stateGet()!!!")
3117 log.warn("".join(traceback.format_stack()))
3118 self._stateSet(value)
3119 else:
3120 self.__dict__[name] = value
3122 def _stateSet(self, state):
3123 self.state_updated.acquire()
3124 try:
3125 # TODO Not sure this is correct...
3126 # _stateGet is live now. Why not fire event
3127 # even when it hasn't changed?
3128 if self._stateGet() != state:
3129 self.state_updated.notifyAll()
3130 import XendAPI
3131 XendAPI.event_dispatch('mod', 'VM', self.info['uuid'],
3132 'power_state')
3133 finally:
3134 self.state_updated.release()
3136 def _stateGet(self):
3137 # Lets try and reconsitute the state from xc
3138 # first lets try and get the domain info
3139 # from xc - this will tell us if the domain
3140 # exists
3141 info = dom_get(self.getDomid())
3142 if info is None or info['shutdown']:
3143 # We are either HALTED or SUSPENDED
3144 # check saved image exists
3145 from xen.xend import XendDomain
3146 managed_config_path = \
3147 XendDomain.instance()._managed_check_point_path( \
3148 self.get_uuid())
3149 if os.path.exists(managed_config_path):
3150 return XEN_API_VM_POWER_STATE_SUSPENDED
3151 else:
3152 return XEN_API_VM_POWER_STATE_HALTED
3153 elif info['crashed']:
3154 # Crashed
3155 return XEN_API_VM_POWER_STATE_CRASHED
3156 else:
3157 # We are either RUNNING or PAUSED
3158 if info['paused']:
3159 return XEN_API_VM_POWER_STATE_PAUSED
3160 else:
3161 return XEN_API_VM_POWER_STATE_RUNNING
3163 def _infoIsSet(self, name):
3164 return name in self.info and self.info[name] is not None
3166 def _checkName(self, name):
3167 """Check if a vm name is valid. Valid names contain alphabetic
3168 characters, digits, or characters in '_-.:/+'.
3169 The same name cannot be used for more than one vm at the same time.
3171 @param name: name
3172 @raise: VmError if invalid
3173 """
3174 from xen.xend import XendDomain
3176 if name is None or name == '':
3177 raise VmError('Missing VM Name')
3179 if not re.search(r'^[A-Za-z0-9_\-\.\:\/\+]+$', name):
3180 raise VmError('Invalid VM Name')
3182 dom = XendDomain.instance().domain_lookup_nr(name)
3183 if dom and dom.info['uuid'] != self.info['uuid']:
3184 raise VmError("VM name '%s' already exists%s" %
3185 (name,
3186 dom.domid is not None and
3187 (" as domain %s" % str(dom.domid)) or ""))
    def update(self, info = None, refresh = True, transaction = None):
        """Update with info from xc.domain_getinfo().

        If no info dict is supplied, one is fetched live via dom_get();
        when that fails the update is silently skipped.
        """
        log.trace("XendDomainInfo.update(%s) on domain %s", info,
                  str(self.domid))

        if not info:
            info = dom_get(self.domid)
            if not info:
                return

        if info["maxmem_kb"] < 0:
            # A negative maxmem means "unlimited"; substitute the
            # host's total memory.
            info["maxmem_kb"] = XendNode.instance() \
                                .physinfo_dict()['total_memory'] * 1024

        # make sure state is reset for info
        # TODO: we should eventually get rid of old_dom_states

        self.info.update_config(info)
        self._update_consoles(transaction)

        if refresh:
            self.refreshShutdown(info)

        log.trace("XendDomainInfo.update done on domain %s: %s",
                  str(self.domid), self.info)
3217 def sxpr(self, ignore_store = False, legacy_only = True):
3218 result = self.info.to_sxp(domain = self,
3219 ignore_devices = ignore_store,
3220 legacy_only = legacy_only)
3222 return result
3224 # Xen API
3225 # ----------------------------------------------------------------
3227 def get_uuid(self):
3228 dom_uuid = self.info.get('uuid')
3229 if not dom_uuid: # if it doesn't exist, make one up
3230 dom_uuid = uuid.createString()
3231 self.info['uuid'] = dom_uuid
3232 return dom_uuid
3234 def get_memory_static_max(self):
3235 return self.info.get('memory_static_max', 0)
3236 def get_memory_static_min(self):
3237 return self.info.get('memory_static_min', 0)
3238 def get_memory_dynamic_max(self):
3239 return self.info.get('memory_dynamic_max', 0)
3240 def get_memory_dynamic_min(self):
3241 return self.info.get('memory_dynamic_min', 0)
3243 # only update memory-related config values if they maintain sanity
3244 def _safe_set_memory(self, key, newval):
3245 oldval = self.info.get(key, 0)
3246 try:
3247 self.info[key] = newval
3248 self.info._memory_sanity_check()
3249 except Exception, ex:
3250 self.info[key] = oldval
3251 raise
3253 def set_memory_static_max(self, val):
3254 self._safe_set_memory('memory_static_max', val)
3255 def set_memory_static_min(self, val):
3256 self._safe_set_memory('memory_static_min', val)
3257 def set_memory_dynamic_max(self, val):
3258 self._safe_set_memory('memory_dynamic_max', val)
3259 def set_memory_dynamic_min(self, val):
3260 self._safe_set_memory('memory_dynamic_min', val)
3262 def get_vcpus_params(self):
3263 if self.getDomid() is None:
3264 return self.info['vcpus_params']
3266 retval = xc.sched_credit_domain_get(self.getDomid())
3267 return retval
3268 def get_power_state(self):
3269 return XEN_API_VM_POWER_STATE[self._stateGet()]
3270 def get_platform(self):
3271 return self.info.get('platform', {})
3272 def get_pci_bus(self):
3273 return self.info.get('pci_bus', '')
3274 def get_tools_version(self):
3275 return self.info.get('tools_version', {})
3276 def get_metrics(self):
3277 return self.metrics.get_uuid();
3280 def get_security_label(self, xspol=None):
3281 import xen.util.xsm.xsm as security
3282 label = security.get_security_label(self, xspol)
3283 return label
    def set_security_label(self, seclab, old_seclab, xspol=None,
                           xspol_old=None):
        """
        Set the security label of a domain from its old to
        a new value.
        @param seclab  New security label formatted in the form
                       <policy type>:<policy name>:<vm label>
        @param old_seclab  The current security label that the
                           VM must have.
        @param xspol   An optional policy under which this
                       update should be done. If not given,
                       then the current active policy is used.
        @param xspol_old The old policy; only to be passed during
                         the updating of a policy
        @return Returns return code, a string with errors from
                the hypervisor's operation, old label of the
                domain
        """
        rc = 0
        errors = ""
        old_label = ""
        new_ssidref = 0
        domid = self.getDomid()
        res_labels = None
        is_policy_update = (xspol_old != None)

        from xen.xend.XendXSPolicyAdmin import XSPolicyAdminInstance

        state = self._stateGet()
        # Relabel only HALTED or RUNNING or PAUSED domains
        if domid != 0 and \
           state not in \
           [ DOM_STATE_HALTED, DOM_STATE_RUNNING, DOM_STATE_PAUSED, \
             DOM_STATE_SUSPENDED ]:
            log.warn("Relabeling domain not possible in state '%s'" %
                     DOM_STATES[state])
            return (-xsconstants.XSERR_VM_WRONG_STATE, "", "", 0)

        # Remove security label. Works only for halted or suspended domains
        if not seclab or seclab == "":
            if state not in [ DOM_STATE_HALTED, DOM_STATE_SUSPENDED ]:
                return (-xsconstants.XSERR_VM_WRONG_STATE, "", "", 0)

            if self.info.has_key('security_label'):
                old_label = self.info['security_label']
                # Check label against expected one.
                if old_label != old_seclab:
                    return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)
                del self.info['security_label']
                xen.xend.XendDomain.instance().managed_config_save(self)
            return (xsconstants.XSERR_SUCCESS, "", "", 0)

        # New label must be <policy type>:<policy name>:<vm label>.
        tmp = seclab.split(":")
        if len(tmp) != 3:
            return (-xsconstants.XSERR_BAD_LABEL_FORMAT, "", "", 0)
        typ, policy, label = tmp

        poladmin = XSPolicyAdminInstance()
        if not xspol:
            xspol = poladmin.get_policy_by_name(policy)

        try:
            # Serialise against policy changes for the whole relabel.
            xen.xend.XendDomain.instance().policy_lock.acquire_writer()

            if state in [ DOM_STATE_RUNNING, DOM_STATE_PAUSED ]:
                #if domain is running or paused try to relabel in hypervisor
                if not xspol:
                    return (-xsconstants.XSERR_POLICY_NOT_LOADED, "", "", 0)

                if typ != xspol.get_type_name() or \
                   policy != xspol.get_name():
                    return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)

                if typ == xsconstants.ACM_POLICY_ID:
                    new_ssidref = xspol.vmlabel_to_ssidref(label)
                    if new_ssidref == xsconstants.INVALID_SSIDREF:
                        return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)

                    # Check that all used resources are accessible under the
                    # new label
                    if not is_policy_update and \
                       not security.resources_compatible_with_vmlabel(xspol,
                              self, label):
                        return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)

                    #Check label against expected one. Can only do this
                    # if the policy hasn't changed underneath in the meantime
                    if xspol_old == None:
                        old_label = self.get_security_label()
                        if old_label != old_seclab:
                            log.info("old_label != old_seclab: %s != %s" %
                                     (old_label, old_seclab))
                            return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)

                    # relabel domain in the hypervisor
                    rc, errors = security.relabel_domains([[domid, new_ssidref]])
                    log.info("rc from relabeling in HV: %d" % rc)
                else:
                    return (-xsconstants.XSERR_POLICY_TYPE_UNSUPPORTED, "", "", 0)

            if rc == 0:
                # HALTED, RUNNING or PAUSED
                if domid == 0:
                    # Domain 0's label is persisted as the boot label.
                    if xspol:
                        self.info['security_label'] = seclab
                        ssidref = poladmin.set_domain0_bootlabel(xspol, label)
                    else:
                        return (-xsconstants.XSERR_POLICY_NOT_LOADED, "", "", 0)
                else:
                    if self.info.has_key('security_label'):
                        old_label = self.info['security_label']
                        # Check label against expected one, unless wildcard
                        if old_label != old_seclab:
                            return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)

                    self.info['security_label'] = seclab

                    try:
                        # Best-effort save of the managed configuration.
                        xen.xend.XendDomain.instance().managed_config_save(self)
                    except:
                        pass
            return (rc, errors, old_label, new_ssidref)
        finally:
            xen.xend.XendDomain.instance().policy_lock.release()
3410 def get_on_shutdown(self):
3411 after_shutdown = self.info.get('actions_after_shutdown')
3412 if not after_shutdown or after_shutdown not in XEN_API_ON_NORMAL_EXIT:
3413 return XEN_API_ON_NORMAL_EXIT[-1]
3414 return after_shutdown
3416 def get_on_reboot(self):
3417 after_reboot = self.info.get('actions_after_reboot')
3418 if not after_reboot or after_reboot not in XEN_API_ON_NORMAL_EXIT:
3419 return XEN_API_ON_NORMAL_EXIT[-1]
3420 return after_reboot
3422 def get_on_suspend(self):
3423 # TODO: not supported
3424 after_suspend = self.info.get('actions_after_suspend')
3425 if not after_suspend or after_suspend not in XEN_API_ON_NORMAL_EXIT:
3426 return XEN_API_ON_NORMAL_EXIT[-1]
3427 return after_suspend
3429 def get_on_crash(self):
3430 after_crash = self.info.get('actions_after_crash')
3431 if not after_crash or after_crash not in \
3432 XEN_API_ON_CRASH_BEHAVIOUR + restart_modes:
3433 return XEN_API_ON_CRASH_BEHAVIOUR[0]
3434 return XEN_API_ON_CRASH_BEHAVIOUR_FILTER[after_crash]
3436 def get_dev_config_by_uuid(self, dev_class, dev_uuid):
3437 """ Get's a device configuration either from XendConfig or
3438 from the DevController.
3440 @param dev_class: device class, either, 'vbd' or 'vif'
3441 @param dev_uuid: device UUID
3443 @rtype: dictionary
3444 """
3445 dev_type, dev_config = self.info['devices'].get(dev_uuid, (None, None))
3447 # shortcut if the domain isn't started because
3448 # the devcontrollers will have no better information
3449 # than XendConfig.
3450 if self._stateGet() in (XEN_API_VM_POWER_STATE_HALTED,
3451 XEN_API_VM_POWER_STATE_SUSPENDED):
3452 if dev_config:
3453 return copy.deepcopy(dev_config)
3454 return None
3456 # instead of using dev_class, we use the dev_type
3457 # that is from XendConfig.
3458 controller = self.getDeviceController(dev_type)
3459 if not controller:
3460 return None
3462 all_configs = controller.getAllDeviceConfigurations()
3463 if not all_configs:
3464 return None
3466 updated_dev_config = copy.deepcopy(dev_config)
3467 for _devid, _devcfg in all_configs.items():
3468 if _devcfg.get('uuid') == dev_uuid:
3469 updated_dev_config.update(_devcfg)
3470 updated_dev_config['id'] = _devid
3471 return updated_dev_config
3473 return updated_dev_config
3475 def get_dev_xenapi_config(self, dev_class, dev_uuid):
3476 config = self.get_dev_config_by_uuid(dev_class, dev_uuid)
3477 if not config:
3478 return {}
3480 config['VM'] = self.get_uuid()
3482 if dev_class == 'vif':
3483 if not config.has_key('name'):
3484 config['name'] = config.get('vifname', '')
3485 if not config.has_key('MAC'):
3486 config['MAC'] = config.get('mac', '')
3487 if not config.has_key('type'):
3488 config['type'] = 'paravirtualised'
3489 if not config.has_key('device'):
3490 devid = config.get('id')
3491 if devid != None:
3492 config['device'] = 'eth%s' % devid
3493 else:
3494 config['device'] = ''
3496 if not config.has_key('network'):
3497 try:
3498 bridge = config.get('bridge', None)
3499 if bridge is None:
3500 from xen.util import Brctl
3501 if_to_br = dict([(i,b)
3502 for (b,ifs) in Brctl.get_state().items()
3503 for i in ifs])
3504 vifname = "vif%s.%s" % (self.getDomid(),
3505 config.get('id'))
3506 bridge = if_to_br.get(vifname, None)
3507 config['network'] = \
3508 XendNode.instance().bridge_to_network(
3509 config.get('bridge')).get_uuid()
3510 except Exception:
3511 log.exception('bridge_to_network')
3512 # Ignore this for now -- it may happen if the device
3513 # has been specified using the legacy methods, but at
3514 # some point we're going to have to figure out how to
3515 # handle that properly.
3517 config['MTU'] = 1500 # TODO
3519 if self._stateGet() not in (XEN_API_VM_POWER_STATE_HALTED,):
3520 xennode = XendNode.instance()
3521 rx_bps, tx_bps = xennode.get_vif_util(self.domid, devid)
3522 config['io_read_kbs'] = rx_bps/1024
3523 config['io_write_kbs'] = tx_bps/1024
3524 rx, tx = xennode.get_vif_stat(self.domid, devid)
3525 config['io_total_read_kbs'] = rx/1024
3526 config['io_total_write_kbs'] = tx/1024
3527 else:
3528 config['io_read_kbs'] = 0.0
3529 config['io_write_kbs'] = 0.0
3530 config['io_total_read_kbs'] = 0.0
3531 config['io_total_write_kbs'] = 0.0
3533 config['security_label'] = config.get('security_label', '')
3535 if dev_class == 'vbd':
3537 if self._stateGet() not in (XEN_API_VM_POWER_STATE_HALTED,):
3538 controller = self.getDeviceController(dev_class)
3539 devid, _1, _2 = controller.getDeviceDetails(config)
3540 xennode = XendNode.instance()
3541 rd_blkps, wr_blkps = xennode.get_vbd_util(self.domid, devid)
3542 config['io_read_kbs'] = rd_blkps
3543 config['io_write_kbs'] = wr_blkps
3544 else:
3545 config['io_read_kbs'] = 0.0
3546 config['io_write_kbs'] = 0.0
3548 config['VDI'] = config.get('VDI', '')
3549 config['device'] = config.get('dev', '')
3550 if ':' in config['device']:
3551 vbd_name, vbd_type = config['device'].split(':', 1)
3552 config['device'] = vbd_name
3553 if vbd_type == 'cdrom':
3554 config['type'] = XEN_API_VBD_TYPE[0]
3555 else:
3556 config['type'] = XEN_API_VBD_TYPE[1]
3558 config['driver'] = 'paravirtualised' # TODO
3559 config['image'] = config.get('uname', '')
3561 if config.get('mode', 'r') == 'r':
3562 config['mode'] = 'RO'
3563 else:
3564 config['mode'] = 'RW'
3566 if dev_class == 'vtpm':
3567 if not config.has_key('type'):
3568 config['type'] = 'paravirtualised' # TODO
3569 if not config.has_key('backend'):
3570 config['backend'] = "00000000-0000-0000-0000-000000000000"
3572 return config
3574 def get_dev_property(self, dev_class, dev_uuid, field):
3575 config = self.get_dev_xenapi_config(dev_class, dev_uuid)
3576 try:
3577 return config[field]
3578 except KeyError:
3579 raise XendError('Invalid property for device: %s' % field)
3581 def set_dev_property(self, dev_class, dev_uuid, field, value):
3582 self.info['devices'][dev_uuid][1][field] = value
3584 def get_vcpus_util(self):
3585 vcpu_util = {}
3586 xennode = XendNode.instance()
3587 if 'VCPUs_max' in self.info and self.domid != None:
3588 for i in range(0, self.info['VCPUs_max']):
3589 util = xennode.get_vcpu_util(self.domid, i)
3590 vcpu_util[str(i)] = util
3592 return vcpu_util
3594 def get_consoles(self):
3595 return self.info.get('console_refs', [])
3597 def get_vifs(self):
3598 return self.info.get('vif_refs', [])
3600 def get_vbds(self):
3601 return self.info.get('vbd_refs', [])
3603 def get_vtpms(self):
3604 return self.info.get('vtpm_refs', [])
3606 def get_dpcis(self):
3607 return XendDPCI.get_by_VM(self.info.get('uuid'))
3609 def get_dscsis(self):
3610 return XendDSCSI.get_by_VM(self.info.get('uuid'))
    def create_vbd(self, xenapi_vbd, vdi_image_path):
        """Create a VBD using a VDI from XendStorageRepository.

        @param xenapi_vbd: vbd struct from the Xen API
        @param vdi_image_path: VDI UUID
        @rtype: string
        @return: uuid of the device
        """
        xenapi_vbd['image'] = vdi_image_path
        # 'tap:...' images go through the blktap controller, everything
        # else through the plain vbd controller.
        if vdi_image_path.startswith('tap'):
            dev_uuid = self.info.device_add('tap', cfg_xenapi = xenapi_vbd)
        else:
            dev_uuid = self.info.device_add('vbd', cfg_xenapi = xenapi_vbd)

        if not dev_uuid:
            raise XendError('Failed to create device')

        if self._stateGet() in (XEN_API_VM_POWER_STATE_RUNNING,
                                XEN_API_VM_POWER_STATE_PAUSED):
            _, config = self.info['devices'][dev_uuid]

            if vdi_image_path.startswith('tap'):
                dev_control = self.getDeviceController('tap')
            else:
                dev_control = self.getDeviceController('vbd')

            try:
                devid = dev_control.createDevice(config)
                dev_control.waitForDevice(devid)
                self.info.device_update(dev_uuid,
                                        cfg_xenapi = {'devid': devid})
            except Exception, exn:
                # Roll the config entry back so a failed hotplug leaves
                # no trace, then re-raise.
                log.exception(exn)
                del self.info['devices'][dev_uuid]
                self.info['vbd_refs'].remove(dev_uuid)
                raise

        return dev_uuid
    def create_phantom_vbd_with_vdi(self, xenapi_vbd, vdi_image_path):
        """Create a VBD using a VDI from XendStorageRepository.

        @param xenapi_vbd: vbd struct from the Xen API
        @param vdi_image_path: VDI UUID
        @rtype: string
        @return: uuid of the device
        """
        xenapi_vbd['image'] = vdi_image_path
        dev_uuid = self.info.phantom_device_add('tap', cfg_xenapi = xenapi_vbd)
        if not dev_uuid:
            raise XendError('Failed to create device')

        if self._stateGet() == XEN_API_VM_POWER_STATE_RUNNING:
            _, config = self.info['devices'][dev_uuid]
            config['devid'] = self.getDeviceController('tap').createDevice(config)

        # NOTE(review): despite the docstring, this returns the devid,
        # and 'config' is only bound when the domain is RUNNING -- for
        # any other state this line raises NameError.  Verify the
        # intended behaviour before relying on it for halted domains.
        return config['devid']
    def create_vif(self, xenapi_vif):
        """Create VIF device from the passed struct in Xen API format.

        @param xenapi_vif: Xen API VIF Struct.
        @rtype: string
        @return: UUID
        """
        dev_uuid = self.info.device_add('vif', cfg_xenapi = xenapi_vif)
        if not dev_uuid:
            raise XendError('Failed to create device')

        if self._stateGet() in (XEN_API_VM_POWER_STATE_RUNNING,
                                XEN_API_VM_POWER_STATE_PAUSED):

            _, config = self.info['devices'][dev_uuid]
            dev_control = self.getDeviceController('vif')

            try:
                devid = dev_control.createDevice(config)
                dev_control.waitForDevice(devid)
                self.info.device_update(dev_uuid,
                                        cfg_xenapi = {'devid': devid})
            except Exception, exn:
                # Roll the config entry back so a failed hotplug leaves
                # no trace, then re-raise.
                log.exception(exn)
                del self.info['devices'][dev_uuid]
                self.info['vif_refs'].remove(dev_uuid)
                raise

        return dev_uuid
3700 def create_vtpm(self, xenapi_vtpm):
3701 """Create a VTPM device from the passed struct in Xen API format.
3703 @return: uuid of the device
3704 @rtype: string
3705 """
3707 if self._stateGet() not in (DOM_STATE_HALTED,):
3708 raise VmError("Can only add vTPM to a halted domain.")
3709 if self.get_vtpms() != []:
3710 raise VmError('Domain already has a vTPM.')
3711 dev_uuid = self.info.device_add('vtpm', cfg_xenapi = xenapi_vtpm)
3712 if not dev_uuid:
3713 raise XendError('Failed to create device')
3715 return dev_uuid
3717 def create_console(self, xenapi_console):
3718 """ Create a console device from a Xen API struct.
3720 @return: uuid of device
3721 @rtype: string
3722 """
3723 if self._stateGet() not in (DOM_STATE_HALTED,):
3724 raise VmError("Can only add console to a halted domain.")
3726 dev_uuid = self.info.device_add('console', cfg_xenapi = xenapi_console)
3727 if not dev_uuid:
3728 raise XendError('Failed to create device')
3730 return dev_uuid
3732 def set_console_other_config(self, console_uuid, other_config):
3733 self.info.console_update(console_uuid, 'other_config', other_config)
3735 def create_dpci(self, xenapi_pci):
3736 """Create pci device from the passed struct in Xen API format.
3738 @param xenapi_pci: DPCI struct from Xen API
3739 @rtype: bool
3740 #@rtype: string
3741 @return: True if successfully created device
3742 #@return: UUID
3743 """
3745 dpci_uuid = uuid.createString()
3747 dpci_opts = []
3748 opts_dict = xenapi_pci.get('options')
3749 for k in opts_dict.keys():
3750 dpci_opts.append([k, opts_dict[k]])
3752 # Convert xenapi to sxp
3753 ppci = XendAPIStore.get(xenapi_pci.get('PPCI'), 'PPCI')
3755 target_pci_sxp = \
3756 ['pci',
3757 ['dev',
3758 ['domain', '0x%02x' % ppci.get_domain()],
3759 ['bus', '0x%02x' % ppci.get_bus()],
3760 ['slot', '0x%02x' % ppci.get_slot()],
3761 ['func', '0x%1x' % ppci.get_func()],
3762 ['vslot', '0x%02x' % xenapi_pci.get('hotplug_slot')],
3763 ['opts', dpci_opts],
3764 ['uuid', dpci_uuid]
3765 ],
3766 ['state', 'Initialising']
3769 if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:
3771 old_pci_sxp = self._getDeviceInfo_pci(0)
3773 if old_pci_sxp is None:
3774 dev_uuid = self.info.device_add('pci', cfg_sxp = target_pci_sxp)
3775 if not dev_uuid:
3776 raise XendError('Failed to create device')
3778 else:
3779 new_pci_sxp = ['pci']
3780 for existing_dev in sxp.children(old_pci_sxp, 'dev'):
3781 new_pci_sxp.append(existing_dev)
3782 new_pci_sxp.append(sxp.child0(target_pci_sxp, 'dev'))
3784 dev_uuid = sxp.child_value(old_pci_sxp, 'uuid')
3785 self.info.device_update(dev_uuid, new_pci_sxp)
3787 xen.xend.XendDomain.instance().managed_config_save(self)
3789 else:
3790 try:
3791 self.device_configure(target_pci_sxp)
3793 except Exception, exn:
3794 raise XendError('Failed to create device')
3796 return dpci_uuid
3798 def create_dscsi(self, xenapi_dscsi):
3799 """Create scsi device from the passed struct in Xen API format.
3801 @param xenapi_dscsi: DSCSI struct from Xen API
3802 @rtype: string
3803 @return: UUID
3804 """
3806 dscsi_uuid = uuid.createString()
3808 # Convert xenapi to sxp
3809 pscsi = XendAPIStore.get(xenapi_dscsi.get('PSCSI'), 'PSCSI')
3810 devid = int(xenapi_dscsi.get('virtual_HCTL').split(':')[0])
3811 target_vscsi_sxp = \
3812 ['vscsi',
3813 ['dev',
3814 ['devid', devid],
3815 ['p-devname', pscsi.get_dev_name()],
3816 ['p-dev', pscsi.get_physical_HCTL()],
3817 ['v-dev', xenapi_dscsi.get('virtual_HCTL')],
3818 ['state', xenbusState['Initialising']],
3819 ['uuid', dscsi_uuid]
3820 ],
3821 ['feature-host', 0]
3824 if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:
3826 cur_vscsi_sxp = self._getDeviceInfo_vscsi(devid)
3828 if cur_vscsi_sxp is None:
3829 dev_uuid = self.info.device_add('vscsi', cfg_sxp = target_vscsi_sxp)
3830 if not dev_uuid:
3831 raise XendError('Failed to create device')
3833 else:
3834 new_vscsi_sxp = ['vscsi', ['feature-host', 0]]
3835 for existing_dev in sxp.children(cur_vscsi_sxp, 'dev'):
3836 new_vscsi_sxp.append(existing_dev)
3837 new_vscsi_sxp.append(sxp.child0(target_vscsi_sxp, 'dev'))
3839 dev_uuid = sxp.child_value(cur_vscsi_sxp, 'uuid')
3840 self.info.device_update(dev_uuid, new_vscsi_sxp)
3842 xen.xend.XendDomain.instance().managed_config_save(self)
3844 else:
3845 try:
3846 self.device_configure(target_vscsi_sxp)
3848 except Exception, exn:
3849 raise XendError('Failed to create device')
3851 return dscsi_uuid
    def destroy_device_by_uuid(self, dev_type, dev_uuid):
        """Remove the device with the given UUID from this domain.

        For a running or paused domain the backend device is torn down
        first; in every case the device is dropped from the stored
        configuration.

        @raise XendError: if the device is unknown, or its devid cannot
            be determined while the domain is active.
        """
        if dev_uuid not in self.info['devices']:
            raise XendError('Device does not exist')

        try:
            if self._stateGet() in (XEN_API_VM_POWER_STATE_RUNNING,
                                    XEN_API_VM_POWER_STATE_PAUSED):
                _, config = self.info['devices'][dev_uuid]
                devid = config.get('devid')
                if devid != None:
                    self.getDeviceController(dev_type).destroyDevice(devid, force = False)
                else:
                    raise XendError('Unable to get devid for device: %s:%s' %
                                    (dev_type, dev_uuid))
        finally:
            # Drop the device from the stored configuration even when
            # the live teardown failed, so config and reality cannot
            # diverge further.
            del self.info['devices'][dev_uuid]
            self.info['%s_refs' % dev_type].remove(dev_uuid)
3872 def destroy_vbd(self, dev_uuid):
3873 self.destroy_device_by_uuid('vbd', dev_uuid)
3875 def destroy_vif(self, dev_uuid):
3876 self.destroy_device_by_uuid('vif', dev_uuid)
3878 def destroy_vtpm(self, dev_uuid):
3879 self.destroy_device_by_uuid('vtpm', dev_uuid)
    def destroy_dpci(self, dev_uuid):
        """Detach a passed-through PCI function from this domain.

        Rebuilds the stored 'pci' device without the target function;
        for a running domain the detach goes through device_configure()
        with state 'Closing'.
        """
        dpci = XendAPIStore.get(dev_uuid, 'DPCI')
        ppci = XendAPIStore.get(dpci.get_PPCI(), 'PPCI')

        old_pci_sxp = self._getDeviceInfo_pci(0)
        dev_uuid = sxp.child_value(old_pci_sxp, 'uuid')
        target_dev = None
        new_pci_sxp = ['pci']
        for dev in sxp.children(old_pci_sxp, 'dev'):
            # Match the function by its full domain:bus:slot.func name.
            domain = int(sxp.child_value(dev, 'domain'), 16)
            bus = int(sxp.child_value(dev, 'bus'), 16)
            slot = int(sxp.child_value(dev, 'slot'), 16)
            func = int(sxp.child_value(dev, 'func'), 16)
            name = "%04x:%02x:%02x.%01x" % (domain, bus, slot, func)
            if ppci.get_name() == name:
                target_dev = dev
            else:
                new_pci_sxp.append(dev)

        if target_dev is None:
            raise XendError('Failed to destroy device')

        target_pci_sxp = ['pci', target_dev, ['state', 'Closing']]

        if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:
            # Inactive domain: update the stored config, dropping the
            # 'pci' device entirely once no functions remain.
            self.info.device_update(dev_uuid, new_pci_sxp)
            if len(sxp.children(new_pci_sxp, 'dev')) == 0:
                del self.info['devices'][dev_uuid]
            xen.xend.XendDomain.instance().managed_config_save(self)
        else:
            try:
                self.device_configure(target_pci_sxp)
            except Exception, exn:
                raise XendError('Failed to destroy device')
    def destroy_dscsi(self, dev_uuid):
        """Detach a passed-through SCSI device from this domain.

        Rebuilds the stored 'vscsi' device without the target LUN; for
        a running domain the detach goes through device_configure()
        with xenbus state 'Closing'.
        """
        dscsi = XendAPIStore.get(dev_uuid, 'DSCSI')
        devid = dscsi.get_virtual_host()
        vHCTL = dscsi.get_virtual_HCTL()
        cur_vscsi_sxp = self._getDeviceInfo_vscsi(devid)
        dev_uuid = sxp.child_value(cur_vscsi_sxp, 'uuid')

        target_dev = None
        new_vscsi_sxp = ['vscsi', ['feature-host', 0]]
        for dev in sxp.children(cur_vscsi_sxp, 'dev'):
            # Match by virtual HCTL address.
            if vHCTL == sxp.child_value(dev, 'v-dev'):
                target_dev = dev
            else:
                new_vscsi_sxp.append(dev)

        if target_dev is None:
            raise XendError('Failed to destroy device')

        target_dev.append(['state', xenbusState['Closing']])
        target_vscsi_sxp = ['vscsi', target_dev, ['feature-host', 0]]

        if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:
            # Inactive domain: update the stored config, dropping the
            # 'vscsi' device entirely once no LUNs remain.
            self.info.device_update(dev_uuid, new_vscsi_sxp)
            if len(sxp.children(new_vscsi_sxp, 'dev')) == 0:
                del self.info['devices'][dev_uuid]
            xen.xend.XendDomain.instance().managed_config_save(self)
        else:
            try:
                self.device_configure(target_vscsi_sxp)
            except Exception, exn:
                raise XendError('Failed to destroy device')
3955 def destroy_xapi_instances(self):
3956 """Destroy Xen-API instances stored in XendAPIStore.
3957 """
3958 # Xen-API classes based on XendBase have their instances stored
3959 # in XendAPIStore. Cleanup these instances here, if they are supposed
3960 # to be destroyed when the parent domain is dead.
3962 # Most of the virtual devices (vif, vbd, vfb, etc) are not based on
3963 # XendBase and there's no need to remove them from XendAPIStore.
3965 from xen.xend import XendDomain
3966 if XendDomain.instance().is_valid_vm(self.info.get('uuid')):
3967 # domain still exists.
3968 return
3970 # Destroy the VMMetrics instance.
3971 if XendAPIStore.get(self.metrics.get_uuid(), self.metrics.getClass()) \
3972 is not None:
3973 self.metrics.destroy()
3975 # Destroy DPCI instances.
3976 for dpci_uuid in XendDPCI.get_by_VM(self.info.get('uuid')):
3977 XendAPIStore.deregister(dpci_uuid, "DPCI")
3979 # Destroy DSCSI instances.
3980 for dscsi_uuid in XendDSCSI.get_by_VM(self.info.get('uuid')):
3981 XendAPIStore.deregister(dscsi_uuid, "DSCSI")
3983 def has_device(self, dev_class, dev_uuid):
3984 return (dev_uuid in self.info['%s_refs' % dev_class.lower()])
3986 def __str__(self):
3987 return '<domain id=%s name=%s memory=%s state=%s>' % \
3988 (str(self.domid), self.info['name_label'],
3989 str(self.info['memory_dynamic_max']), DOM_STATES[self._stateGet()])
3991 __repr__ = __str__