debuggers.hg

view tools/python/xen/xend/XendDomainInfo.py @ 22906:700ac6445812

Now add KDB to the non-kdb tree
author Mukesh Rathor
date Thu Feb 03 15:42:41 2011 -0800 (2011-02-03)
parents d1631540bcc4
line source
1 #===========================================================================
2 # This library is free software; you can redistribute it and/or
3 # modify it under the terms of version 2.1 of the GNU Lesser General Public
4 # License as published by the Free Software Foundation.
5 #
6 # This library is distributed in the hope that it will be useful,
7 # but WITHOUT ANY WARRANTY; without even the implied warranty of
8 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
9 # Lesser General Public License for more details.
10 #
11 # You should have received a copy of the GNU Lesser General Public
12 # License along with this library; if not, write to the Free Software
13 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
14 #============================================================================
15 # Copyright (C) 2004, 2005 Mike Wray <mike.wray@hp.com>
16 # Copyright (C) 2005-2007 XenSource Ltd
17 #============================================================================
19 """Representation of a single domain.
20 Includes support for domain construction, using
21 open-ended configurations.
23 Author: Mike Wray <mike.wray@hp.com>
25 """
27 import logging
28 import time
29 import threading
30 import thread
31 import re
32 import copy
33 import os
34 import stat
35 import shutil
36 import traceback
37 from types import StringTypes
39 import xen.lowlevel.xc
40 from xen.util import asserts, auxbin, mkdir
41 from xen.util.blkif import parse_uname
42 import xen.util.xsm.xsm as security
43 from xen.util import xsconstants
44 from xen.util import mkdir
45 from xen.util.pci import serialise_pci_opts, pci_opts_list_to_sxp, \
46 append_default_pci_opts, \
47 pci_dict_to_bdf_str, pci_dict_to_xc_str, \
48 pci_convert_sxp_to_dict, pci_convert_dict_to_sxp, \
49 pci_dict_cmp, PCI_DEVFN, PCI_SLOT, PCI_FUNC, parse_hex
51 from xen.xend import balloon, sxp, uuid, image, arch
52 from xen.xend import XendOptions, XendNode, XendConfig
54 from xen.xend.XendConfig import scrub_password
55 from xen.xend.XendBootloader import bootloader, bootloader_tidy
56 from xen.xend.XendError import XendError, VmError
57 from xen.xend.XendDevices import XendDevices
58 from xen.xend.XendTask import XendTask
59 from xen.xend.xenstore.xstransact import xstransact, complete
60 from xen.xend.xenstore.xsutil import GetDomainPath, IntroduceDomain, SetTarget, ResumeDomain
61 from xen.xend.xenstore.xswatch import xswatch
62 from xen.xend.XendConstants import *
63 from xen.xend.XendAPIConstants import *
64 from xen.xend.XendCPUPool import XendCPUPool
65 from xen.xend.server.DevConstants import xenbusState
66 from xen.xend.server.BlktapController import TapdiskController
68 from xen.xend.XendVMMetrics import XendVMMetrics
70 from xen.xend import XendAPIStore
71 from xen.xend.XendPPCI import XendPPCI
72 from xen.xend.XendDPCI import XendDPCI
73 from xen.xend.XendPSCSI import XendPSCSI
74 from xen.xend.XendDSCSI import XendDSCSI, XendDSCSI_HBA
76 MIGRATE_TIMEOUT = 30.0
77 BOOTLOADER_LOOPBACK_DEVICE = '/dev/xvdp'
79 xc = xen.lowlevel.xc.xc()
80 xoptions = XendOptions.instance()
82 log = logging.getLogger("xend.XendDomainInfo")
83 #log.setLevel(logging.TRACE)
86 def create(config):
87 """Creates and start a VM using the supplied configuration.
89 @param config: A configuration object involving lists of tuples.
90 @type config: list of lists, eg ['vm', ['image', 'xen.gz']]
92 @rtype: XendDomainInfo
93 @return: An up and running XendDomainInfo instance
94 @raise VmError: Invalid configuration or failure to start.
95 """
96 from xen.xend import XendDomain
97 domconfig = XendConfig.XendConfig(sxp_obj = config)
98 othervm = XendDomain.instance().domain_lookup_nr(domconfig["name_label"])
99 if othervm is None or othervm.domid is None:
100 othervm = XendDomain.instance().domain_lookup_nr(domconfig["uuid"])
101 if othervm is not None and othervm.domid is not None:
102 raise VmError("Domain '%s' already exists with ID '%d'" % (domconfig["name_label"], othervm.domid))
103 log.debug("XendDomainInfo.create(%s)", scrub_password(config))
104 vm = XendDomainInfo(domconfig)
105 try:
106 vm.start()
107 except:
108 log.exception('Domain construction failed')
109 vm.destroy()
110 raise
112 return vm
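# Example (illustrative): a minimal SXP-style config of the shape create()
# expects; field names follow the legacy xm format and the kernel path here
# is only an assumption:
#
#   config = ['vm',
#             ['name', 'example-guest'],
#             ['memory', 256],
#             ['image', ['linux', ['kernel', '/boot/vmlinuz-2.6-xen']]]]
#   vm = create(config)   # returns a running XendDomainInfo or raises VmError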
114 def create_from_dict(config_dict):
115 """Creates and start a VM using the supplied configuration.
117 @param config_dict: An configuration dictionary.
119 @rtype: XendDomainInfo
120 @return: An up and running XendDomainInfo instance
121 @raise VmError: Invalid configuration or failure to start.
122 """
124 log.debug("XendDomainInfo.create_from_dict(%s)",
125 scrub_password(config_dict))
126 vm = XendDomainInfo(XendConfig.XendConfig(xapi = config_dict))
127 try:
128 vm.start()
129 except:
130 log.exception('Domain construction failed')
131 vm.destroy()
132 raise
133 return vm
135 def recreate(info, priv):
136 """Create the VM object for an existing domain. The domain must not
137 be dying, as the paths in the store should already have been removed,
138 and asking us to recreate them causes problems.
140 @param info: Parsed configuration
141 @type info: Dictionary
142 @param priv: Is a privileged domain (Dom 0)
143 @type priv: bool
145 @rtype: XendDomainInfo
146 @return: An up and running XendDomainInfo instance
147 @raise VmError: Invalid configuration.
148 @raise XendError: Errors with configuration.
149 """
151 log.debug("XendDomainInfo.recreate(%s)", scrub_password(info))
153 assert not info['dying']
155 xeninfo = XendConfig.XendConfig(dominfo = info)
156 xeninfo['is_control_domain'] = priv
157 xeninfo['is_a_template'] = False
158 xeninfo['auto_power_on'] = False
159 domid = xeninfo['domid']
160 uuid1 = uuid.fromString(xeninfo['uuid'])
161 needs_reinitialising = False
163 dompath = GetDomainPath(domid)
164 if not dompath:
165 raise XendError('No domain path in store for existing '
166 'domain %d' % domid)
168 log.info("Recreating domain %d, UUID %s, at %s" %
169 (domid, xeninfo['uuid'], dompath))
171 # need to verify the path and uuid if not Domain-0
172 # if the required uuid and vm aren't set, then that means
173 # we need to recreate the dom with our own values
174 #
175 # NOTE: this is probably not desirable, really we should just
176 # abort or ignore, but there may be cases where xenstore's
177 # entry disappears (eg. xenstore-rm /)
178 #
179 try:
180 vmpath = xstransact.Read(dompath, "vm")
181 if not vmpath:
182 if not priv:
183 log.warn('/local/domain/%d/vm is missing. recreate is '
184 'confused, trying our best to recover' % domid)
185 needs_reinitialising = True
186 raise XendError('reinit')
188 uuid2_str = xstransact.Read(vmpath, "uuid")
189 if not uuid2_str:
190 log.warn('%s/uuid/ is missing. recreate is confused, '
191 'trying our best to recover' % vmpath)
192 needs_reinitialising = True
193 raise XendError('reinit')
195 uuid2 = uuid.fromString(uuid2_str)
196 if uuid1 != uuid2:
197 log.warn('UUID in /vm does not match the UUID in /dom/%d. '
198 'Trying our best to recover' % domid)
199 needs_reinitialising = True
200 except XendError:
201 pass # our best shot at 'goto' in python :)
203 vm = XendDomainInfo(xeninfo, domid, dompath, augment = True, priv = priv,
204 vmpath = vmpath)
206 if needs_reinitialising:
207 vm._recreateDom()
208 vm._removeVm()
209 vm._storeVmDetails()
210 vm._storeDomDetails()
212 vm.image = image.create(vm, vm.info)
213 vm.image.recreate()
215 vm._registerWatches()
216 vm.refreshShutdown(xeninfo)
218 # register the domain in the list
219 from xen.xend import XendDomain
220 XendDomain.instance().add_domain(vm)
222 return vm
225 def restore(config):
226 """Create a domain and a VM object to do a restore.
228 @param config: Domain SXP configuration
229 @type config: list of lists. (see C{create})
231 @rtype: XendDomainInfo
232 @return: An up and running XendDomainInfo instance
233 @raise VmError: Invalid configuration or failure to start.
234 @raise XendError: Errors with configuration.
235 """
237 log.debug("XendDomainInfo.restore(%s)", scrub_password(config))
238 vm = XendDomainInfo(XendConfig.XendConfig(sxp_obj = config),
239 resume = True)
240 try:
241 vm.resume()
242 return vm
243 except:
244 vm.destroy()
245 raise
247 def createDormant(domconfig):
248 """Create a dormant/inactive XenDomainInfo without creating VM.
249 This is for creating instances of persistent domains that are not
250 yet start.
252 @param domconfig: Parsed configuration
253 @type domconfig: XendConfig object
255 @rtype: XendDomainInfo
256 @return: An inactive XendDomainInfo instance
257 @raise XendError: Errors with configuration.
258 """
260 log.debug("XendDomainInfo.createDormant(%s)", scrub_password(domconfig))
262 # domid does not make sense for non-running domains.
263 domconfig.pop('domid', None)
264 vm = XendDomainInfo(domconfig)
265 return vm
267 def domain_by_name(name):
268 """Get domain by name
270 @param name: Name of the domain
271 @type name: string
272 @return: XendDomainInfo or None
273 """
274 from xen.xend import XendDomain
275 return XendDomain.instance().domain_lookup_by_name_nr(name)
278 def shutdown_reason(code):
279 """Get a shutdown reason from a code.
281 @param code: shutdown code
282 @type code: int
283 @return: shutdown reason
284 @rtype: string
285 """
286 return DOMAIN_SHUTDOWN_REASONS.get(code, "?")
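# Example (illustrative): assuming the DOMAIN_SHUTDOWN_REASONS table in
# XendConstants maps 0 to "poweroff" and 1 to "reboot":
#
#   shutdown_reason(1)    # -> "reboot"
#   shutdown_reason(255)  # -> "?" for any unknown code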
288 def dom_get(dom):
289 """Get info from xen for an existing domain.
291 @param dom: domain id
292 @type dom: int
293 @return: info or None
294 @rtype: dictionary
295 """
296 try:
297 domlist = xc.domain_getinfo(dom, 1)
298 if domlist and dom == domlist[0]['domid']:
299 return domlist[0]
300 except Exception, err:
301 # ignore missing domain
302 log.trace("domain_getinfo(%d) failed, ignoring: %s", dom, str(err))
303 return None
305 from xen.xend.server.pciif import parse_pci_name, PciDevice,\
306 get_assigned_pci_devices, get_all_assigned_pci_devices
309 def do_FLR(domid, is_hvm):
310 dev_str_list = get_assigned_pci_devices(domid)
312 for dev_str in dev_str_list:
313 try:
314 dev = PciDevice(parse_pci_name(dev_str))
315 except Exception, e:
316 raise VmError("pci: failed to locate device and "+
317 "parse it's resources - "+str(e))
318 dev.do_FLR(is_hvm, xoptions.get_pci_dev_assign_strict_check())
320 class XendDomainInfo:
321 """An object represents a domain.
323 @TODO: try to unify dom and domid, they mean the same thing, but
324 xc refers to it as dom, and everywhere else, including
325 xenstore it is domid. The best way is to change xc's
326 python interface.
328 @ivar info: Parsed configuration
329 @type info: dictionary
330 @ivar domid: Domain ID (if VM has started)
331 @type domid: int or None
332 @ivar paused_by_admin: Is this Domain paused by command or API
333 @type paused_by_admin: bool
334 @ivar guest_bitsize: the bitsize of guest
335 @type guest_bitsize: int or None
336 @ivar alloc_mem: the memory domain allocated when booting
337 @type alloc_mem: int or None
338 @ivar vmpath: XenStore path to this VM.
339 @type vmpath: string
340 @ivar dompath: XenStore path to this Domain.
341 @type dompath: string
342 @ivar image: Reference to the VM Image.
343 @type image: xen.xend.image.ImageHandler
344 @ivar store_port: event channel to xenstored
345 @type store_port: int
346 @ivar console_port: event channel to xenconsoled
347 @type console_port: int
348 @ivar store_mfn: xenstored mfn
349 @type store_mfn: int
350 @ivar console_mfn: xenconsoled mfn
351 @type console_mfn: int
352 @ivar notes: OS image notes
353 @type notes: dictionary
354 @ivar vmWatch: reference to a watch on the xenstored vmpath
355 @type vmWatch: xen.xend.xenstore.xswatch
356 @ivar shutdownWatch: reference to watch on the xenstored domain shutdown
357 @type shutdownWatch: xen.xend.xenstore.xswatch
358 @ivar shutdownStartTime: UNIX Time when domain started shutting down.
359 @type shutdownStartTime: float or None
360 @ivar restart_in_progress: Is a domain restart thread running?
361 @type restart_in_progress: bool
362 # @ivar state: Domain state
363 # @type state: enum(DOM_STATE_HALTED, DOM_STATE_RUNNING, ...)
364 @ivar state_updated: lock for self.state
365 @type state_updated: threading.Condition
366 @ivar refresh_shutdown_lock: lock for polling shutdown state
367 @type refresh_shutdown_lock: threading.Condition
368 @ivar _deviceControllers: device controller cache for this domain
369 @type _deviceControllers: dict 'string' to DevControllers
370 """
372 def __init__(self, info, domid = None, dompath = None, augment = False,
373 priv = False, resume = False, vmpath = None):
374 """Constructor for a domain
376 @param info: parsed configuration
377 @type info: dictionary
378 @keyword domid: Set initial domain id (if any)
379 @type domid: int
380 @keyword dompath: Set initial dompath (if any)
381 @type dompath: string
382 @keyword augment: Augment given info with xenstored VM info
383 @type augment: bool
384 @keyword priv: Is a privileged domain (Dom 0)
385 @type priv: bool
386 @keyword resume: Is this domain being resumed?
387 @type resume: bool
388 """
390 self.info = info
391 if domid == None:
392 self.domid = self.info.get('domid')
393 else:
394 self.domid = domid
395 self.guest_bitsize = None
396 self.alloc_mem = None
397 self.paused_by_admin = False
399 maxmem = self.info.get('memory_static_max', 0)
400 memory = self.info.get('memory_dynamic_max', 0)
402 if self.info.is_hvm() and maxmem > memory:
403 self.pod_enabled = True
404 else:
405 self.pod_enabled = False
407 #REMOVE: uuid is now generated in XendConfig
408 #if not self._infoIsSet('uuid'):
409 # self.info['uuid'] = uuid.toString(uuid.create())
411 # Find a unique /vm/<uuid>/<integer> path if not specified.
412 # This avoids conflict between pre-/post-migrate domains when doing
413 # localhost relocation.
414 self.vmpath = vmpath
415 i = 0
416 while self.vmpath == None:
417 self.vmpath = XS_VMROOT + self.info['uuid']
418 if i != 0:
419 self.vmpath = self.vmpath + '-' + str(i)
420 try:
421 if self._readVm("uuid"):
422 self.vmpath = None
423 i = i + 1
424 except:
425 pass
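# The loop above probes XS_VMROOT + uuid, then the same path with suffixes
# -1, -2, ..., and keeps the first candidate whose "uuid" key cannot be
# read, i.e. the first /vm path not already claimed by another instance of
# this VM (as happens during localhost migration).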
427 self.dompath = dompath
429 self.image = None
430 self.store_port = None
431 self.store_mfn = None
432 self.console_port = None
433 self.console_mfn = None
435 self.native_protocol = None
437 self.vmWatch = None
438 self.shutdownWatch = None
439 self.shutdownStartTime = None
440 self._resume = resume
441 self.restart_in_progress = False
443 self.state_updated = threading.Condition()
444 self.refresh_shutdown_lock = threading.Condition()
445 self._stateSet(DOM_STATE_HALTED)
447 self._deviceControllers = {}
449 for state in DOM_STATES_OLD:
450 self.info[state] = 0
452 if augment:
453 self._augmentInfo(priv)
455 self._checkName(self.info['name_label'])
457 self.metrics = XendVMMetrics(uuid.createString(), self)
460 #
461 # Public functions available through XMLRPC
462 #
465 def start(self, is_managed = False):
466 """Attempts to start the VM by do the appropriate
467 initialisation if it not started.
468 """
469 from xen.xend import XendDomain
471 if self._stateGet() in (XEN_API_VM_POWER_STATE_HALTED, XEN_API_VM_POWER_STATE_SUSPENDED, XEN_API_VM_POWER_STATE_CRASHED):
472 try:
473 XendTask.log_progress(0, 30, self._constructDomain)
474 XendTask.log_progress(31, 60, self._initDomain)
476 XendTask.log_progress(61, 70, self._storeVmDetails)
477 XendTask.log_progress(71, 80, self._storeDomDetails)
478 XendTask.log_progress(81, 90, self._registerWatches)
479 XendTask.log_progress(91, 100, self.refreshShutdown)
481 xendomains = XendDomain.instance()
483 # save running configuration if XendDomain believes the domain is
484 # persistent
485 if is_managed:
486 xendomains.managed_config_save(self)
487 except:
488 log.exception('VM start failed')
489 self.destroy()
490 raise
491 else:
492 raise XendError('VM already running')
494 def resume(self):
495 """Resumes a domain that has come back from suspension."""
496 state = self._stateGet()
497 if state in (DOM_STATE_SUSPENDED, DOM_STATE_HALTED):
498 try:
499 self._constructDomain()
501 try:
502 self._setCPUAffinity()
503 except:
504 # usually a CPU we want to set affinity to does not exist
505 # we just ignore it so that the domain can still be restored
506 log.warn("Cannot restore CPU affinity")
508 self._setSchedParams()
509 self._storeVmDetails()
510 self._createChannels()
511 self._createDevices()
512 self._storeDomDetails()
513 self._endRestore()
514 except:
515 log.exception('VM resume failed')
516 self.destroy()
517 raise
518 else:
519 raise XendError('VM is not suspended; it is %s'
520 % XEN_API_VM_POWER_STATE[state])
522 def shutdown(self, reason):
523 """Shutdown a domain by signalling this via xenstored."""
524 log.debug('XendDomainInfo.shutdown(%s)', reason)
525 if self._stateGet() in (DOM_STATE_SHUTDOWN, DOM_STATE_HALTED,):
526 raise XendError('Domain cannot be shut down')
528 if self.domid == 0:
529 raise XendError('Domain 0 cannot be shut down')
531 if reason not in DOMAIN_SHUTDOWN_REASONS.values():
532 raise XendError('Invalid reason: %s' % reason)
533 self.storeDom("control/shutdown", reason)
535 # HVM domain shuts itself down only if it has PV drivers
536 if self.info.is_hvm():
537 hvm_pvdrv = xc.hvm_get_param(self.domid, HVM_PARAM_CALLBACK_IRQ)
538 hvm_s_state = xc.hvm_get_param(self.domid, HVM_PARAM_ACPI_S_STATE)
539 if not hvm_pvdrv or hvm_s_state != 0:
540 code = REVERSE_DOMAIN_SHUTDOWN_REASONS[reason]
541 log.info("HVM save:remote shutdown dom %d!", self.domid)
542 xc.domain_shutdown(self.domid, code)
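# Example (illustrative): for a PV-aware guest, the request is made purely
# through xenstore; the call below ends up writing
# <dompath>/control/shutdown = "poweroff", which the guest kernel watches:
#
#   vm.shutdown('poweroff')
#
# Only an HVM guest without PV drivers (or one in an ACPI sleep state)
# falls through to the hard xc.domain_shutdown() path above.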
544 def pause(self):
545 """Pause domain
547 @raise XendError: Failed pausing a domain
548 """
549 try:
550 if(self.domid):
551 # get all blktap2 devices
552 dev = xstransact.List(self.vmpath + '/device/tap2')
553 for x in dev:
554 path = self.getDeviceController('tap2').readBackend(x, 'params')
555 if path and path.startswith(TapdiskController.TAP_DEV):
556 TapdiskController.pause(path)
557 except Exception, ex:
558 log.warn('Could not pause blktap disk.')
560 try:
561 xc.domain_pause(self.domid)
562 self._stateSet(DOM_STATE_PAUSED)
563 except Exception, ex:
564 log.exception(ex)
565 raise XendError("Domain unable to be paused: %s" % str(ex))
567 def unpause(self):
568 """Unpause domain
570 @raise XendError: Failed unpausing a domain
571 """
572 try:
573 if(self.domid):
574 dev = xstransact.List(self.vmpath + '/device/tap2')
575 for x in dev:
576 path = self.getDeviceController('tap2').readBackend(x, 'params')
577 if path and path.startswith(TapdiskController.TAP_DEV):
578 TapdiskController.unpause(path)
580 except Exception, ex:
581 log.warn('Could not unpause blktap disk: %s' % str(ex))
583 try:
584 xc.domain_unpause(self.domid)
585 self._stateSet(DOM_STATE_RUNNING)
586 except Exception, ex:
587 log.exception(ex)
588 raise XendError("Domain unable to be unpaused: %s" % str(ex))
590 def send_sysrq(self, key):
591 """ Send a Sysrq equivalent key via xenstored."""
592 if self._stateGet() not in (DOM_STATE_RUNNING, DOM_STATE_PAUSED):
593 raise XendError("Domain '%s' is not started" % self.info['name_label'])
595 asserts.isCharConvertible(key)
596 self.storeDom("control/sysrq", '%c' % key)
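# Example (illustrative): key is an integer character code; it is formatted
# with '%c' before being written, so
#
#   vm.send_sysrq(ord('h'))   # writes 'h' to <dompath>/control/sysrq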
598 def pci_device_configure_boot(self):
600 if not self.info.is_hvm():
601 return
603 devid = '0'
604 first = True
605 dev_info = self._getDeviceInfo_pci(devid)
606 if dev_info is None:
607 return
609 # get the virtual slot info from xenstore
610 dev_uuid = sxp.child_value(dev_info, 'uuid')
611 pci_conf = self.info['devices'][dev_uuid][1]
612 pci_devs = pci_conf['devs']
614 # Keep a set of keys that are done rather than
615 # just iterating through set(map(..., pci_devs))
616 # to preserve any order information present.
617 done = set()
618 for key in map(lambda x: x['key'], pci_devs):
619 if key in done:
620 continue
621 done |= set([key])
622 dev = filter(lambda x: x['key'] == key, pci_devs)
624 head_dev = dev.pop()
625 dev_sxp = pci_convert_dict_to_sxp(head_dev, 'Initialising',
626 'Booting')
627 self.pci_device_configure(dev_sxp, first_dev = first)
628 first = False
630 # That is all for single-function virtual devices
631 if len(dev) == 0:
632 continue
634 if int(head_dev['vdevfn'], 16) & AUTO_PHP_SLOT:
635 new_dev_info = self._getDeviceInfo_pci(devid)
636 if new_dev_info is None:
637 continue
638 new_dev_uuid = sxp.child_value(new_dev_info, 'uuid')
639 new_pci_conf = self.info['devices'][new_dev_uuid][1]
640 new_pci_devs = new_pci_conf['devs']
642 new_head_dev = filter(lambda x: pci_dict_cmp(x, head_dev),
643 new_pci_devs)[0]
645 if int(new_head_dev['vdevfn'], 16) & AUTO_PHP_SLOT:
646 continue
648 vdevfn = PCI_SLOT(int(new_head_dev['vdevfn'], 16))
649 new_dev = []
650 for i in dev:
651 i['vdevfn'] = '0x%02x' % \
652 PCI_DEVFN(vdevfn,
653 PCI_FUNC(int(i['vdevfn'], 16)))
654 new_dev.append(i)
656 dev = new_dev
658 for i in dev:
659 dev_sxp = pci_convert_dict_to_sxp(i, 'Initialising', 'Booting')
660 self.pci_device_configure(dev_sxp)
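# Worked example (illustrative): a PCI devfn packs the slot into bits 7..3
# and the function into bits 2..0, so assuming the usual encoding of the
# PCI_SLOT/PCI_FUNC/PCI_DEVFN helpers:
#
#   PCI_SLOT(0x18)     # -> 3     (0x18 >> 3)
#   PCI_FUNC(0x1a)     # -> 2     (0x1a & 0x7)
#   PCI_DEVFN(3, 2)    # -> 0x1a  ((3 << 3) | 2)
#
# The loop above uses this to rebase every function of a multi-function
# device onto the virtual slot that was actually assigned to function 0.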
662 def hvm_pci_device_create(self, dev_config):
663 log.debug("XendDomainInfo.hvm_pci_device_create: %s"
664 % scrub_password(dev_config))
666 if not self.info.is_hvm():
667 raise VmError("hvm_pci_device_create called on non-HVM guest")
669 # all the PCI devs share one conf node
670 devid = '0'
672 new_dev = dev_config['devs'][0]
673 dev_info = self._getDeviceInfo_pci(devid)#from self.info['devices']
675 # check for a conflict before triggering the hotplug event
676 if dev_info is not None:
677 dev_uuid = sxp.child_value(dev_info, 'uuid')
678 pci_conf = self.info['devices'][dev_uuid][1]
679 pci_devs = pci_conf['devs']
680 for x in pci_devs:
681 if (int(x['vdevfn'], 16) == int(new_dev['vdevfn'], 16) and
682 not int(x['vdevfn'], 16) & AUTO_PHP_SLOT):
683 raise VmError("vdevfn %s already have a device." %
684 (new_dev['vdevfn']))
686 if (pci_dict_cmp(x, new_dev)):
687 raise VmError("device is already inserted")
689 # Test whether the devices can be assigned.
690 self.pci_dev_check_attachability_and_do_FLR(new_dev)
692 return self.hvm_pci_device_insert_dev(new_dev)
694 def iommu_check_pod_mode(self):
695 """ Disallow PCI device assignment if pod is enabled. """
696 if self.pod_enabled:
697 raise VmError("failed to assign device since pod is enabled")
699 def pci_dev_check_assignability_and_do_FLR(self, config):
700 """ In the case of static device assignment(i.e., the 'pci' string in
701 guest config file), we check if the device(s) specified in the 'pci'
702 can be assigned to guest or not; if yes, we do_FLR the device(s).
703 """
705 self.iommu_check_pod_mode()
706 pci_dev_ctrl = self.getDeviceController('pci')
707 return pci_dev_ctrl.dev_check_assignability_and_do_FLR(config)
709 def pci_dev_check_attachability_and_do_FLR(self, new_dev):
710 """ In the case of dynamic device assignment(i.e., xm pci-attach), we
711 check if the device can be attached to guest or not; if yes, we do_FLR
712 the device.
713 """
715 self.iommu_check_pod_mode()
717 # Test whether the devices can be assigned
719 pci_name = pci_dict_to_bdf_str(new_dev)
720 _all_assigned_pci_devices = get_all_assigned_pci_devices(self.domid)
721 if pci_name in _all_assigned_pci_devices:
722 raise VmError("failed to assign device %s that has"
723 " already been assigned to other domain." % pci_name)
725 # Test whether the device is owned by pciback or pci-stub.
726 try:
727 pci_device = PciDevice(new_dev)
728 except Exception, e:
729 raise VmError("pci: failed to locate device and "+
730 "parse its resources - "+str(e))
731 if pci_device.driver!='pciback' and pci_device.driver!='pci-stub':
732 raise VmError(("pci: PCI Backend and pci-stub don't own device %s")\
733 %pci_device.name)
735 strict_check = xoptions.get_pci_dev_assign_strict_check()
736 # Check non-page-aligned MMIO BAR.
737 if pci_device.has_non_page_aligned_bar and strict_check:
738 raise VmError("pci: %s: non-page-aligned MMIO BAR found." % \
739 pci_device.name)
741 # PV guests need fewer checks.
742 if not self.info.is_hvm():
743 # try to do FLR for PV guest
744 pci_device.do_FLR(self.info.is_hvm(), strict_check)
745 return
747 if not strict_check:
748 return
750 # Check if there is an intermediate PCIe switch between the device and
751 # the Root Complex.
752 if pci_device.is_behind_switch_lacking_acs():
753 err_msg = 'pci: to avoid a potential security issue, %s is not'+\
754 ' allowed to be assigned to the guest since it is behind a'+\
755 ' PCIe switch that does not support or enable ACS.'
756 raise VmError(err_msg % pci_device.name)
758 # Check the co-assignment.
759 # To pci-attach a device D to domN, we should ensure each of D's
760 # co-assigned devices either hasn't been assigned, or has been
761 # assigned to domN.
762 coassignment_list = pci_device.find_coassigned_devices()
763 pci_device.devs_check_driver(coassignment_list)
764 assigned_pci_device_str_list = self._get_assigned_pci_devices()
765 for pci_str in coassignment_list:
766 if not (pci_str in _all_assigned_pci_devices):
767 continue
768 if not pci_str in assigned_pci_device_str_list:
769 raise VmError(("pci: failed to pci-attach %s to domain %s" + \
770 " because one of its co-assignment device %s has been" + \
771 " assigned to other domain." \
772 )% (pci_device.name, self.info['name_label'], pci_str))
774 # try to do FLR for HVM guest
775 pci_device.do_FLR(self.info.is_hvm(), strict_check)
777 def hvm_pci_device_insert(self, dev_config):
778 log.debug("XendDomainInfo.hvm_pci_device_insert: %s"
779 % scrub_password(dev_config))
781 if not self.info.is_hvm():
782 raise VmError("hvm_pci_device_create called on non-HVM guest")
784 new_dev = dev_config['devs'][0]
786 return self.hvm_pci_device_insert_dev(new_dev)
788 def hvm_pci_device_insert_dev(self, new_dev):
789 log.debug("XendDomainInfo.hvm_pci_device_insert_dev: %s"
790 % scrub_password(new_dev))
792 if self.domid is not None:
793 opts = ''
794 optslist = []
795 pci_defopts = []
796 if 'pci_msitranslate' in self.info['platform']:
797 pci_defopts.append(['msitranslate',
798 str(self.info['platform']['pci_msitranslate'])])
799 if 'pci_power_mgmt' in self.info['platform']:
800 pci_defopts.append(['power_mgmt',
801 str(self.info['platform']['pci_power_mgmt'])])
802 if new_dev.has_key('opts'):
803 optslist += new_dev['opts']
805 if optslist or pci_defopts:
806 opts = ',' + serialise_pci_opts(
807 append_default_pci_opts(optslist, pci_defopts))
809 bdf_str = "%s@%02x%s" % (pci_dict_to_bdf_str(new_dev),
810 int(new_dev['vdevfn'], 16), opts)
811 log.debug("XendDomainInfo.hvm_pci_device_insert_dev: %s" % bdf_str)
812 bdf = xc.assign_device(self.domid, pci_dict_to_xc_str(new_dev))
813 if bdf > 0:
814 raise VmError("Failed to assign device to IOMMU (%s)" % bdf_str)
815 log.debug("pci: assign device %s" % bdf_str)
816 self.image.signalDeviceModel('pci-ins', 'pci-inserted', bdf_str)
818 vdevfn = xstransact.Read("/local/domain/0/device-model/%i/parameter"
819 % self.getDomid())
820 try:
821 vdevfn_int = int(vdevfn, 16)
822 except ValueError:
823 raise VmError(("Cannot pass-through PCI function '%s'. " +
824 "Device model reported an error: %s") %
825 (bdf_str, vdevfn))
826 else:
827 vdevfn = new_dev['vdevfn']
829 return vdevfn
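# Example (illustrative): assuming pci_dict_to_bdf_str() yields the usual
# "dddd:bb:ss.f" form, a device at 0000:03:00.0 placed at virtual devfn
# 0x18 with msitranslate enabled would produce
#
#   bdf_str = "0000:03:00.0@18,msitranslate=1"
#
# which is what gets handed to qemu-dm via signalDeviceModel('pci-ins', ...).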
832 def device_create(self, dev_config):
833 """Create a new device.
835 @param dev_config: device configuration
836 @type dev_config: SXP object (parsed config)
837 """
838 log.debug("XendDomainInfo.device_create: %s" % scrub_password(dev_config))
839 dev_type = sxp.name(dev_config)
841 if dev_type == 'vif':
842 for x in dev_config:
843 if x != 'vif' and x[0] == 'mac':
844 if not re.match('^([0-9a-f]{2}:){5}[0-9a-f]{2}$', x[1], re.I):
845 log.error("Virtual network interface creation error - invalid MAC Address entered: %s", x[1])
846 raise VmError("Cannot create a new virtual network interface - MAC address is not valid!")
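# Example (illustrative): the pattern above accepts exactly six
# colon-separated hex octets, case-insensitively:
#
#   re.match('^([0-9a-f]{2}:){5}[0-9a-f]{2}$', 'AA:bb:00:11:22:33', re.I)  # matches
#   re.match('^([0-9a-f]{2}:){5}[0-9a-f]{2}$', 'aa-bb-00-11-22-33', re.I)  # None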
848 dev_uuid = self.info.device_add(dev_type, cfg_sxp = dev_config)
849 dev_config_dict = self.info['devices'][dev_uuid][1]
850 log.debug("XendDomainInfo.device_create: %s" % scrub_password(dev_config_dict))
852 if self.domid is not None:
853 try:
854 dev_config_dict['devid'] = devid = \
855 self._createDevice(dev_type, dev_config_dict)
856 if dev_type == 'tap2':
857 # createDevice may create a blktap1 device if blktap2 is not
858 # installed or if the blktap driver is not supported in
859 # blktap1
860 dev_type = self.getBlockDeviceClass(devid)
861 self._waitForDevice(dev_type, devid)
862 except VmError, ex:
863 del self.info['devices'][dev_uuid]
864 if dev_type == 'pci':
865 for dev in dev_config_dict['devs']:
866 XendAPIStore.deregister(dev['uuid'], 'DPCI')
867 elif dev_type == 'vscsi':
868 for dev in dev_config_dict['devs']:
869 XendAPIStore.deregister(dev['uuid'], 'DSCSI')
870 elif dev_type == 'tap' or dev_type == 'tap2':
871 self.info['vbd_refs'].remove(dev_uuid)
872 else:
873 self.info['%s_refs' % dev_type].remove(dev_uuid)
874 raise ex
875 else:
876 devid = None
878 xen.xend.XendDomain.instance().managed_config_save(self)
879 return self.getDeviceController(dev_type).sxpr(devid)
882 def pci_device_configure(self, dev_sxp, devid = 0, first_dev = False):
883 """Configure an existing pci device.
885 @param dev_sxp: device configuration
886 @type dev_sxp: SXP object (parsed config)
887 @param devid: device id
888 @type devid: int
889 @return: Returns True if successfully updated device
890 @rtype: boolean
891 """
892 log.debug("XendDomainInfo.pci_device_configure: %s"
893 % scrub_password(dev_sxp))
895 dev_class = sxp.name(dev_sxp)
897 if dev_class != 'pci':
898 return False
900 pci_state = sxp.child_value(dev_sxp, 'state')
901 pci_sub_state = sxp.child_value(dev_sxp, 'sub_state')
902 existing_dev_info = self._getDeviceInfo_pci(devid)
904 if existing_dev_info is None and pci_state != 'Initialising':
905 raise XendError("Cannot detach when the pci platform does not exist")
907 pci_dev = sxp.children(dev_sxp, 'dev')[0]
908 dev_config = pci_convert_sxp_to_dict(dev_sxp)
909 dev = dev_config['devs'][0]
911 stubdomid = self.getStubdomDomid()
912 # Do HVM specific processing
913 if self.info.is_hvm():
914 from xen.xend import XendDomain
915 if pci_state == 'Initialising':
916 if stubdomid is not None :
917 XendDomain.instance().domain_lookup(stubdomid).pci_device_configure(dev_sxp[:])
919 # HVM PCI device attachment
920 if pci_sub_state == 'Booting':
921 vdevfn = self.hvm_pci_device_insert(dev_config)
922 else:
923 vdevfn = self.hvm_pci_device_create(dev_config)
924 # Update vdevfn
925 dev['vdevfn'] = vdevfn
926 for n in sxp.children(pci_dev):
927 if(n[0] == 'vdevfn'):
928 n[1] = vdevfn
929 else:
930 # HVM PCI device detachment
931 existing_dev_uuid = sxp.child_value(existing_dev_info, 'uuid')
932 existing_pci_conf = self.info['devices'][existing_dev_uuid][1]
933 existing_pci_devs = existing_pci_conf['devs']
934 new_devs = filter(lambda x: pci_dict_cmp(x, dev),
935 existing_pci_devs)
936 if len(new_devs) < 1:
937 raise VmError("Device %s is not connected" %
938 pci_dict_to_bdf_str(dev))
939 new_dev = new_devs[0]
940 # Only tell qemu-dm to unplug function 0.
941 # When unplugging a function, all functions in the
942 # same vslot must be unplugged, and function 0 must
943 # be one of the functions present when a vslot is
944 # hot-plugged. Telling qemu-dm to unplug function 0
945 # also tells it to unplug all other functions in the
946 # same vslot.
947 if (PCI_FUNC(int(new_dev['vdevfn'], 16)) == 0):
948 self.hvm_destroyPCIDevice(new_dev)
949 if stubdomid is not None :
950 XendDomain.instance().domain_lookup(stubdomid).pci_device_configure(dev_sxp[:])
951 # Update vdevfn
952 dev['vdevfn'] = new_dev['vdevfn']
953 for n in sxp.children(pci_dev):
954 if(n[0] == 'vdevfn'):
955 n[1] = new_dev['vdevfn']
956 else:
957 # Do PV specific checking
958 if pci_state == 'Initialising':
959 # PV PCI device attachment
960 self.pci_dev_check_attachability_and_do_FLR(dev)
962 # If pci platform does not exist, create and exit.
963 if existing_dev_info is None :
964 self.device_create(dev_sxp)
965 return True
967 if first_dev is True :
968 existing_dev_uuid = sxp.child_value(existing_dev_info, 'uuid')
969 existing_pci_conf = self.info['devices'][existing_dev_uuid][1]
970 devid = self._createDevice('pci', existing_pci_conf)
971 self.info['devices'][existing_dev_uuid][1]['devid'] = devid
973 if self.domid is not None:
974 # use DevController.reconfigureDevice to change device config
975 dev_control = self.getDeviceController(dev_class)
976 dev_uuid = dev_control.reconfigureDevice(devid, dev_config)
977 if not self.info.is_hvm() and not self.info.is_stubdom():
978 # in PV case, wait until backend state becomes connected.
979 dev_control.waitForDevice_reconfigure(devid)
980 num_devs = dev_control.cleanupDevice(devid)
982 # update XendConfig with new device info
983 if dev_uuid:
984 new_dev_sxp = dev_control.configuration(devid)
985 self.info.device_update(dev_uuid, new_dev_sxp)
987 # If there is no device left, destroy pci and remove config.
988 if num_devs == 0:
989 if self.info.is_hvm():
990 self.destroyDevice('pci', devid, True)
991 else:
992 self.destroyDevice('pci', devid)
993 del self.info['devices'][dev_uuid]
994 else:
995 new_dev_sxp = ['pci']
996 for cur_dev in sxp.children(existing_dev_info, 'dev'):
997 if pci_state == 'Closing':
998 if int(dev['domain'], 16) == int(sxp.child_value(cur_dev, 'domain'), 16) and \
999 int(dev['bus'], 16) == int(sxp.child_value(cur_dev, 'bus'), 16) and \
1000 int(dev['slot'], 16) == int(sxp.child_value(cur_dev, 'slot'), 16) and \
1001 int(dev['func'], 16) == int(sxp.child_value(cur_dev, 'func'), 16):
1002 continue
1003 new_dev_sxp.append(cur_dev)
1005 if pci_state == 'Initialising' and pci_sub_state != 'Booting':
1006 for new_dev in sxp.children(dev_sxp, 'dev'):
1007 new_dev_sxp.append(new_dev)
1009 dev_uuid = sxp.child_value(existing_dev_info, 'uuid')
1010 self.info.device_update(dev_uuid, new_dev_sxp)
1012 # If there is no device left, remove config.
1013 if len(sxp.children(new_dev_sxp, 'dev')) == 0:
1014 del self.info['devices'][dev_uuid]
1016 xen.xend.XendDomain.instance().managed_config_save(self)
1018 return True
1020 def vscsi_device_configure(self, dev_sxp):
1021 """Configure an existing vscsi device.
1022 (modelled on the pci_device_configure function above)
1023 """
1024 def _is_vscsi_defined(dev_info, p_devs = None, v_devs = None):
1025 if not dev_info:
1026 return False
1027 for dev in sxp.children(dev_info, 'dev'):
1028 if p_devs is not None:
1029 if sxp.child_value(dev, 'p-dev') in p_devs:
1030 return True
1031 if v_devs is not None:
1032 if sxp.child_value(dev, 'v-dev') in v_devs:
1033 return True
1034 return False
1036 def _vscsi_be(be):
1037 be_xdi = xen.xend.XendDomain.instance().domain_lookup_nr(be)
1038 if be_xdi is not None:
1039 be_domid = be_xdi.getDomid()
1040 if be_domid is not None:
1041 return str(be_domid)
1042 return str(be)
1044 dev_class = sxp.name(dev_sxp)
1045 if dev_class != 'vscsi':
1046 return False
1048 dev_config = self.info.vscsi_convert_sxp_to_dict(dev_sxp)
1049 devs = dev_config['devs']
1050 v_devs = [d['v-dev'] for d in devs]
1051 state = devs[0]['state']
1052 req_devid = int(devs[0]['devid'])
1053 cur_dev_sxp = self._getDeviceInfo_vscsi(req_devid)
1055 if state == xenbusState['Initialising']:
1056 # new create
1057 # If request devid does not exist, create and exit.
1058 p_devs = [d['p-dev'] for d in devs]
1059 for dev_type, dev_info in self.info.all_devices_sxpr():
1060 if dev_type != 'vscsi':
1061 continue
1062 if _is_vscsi_defined(dev_info, p_devs = p_devs):
1063 raise XendError('The physical device "%s" is already defined' % \
1064 p_devs[0])
1065 if cur_dev_sxp is None:
1066 self.device_create(dev_sxp)
1067 return True
1069 if _is_vscsi_defined(cur_dev_sxp, v_devs = v_devs):
1070 raise XendError('The virtual device "%s" is already defined' % \
1071 v_devs[0])
1073 if int(dev_config['feature-host']) != \
1074 int(sxp.child_value(cur_dev_sxp, 'feature-host')):
1075 raise XendError('The physical device "%s" cannot be defined '
1076 'because the mode is different' % devs[0]['p-dev'])
1078 new_be = dev_config.get('backend', None)
1079 if new_be is not None:
1080 cur_be = sxp.child_value(cur_dev_sxp, 'backend', None)
1081 if cur_be is None:
1082 cur_be = xen.xend.XendDomain.DOM0_ID
1083 new_be_dom = _vscsi_be(new_be)
1084 cur_be_dom = _vscsi_be(cur_be)
1085 if new_be_dom != cur_be_dom:
1086 raise XendError('The physical device "%s" cannot be defined '
1087 'because the backend is different' % devs[0]['p-dev'])
1089 elif state == xenbusState['Closing']:
1090 if not _is_vscsi_defined(cur_dev_sxp, v_devs = v_devs):
1091 raise XendError("Cannot detach a vscsi device that does not exist")
1093 if self.domid is not None:
1094 # use DevController.reconfigureDevice to change device config
1095 dev_control = self.getDeviceController(dev_class)
1096 dev_uuid = dev_control.reconfigureDevice(req_devid, dev_config)
1097 dev_control.waitForDevice_reconfigure(req_devid)
1098 num_devs = dev_control.cleanupDevice(req_devid)
1100 # update XendConfig with new device info
1101 if dev_uuid:
1102 new_dev_sxp = dev_control.configuration(req_devid)
1103 self.info.device_update(dev_uuid, new_dev_sxp)
1105 # If there is no device left, destroy vscsi and remove config.
1106 if num_devs == 0:
1107 self.destroyDevice('vscsi', req_devid)
1108 del self.info['devices'][dev_uuid]
1110 else:
1111 new_dev_sxp = ['vscsi']
1112 cur_mode = sxp.children(cur_dev_sxp, 'feature-host')[0]
1113 new_dev_sxp.append(cur_mode)
1114 try:
1115 cur_be = sxp.children(cur_dev_sxp, 'backend')[0]
1116 new_dev_sxp.append(cur_be)
1117 except IndexError:
1118 pass
1120 for cur_dev in sxp.children(cur_dev_sxp, 'dev'):
1121 if state == xenbusState['Closing']:
1122 if int(cur_mode[1]) == 1:
1123 continue
1124 if sxp.child_value(cur_dev, 'v-dev') in v_devs:
1125 continue
1126 new_dev_sxp.append(cur_dev)
1128 if state == xenbusState['Initialising']:
1129 for new_dev in sxp.children(dev_sxp, 'dev'):
1130 new_dev_sxp.append(new_dev)
1132 dev_uuid = sxp.child_value(cur_dev_sxp, 'uuid')
1133 self.info.device_update(dev_uuid, new_dev_sxp)
1135 # If there is only 'vscsi' in new_dev_sxp, remove the config.
1136 if len(sxp.children(new_dev_sxp, 'dev')) == 0:
1137 del self.info['devices'][dev_uuid]
1139 xen.xend.XendDomain.instance().managed_config_save(self)
1141 return True
1143 def vusb_device_configure(self, dev_sxp, devid):
1144 """Configure a virtual root port.
1145 """
1146 dev_class = sxp.name(dev_sxp)
1147 if dev_class != 'vusb':
1148 return False
1150 dev_config = {}
1151 ports = sxp.child(dev_sxp, 'port')
1152 for port in ports[1:]:
1153 try:
1154 num, bus = port
1155 dev_config['port-%i' % int(num)] = str(bus)
1156 except TypeError:
1157 pass
1159 dev_control = self.getDeviceController(dev_class)
1160 dev_control.reconfigureDevice(devid, dev_config)
1162 return True
1164 def device_configure(self, dev_sxp, devid = None):
1165 """Configure an existing device.
1167 @param dev_sxp: device configuration
1168 @type dev_config: SXP object (parsed config)
1169 @param devid: device id
1170 @type devid: int
1171 @return: Returns True if successfully updated device
1172 @rtype: boolean
1173 """
1175 # convert device sxp to a dict
1176 dev_class = sxp.name(dev_sxp)
1177 dev_config = {}
1179 if dev_class == 'pci':
1180 return self.pci_device_configure(dev_sxp)
1182 if dev_class == 'vscsi':
1183 return self.vscsi_device_configure(dev_sxp)
1185 if dev_class == 'vusb':
1186 return self.vusb_device_configure(dev_sxp, devid)
1188 for opt_val in dev_sxp[1:]:
1189 try:
1190 dev_config[opt_val[0]] = opt_val[1]
1191 except IndexError:
1192 pass
1194 dev_control = self.getDeviceController(dev_class)
1195 if devid is None:
1196 dev = dev_config.get('dev', '')
1197 if not dev:
1198 raise VmError('Block device must have virtual details specified')
1199 if 'ioemu:' in dev:
1200 (_, dev) = dev.split(':', 1)
1201 try:
1202 (dev, _) = dev.split(':', 1) # Remove ":disk" or ":cdrom"
1203 except ValueError:
1204 pass
1205 devid = dev_control.convertToDeviceNumber(dev)
1206 dev_info = self._getDeviceInfo_vbd(devid)
1207 if dev_info is None:
1208 raise VmError("Device %s not connected" % devid)
1209 dev_uuid = sxp.child_value(dev_info, 'uuid')
1211 if self.domid is not None:
1212 # use DevController.reconfigureDevice to change device config
1213 dev_control.reconfigureDevice(devid, dev_config)
1214 else:
1215 (_, new_b, new_f) = dev_control.getDeviceDetails(dev_config)
1216 if (new_f['device-type'] == 'cdrom' and
1217 sxp.child_value(dev_info, 'dev').endswith(':cdrom') and
1218 new_b['mode'] == 'r' and
1219 sxp.child_value(dev_info, 'mode') == 'r'):
1220 pass
1221 else:
1222 raise VmError('Refusing to reconfigure device %s:%d to %s' %
1223 (dev_class, devid, dev_config))
1225 # update XendConfig with new device info
1226 self.info.device_update(dev_uuid, dev_sxp)
1227 xen.xend.XendDomain.instance().managed_config_save(self)
1229 return True
1231 def waitForDevices(self):
1232 """Wait for this domain's configured devices to connect.
1234 @raise VmError: if any device fails to initialise.
1235 """
1236 for devclass in XendDevices.valid_devices():
1237 self.getDeviceController(devclass).waitForDevices()
1239 def hvm_destroyPCIDevice(self, pci_dev):
1240 log.debug("hvm_destroyPCIDevice: %s", pci_dev)
1242 if not self.info.is_hvm():
1243 raise VmError("hvm_destroyPCIDevice called on non-HVM guest")
1245 # Check the co-assignment.
1246 # To pci-detach a device D from domN, we should ensure: for each DD in the
1247 # list of D's co-assigned devices, DD is not assigned (to domN).
1249 from xen.xend.server.pciif import PciDevice
1250 try:
1251 pci_device = PciDevice(pci_dev)
1252 except Exception, e:
1253 raise VmError("pci: failed to locate device and "+
1254 "parse its resources - "+str(e))
1255 coassignment_list = pci_device.find_coassigned_devices()
1256 coassignment_list.remove(pci_device.name)
1257 assigned_pci_device_str_list = self._get_assigned_pci_devices()
1258 for pci_str in coassignment_list:
1259 if xoptions.get_pci_dev_assign_strict_check() and \
1260 pci_str in assigned_pci_device_str_list:
1261 raise VmError(("pci: failed to pci-detach %s from domain %s" + \
1262 " because one of its co-assignment device %s is still " + \
1263 " assigned to the domain." \
1264 )% (pci_device.name, self.info['name_label'], pci_str))
1267 bdf_str = pci_dict_to_bdf_str(pci_dev)
1268 log.info("hvm_destroyPCIDevice:%s:%s!", pci_dev, bdf_str)
1269 if self.domid is not None:
1270 self.image.signalDeviceModel('pci-rem', 'pci-removed', bdf_str)
1272 return 0
1274 def destroyDevice(self, deviceClass, devid, force = False, rm_cfg = False):
1275 log.debug("XendDomainInfo.destroyDevice: deviceClass = %s, device = %s",
1276 deviceClass, devid)
1278 if rm_cfg:
1279 # Convert devid to device number. A device number is
1280 # needed to remove its configuration.
1281 dev = self.getDeviceController(deviceClass).convertToDeviceNumber(devid)
1283 # Save current sxprs. A device number and a backend
1284 # path are needed to remove its configuration but sxprs
1285 # do not have those after calling destroyDevice.
1286 sxprs = self.getDeviceSxprs(deviceClass)
1288 rc = None
1289 if self.domid is not None:
1291 # new blktap implementation may need a sysfs write after everything is torn down.
1292 if deviceClass == 'tap2':
1293 dev = self.getDeviceController(deviceClass).convertToDeviceNumber(devid)
1294 path = self.getDeviceController(deviceClass).readBackend(dev, 'params')
1295 frontpath = self.getDeviceController(deviceClass).frontendPath(dev)
1296 backpath = xstransact.Read(frontpath, "backend")
1297 thread.start_new_thread(self.getDeviceController(deviceClass).finishDeviceCleanup, (backpath, path))
1299 rc = self.getDeviceController(deviceClass).destroyDevice(devid, force)
1300 if not force and rm_cfg:
1301 # The backend path, other than the device itself,
1302 # has to be passed because its accompanied frontend
1303 # path may be void until its removal is actually
1304 # issued. It is probable because destroyDevice is
1305 # issued first.
1306 for dev_num, dev_info in sxprs:
1307 dev_num = int(dev_num)
1308 if dev_num == dev:
1309 for x in dev_info:
1310 if x[0] == 'backend':
1311 backend = x[1]
1312 break
1313 break
1314 self._waitForDevice_destroy(deviceClass, devid, backend)
1316 if rm_cfg and deviceClass != "vif2":
1317 if deviceClass == 'vif':
1318 if self.domid is not None:
1319 mac = ''
1320 for dev_num, dev_info in sxprs:
1321 dev_num = int(dev_num)
1322 if dev_num == dev:
1323 for x in dev_info:
1324 if x[0] == 'mac':
1325 mac = x[1]
1326 break
1327 break
1328 dev_info = self._getDeviceInfo_vif(mac)
1329 else:
1330 _, dev_info = sxprs[dev]
1331 else: # 'vbd' or 'tap' or 'tap2'
1332 dev_info = self._getDeviceInfo_vbd(dev)
1333 # To remove the UUID of the device from refs,
1334 # deviceClass must be always 'vbd'.
1335 deviceClass = 'vbd'
1336 if dev_info is None:
1337 raise XendError("Device %s is not defined" % devid)
1339 dev_uuid = sxp.child_value(dev_info, 'uuid')
1340 del self.info['devices'][dev_uuid]
1341 self.info['%s_refs' % deviceClass].remove(dev_uuid)
1342 xen.xend.XendDomain.instance().managed_config_save(self)
1344 return rc
1346 def getDeviceSxprs(self, deviceClass):
1347 if deviceClass == 'pci':
1348 dev_info = self._getDeviceInfo_pci('0')#from self.info['devices']
1349 if dev_info is None:
1350 return []
1351 dev_uuid = sxp.child_value(dev_info, 'uuid')
1352 pci_devs = self.info['devices'][dev_uuid][1]['devs']
1353 return pci_devs
1354 if self._stateGet() in (DOM_STATE_RUNNING, DOM_STATE_PAUSED, DOM_STATE_CRASHED):
1355 return self.getDeviceController(deviceClass).sxprs()
1356 else:
1357 sxprs = []
1358 dev_num = 0
1359 for dev_type, dev_info in self.info.all_devices_sxpr():
1360 if (deviceClass == 'vbd' and dev_type not in ['vbd', 'tap', 'tap2']) or \
1361 (deviceClass != 'vbd' and dev_type != deviceClass):
1362 continue
1364 if deviceClass == 'vscsi':
1365 vscsi_devs = ['devs', []]
1366 for vscsi_dev in sxp.children(dev_info, 'dev'):
1367 vscsi_dev.append(['frontstate', None])
1368 vscsi_devs[1].append(vscsi_dev)
1369 dev_num = int(sxp.child_value(vscsi_dev, 'devid'))
1370 vscsi_mode = sxp.children(dev_info, 'feature-host')[0]
1371 sxprs.append([dev_num, [vscsi_devs, vscsi_mode]])
1372 elif deviceClass == 'vbd':
1373 dev = sxp.child_value(dev_info, 'dev')
1374 if 'ioemu:' in dev:
1375 (_, dev) = dev.split(':', 1)
1376 try:
1377 (dev_name, _) = dev.split(':', 1) # Remove ":disk" or ":cdrom"
1378 except ValueError:
1379 dev_name = dev
1380 dev_num = self.getDeviceController('vbd').convertToDeviceNumber(dev_name)
1381 sxprs.append([dev_num, dev_info])
1382 else:
1383 sxprs.append([dev_num, dev_info])
1384 dev_num += 1
1385 return sxprs
1387 def getBlockDeviceClass(self, devid):
1388 # if the domain is running we can get the device class from xenstore.
1389 # This is more accurate, as blktap1 devices show up as blktap2 devices
1390 # in the config.
1391 if self._stateGet() in (DOM_STATE_RUNNING, DOM_STATE_PAUSED, DOM_STATE_CRASHED):
1392 # All block devices have a vbd frontend, so we know the frontend path
1393 dev = self.getDeviceController('vbd').convertToDeviceNumber(devid)
1394 frontendPath = "%s/device/vbd/%s" % (self.dompath, dev)
1395 for devclass in XendDevices.valid_devices():
1396 for dev in xstransact.List("%s/device/%s" % (self.vmpath, devclass)):
1397 devFrontendPath = xstransact.Read("%s/device/%s/%s/frontend" % (self.vmpath, devclass, dev))
1398 if frontendPath == devFrontendPath:
1399 return devclass
1401 else: # the domain is not active so we must get the device class
1402 # from the config
1403 # To get a device number from the devid,
1404 # we temporarily use the device controller of VBD.
1405 dev = self.getDeviceController('vbd').convertToDeviceNumber(devid)
1406 dev_info = self._getDeviceInfo_vbd(dev)
1407 if dev_info:
1408 return dev_info[0]
1410 def _getDeviceInfo_vif(self, mac):
1411 for dev_type, dev_info in self.info.all_devices_sxpr():
1412 if dev_type != 'vif':
1413 continue
1414 if mac == sxp.child_value(dev_info, 'mac'):
1415 return dev_info
1417 def _getDeviceInfo_vbd(self, devid):
1418 for dev_type, dev_info in self.info.all_devices_sxpr():
1419 if dev_type != 'vbd' and dev_type != 'tap' and dev_type != 'tap2':
1420 continue
1421 dev = sxp.child_value(dev_info, 'dev')
1422 dev = dev.split(':')[0]
1423 dev = self.getDeviceController(dev_type).convertToDeviceNumber(dev)
1424 if devid == dev:
1425 return dev_info
1427 def _getDeviceInfo_pci(self, devid):
1428 for dev_type, dev_info in self.info.all_devices_sxpr():
1429 if dev_type != 'pci':
1430 continue
1431 return dev_info
1432 return None
1434 def _getDeviceInfo_vscsi(self, devid):
1435 devid = int(devid)
1436 for dev_type, dev_info in self.info.all_devices_sxpr():
1437 if dev_type != 'vscsi':
1438 continue
1439 devs = sxp.children(dev_info, 'dev')
1440 if devid == int(sxp.child_value(devs[0], 'devid')):
1441 return dev_info
1442 return None
1444 def _getDeviceInfo_vusb(self, devid):
1445 for dev_type, dev_info in self.info.all_devices_sxpr():
1446 if dev_type != 'vusb':
1447 continue
1448 return dev_info
1449 return None
1451 def _get_assigned_pci_devices(self, devid = 0):
1452 if self.domid is not None:
1453 return get_assigned_pci_devices(self.domid)
1455 dev_info = self._getDeviceInfo_pci(devid)
1456 if dev_info is None:
1457 return []
1458 dev_uuid = sxp.child_value(dev_info, 'uuid')
1459 pci_conf = self.info['devices'][dev_uuid][1]
1460 return map(pci_dict_to_bdf_str, pci_conf['devs'])
1462 def setMemoryTarget(self, target):
1463 """Set the memory target of this domain.
1464 @param target: In MiB.
1465 """
1466 log.debug("Setting memory target of domain %s (%s) to %d MiB.",
1467 self.info['name_label'], str(self.domid), target)
1469 MiB = 1024 * 1024
1470 memory_cur = self.get_memory_dynamic_max() / MiB
1472 if self.domid == 0:
1473 dom0_min_mem = xoptions.get_dom0_min_mem()
1474 if target < memory_cur and dom0_min_mem > target:
1475 raise XendError("memory_dynamic_max too small")
1477 self._safe_set_memory('memory_dynamic_min', target * MiB)
1478 self._safe_set_memory('memory_dynamic_max', target * MiB)
1480 if self.domid >= 0:
1481 if target > memory_cur:
1482 balloon.free((target - memory_cur) * 1024, self)
1483 self.storeVm("memory", target)
1484 self.storeDom("memory/target", target << 10)
1485 xc.domain_set_target_mem(self.domid,
1486 (target * 1024))
1487 xen.xend.XendDomain.instance().managed_config_save(self)
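# Worked example (illustrative): setMemoryTarget(512) on a running domain
# stores "512" (MiB) at <vmpath>/memory, 512 << 10 = 524288 (KiB) at
# <dompath>/memory/target, and passes 512 * 1024 KiB to
# xc.domain_set_target_mem(); balloon.free() above likewise works in KiB.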
1489 def setMemoryMaximum(self, limit):
1490 """Set the maximum memory limit of this domain
1491 @param limit: In MiB.
1492 """
1493 log.debug("Setting memory maximum of domain %s (%s) to %d MiB.",
1494 self.info['name_label'], str(self.domid), limit)
1496 maxmem_cur = self.get_memory_static_max()
1497 MiB = 1024 * 1024
1498 self._safe_set_memory('memory_static_max', limit * MiB)
1500 if self.domid >= 0:
1501 maxmem = int(limit) * 1024
1502 try:
1503 return xc.domain_setmaxmem(self.domid, maxmem)
1504 except Exception, ex:
1505 self._safe_set_memory('memory_static_max', maxmem_cur)
1506 raise XendError(str(ex))
1507 xen.xend.XendDomain.instance().managed_config_save(self)
1510 def getVCPUInfo(self):
1511 try:
1512 # We include the domain name and ID, to help xm.
1513 sxpr = ['domain',
1514 ['domid', self.domid],
1515 ['name', self.info['name_label']],
1516 ['vcpu_count', self.info['VCPUs_max']]]
1518 for i in range(0, self.info['VCPUs_max']):
1519 if self.domid is not None:
1520 info = xc.vcpu_getinfo(self.domid, i)
1522 sxpr.append(['vcpu',
1523 ['number', i],
1524 ['online', info['online']],
1525 ['blocked', info['blocked']],
1526 ['running', info['running']],
1527 ['cpu_time', info['cpu_time'] / 1e9],
1528 ['cpu', info['cpu']],
1529 ['cpumap', info['cpumap']]])
1530 else:
1531 sxpr.append(['vcpu',
1532 ['number', i],
1533 ['online', 0],
1534 ['blocked', 0],
1535 ['running', 0],
1536 ['cpu_time', 0.0],
1537 ['cpu', -1],
1538 ['cpumap', self.info['cpus'][i] and \
1539 self.info['cpus'][i] or range(64)]])
1541 return sxpr
1543 except RuntimeError, exn:
1544 raise XendError(str(exn))
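# Example (illustrative): for a running 2-VCPU domain the returned SXP has
# the shape
#
#   ['domain', ['domid', 3], ['name', 'guest'], ['vcpu_count', 2],
#    ['vcpu', ['number', 0], ['online', 1], ['blocked', 0], ['running', 1],
#             ['cpu_time', 12.3], ['cpu', 1], ['cpumap', [0, 1]]],
#    ['vcpu', ['number', 1], ...]]
#
# (values here are made up); for an inactive domain each vcpu entry carries
# the dummy values built in the else branch above (cpu -1, zeroed counters).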
1547 def getDomInfo(self):
1548 return dom_get(self.domid)
1551 # internal functions ... TODO: re-categorise
1554 def _augmentInfo(self, priv):
1555 """Augment self.info, as given to us through L{recreate}, with
1556 values taken from the store. This recovers those values known
1557 to xend but not to the hypervisor.
1558 """
1559 augment_entries = XendConfig.LEGACY_XENSTORE_VM_PARAMS[:]
1560 if priv:
1561 augment_entries.remove('memory')
1562 augment_entries.remove('maxmem')
1563 augment_entries.remove('vcpus')
1564 augment_entries.remove('vcpu_avail')
1566 vm_config = self._readVMDetails([(k, XendConfig.LEGACY_CFG_TYPES[k])
1567 for k in augment_entries])
1569 # make returned lists into a dictionary
1570 vm_config = dict(zip(augment_entries, vm_config))
1572 for arg in augment_entries:
1573 val = vm_config[arg]
1574 if val != None:
1575 if arg in XendConfig.LEGACY_CFG_TO_XENAPI_CFG:
1576 xapiarg = XendConfig.LEGACY_CFG_TO_XENAPI_CFG[arg]
1577 self.info[xapiarg] = val
1578 elif arg == "memory":
1579 self.info["static_memory_min"] = val
1580 elif arg == "maxmem":
1581 self.info["static_memory_max"] = val
1582 else:
1583 self.info[arg] = val
1585 # read CPU Affinity
1586 self.info['cpus'] = []
1587 vcpus_info = self.getVCPUInfo()
1588 for vcpu_info in sxp.children(vcpus_info, 'vcpu'):
1589 self.info['cpus'].append(sxp.child_value(vcpu_info, 'cpumap'))
1591 # For dom0, we ignore any stored value for the vcpus fields, and
1592 # read the current value from Xen instead. This allows boot-time
1593 # settings to take precedence over any entries in the store.
1594 if priv:
1595 xeninfo = dom_get(self.domid)
1596 self.info['VCPUs_max'] = xeninfo['online_vcpus']
1597 self.info['vcpu_avail'] = (1 << xeninfo['online_vcpus']) - 1
1599 # read image value
1600 image_sxp = self._readVm('image')
1601 if image_sxp:
1602 self.info.update_with_image_sxp(sxp.from_string(image_sxp))
1604 # read devices
1605 devices = []
1606 for devclass in XendDevices.valid_devices():
1607 devconfig = self.getDeviceController(devclass).configurations()
1608 if devconfig:
1609 devices.extend(devconfig)
1611 if not self.info['devices'] and devices is not None:
1612 for device in devices:
1613 self.info.device_add(device[0], cfg_sxp = device)
1615 self._update_consoles()
1617 def _update_consoles(self, transaction = None):
1618 if self.domid == None or self.domid == 0:
1619 return
1621 # Update VT100 port if it exists
1622 if transaction is None:
1623 self.console_port = self.readDom('console/port')
1624 else:
1625 self.console_port = self.readDomTxn(transaction, 'console/port')
1626 if self.console_port is not None:
1627 serial_consoles = self.info.console_get_all('vt100')
1628 if not serial_consoles:
1629 cfg = self.info.console_add('vt100', self.console_port)
1630 self._createDevice('console', cfg)
1631 else:
1632 console_uuid = serial_consoles[0].get('uuid')
1633 self.info.console_update(console_uuid, 'location',
1634 self.console_port)
1635 # Notify xenpv device model that console info is ready
1636 if not self.info.is_hvm() and self.info.has_rfb():
1637 console_ctrl = self.getDeviceController('console')
1638 # The value is unchanged. Just for xenstore watcher
1639 console_ctrl.writeBackend(0, 'uuid', console_uuid)
1642 # Update VNC port if it exists and write to xenstore
1643 if transaction is None:
1644 vnc_port = self.readDom('console/vnc-port')
1645 else:
1646 vnc_port = self.readDomTxn(transaction, 'console/vnc-port')
1647 if vnc_port is not None:
1648 for dev_uuid, (dev_type, dev_info) in self.info['devices'].items():
1649 if dev_type == 'vfb':
1650 old_location = dev_info.get('location')
1651 listen_host = dev_info.get('vnclisten', \
1652 XendOptions.instance().get_vnclisten_address())
1653 new_location = '%s:%s' % (listen_host, str(vnc_port))
1654 if old_location == new_location:
1655 break
1657 dev_info['location'] = new_location
1658 self.info.device_update(dev_uuid, cfg_xenapi = dev_info)
1659 vfb_ctrl = self.getDeviceController('vfb')
1660 vfb_ctrl.reconfigureDevice(0, dev_info)
1661 break
1664 # Function to update xenstore /vm/*
1667 def _readVm(self, *args):
1668 return xstransact.Read(self.vmpath, *args)
1670 def _writeVm(self, *args):
1671 return xstransact.Write(self.vmpath, *args)
1673 def _removeVm(self, *args):
1674 return xstransact.Remove(self.vmpath, *args)
1676 def _gatherVm(self, *args):
1677 return xstransact.Gather(self.vmpath, *args)
1679 def _listRecursiveVm(self, *args):
1680 return xstransact.ListRecursive(self.vmpath, *args)
1682 def storeVm(self, *args):
1683 return xstransact.Store(self.vmpath, *args)
1685 def permissionsVm(self, *args):
1686 return xstransact.SetPermissions(self.vmpath, *args)
1689 # Function to update xenstore /dom/*
1692 def readDom(self, *args):
1693 return xstransact.Read(self.dompath, *args)
1695 def gatherDom(self, *args):
1696 return xstransact.Gather(self.dompath, *args)
1698 def _writeDom(self, *args):
1699 return xstransact.Write(self.dompath, *args)
1701 def _removeDom(self, *args):
1702 return xstransact.Remove(self.dompath, *args)
1704 def storeDom(self, *args):
1705 return xstransact.Store(self.dompath, *args)
1708 def readDomTxn(self, transaction, *args):
1709 paths = map(lambda x: self.dompath + "/" + x, args)
1710 return transaction.read(*paths)
1712 def gatherDomTxn(self, transaction, *args):
1713 paths = map(lambda x: self.dompath + "/" + x, args)
1714 return transaction.gather(*paths)
1716 def _writeDomTxn(self, transaction, *args):
1717 paths = map(lambda x: self.dompath + "/" + x, args)
1718 return transaction.write(*paths)
1720 def _removeDomTxn(self, transaction, *args):
1721 paths = map(lambda x: self.dompath + "/" + x, args)
1722 return transaction.remove(*paths)
1724 def storeDomTxn(self, transaction, *args):
1725 paths = map(lambda x: self.dompath + "/" + x, args)
1726 return transaction.store(*paths)
1729 def _recreateDom(self):
1730 complete(self.dompath, lambda t: self._recreateDomFunc(t))
1732 def _recreateDomFunc(self, t):
1733 t.remove()
1734 t.mkdir()
1735 t.set_permissions({'dom' : self.domid, 'read' : True})
1736 t.write('vm', self.vmpath)
1737 # NB. Solaris guests use guest/ and hvmpv/ xenstore directories
1738 # XCP Windows paravirtualized guests use data/
1739 for i in [ 'device', 'control', 'error', 'memory', 'guest', \
1740 'hvmpv', 'data' ]:
1741 t.mkdir(i)
1742 t.set_permissions(i, {'dom' : self.domid})
1744 def _storeDomDetails(self):
1745 to_store = {
1746 'domid': str(self.domid),
1747 'vm': self.vmpath,
1748 'name': self.info['name_label'],
1749 'console/limit': str(xoptions.get_console_limit() * 1024),
1750 'memory/target': str(self.info['memory_dynamic_max'] / 1024),
1751 'description': str(self.info['description']),
1752 }
1754 def f(n, v):
1755 if v is not None:
1756 if type(v) == bool:
1757 to_store[n] = v and "1" or "0"
1758 else:
1759 to_store[n] = str(v)
1761 # Figure out if we need to tell xenconsoled to ignore this guest's
1762 # console - device model will handle console if it is running
1763 constype = "ioemu"
1764 if 'device_model' not in self.info['platform']:
1765 constype = "xenconsoled"
1767 f('console/port', self.console_port)
1768 f('console/ring-ref', self.console_mfn)
1769 f('console/type', constype)
1770 f('store/port', self.store_port)
1771 f('store/ring-ref', self.store_mfn)
1773 if arch.type == "x86":
1774 f('control/platform-feature-multiprocessor-suspend', True)
1776 # elfnotes
1777 for n, v in self.info.get_notes().iteritems():
1778 n = n.lower().replace('_', '-')
1779 if n == 'features':
1780 for v in v.split('|'):
1781 v = v.replace('_', '-')
1782 if v.startswith('!'):
1783 f('image/%s/%s' % (n, v[1:]), False)
1784 else:
1785 f('image/%s/%s' % (n, v), True)
1786 else:
1787 f('image/%s' % n, v)
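# [Editor's note] A standalone sketch of the 'features' expansion above:
# each '|'-separated flag becomes an image/features/<flag> key, a leading
# '!' marks the feature disabled, and '_' is normalised to '-'. The helper
# name and the sample note value are illustrative only.
def _sketch_expand_features(note_value):
    out = {}
    for flag in note_value.split('|'):
        flag = flag.replace('_', '-')
        if flag.startswith('!'):
            out['image/features/%s' % flag[1:]] = False
        else:
            out['image/features/%s' % flag] = True
    return out
# _sketch_expand_features('pae_pgdir_above_4gb|!writable_page_tables')
# => {'image/features/pae-pgdir-above-4gb': True,
#     'image/features/writable-page-tables': False}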
1789 if self.info.has_key('security_label'):
1790 f('security_label', self.info['security_label'])
1792 to_store.update(self._vcpuDomDetails())
1794 log.debug("Storing domain details: %s", scrub_password(to_store))
1796 self._writeDom(to_store)
1798 def _vcpuDomDetails(self):
1799 def availability(n):
1800 if self.info['vcpu_avail'] & (1 << n):
1801 return 'online'
1802 else:
1803 return 'offline'
1805 result = {}
1806 for v in range(0, self.info['VCPUs_max']):
1807 result["cpu/%d/availability" % v] = availability(v)
1808 return result
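# [Editor's note] vcpu_avail is a bitmask with one bit per VCPU: bit n set
# means VCPU n is available, hence the (vcpu_avail & (1 << n)) test above
# and the (1 << vcpus) - 1 construction elsewhere in this file. A minimal
# sketch of the mapping (hypothetical helper name):
def _sketch_vcpu_availability(vcpus_max, vcpu_avail):
    result = {}
    for v in range(vcpus_max):
        if vcpu_avail & (1 << v):
            result["cpu/%d/availability" % v] = 'online'
        else:
            result["cpu/%d/availability" % v] = 'offline'
    return result
# With vcpus_max=4 and vcpu_avail=5 (binary 0101), VCPUs 0 and 2 come out
# 'online' and VCPUs 1 and 3 'offline'.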
1811 # xenstore watches
1814 def _registerWatches(self):
1815 """Register a watch on this VM's entries in the store, and the
1816 domain's control/shutdown node, so that when they are changed
1817 externally, we keep up to date. This should only be called by {@link
1818 #create}, {@link #recreate}, or {@link #restore}, once the domain's
1819 details have been written, but before the new instance is returned."""
1820 self.vmWatch = xswatch(self.vmpath, self._storeChanged)
1821 self.shutdownWatch = xswatch(self.dompath + '/control/shutdown',
1822 self._handleShutdownWatch)
1824 def _storeChanged(self, _):
1825 log.trace("XendDomainInfo.storeChanged")
1827 changed = False
1829 # Check whether values in the configuration have
1830 # changed in Xenstore.
1832 cfg_vm = ['name', 'on_poweroff', 'on_reboot', 'on_crash',
1833 'rtc/timeoffset']
1835 vm_details = self._readVMDetails([(k,XendConfig.LEGACY_CFG_TYPES[k])
1836 for k in cfg_vm])
1838 # convert two lists into a python dictionary
1839 vm_details = dict(zip(cfg_vm, vm_details))
1841 for arg, val in vm_details.items():
1842 if arg in XendConfig.LEGACY_CFG_TO_XENAPI_CFG:
1843 xapiarg = XendConfig.LEGACY_CFG_TO_XENAPI_CFG[arg]
1844 if val != None and val != self.info[xapiarg]:
1845 self.info[xapiarg] = val
1846 changed = True
1847 elif arg == "memory":
1848 if val != None and val != self.info["static_memory_min"]:
1849 self.info["static_memory_min"] = val
1850 changed = True
1851 elif arg == "maxmem":
1852 if val != None and val != self.info["static_memory_max"]:
1853 self.info["static_memory_max"] = val
1854 changed = True
1856 # Check whether image definition has been updated
1857 image_sxp = self._readVm('image')
1858 if image_sxp and image_sxp != sxp.to_string(self.info.image_sxpr()):
1859 self.info.update_with_image_sxp(sxp.from_string(image_sxp))
1860 changed = True
1862 # Update the rtc_timeoffset to be preserved across reboot.
1863 # NB. No need to update xenstore domain section.
1864 val = int(vm_details.get("rtc/timeoffset", 0))
1865 self.info["platform"]["rtc_timeoffset"] = val
1867 if changed:
1868 # Update the domain section of the store, as this contains some
1869 # parameters derived from the VM configuration.
1870 self.refresh_shutdown_lock.acquire()
1871 try:
1872 state = self._stateGet()
1873 if state not in (DOM_STATE_SHUTDOWN, DOM_STATE_HALTED,):
1874 self._storeDomDetails()
1875 finally:
1876 self.refresh_shutdown_lock.release()
1878 return 1
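# [Editor's note] xswatch callbacks are expected to return a true value to
# stay registered; returning 1 here keeps this VM-path watch alive after
# each firing.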
1880 def _handleShutdownWatch(self, _):
1881 log.debug('XendDomainInfo.handleShutdownWatch')
1883 reason = self.readDom('control/shutdown')
1885 if reason and reason != 'suspend':
1886 sst = self.readDom('xend/shutdown_start_time')
1887 now = time.time()
1888 if sst:
1889 self.shutdownStartTime = float(sst)
1890 timeout = float(sst) + SHUTDOWN_TIMEOUT - now
1891 else:
1892 self.shutdownStartTime = now
1893 self.storeDom('xend/shutdown_start_time', now)
1894 timeout = SHUTDOWN_TIMEOUT
1896 log.trace(
1897 "Scheduling refreshShutdown on domain %d in %ds.",
1898 self.domid, timeout)
1899 threading.Timer(timeout, self.refreshShutdown).start()
1901 return True
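# [Editor's note] A sketch of the timeout arithmetic above: if a shutdown
# request was already pending at time sst, the timer waits only for the
# remainder of the grace window; otherwise it waits the full window.
# Helper name and example values are illustrative only.
def _sketch_shutdown_timeout(sst, now, grace):
    if sst is not None:
        return sst + grace - now    # remaining part of the window
    return grace                    # fresh request: full window
# _sketch_shutdown_timeout(sst=100.0, now=130.0, grace=60.0) == 30.0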
1905 # Public Attributes for the VM
1909 def getDomid(self):
1910 return self.domid
1912 def getStubdomDomid(self):
1913 dom_list = xstransact.List('/local/domain')
1914 for d in dom_list:
1915 target = xstransact.Read('/local/domain/' + d + '/target')
1916 if target is not None and int(target) == self.domid:
1917 return int(d)
1918 return None
1920 def setName(self, name, to_store = True):
1921 self._checkName(name)
1922 self.info['name_label'] = name
1923 if to_store:
1924 self.storeVm("name", name)
1926 def getName(self):
1927 return self.info['name_label']
1929 def getDomainPath(self):
1930 return self.dompath
1932 def getShutdownReason(self):
1933 return self.readDom('control/shutdown')
1935 def getStorePort(self):
1936 """For use only by image.py and XendCheckpoint.py."""
1937 return self.store_port
1939 def getConsolePort(self):
1940 """For use only by image.py and XendCheckpoint.py"""
1941 return self.console_port
1943 def getFeatures(self):
1944 """For use only by image.py."""
1945 return self.info['features']
1947 def getVCpuCount(self):
1948 return self.info['VCPUs_max']
1950 def getVCpuAvail(self):
1951 return self.info['vcpu_avail']
1953 def setVCpuCount(self, vcpus):
1954 def vcpus_valid(n):
1955 if n <= 0:
1956 raise XendError('A VCPU count of zero or less is invalid')
1957 if self.domid >= 0 and n > self.info['VCPUs_max']:
1958 raise XendError('Cannot set vcpus greater than max vcpus on a running domain')
1959 vcpus_valid(vcpus)
1961 self.info['vcpu_avail'] = (1 << vcpus) - 1
1962 if self.domid >= 0:
1963 self.storeVm('vcpu_avail', self.info['vcpu_avail'])
1964 self._writeDom(self._vcpuDomDetails())
1965 self.info['VCPUs_live'] = vcpus
1966 else:
1967 if self.info['VCPUs_max'] > vcpus:
1968 # decreasing
1969 del self.info['cpus'][vcpus:]
1970 elif self.info['VCPUs_max'] < vcpus:
1971 # increasing
1972 for c in range(self.info['VCPUs_max'], vcpus):
1973 self.info['cpus'].append(list())
1974 self.info['VCPUs_max'] = vcpus
1975 xen.xend.XendDomain.instance().managed_config_save(self)
1976 log.info("Set VCPU count on domain %s to %d", self.info['name_label'],
1977 vcpus)
1979 def getMemoryTarget(self):
1980 """Get this domain's target memory size, in KB."""
1981 return self.info['memory_dynamic_max'] / 1024
1983 def getMemoryMaximum(self):
1984 """Get this domain's maximum memory size, in KB."""
1985 # remember, info now stores memory in bytes
1986 return self.info['memory_static_max'] / 1024
1988 def getResume(self):
1989 return str(self._resume)
1991 def setResume(self, isresume):
1992 self._resume = isresume
1994 def getCpus(self):
1995 return self.info['cpus']
1997 def setCpus(self, cpumap):
1998 self.info['cpus'] = cpumap
2000 def getCap(self):
2001 return self.info['vcpus_params']['cap']
2003 def setCap(self, cpu_cap):
2004 self.info['vcpus_params']['cap'] = cpu_cap
2006 def getWeight(self):
2007 return self.info['vcpus_params']['weight']
2009 def setWeight(self, cpu_weight):
2010 self.info['vcpus_params']['weight'] = cpu_weight
2012 def getRestartCount(self):
2013 return self._readVm('xend/restart_count')
2015 def refreshShutdown(self, xeninfo = None):
2016 """ Checks the domain for whether a shutdown is required.
2018 Called from XendDomainInfo and also image.py for HVM images.
2019 """
2021 # If set at the end of this method, a restart is required, with the
2022 # given reason. This restart has to be done out of the scope of
2023 # refresh_shutdown_lock.
2024 restart_reason = None
2026 self.refresh_shutdown_lock.acquire()
2027 try:
2028 if xeninfo is None:
2029 xeninfo = dom_get(self.domid)
2030 if xeninfo is None:
2031 # The domain no longer exists. This will occur if we have
2032 # scheduled a timer to check for shutdown timeouts and the
2033 # shutdown succeeded. It will also occur if someone
2034 # destroys a domain beneath us. We clean up the domain,
2035 # just in case, but we can't clean up the VM, because that
2036 # VM may have migrated to a different domain on this
2037 # machine.
2038 self.cleanupDomain()
2039 self._stateSet(DOM_STATE_HALTED)
2040 return
2042 if xeninfo['dying']:
2043 # Dying means that a domain has been destroyed, but has not
2044 # yet been cleaned up by Xen. This state could persist
2045 # indefinitely if, for example, another domain has some of its
2046 # pages mapped. We might like to diagnose this problem in the
2047 # future, but for now all we do is make sure that it's not us
2048 # holding the pages, by calling cleanupDomain. We can't
2049 # clean up the VM, as above.
2050 self.cleanupDomain()
2051 self._stateSet(DOM_STATE_SHUTDOWN)
2052 return
2054 elif xeninfo['crashed']:
2055 if self.readDom('xend/shutdown_completed'):
2056 # We've seen this shutdown already, but we are preserving
2057 # the domain for debugging. Leave it alone.
2058 return
2060 log.warn('Domain has crashed: name=%s id=%d.',
2061 self.info['name_label'], self.domid)
2062 self._writeVm(LAST_SHUTDOWN_REASON, 'crash')
2064 restart_reason = 'crash'
2065 self._stateSet(DOM_STATE_HALTED)
2067 elif xeninfo['shutdown']:
2068 self._stateSet(DOM_STATE_SHUTDOWN)
2069 if self.readDom('xend/shutdown_completed'):
2070 # We've seen this shutdown already, but we are preserving
2071 # the domain for debugging. Leave it alone.
2072 return
2074 else:
2075 reason = shutdown_reason(xeninfo['shutdown_reason'])
2077 log.info('Domain has shutdown: name=%s id=%d reason=%s.',
2078 self.info['name_label'], self.domid, reason)
2079 self._writeVm(LAST_SHUTDOWN_REASON, reason)
2081 self._clearRestart()
2083 if reason == 'suspend':
2084 self._stateSet(DOM_STATE_SUSPENDED)
2085 # Don't destroy the domain. XendCheckpoint will do
2086 # this once it has finished. However, stop watching
2087 # the VM path now, otherwise we will end up with one
2088 # watch for the old domain, and one for the new.
2089 self._unwatchVm()
2090 elif reason in ('poweroff', 'reboot'):
2091 restart_reason = reason
2092 else:
2093 self.destroy()
2095 elif self.dompath is None:
2096 # We have yet to manage to call introduceDomain on this
2097 # domain. This can happen if a restore is in progress, or has
2098 # failed. Ignore this domain.
2099 pass
2100 else:
2101 # Domain is alive. If we are shutting it down, log a message
2102 # if it seems unresponsive.
2103 if xeninfo['paused']:
2104 self._stateSet(DOM_STATE_PAUSED)
2105 else:
2106 self._stateSet(DOM_STATE_RUNNING)
2108 if self.shutdownStartTime:
2109 timeout = (SHUTDOWN_TIMEOUT - time.time() +
2110 self.shutdownStartTime)
2111 if (timeout < 0 and not self.readDom('xend/unresponsive')):
2112 log.info(
2113 "Domain shutdown timeout expired: name=%s id=%s",
2114 self.info['name_label'], self.domid)
2115 self.storeDom('xend/unresponsive', 'True')
2116 finally:
2117 self.refresh_shutdown_lock.release()
2119 if restart_reason and not self.restart_in_progress:
2120 self.restart_in_progress = True
2121 threading.Thread(target = self._maybeRestart,
2122 args = (restart_reason,)).start()
2126 # Restart functions - handling whether we come back up on shutdown.
2129 def _clearRestart(self):
2130 self._removeDom("xend/shutdown_start_time")
2132 def _maybeDumpCore(self, reason):
2133 if reason == 'crash':
2134 if xoptions.get_enable_dump() or self.get_on_crash() \
2135 in ['coredump_and_destroy', 'coredump_and_restart']:
2136 try:
2137 self.dumpCore()
2138 except XendError:
2139 # This error has been logged -- there's nothing more
2140 # we can do in this context.
2141 pass
2143 def _maybeRestart(self, reason):
2144 # Before taking configured action, dump core if configured to do so.
2146 self._maybeDumpCore(reason)
2148 # Dispatch to the correct method based upon the configured on_{reason}
2149 # behaviour.
2150 actions = {"destroy" : self.destroy,
2151 "restart" : self._restart,
2152 "preserve" : self._preserve,
2153 "rename-restart" : self._renameRestart,
2154 "coredump-destroy" : self.destroy,
2155 "coredump-restart" : self._restart}
2157 action_conf = {
2158 'poweroff': 'actions_after_shutdown',
2159 'reboot': 'actions_after_reboot',
2160 'crash': 'actions_after_crash',
2161 }
2163 action_target = self.info.get(action_conf.get(reason))
2164 func = actions.get(action_target, None)
2165 if func and callable(func):
2166 func()
2167 else:
2168 self.destroy() # default to destroy
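# [Editor's note] A condensed sketch of the dispatch above: the shutdown
# reason picks a config key, the configured value picks a handler, and
# anything unrecognised falls back to destroy. All names are illustrative.
def _sketch_restart_action(reason, config, handlers, default):
    key = {'poweroff': 'actions_after_shutdown',
           'reboot': 'actions_after_reboot',
           'crash': 'actions_after_crash'}.get(reason)
    return handlers.get(config.get(key), default)
# _sketch_restart_action('reboot', {'actions_after_reboot': 'restart'},
#                        {'restart': 'RESTART'}, 'DESTROY') == 'RESTART'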
2170 def _renameRestart(self):
2171 self._restart(True)
2173 def _restart(self, rename = False):
2174 """Restart the domain after it has exited.
2176 @param rename True if the old domain is to be renamed and preserved,
2177 False if it is to be destroyed.
2178 """
2179 from xen.xend import XendDomain
2181 if self._readVm(RESTART_IN_PROGRESS):
2182 log.error('Xend failed during restart of domain %s. '
2183 'Refusing to restart to avoid loops.',
2184 str(self.domid))
2185 self.destroy()
2186 return
2188 old_domid = self.domid
2189 self._writeVm(RESTART_IN_PROGRESS, 'True')
2191 elapse = time.time() - self.info['start_time']
2192 if elapse < MINIMUM_RESTART_TIME:
2193 log.error('VM %s restarting too fast (Elapsed time: %f seconds). '
2194 'Refusing to restart to avoid loops.',
2195 self.info['name_label'], elapse)
2196 self.destroy()
2197 return
2199 prev_vm_xend = self._listRecursiveVm('xend')
2200 new_dom_info = self.info
2201 try:
2202 if rename:
2203 new_dom_info = self._preserveForRestart()
2204 else:
2205 self._unwatchVm()
2206 self.destroy()
2208 # new_dom's VM will be the same as this domain's VM, except where
2209 # the rename flag has instructed us to call preserveForRestart.
2210 # In that case, it is important that we remove the
2211 # RESTART_IN_PROGRESS node from the new domain, not the old one,
2212 # once the new one is available.
2214 new_dom = None
2215 try:
2216 new_dom = XendDomain.instance().domain_create_from_dict(
2217 new_dom_info)
2218 for x in prev_vm_xend[0][1]:
2219 new_dom._writeVm('xend/%s' % x[0], x[1])
2220 new_dom.waitForDevices()
2221 new_dom.unpause()
2222 rst_cnt = new_dom._readVm('xend/restart_count')
2223 rst_cnt = int(rst_cnt) + 1
2224 new_dom._writeVm('xend/restart_count', str(rst_cnt))
2225 new_dom._removeVm(RESTART_IN_PROGRESS)
2226 except:
2227 if new_dom:
2228 new_dom._removeVm(RESTART_IN_PROGRESS)
2229 new_dom.destroy()
2230 else:
2231 self._removeVm(RESTART_IN_PROGRESS)
2232 raise
2233 except:
2234 log.exception('Failed to restart domain %s.', str(old_domid))
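# [Editor's note] Two independent guards above prevent restart loops: a
# RESTART_IN_PROGRESS flag left in the VM's xenstore path (removed only on
# success), and a minimum-uptime check. A sketch of the uptime check,
# assuming minimum_restart_time is a number of seconds ('time' is already
# imported at the top of this file):
def _sketch_restart_allowed(start_time, minimum_restart_time):
    return (time.time() - start_time) >= minimum_restart_time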
2236 def _preserveForRestart(self):
2237 """Preserve a domain that has been shut down, by giving it a new UUID,
2238 cloning the VM details, and giving it a new name. This allows us to
2239 keep this domain for debugging, but restart a new one in its place
2240 preserving the restart semantics (name and UUID preserved).
2241 """
2243 new_uuid = uuid.createString()
2244 new_name = 'Domain-%s' % new_uuid
2245 log.info("Renaming dead domain %s (%d, %s) to %s (%s).",
2246 self.info['name_label'], self.domid, self.info['uuid'],
2247 new_name, new_uuid)
2248 self._unwatchVm()
2249 self._releaseDevices()
2250 # Remove existing vm node in xenstore
2251 self._removeVm()
2252 new_dom_info = self.info.copy()
2253 new_dom_info['name_label'] = self.info['name_label']
2254 new_dom_info['uuid'] = self.info['uuid']
2255 self.info['name_label'] = new_name
2256 self.info['uuid'] = new_uuid
2257 self.vmpath = XS_VMROOT + new_uuid
2258 # Write out new vm node to xenstore
2259 self._storeVmDetails()
2260 self._preserve()
2261 return new_dom_info
2264 def _preserve(self):
2265 log.info("Preserving dead domain %s (%d).", self.info['name_label'],
2266 self.domid)
2267 self._unwatchVm()
2268 self.storeDom('xend/shutdown_completed', 'True')
2269 self._stateSet(DOM_STATE_HALTED)
2272 # Debugging ..
2275 def dumpCore(self, corefile = None):
2276 """Create a core dump for this domain.
2278 @raise: XendError if core dumping failed.
2279 """
2281 if not corefile:
2282 # To prohibit directory traversal
2283 based_name = os.path.basename(self.info['name_label'])
2285 coredir = "/var/xen/dump/%s" % (based_name)
2286 if not os.path.exists(coredir):
2287 try:
2288 mkdir.parents(coredir, stat.S_IRWXU)
2289 except Exception, ex:
2290 log.error("Cannot create directory: %s" % str(ex))
2292 if not os.path.isdir(coredir):
2293 # Fall back to the base directory to dump core
2294 coredir = '/var/xen/dump'
2296 this_time = time.strftime("%Y-%m%d-%H%M.%S", time.localtime())
2297 corefile = "%s/%s-%s.%s.core" % (coredir, this_time,
2298 self.info['name_label'], self.domid)
2300 if os.path.isdir(corefile):
2301 raise XendError("Cannot dump core in a directory: %s" %
2302 corefile)
2304 try:
2305 try:
2306 self._writeVm(DUMPCORE_IN_PROGRESS, 'True')
2307 xc.domain_dumpcore(self.domid, corefile)
2308 except RuntimeError, ex:
2309 corefile_incomp = corefile+'-incomplete'
2310 try:
2311 os.rename(corefile, corefile_incomp)
2312 except:
2313 pass
2315 log.error("core dump failed: id = %s name = %s: %s",
2316 self.domid, self.info['name_label'], str(ex))
2317 raise XendError("Failed to dump core: %s" % str(ex))
2318 finally:
2319 self._removeVm(DUMPCORE_IN_PROGRESS)
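# [Editor's note] A standalone sketch of the default core-file naming
# above: /var/xen/dump/<basename>/<timestamp>-<name>.<domid>.core, where
# basename() strips path components to block directory traversal. The
# helper name is illustrative; 'os' and 'time' are imported at the top of
# this file.
def _sketch_default_corefile(name_label, domid):
    based_name = os.path.basename(name_label)
    coredir = "/var/xen/dump/%s" % based_name
    this_time = time.strftime("%Y-%m%d-%H%M.%S", time.localtime())
    return "%s/%s-%s.%s.core" % (coredir, this_time, name_label, domid)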
2322 # Device creation/deletion functions
2325 def _createDevice(self, deviceClass, devConfig):
2326 return self.getDeviceController(deviceClass).createDevice(devConfig)
2328 def _waitForDevice(self, deviceClass, devid):
2329 return self.getDeviceController(deviceClass).waitForDevice(devid)
2331 def _waitForDeviceUUID(self, dev_uuid):
2332 deviceClass, config = self.info['devices'].get(dev_uuid)
2333 self._waitForDevice(deviceClass, config['devid'])
2335 def _waitForDevice_destroy(self, deviceClass, devid, backpath):
2336 return self.getDeviceController(deviceClass).waitForDevice_destroy(
2337 devid, backpath)
2339 def _reconfigureDevice(self, deviceClass, devid, devconfig):
2340 return self.getDeviceController(deviceClass).reconfigureDevice(
2341 devid, devconfig)
2343 def _createDevices(self):
2344 """Create the devices for a vm.
2346 @raise: VmError for invalid devices
2347 """
2348 if self.image:
2349 self.image.prepareEnvironment()
2351 vscsi_uuidlist = {}
2352 vscsi_devidlist = []
2353 ordered_refs = self.info.ordered_device_refs()
2354 for dev_uuid in ordered_refs:
2355 devclass, config = self.info['devices'][dev_uuid]
2356 if devclass in XendDevices.valid_devices() and devclass != 'vscsi':
2357 log.info("createDevice: %s : %s" % (devclass, scrub_password(config)))
2358 dev_uuid = config.get('uuid')
2360 if devclass == 'pci':
2361 self.pci_dev_check_assignability_and_do_FLR(config)
2363 if devclass != 'pci' or not self.info.is_hvm() :
2364 devid = self._createDevice(devclass, config)
2366 # store devid in XendConfig for caching reasons
2367 if dev_uuid in self.info['devices']:
2368 self.info['devices'][dev_uuid][1]['devid'] = devid
2370 elif devclass == 'vscsi':
2371 vscsi_config = config.get('devs', [])[0]
2372 devid = vscsi_config.get('devid', '')
2373 dev_uuid = config.get('uuid')
2374 vscsi_uuidlist[devid] = dev_uuid
2375 vscsi_devidlist.append(devid)
2377 # The devids must be sorted so the guest sees a stable /dev/sdXX ordering.
2378 if len(vscsi_uuidlist) > 0:
2379 vscsi_devidlist.sort()
2380 for vscsiid in vscsi_devidlist:
2381 dev_uuid = vscsi_uuidlist[vscsiid]
2382 devclass, config = self.info['devices'][dev_uuid]
2383 log.info("createDevice: %s : %s" % (devclass, scrub_password(config)))
2384 dev_uuid = config.get('uuid')
2385 devid = self._createDevice(devclass, config)
2386 # store devid in XendConfig for caching reasons
2387 if dev_uuid in self.info['devices']:
2388 self.info['devices'][dev_uuid][1]['devid'] = devid
2391 if self.image:
2392 self.image.createDeviceModel()
2394 # If there are pass-through devices, fetch the virtual PCI slot info from qemu.
2395 self.pci_device_configure_boot()
2397 def _releaseDevices(self, suspend = False):
2398 """Release all domain's devices. Nothrow guarantee."""
2399 if self.image:
2400 try:
2401 log.debug("Destroying device model")
2402 self.image.destroyDeviceModel()
2403 except Exception, e:
2404 log.exception("Device model destroy failed %s" % str(e))
2405 else:
2406 log.debug("No device model")
2408 log.debug("Releasing devices")
2409 t = xstransact("%s/device" % self.vmpath)
2410 try:
2411 for devclass in XendDevices.valid_devices():
2412 for dev in t.list(devclass):
2413 try:
2414 log.debug("Removing %s", dev);
2415 self.destroyDevice(devclass, dev, False);
2416 except:
2417 # Log and swallow any exceptions in removal --
2418 # there's nothing more we can do.
2419 log.exception("Device release failed: %s; %s; %s",
2420 self.info['name_label'],
2421 devclass, dev)
2422 finally:
2423 t.abort()
2425 def getDeviceController(self, name):
2426 """Get the device controller for this domain, and if it
2427 doesn't exist, create it.
2429 @param name: device class name
2430 @type name: string
2431 @rtype: subclass of DevController
2432 """
2433 if name not in self._deviceControllers:
2434 devController = XendDevices.make_controller(name, self)
2435 if not devController:
2436 raise XendError("Unknown device type: %s" % name)
2437 self._deviceControllers[name] = devController
2439 return self._deviceControllers[name]
2442 # Migration functions (public)
2445 def testMigrateDevices(self, network, dst):
2446 """ Notify all device about intention of migration
2447 @raise: XendError for a device that cannot be migrated
2448 """
2449 for (n, c) in self.info.all_devices_sxpr():
2450 rc = self.migrateDevice(n, c, network, dst, DEV_MIGRATE_TEST, self.getName())
2451 if rc != 0:
2452 raise XendError("Device of type '%s' refuses migration." % n)
2454 def migrateDevices(self, network, dst, step, domName=''):
2455 """Notify the devices about migration
2456 """
2457 ctr = 0
2458 try:
2459 for (dev_type, dev_conf) in self.info.all_devices_sxpr():
2460 self.migrateDevice(dev_type, dev_conf, network, dst,
2461 step, domName)
2462 ctr = ctr + 1
2463 except:
2464 for dev_type, dev_conf in self.info.all_devices_sxpr():
2465 if ctr == 0:
2466 step = step - 1
2467 ctr = ctr - 1
2468 self._recoverMigrateDevice(dev_type, dev_conf, network,
2469 dst, step, domName)
2470 raise
2472 def migrateDevice(self, deviceClass, deviceConfig, network, dst,
2473 step, domName=''):
2474 return self.getDeviceController(deviceClass).migrate(deviceConfig,
2475 network, dst, step, domName)
2477 def _recoverMigrateDevice(self, deviceClass, deviceConfig, network,
2478 dst, step, domName=''):
2479 return self.getDeviceController(deviceClass).recover_migrate(
2480 deviceConfig, network, dst, step, domName)
2482 def setChangeHomeServer(self, chs):
2483 if chs is not None:
2484 self.info['change_home_server'] = bool(chs)
2485 else:
2486 if self.info.has_key('change_home_server'):
2487 del self.info['change_home_server']
2490 ## private:
2492 def _constructDomain(self):
2493 """Construct the domain.
2495 @raise: VmError on error
2496 """
2498 log.debug('XendDomainInfo.constructDomain')
2500 self.shutdownStartTime = None
2501 self.restart_in_progress = False
2503 hap = 0
2504 hvm = self.info.is_hvm()
2505 if hvm:
2506 hap = self.info.is_hap()
2507 info = xc.xeninfo()
2508 if 'hvm' not in info['xen_caps']:
2509 raise VmError("HVM guest support is unavailable: is VT/AMD-V "
2510 "supported by your CPU and enabled in your "
2511 "BIOS?")
2513 # Hack to pre-reserve some memory for initial domain creation.
2514 # There is an implicit memory overhead for any domain creation. This
2515 # overhead is greater for some types of domain than others. For
2516 # example, an x86 HVM domain will have a default shadow-pagetable
2517 # allocation of 4MB. We free up 16MB here to be on the safe side.
2518 balloon.free(16*1024, self) # 16MB should be plenty
2520 ssidref = 0
2521 if security.on() == xsconstants.XS_POLICY_USE:
2522 ssidref = security.calc_dom_ssidref_from_info(self.info)
2523 if security.has_authorization(ssidref) == False:
2524 raise VmError("VM is not authorized to run.")
2526 s3_integrity = 0
2527 if self.info.has_key('s3_integrity'):
2528 s3_integrity = self.info['s3_integrity']
2530 oos = self.info['platform'].get('oos', 1)
2531 oos_off = 1 - int(oos)
2533 # look-up pool id to use
2534 pool_name = self.info['pool_name']
2535 if len(pool_name) == 0:
2536 pool_name = "Pool-0"
2538 pool = XendCPUPool.lookup_pool(pool_name)
2540 if pool is None:
2541 raise VmError("unknown pool %s" % pool_name)
2542 pool_id = pool.query_pool_id()
2543 if pool_id is None:
2544 raise VmError("pool %s not activated" % pool_name)
2546 flags = (int(hvm) << 0) | (int(hap) << 1) | (int(s3_integrity) << 2) | (int(oos_off) << 3)
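# [Editor's note] Bit layout of the creation-flags word built above, read
# directly off the expression (names per the local variables):
#   bit 0: hvm, bit 1: hap, bit 2: s3_integrity, bit 3: oos_off
# so an HVM guest with HAP and nothing else set yields flags == 3.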
2548 try:
2549 self.domid = xc.domain_create(
2550 domid = 0,
2551 ssidref = ssidref,
2552 handle = uuid.fromString(self.info['uuid']),
2553 flags = flags,
2554 #cpupool = pool_id,
2555 target = self.info.target())
2556 except Exception, e:
2557 # We may get here if ACM does not permit the operation
2558 if security.on() == xsconstants.XS_POLICY_ACM:
2559 raise VmError('Domain in conflict set with running domain?')
2560 log.exception(e)
2562 if not self.domid or self.domid < 0:
2563 failmsg = 'Creating domain failed: name=%s' % self.info['name_label']
2564 if self.domid:
2565 failmsg += ', error=%i' % int(self.domid)
2566 raise VmError(failmsg)
2568 try:
2569 xc.cpupool_movedomain(pool_id, self.domid)
2570 except Exception, e:
2571 raise VmError('Moving domain to target pool failed')
2573 self.dompath = GetDomainPath(self.domid)
2575 self._recreateDom()
2577 # Set TSC mode of domain
2578 tsc_mode = self.info["platform"].get("tsc_mode")
2579 if arch.type == "x86" and tsc_mode is not None:
2580 xc.domain_set_tsc_info(self.domid, int(tsc_mode))
2582 # Set timer configuration of domain
2583 timer_mode = self.info["platform"].get("timer_mode")
2584 if hvm and timer_mode is not None:
2585 xc.hvm_set_param(self.domid, HVM_PARAM_TIMER_MODE,
2586 long(timer_mode))
2588 # Set Viridian interface configuration of domain
2589 viridian = self.info["platform"].get("viridian")
2590 if arch.type == "x86" and hvm and viridian is not None:
2591 xc.hvm_set_param(self.domid, HVM_PARAM_VIRIDIAN, long(viridian))
2593 # If nomigrate is set, disable migration
2594 nomigrate = self.info["platform"].get("nomigrate")
2595 if nomigrate is not None and long(nomigrate) != 0:
2596 xc.domain_disable_migrate(self.domid)
2598 # Optionally enable virtual HPET
2599 hpet = self.info["platform"].get("hpet")
2600 if hvm and hpet is not None:
2601 xc.hvm_set_param(self.domid, HVM_PARAM_HPET_ENABLED,
2602 long(hpet))
2604 # Optionally enable periodic vpt aligning
2605 vpt_align = self.info["platform"].get("vpt_align")
2606 if hvm and vpt_align is not None:
2607 xc.hvm_set_param(self.domid, HVM_PARAM_VPT_ALIGN,
2608 long(vpt_align))
2610 # Set maximum number of vcpus in domain
2611 xc.domain_max_vcpus(self.domid, int(self.info['VCPUs_max']))
2613 # Check for cpu_{cap|weight} validity for credit scheduler
2614 if XendNode.instance().xenschedinfo() == 'credit':
2615 cap = self.getCap()
2616 weight = self.getWeight()
2618 assert type(weight) == int
2619 assert type(cap) == int
2621 if weight < 1 or weight > 65535:
2622 raise VmError("Cpu weight out of range, valid values are within range from 1 to 65535")
2624 if cap < 0 or cap > self.getVCpuCount() * 100:
2625 raise VmError("Cpu cap out of range, valid range is from 0 to %s for specified number of vcpus" %
2626 (self.getVCpuCount() * 100))
2628 # Test whether the devices can be assigned with VT-d
2629 self.info.update_platform_pci()
2630 pci = self.info["platform"].get("pci")
2631 pci_str = ''
2632 if pci and len(pci) > 0:
2633 pci = map(lambda x: x[0:4], pci) # strip options
2634 pci_str = str(pci)
2636 # This test is done for both pv and hvm guests.
2637 for p in pci:
2638 pci_name = '%04x:%02x:%02x.%x' % \
2639 (parse_hex(p[0]), parse_hex(p[1]), parse_hex(p[2]), parse_hex(p[3]))
2640 try:
2641 pci_device = PciDevice(parse_pci_name(pci_name))
2642 except Exception, e:
2643 raise VmError("pci: failed to locate device and "+
2644 "parse its resources - "+str(e))
2645 if pci_device.driver!='pciback' and pci_device.driver!='pci-stub':
2646 raise VmError(("pci: PCI Backend and pci-stub don't own device %s")\
2647 %pci_device.name)
2648 if pci_name in get_all_assigned_pci_devices():
2649 raise VmError("failed to assign device %s that has"
2650 " already been assigned to other domain." % pci_name)
2652 if hvm and pci_str != '':
2653 bdf = xc.test_assign_device(0, pci_str)
2654 if bdf != 0:
2655 if bdf == -1:
2656 raise VmError("failed to assign device: maybe the platform"
2657 " doesn't support VT-d, or VT-d isn't enabled"
2658 " properly?")
2659 bus = (bdf >> 16) & 0xff
2660 devfn = (bdf >> 8) & 0xff
2661 dev = (devfn >> 3) & 0x1f
2662 func = devfn & 0x7
2663 raise VmError("failed to assign device %02x:%02x.%x: maybe it has"
2664 " already been assigned to other domain, or maybe"
2665 " it doesn't exist." % (bus, dev, func))
2667 # register the domain in the list
2668 from xen.xend import XendDomain
2669 XendDomain.instance().add_domain(self)
2671 def _introduceDomain(self):
2672 assert self.domid is not None
2673 assert self.store_mfn is not None
2674 assert self.store_port is not None
2676 try:
2677 IntroduceDomain(self.domid, self.store_mfn, self.store_port)
2678 except RuntimeError, exn:
2679 raise XendError(str(exn))
2681 def _setTarget(self, target):
2682 assert self.domid is not None
2684 try:
2685 SetTarget(self.domid, target)
2686 self.storeDom('target', target)
2687 except RuntimeError, exn:
2688 raise XendError(str(exn))
2691 def _setCPUAffinity(self):
2692 """ Repin domain vcpus if a restricted cpus list is provided.
2693 Returns the chosen node number.
2694 """
2696 def has_cpus():
2697 if self.info['cpus'] is not None:
2698 for c in self.info['cpus']:
2699 if c:
2700 return True
2701 return False
2703 def has_cpumap():
2704 if self.info.has_key('vcpus_params'):
2705 for k, v in self.info['vcpus_params'].items():
2706 if k.startswith('cpumap'):
2707 return True
2708 return False
2710 index = 0
2711 if has_cpumap():
2712 for v in range(0, self.info['VCPUs_max']):
2713 if self.info['vcpus_params'].has_key('cpumap%i' % v):
2714 cpumask = map(int, self.info['vcpus_params']['cpumap%i' % v].split(','))
2715 xc.vcpu_setaffinity(self.domid, v, cpumask)
2716 elif has_cpus():
2717 for v in range(0, self.info['VCPUs_max']):
2718 if self.info['cpus'][v]:
2719 xc.vcpu_setaffinity(self.domid, v, self.info['cpus'][v])
2720 else:
2721 def find_relaxed_node(node_list):
2722 import sys
2723 nr_nodes = info['max_node_index'] + 1
2724 if node_list is None:
2725 node_list = range(0, nr_nodes)
2726 nodeload = [0]
2727 nodeload = nodeload * nr_nodes
2728 from xen.xend import XendDomain
2729 doms = XendDomain.instance().list('all')
2730 for dom in filter (lambda d: d.domid != self.domid, doms):
2731 cpuinfo = dom.getVCPUInfo()
2732 for vcpu in sxp.children(cpuinfo, 'vcpu'):
2733 if sxp.child_value(vcpu, 'online') == 0: continue
2734 cpumap = list(sxp.child_value(vcpu,'cpumap'))
2735 for i in range(0, nr_nodes):
2736 node_cpumask = node_to_cpu[i]
2737 for j in node_cpumask:
2738 if j in cpumap:
2739 nodeload[i] += 1
2740 break
2741 for i in range(0, nr_nodes):
2742 if len(node_to_cpu[i]) == 0:
2743 nodeload[i] += 8
2744 else:
2745 nodeload[i] = int(nodeload[i] * 16 / len(node_to_cpu[i]))
2746 if i not in node_list:
2747 nodeload[i] += 8
2748 return map(lambda x: x[0], sorted(enumerate(nodeload), key=lambda x:x[1]))
2750 info = xc.numainfo()
2751 if info['max_node_index'] > 0 and XendCPUPool.number_of_pools() < 2:
2752 node_memory_list = info['node_memfree']
2753 node_to_cpu = []
2754 for i in range(0, info['max_node_index'] + 1):
2755 node_to_cpu.append([])
2756 for cpu, node in enumerate(xc.topologyinfo()['cpu_to_node']):
2757 node_to_cpu[node].append(cpu)
2758 needmem = self.image.getRequiredAvailableMemory(self.info['memory_dynamic_max']) / 1024
2759 candidate_node_list = []
2760 for i in range(0, info['max_node_index'] + 1):
2761 if node_memory_list[i] >= needmem and len(node_to_cpu[i]) > 0:
2762 candidate_node_list.append(i)
2763 best_node = find_relaxed_node(candidate_node_list)[0]
2764 cpumask = node_to_cpu[best_node]
2765 best_nodes = find_relaxed_node(filter(lambda x: x != best_node, range(0,info['max_node_index']+1)))
2766 for node_idx in best_nodes:
2767 if len(cpumask) >= self.info['VCPUs_max']:
2768 break
2769 cpumask = cpumask + node_to_cpu[node_idx]
2770 log.debug("allocating additional NUMA node %d", node_idx)
2771 for v in range(0, self.info['VCPUs_max']):
2772 xc.vcpu_setaffinity(self.domid, v, cpumask)
2773 return index
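# [Editor's note] A condensed sketch of find_relaxed_node()'s scoring as
# used above: count how many foreign online VCPUs can run on each node,
# normalise by the node's CPU count, penalise CPU-less or excluded nodes,
# and rank nodes least-loaded first. All names are illustrative; '//'
# mirrors the Python 2 integer division used above.
def _sketch_rank_nodes(vcpu_load, node_to_cpu, allowed_nodes):
    load = list(vcpu_load)
    for i in range(len(load)):
        if len(node_to_cpu[i]) == 0:
            load[i] += 8                  # no CPUs: push to the back
        else:
            load[i] = load[i] * 16 // len(node_to_cpu[i])
            if i not in allowed_nodes:
                load[i] += 8              # excluded nodes rank lower
    return [i for i, _ in sorted(enumerate(load), key=lambda x: x[1])]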
2775 def _freeDMAmemory(self, node):
2777 # If we are PV and have PCI devices the guest will
2778 # turn on a SWIOTLB. The SWIOTLB _MUST_ be located in the DMA32
2779 # zone (under 4GB). To do so, we need to balloon down Dom0 to where
2780 # there is enough (64MB) memory under the 4GB mark. This ballooning
2781 # might take more memory out than just 64MB though :-(
2782 if not self.info.is_pv_and_has_pci():
2783 return
2785 retries = 2000
2786 ask_for_mem = 0
2787 need_mem = 0
2788 try:
2789 while (retries > 0):
2790 physinfo = xc.physinfo()
2791 free_mem = physinfo['free_memory']
2792 max_node_id = physinfo['max_node_id']
2793 node_to_dma32_mem = physinfo['node_to_dma32_mem']
2794 if (node > max_node_id):
2795 return
2796 # Extra 2MB above 64MB seems to do the trick.
2797 need_mem = 64 * 1024 + 2048 - node_to_dma32_mem[node]
2798 # our starting point. We ask just for the difference, so as to
2799 # have an extra 64MB under 4GB.
2800 ask_for_mem = max(need_mem, ask_for_mem)
2801 if (need_mem > 0):
2802 log.debug('_freeDMAmemory (%d) Need %dKiB DMA memory. '
2803 'Asking for %dKiB', retries, need_mem,
2804 ask_for_mem)
2806 balloon.free(ask_for_mem, self)
2807 ask_for_mem = ask_for_mem + 2048
2808 else:
2809 # OK. We got enough DMA memory.
2810 break
2811 retries = retries - 1
2812 except:
2813 # This is best-effort after all.
2814 need_mem = max(1, need_mem)
2815 pass
2817 if (need_mem > 0):
2818 log.warn('We tried our best to balloon down DMA memory to '
2819 'accommodate your PV guest. We need %dKiB extra memory.',
2820 need_mem)
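# [Editor's note] The loop above keeps ballooning dom0 until the node has
# roughly 64MB (plus 2MB slack) free below 4GB. A sketch of the shortfall
# computation in KiB (helper name and sample value are illustrative):
def _sketch_dma32_shortfall_kib(node_free_dma32_kib):
    target = 64 * 1024 + 2048           # 64MB + 2MB slack, in KiB
    return max(0, target - node_free_dma32_kib)
# _sketch_dma32_shortfall_kib(50 * 1024) == 16384, i.e. 16MB still needed.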
2822 def _setSchedParams(self):
2823 if XendNode.instance().xenschedinfo() == 'credit':
2824 from xen.xend import XendDomain
2825 XendDomain.instance().domain_sched_credit_set(self.getDomid(),
2826 self.getWeight(),
2827 self.getCap())
2828 elif XendNode.instance().xenschedinfo() == 'credit2':
2829 from xen.xend import XendDomain
2830 XendDomain.instance().domain_sched_credit2_set(self.getDomid(),
2831 self.getWeight())
2833 def _initDomain(self):
2834 log.debug('XendDomainInfo.initDomain: %s %s',
2835 self.domid,
2836 self.info['vcpus_params']['weight'])
2838 self._configureBootloader()
2840 try:
2841 self.image = image.create(self, self.info)
2843 # repin domain vcpus if a restricted cpus list is provided
2844 # this is done prior to memory allocation to aid in memory
2845 # distribution for NUMA systems.
2846 node = self._setCPUAffinity()
2848 # Set scheduling parameters.
2849 self._setSchedParams()
2851 # Use architecture- and image-specific calculations to determine
2852 # the various headrooms necessary, given the raw configured
2853 # values. maxmem, memory, and shadow are all in KiB,
2854 # but memory_static_max etc. are all stored in bytes now.
2855 memory = self.image.getRequiredAvailableMemory(
2856 self.info['memory_dynamic_max'] / 1024)
2857 maxmem = self.image.getRequiredAvailableMemory(
2858 self.info['memory_static_max'] / 1024)
2859 shadow = self.image.getRequiredShadowMemory(
2860 self.info['shadow_memory'] * 1024,
2861 self.info['memory_static_max'] / 1024)
2863 log.debug("_initDomain:shadow_memory=0x%x, memory_static_max=0x%x, memory_static_min=0x%x.", self.info['shadow_memory'], self.info['memory_static_max'], self.info['memory_static_min'],)
2864 # Round shadow up to a multiple of a MiB, as shadow_mem_control
2865 # takes MiB and we must not round down and end up under-providing.
2866 shadow = ((shadow + 1023) / 1024) * 1024
2868 # set memory limit
2869 xc.domain_setmaxmem(self.domid, maxmem)
2871 vtd_mem = 0
2872 info = xc.physinfo()
2873 if 'hvm_directio' in info['virt_caps']:
2874 # Reserve 1 page per MiB of RAM for separate VT-d page table.
2875 vtd_mem = 4 * (self.info['memory_static_max'] / 1024 / 1024)
2876 # Round vtd_mem up to a multiple of a MiB.
2877 vtd_mem = ((vtd_mem + 1023) / 1024) * 1024
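# [Editor's note] Both roundings above are "round a KiB value up to a
# whole MiB": ((x + 1023) / 1024) * 1024 under Python 2 integer division.
# A tiny sketch, with '//' to make the integer division explicit:
def _sketch_round_up_to_mib(kib):
    return ((kib + 1023) // 1024) * 1024
# _sketch_round_up_to_mib(1) == 1024; _sketch_round_up_to_mib(2048) == 2048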
2879 self.guest_bitsize = self.image.getBitSize()
2880 # Make sure there's enough RAM available for the domain
2881 balloon.free(memory + shadow + vtd_mem, self)
2883 # Set up the shadow memory
2884 shadow_cur = xc.shadow_mem_control(self.domid, shadow / 1024)
2885 self.info['shadow_memory'] = shadow_cur
2887 # machine address size
2888 if self.info.has_key('machine_address_size'):
2889 log.debug("_initDomain: setting maximum machine address size %d" % self.info['machine_address_size'])
2890 xc.domain_set_machine_address_size(self.domid, self.info['machine_address_size'])
2892 if self.info.has_key('suppress_spurious_page_faults') and self.info['suppress_spurious_page_faults']:
2893 log.debug("_initDomain: suppressing spurious page faults")
2894 xc.domain_suppress_spurious_page_faults(self.domid)
2896 self._createChannels()
2898 channel_details = self.image.createImage()
2900 self.store_mfn = channel_details['store_mfn']
2901 if 'console_mfn' in channel_details:
2902 self.console_mfn = channel_details['console_mfn']
2903 if 'notes' in channel_details:
2904 self.info.set_notes(channel_details['notes'])
2905 if 'native_protocol' in channel_details:
2906 self.native_protocol = channel_details['native_protocol']
2908 self._introduceDomain()
2909 if self.info.target():
2910 self._setTarget(self.info.target())
2912 self._freeDMAmemory(node)
2914 self._createDevices()
2916 self.image.cleanupTmpImages()
2918 self.info['start_time'] = time.time()
2920 self._stateSet(DOM_STATE_RUNNING)
2921 except VmError, exn:
2922 log.exception("XendDomainInfo.initDomain: exception occurred")
2923 if self.image:
2924 self.image.cleanupTmpImages()
2925 raise exn
2926 except RuntimeError, exn:
2927 log.exception("XendDomainInfo.initDomain: exception occurred")
2928 if self.image:
2929 self.image.cleanupTmpImages()
2930 raise VmError(str(exn))
2933 def cleanupDomain(self):
2934 """Cleanup domain resources; release devices. Idempotent. Nothrow
2935 guarantee."""
2937 self.refresh_shutdown_lock.acquire()
2938 try:
2939 self.unwatchShutdown()
2940 self._releaseDevices()
2941 bootloader_tidy(self)
2943 if self.image:
2944 self.image = None
2946 try:
2947 self._removeDom()
2948 except:
2949 log.exception("Removing domain path failed.")
2951 self._stateSet(DOM_STATE_HALTED)
2952 self.domid = None # Do not push into _stateSet()!
2953 finally:
2954 self.refresh_shutdown_lock.release()
2957 def unwatchShutdown(self):
2958 """Remove the watch on the domain's control/shutdown node, if any.
2959 Idempotent. Nothrow guarantee. Expects to be protected by the
2960 refresh_shutdown_lock."""
2962 try:
2963 try:
2964 if self.shutdownWatch:
2965 self.shutdownWatch.unwatch()
2966 finally:
2967 self.shutdownWatch = None
2968 except:
2969 log.exception("Unwatching control/shutdown failed.")
2971 def waitForShutdown(self):
2972 self.state_updated.acquire()
2973 try:
2974 while self._stateGet() in (DOM_STATE_RUNNING,DOM_STATE_PAUSED):
2975 self.state_updated.wait(timeout=1.0)
2976 finally:
2977 self.state_updated.release()
2979 def waitForSuspend(self):
2980 """Wait for the guest to respond to a suspend request by
2981 shutting down. If the guest hasn't re-written control/shutdown
2982 after a certain amount of time, it's obviously not listening and
2983 won't suspend, so we give up. HVM guests with no PV drivers
2984 should already be shut down.
2985 """
2986 state = "suspend"
2987 nr_tries = 60
2989 self.state_updated.acquire()
2990 try:
2991 while self._stateGet() in (DOM_STATE_RUNNING,DOM_STATE_PAUSED):
2992 self.state_updated.wait(1.0)
2993 if state == "suspend":
2994 if nr_tries == 0:
2995 msg = ('Timeout waiting for domain %s to suspend'
2996 % self.domid)
2997 self._writeDom('control/shutdown', '')
2998 raise XendError(msg)
2999 state = self.readDom('control/shutdown')
3000 nr_tries -= 1
3001 finally:
3002 self.state_updated.release()
3005 # TODO: recategorise - called from XendCheckpoint
3008 def completeRestore(self, store_mfn, console_mfn):
3010 log.debug("XendDomainInfo.completeRestore")
3012 self.store_mfn = store_mfn
3013 self.console_mfn = console_mfn
3015 self._introduceDomain()
3016 self.image = image.create(self, self.info)
3017 if self.image:
3018 self.image.createDeviceModel(True)
3019 self._storeDomDetails()
3020 self._registerWatches()
3021 self.refreshShutdown()
3023 log.debug("XendDomainInfo.completeRestore done")
3026 def _endRestore(self):
3027 self.setResume(False)
3030 # VM Destroy
3033 def _prepare_phantom_paths(self):
3034 # get associated devices to destroy
3035 # build list of phantom devices to be removed after normal devices
3036 plist = []
3037 if self.domid is not None:
3038 t = xstransact("%s/device/vbd" % GetDomainPath(self.domid))
3039 try:
3040 for dev in t.list():
3041 backend_phantom_vbd = xstransact.Read("%s/device/vbd/%s/phantom_vbd" \
3042 % (self.dompath, dev))
3043 if backend_phantom_vbd is not None:
3044 frontend_phantom_vbd = xstransact.Read("%s/frontend" \
3045 % backend_phantom_vbd)
3046 plist.append(backend_phantom_vbd)
3047 plist.append(frontend_phantom_vbd)
3048 finally:
3049 t.abort()
3050 return plist
3052 def _cleanup_phantom_devs(self, plist):
3053 # remove phantom devices
3054 if plist:
3055 time.sleep(2)
3056 for paths in plist:
3057 if paths.find('backend') != -1:
3058 # Modify online status /before/ updating state (latter is watched by
3059 # drivers, so this ordering avoids a race).
3060 xstransact.Write(paths, 'online', "0")
3061 xstransact.Write(paths, 'state', str(xenbusState['Closing']))
3062 # force
3063 xstransact.Remove(paths)
3065 def destroy(self):
3066 """Cleanup VM and destroy domain. Nothrow guarantee."""
3068 if self.domid is None:
3069 return
3070 from xen.xend import XendDomain
3071 log.debug("XendDomainInfo.destroy: domid=%s", str(self.domid))
3073 paths = self._prepare_phantom_paths()
3075 if self.dompath is not None:
3076 try:
3077 xc.domain_destroy_hook(self.domid)
3078 xc.domain_pause(self.domid)
3079 do_FLR(self.domid, self.info.is_hvm())
3080 xc.domain_destroy(self.domid)
3081 for state in DOM_STATES_OLD:
3082 self.info[state] = 0
3083 self._stateSet(DOM_STATE_HALTED)
3084 except:
3085 log.exception("XendDomainInfo.destroy: domain destruction failed.")
3087 XendDomain.instance().remove_domain(self)
3088 self.cleanupDomain()
3090 if self.info.is_hvm() or self.guest_bitsize != 32:
3091 if self.alloc_mem:
3092 import MemoryPool
3093 log.debug("%s KiB need to add to Memory pool" %self.alloc_mem)
3094 MemoryPool.instance().increase_memory(self.alloc_mem)
3096 self._cleanup_phantom_devs(paths)
3097 self._cleanupVm()
3099 if ("transient" in self.info["other_config"] and \
3100 bool(self.info["other_config"]["transient"])) or \
3101 ("change_home_server" in self.info and \
3102 bool(self.info["change_home_server"])):
3103 XendDomain.instance().domain_delete_by_dominfo(self)
3106 def resetDomain(self):
3107 log.debug("XendDomainInfo.resetDomain(%s)", str(self.domid))
3109 old_domid = self.domid
3110 prev_vm_xend = self._listRecursiveVm('xend')
3111 new_dom_info = self.info
3112 try:
3113 self._unwatchVm()
3114 self.destroy()
3116 new_dom = None
3117 try:
3118 from xen.xend import XendDomain
3119 new_dom_info['domid'] = None
3120 new_dom = XendDomain.instance().domain_create_from_dict(
3121 new_dom_info)
3122 for x in prev_vm_xend[0][1]:
3123 new_dom._writeVm('xend/%s' % x[0], x[1])
3124 new_dom.waitForDevices()
3125 new_dom.unpause()
3126 except:
3127 if new_dom:
3128 new_dom.destroy()
3129 raise
3130 except:
3131 log.exception('Failed to reset domain %s.', str(old_domid))
3134 def resumeDomain(self):
3135 log.debug("XendDomainInfo.resumeDomain(%s)", str(self.domid))
3137 # resume a suspended domain (e.g. after live checkpoint, or after
3138 # a later error during save or migrate); checks that the domain
3139 # is currently suspended first, so it is safe to call from anywhere
3141 xeninfo = dom_get(self.domid)
3142 if xeninfo is None:
3143 return
3144 if not xeninfo['shutdown']:
3145 return
3146 reason = shutdown_reason(xeninfo['shutdown_reason'])
3147 if reason != 'suspend':
3148 return
3150 try:
3151 # could also fetch a parsed note from xenstore
3152 fast = self.info.get_notes().get('SUSPEND_CANCEL') and 1 or 0
3153 if not fast:
3154 self._releaseDevices()
3155 self.testDeviceComplete()
3156 self.testvifsComplete()
3157 log.debug("XendDomainInfo.resumeDomain: devices released")
3159 self._resetChannels()
3161 self._removeDom('control/shutdown')
3162 self._removeDom('device-misc/vif/nextDeviceID')
3164 self._createChannels()
3165 self._introduceDomain()
3166 self._storeDomDetails()
3168 self._createDevices()
3169 log.debug("XendDomainInfo.resumeDomain: devices created")
3171 xc.domain_resume(self.domid, fast)
3172 ResumeDomain(self.domid)
3173 except:
3174 log.exception("XendDomainInfo.resume: xc.domain_resume failed on domain %s." % (str(self.domid)))
3175 self.image.resumeDeviceModel()
3176 log.debug("XendDomainInfo.resumeDomain: completed")
3180 # Channels for xenstore and console
3183 def _createChannels(self):
3184 """Create the channels to the domain.
3185 """
3186 self.store_port = self._createChannel()
3187 self.console_port = self._createChannel()
3190 def _createChannel(self):
3191 """Create an event channel to the domain.
3192 """
3193 try:
3194 if self.domid != None:
3195 return xc.evtchn_alloc_unbound(domid = self.domid,
3196 remote_dom = 0)
3197 except:
3198 log.exception("Exception in alloc_unbound(%s)", str(self.domid))
3199 raise
3201 def _resetChannels(self):
3202 """Reset all event channels in the domain.
3203 """
3204 try:
3205 if self.domid != None:
3206 return xc.evtchn_reset(dom = self.domid)
3207 except:
3208 log.exception("Exception in evtcnh_reset(%s)", str(self.domid))
3209 raise
3213 # Bootloader configuration
3216 def _configureBootloader(self):
3217 """Run the bootloader if we're configured to do so."""
3219 blexec = self.info['PV_bootloader']
3220 bootloader_args = self.info['PV_bootloader_args']
3221 kernel = self.info['PV_kernel']
3222 ramdisk = self.info['PV_ramdisk']
3223 args = self.info['PV_args']
3224 boot = self.info['HVM_boot_policy']
3226 if boot:
3227 # HVM booting.
3228 pass
3229 elif not blexec and kernel:
3230 # Boot from dom0. Nothing left to do -- the kernel and ramdisk
3231 # will be picked up by image.py.
3232 pass
3233 else:
3234 # Boot using bootloader
3235 if not blexec or blexec == 'pygrub':
3236 blexec = auxbin.pathTo('pygrub')
3238 blcfg = None
3239 disks = [x for x in self.info['vbd_refs']
3240 if self.info['devices'][x][1]['bootable']]
3242 if not disks:
3243 msg = "Had a bootloader specified, but no disks are bootable"
3244 log.error(msg)
3245 raise VmError(msg)
3247 devinfo = self.info['devices'][disks[0]]
3248 devtype = devinfo[0]
3249 disk = devinfo[1]['uname']
3251 (fn, types) = parse_uname(disk)
3252 def _shouldMount(types):
3253 if types[0] in ('file', 'phy'):
3254 return False
3255 if types[0] in ('tap', 'tap2'):
3256 if types[1] in ('aio', 'sync'):
3257 return False
3258 else:
3259 return True
3260 return os.access('/etc/xen/scripts/block-%s' % types[0], os.X_OK)
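# [Editor's note] Usage of _shouldMount above: 'file' and 'phy' disks (and
# tap/tap2 in 'aio' or 'sync' mode) can be read directly, so they are not
# mounted; other tap formats (e.g. qcow), and any type with an executable
# /etc/xen/scripts/block-<type> hotplug script, are mounted on the
# loopback device first so the bootloader can read them.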
3262 mounted = _shouldMount(types)
3263 mounted_vbd_uuid = 0
3264 if mounted:
3265 # This is a file, not a device. pygrub can cope with a
3266 # file if it's raw, but if it's QCOW or other such formats
3267 # used through blktap, then we need to mount it first.
3269 log.info("Mounting %s on %s." %
3270 (fn, BOOTLOADER_LOOPBACK_DEVICE))
3272 vbd = {
3273 'mode': 'RO',
3274 'device': BOOTLOADER_LOOPBACK_DEVICE,
3275 }
3277 from xen.xend import XendDomain
3278 dom0 = XendDomain.instance().privilegedDomain()
3279 mounted_vbd_uuid = dom0.create_vbd(vbd, disk)
3280 dom0._waitForDeviceUUID(mounted_vbd_uuid)
3281 fn = BOOTLOADER_LOOPBACK_DEVICE
3283 try:
3284 blcfg = bootloader(blexec, fn, self, False,
3285 bootloader_args, kernel, ramdisk, args)
3286 finally:
3287 if mounted:
3288 log.info("Unmounting %s from %s." %
3289 (fn, BOOTLOADER_LOOPBACK_DEVICE))
3290 _, vbd_info = dom0.info['devices'][mounted_vbd_uuid]
3291 dom0.destroyDevice(dom0.getBlockDeviceClass(vbd_info['devid']),
3292 BOOTLOADER_LOOPBACK_DEVICE, force = True)
3294 if blcfg is None:
3295 msg = "Had a bootloader specified, but can't find disk"
3296 log.error(msg)
3297 raise VmError(msg)
3299 self.info.update_with_image_sxp(blcfg, True)
3303 # VM Functions
3306 def _readVMDetails(self, params):
3307 """Read the specified parameters from the store.
3308 """
3309 try:
3310 return self._gatherVm(*params)
3311 except ValueError:
3312 # One of the int/float entries in params has a corresponding store
3313 # entry that is invalid. We recover, because older versions of
3314 # Xend may have put the entry there (memory/target, for example),
3315 # but this is in general a bad situation to have reached.
3316 log.exception(
3317 "Store corrupted at %s! Domain %d's configuration may be "
3318 "affected.", self.vmpath, self.domid)
3319 return []
3321 def _cleanupVm(self):
3322 """Cleanup VM resources. Idempotent. Nothrow guarantee."""
3324 self._unwatchVm()
3326 try:
3327 self._removeVm()
3328 except:
3329 log.exception("Removing VM path failed.")
3332 def checkLiveMigrateMemory(self):
3333 """ Make sure there's enough memory to migrate this domain """
3334 overhead_kb = 0
3335 if arch.type == "x86":
3336 # 1MiB per vcpu plus 4KiB per MiB of RAM. This is higher than
3337 # the minimum that Xen would allocate if no value were given.
3338 overhead_kb = self.info['VCPUs_max'] * 1024 + \
3339 (self.info['memory_static_max'] / 1024 / 1024) * 4
3340 overhead_kb = ((overhead_kb + 1023) / 1024) * 1024
3341 # The domain might already have some shadow memory
3342 overhead_kb -= xc.shadow_mem_control(self.domid) * 1024
3343 if overhead_kb > 0:
3344 balloon.free(overhead_kb, self)
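# [Editor's note] A worked sketch of the x86 overhead estimate above, in
# KiB: 1MiB per VCPU plus 4KiB per MiB of guest RAM, rounded up to a whole
# MiB. The helper name is illustrative; memory_static_max is stored in
# bytes, and '//' mirrors the Python 2 integer division used above.
def _sketch_migrate_overhead_kib(vcpus_max, memory_static_max_bytes):
    overhead = vcpus_max * 1024 + \
               (memory_static_max_bytes // 1024 // 1024) * 4
    return ((overhead + 1023) // 1024) * 1024
# 2 VCPUs and a 1GiB guest: 2*1024 + 1024*4 == 6144 KiB, already aligned.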
3346 def _unwatchVm(self):
3347 """Remove the watch on the VM path, if any. Idempotent. Nothrow
3348 guarantee."""
3349 try:
3350 try:
3351 if self.vmWatch:
3352 self.vmWatch.unwatch()
3353 finally:
3354 self.vmWatch = None
3355 except:
3356 log.exception("Unwatching VM path failed.")
3358 def testDeviceComplete(self):
3359 """ For Block IO migration safety we must ensure that
3360 the device has shutdown correctly, i.e. all blocks are
3361 flushed to disk
3362 """
3363 start = time.time()
3364 while True:
3365 test = 0
3366 diff = time.time() - start
3367 vbds = self.getDeviceController('vbd').deviceIDs()
3368 taps = self.getDeviceController('tap').deviceIDs()
3369 tap2s = self.getDeviceController('tap2').deviceIDs()
3370 for i in vbds + taps + tap2s:
3371 test = 1
3372 log.info("Dev %s still active, looping...", i)
3373 time.sleep(0.1)
3375 if test == 0:
3376 break
3377 if diff >= MIGRATE_TIMEOUT:
3378 log.info("Dev still active but hit max loop timeout")
3379 break
3381 def testvifsComplete(self):
3382 """ In case vifs are released and then created for the same
3383 domain, we need to wait the device shut down.
3384 """
3385 start = time.time()
3386 while True:
3387 test = 0
3388 diff = time.time() - start
3389 for i in self.getDeviceController('vif').deviceIDs():
3390 test = 1
3391 log.info("Dev %s still active, looping...", i)
3392 time.sleep(0.1)
3394 if test == 0:
3395 break
3396 if diff >= MIGRATE_TIMEOUT:
3397 log.info("Dev still active but hit max loop timeout")
3398 break
3400 def _storeVmDetails(self):
3401 to_store = {}
3403 for key in XendConfig.LEGACY_XENSTORE_VM_PARAMS:
3404 info_key = XendConfig.LEGACY_CFG_TO_XENAPI_CFG.get(key, key)
3405 if self._infoIsSet(info_key):
3406 to_store[key] = str(self.info[info_key])
3408 if self._infoIsSet("static_memory_min"):
3409 to_store["memory"] = str(self.info["static_memory_min"])
3410 if self._infoIsSet("static_memory_max"):
3411 to_store["maxmem"] = str(self.info["static_memory_max"])
3413 image_sxpr = self.info.image_sxpr()
3414 if image_sxpr:
3415 to_store['image'] = sxp.to_string(image_sxpr)
3417 if not self._readVm('xend/restart_count'):
3418 to_store['xend/restart_count'] = str(0)
3420 log.debug("Storing VM details: %s", scrub_password(to_store))
3422 self._writeVm(to_store)
3423 self._setVmPermissions()
3425 def _setVmPermissions(self):
3426 """Allow the guest domain to read its UUID. We don't allow it to
3427 access any other entry, for security."""
3428 xstransact.SetPermissions('%s/uuid' % self.vmpath,
3429 { 'dom' : self.domid,
3430 'read' : True,
3431 'write' : False })
3434 # Utility functions
3437 def __getattr__(self, name):
3438 if name == "state":
3439 log.warn("Somebody tried to read XendDomainInfo.state... should us _stateGet()!!!")
3440 log.warn("".join(traceback.format_stack()))
3441 return self._stateGet()
3442 else:
3443 raise AttributeError(name)
3445 def __setattr__(self, name, value):
3446 if name == "state":
3447 log.warn("Somebody tried to set XendDomainInfo.state... should us _stateGet()!!!")
3448 log.warn("".join(traceback.format_stack()))
3449 self._stateSet(value)
3450 else:
3451 self.__dict__[name] = value
3453 def _stateSet(self, state):
3454 self.state_updated.acquire()
3455 try:
3456 # TODO Not sure this is correct...
3457 # _stateGet is live now. Why not fire event
3458 # even when it hasn't changed?
3459 if self._stateGet() != state:
3460 self.state_updated.notifyAll()
3461 import XendAPI
3462 XendAPI.event_dispatch('mod', 'VM', self.info['uuid'],
3463 'power_state')
3464 finally:
3465 self.state_updated.release()
3467 def _stateGet(self):
3468 # Let's try to reconstitute the state from xc.
3469 # First, try to get the domain info
3470 # from xc - this will tell us if the domain
3471 # exists.
3472 info = dom_get(self.getDomid())
3473 if info is None or info['shutdown']:
3474 # We are either HALTED or SUSPENDED
3475 # check saved image exists
3476 from xen.xend import XendDomain
3477 managed_config_path = \
3478 XendDomain.instance()._managed_check_point_path( \
3479 self.get_uuid())
3480 if os.path.exists(managed_config_path):
3481 return XEN_API_VM_POWER_STATE_SUSPENDED
3482 else:
3483 return XEN_API_VM_POWER_STATE_HALTED
3484 elif info['crashed']:
3485 # Crashed
3486 return XEN_API_VM_POWER_STATE_CRASHED
3487 else:
3488 # We are either RUNNING or PAUSED
3489 if info['paused']:
3490 return XEN_API_VM_POWER_STATE_PAUSED
3491 else:
3492 return XEN_API_VM_POWER_STATE_RUNNING
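# [Editor's note] The decision tree above, condensed (hypothetical helper:
# 'info' is a dom_get()-style dict, and has_checkpoint stands in for the
# saved-image test against the managed checkpoint path):
def _sketch_power_state(info, has_checkpoint):
    if info is None or info['shutdown']:
        return has_checkpoint and 'Suspended' or 'Halted'
    if info['crashed']:
        return 'Crashed'
    return info['paused'] and 'Paused' or 'Running'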
3494 def _infoIsSet(self, name):
3495 return name in self.info and self.info[name] is not None
3497 def _checkName(self, name):
3498 """Check if a vm name is valid. Valid names contain alphabetic
3499 characters, digits, or characters in '_-.:+'.
3500 The same name cannot be used for more than one vm at the same time.
3502 @param name: name
3503 @raise: VmError if invalid
3504 """
3505 from xen.xend import XendDomain
3507 if name is None or name == '':
3508 raise VmError('Missing VM Name')
3510 if not re.search(r'^[A-Za-z0-9_\-\.\:\+]+$', name):
3511 raise VmError('Invalid VM Name')
3513 dom = XendDomain.instance().domain_lookup_nr(name)
3514 if dom and dom.info['uuid'] != self.info['uuid']:
3515 raise VmError("VM name '%s' already exists%s" %
3516 (name,
3517 dom.domid is not None and
3518 (" as domain %s" % str(dom.domid)) or ""))
3521 def update(self, info = None, refresh = True, transaction = None):
3522 """Update with info from xc.domain_getinfo().
3523 """
3524 log.trace("XendDomainInfo.update(%s) on domain %s", info,
3525 str(self.domid))
3527 if not info:
3528 info = dom_get(self.domid)
3529 if not info:
3530 return
3532 if info["maxmem_kb"] < 0:
3533 info["maxmem_kb"] = XendNode.instance() \
3534 .physinfo_dict()['total_memory'] * 1024
3536 # make sure state is reset for info
3537 # TODO: we should eventually get rid of old_dom_states
3539 self.info.update_config(info)
3540 self._update_consoles(transaction)
3542 if refresh:
3543 self.refreshShutdown(info)
3545 log.trace("XendDomainInfo.update done on domain %s: %s",
3546 str(self.domid), self.info)
3548 def sxpr(self, ignore_store = False, legacy_only = True):
3549 result = self.info.to_sxp(domain = self,
3550 ignore_devices = ignore_store,
3551 legacy_only = legacy_only)
3553 return result
3555 # Xen API
3556 # ----------------------------------------------------------------
3558 def get_uuid(self):
3559 dom_uuid = self.info.get('uuid')
3560 if not dom_uuid: # if it doesn't exist, make one up
3561 dom_uuid = uuid.createString()
3562 self.info['uuid'] = dom_uuid
3563 return dom_uuid
3565 def get_memory_static_max(self):
3566 return self.info.get('memory_static_max', 0)
3567 def get_memory_static_min(self):
3568 return self.info.get('memory_static_min', 0)
3569 def get_memory_dynamic_max(self):
3570 return self.info.get('memory_dynamic_max', 0)
3571 def get_memory_dynamic_min(self):
3572 return self.info.get('memory_dynamic_min', 0)
3574 # only update memory-related config values if they maintain sanity
3575 def _safe_set_memory(self, key, newval):
3576 oldval = self.info.get(key, 0)
3577 try:
3578 self.info[key] = newval
3579 self.info._memory_sanity_check()
3580 except Exception, ex:
3581 self.info[key] = oldval
3582 raise
3584 def set_memory_static_max(self, val):
3585 self._safe_set_memory('memory_static_max', val)
3586 def set_memory_static_min(self, val):
3587 self._safe_set_memory('memory_static_min', val)
3588 def set_memory_dynamic_max(self, val):
3589 self._safe_set_memory('memory_dynamic_max', val)
3590 def set_memory_dynamic_min(self, val):
3591 self._safe_set_memory('memory_dynamic_min', val)
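# Usage sketch: every setter above funnels through _safe_set_memory(), so
# a value that fails XendConfig's _memory_sanity_check() (presumably
# enforcing static_min <= dynamic_min <= dynamic_max <= static_max) is
# rolled back before the exception propagates, e.g.:
#   try:
#       dom.set_memory_dynamic_max(bad_val)   # hypothetical caller
#   except Exception:
#       pass                                  # self.info is unchanged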
3593 def get_vcpus_params(self):
3594 if self.getDomid() is None:
3595 return self.info['vcpus_params']
3597 retval = xc.sched_credit_domain_get(self.getDomid())
3598 return retval
3599 def get_cpu_pool(self):
3600 if self.getDomid() is None:
3601 return None
3602 xeninfo = dom_get(self.domid)
3603 return xeninfo['cpupool']
3604 def get_power_state(self):
3605 return XEN_API_VM_POWER_STATE[self._stateGet()]
3606 def get_platform(self):
3607 return self.info.get('platform', {})
3608 def get_pci_bus(self):
3609 return self.info.get('pci_bus', '')
3610 def get_tools_version(self):
3611 return self.info.get('tools_version', {})
3612 def get_metrics(self):
3613 return self.metrics.get_uuid()
3616 def get_security_label(self, xspol=None):
3617 import xen.util.xsm.xsm as security
3618 label = security.get_security_label(self, xspol)
3619 return label
3621 def set_security_label(self, seclab, old_seclab, xspol=None,
3622 xspol_old=None):
3623 """
3624 Set the security label of a domain from its old to
3625 a new value.
3626 @param seclab New security label formatted in the form
3627 <policy type>:<policy name>:<vm label>
3628 @param old_seclab The current security label that the
3629 VM must have.
3630 @param xspol An optional policy under which this
3631 update should be done. If not given,
3632 then the current active policy is used.
3633 @param xspol_old The old policy; only to be passed during
3634 the updating of a policy
3635 @return Tuple (rc, errors, old_label, new_ssidref): the return
3636 code, a string with errors from the hypervisor's
3637 operation, the old label of the domain, and the new ssidref
3638 """
3639 rc = 0
3640 errors = ""
3641 old_label = ""
3642 new_ssidref = 0
3643 domid = self.getDomid()
3644 res_labels = None
3645 is_policy_update = (xspol_old != None)
3647 from xen.xend.XendXSPolicyAdmin import XSPolicyAdminInstance
3649 state = self._stateGet()
3650 # Relabel only HALTED, RUNNING, PAUSED or SUSPENDED domains
3651 if domid != 0 and \
3652 state not in \
3653 [ DOM_STATE_HALTED, DOM_STATE_RUNNING, DOM_STATE_PAUSED, \
3654 DOM_STATE_SUSPENDED ]:
3655 log.warn("Relabeling domain not possible in state '%s'" %
3656 DOM_STATES[state])
3657 return (-xsconstants.XSERR_VM_WRONG_STATE, "", "", 0)
3659 # Remove security label. Works only for halted or suspended domains
3660 if not seclab or seclab == "":
3661 if state not in [ DOM_STATE_HALTED, DOM_STATE_SUSPENDED ]:
3662 return (-xsconstants.XSERR_VM_WRONG_STATE, "", "", 0)
3664 if self.info.has_key('security_label'):
3665 old_label = self.info['security_label']
3666 # Check label against expected one.
3667 if old_label != old_seclab:
3668 return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)
3669 del self.info['security_label']
3670 xen.xend.XendDomain.instance().managed_config_save(self)
3671 return (xsconstants.XSERR_SUCCESS, "", "", 0)
3673 tmp = seclab.split(":")
3674 if len(tmp) != 3:
3675 return (-xsconstants.XSERR_BAD_LABEL_FORMAT, "", "", 0)
3676 typ, policy, label = tmp
3678 poladmin = XSPolicyAdminInstance()
3679 if not xspol:
3680 xspol = poladmin.get_policy_by_name(policy)
3682 try:
3683 xen.xend.XendDomain.instance().policy_lock.acquire_writer()
3685 if state in [ DOM_STATE_RUNNING, DOM_STATE_PAUSED ]:
3686 # If the domain is running or paused, try to relabel it in the hypervisor
3687 if not xspol:
3688 return (-xsconstants.XSERR_POLICY_NOT_LOADED, "", "", 0)
3690 if typ != xspol.get_type_name() or \
3691 policy != xspol.get_name():
3692 return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)
3694 if typ == xsconstants.ACM_POLICY_ID:
3695 new_ssidref = xspol.vmlabel_to_ssidref(label)
3696 if new_ssidref == xsconstants.INVALID_SSIDREF:
3697 return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)
3699 # Check that all used resources are accessible under the
3700 # new label
3701 if not is_policy_update and \
3702 not security.resources_compatible_with_vmlabel(xspol,
3703 self, label):
3704 return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)
3706 # Check label against expected one. Can only do this
3707 # if the policy hasn't changed underneath in the meantime.
3708 if xspol_old == None:
3709 old_label = self.get_security_label()
3710 if old_label != old_seclab:
3711 log.info("old_label != old_seclab: %s != %s" %
3712 (old_label, old_seclab))
3713 return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)
3715 # relabel domain in the hypervisor
3716 rc, errors = security.relabel_domains([[domid, new_ssidref]])
3717 log.info("rc from relabeling in HV: %d" % rc)
3718 else:
3719 return (-xsconstants.XSERR_POLICY_TYPE_UNSUPPORTED, "", "", 0)
3721 if rc == 0:
3722 # HALTED, RUNNING or PAUSED
3723 if domid == 0:
3724 if xspol:
3725 self.info['security_label'] = seclab
3726 ssidref = poladmin.set_domain0_bootlabel(xspol, label)
3727 else:
3728 return (-xsconstants.XSERR_POLICY_NOT_LOADED, "", "", 0)
3729 else:
3730 if self.info.has_key('security_label'):
3731 old_label = self.info['security_label']
3732 # Check label against expected one, unless wildcard
3733 if old_label != old_seclab:
3734 return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)
3736 self.info['security_label'] = seclab
3738 try:
3739 xen.xend.XendDomain.instance().managed_config_save(self)
3740 except:
3741 pass
3742 return (rc, errors, old_label, new_ssidref)
3743 finally:
3744 xen.xend.XendDomain.instance().policy_lock.release()
3746 def get_on_shutdown(self):
3747 after_shutdown = self.info.get('actions_after_shutdown')
3748 if not after_shutdown or after_shutdown not in XEN_API_ON_NORMAL_EXIT:
3749 return XEN_API_ON_NORMAL_EXIT[-1]
3750 return after_shutdown
3752 def get_on_reboot(self):
3753 after_reboot = self.info.get('actions_after_reboot')
3754 if not after_reboot or after_reboot not in XEN_API_ON_NORMAL_EXIT:
3755 return XEN_API_ON_NORMAL_EXIT[-1]
3756 return after_reboot
3758 def get_on_suspend(self):
3759 # TODO: not supported
3760 after_suspend = self.info.get('actions_after_suspend')
3761 if not after_suspend or after_suspend not in XEN_API_ON_NORMAL_EXIT:
3762 return XEN_API_ON_NORMAL_EXIT[-1]
3763 return after_suspend
3765 def get_on_crash(self):
3766 after_crash = self.info.get('actions_after_crash')
3767 if not after_crash or after_crash not in \
3768 XEN_API_ON_CRASH_BEHAVIOUR + restart_modes:
3769 return XEN_API_ON_CRASH_BEHAVIOUR[0]
3770 return XEN_API_ON_CRASH_BEHAVIOUR_FILTER[after_crash]
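# Note on the four getters above: an unset or unrecognised action falls
# back to XEN_API_ON_NORMAL_EXIT[-1] (for shutdown/reboot/suspend) or
# XEN_API_ON_CRASH_BEHAVIOUR[0] (for crash), so callers always receive a
# valid Xen-API enum value.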
3772 def get_dev_config_by_uuid(self, dev_class, dev_uuid):
3773 """ Get's a device configuration either from XendConfig or
3774 from the DevController.
3776 @param dev_class: device class, either, 'vbd' or 'vif'
3777 @param dev_uuid: device UUID
3779 @rtype: dictionary
3780 """
3781 dev_type, dev_config = self.info['devices'].get(dev_uuid, (None, None))
3783 # shortcut if the domain isn't started because
3784 # the devcontrollers will have no better information
3785 # than XendConfig.
3786 if self._stateGet() in (XEN_API_VM_POWER_STATE_HALTED,
3787 XEN_API_VM_POWER_STATE_SUSPENDED):
3788 if dev_config:
3789 return copy.deepcopy(dev_config)
3790 return None
3792 # Use the dev_type recorded in XendConfig rather
3793 # than the supplied dev_class.
3794 controller = self.getDeviceController(dev_type)
3795 if not controller:
3796 return None
3798 all_configs = controller.getAllDeviceConfigurations()
3799 if not all_configs:
3800 return None
3802 updated_dev_config = copy.deepcopy(dev_config)
3803 for _devid, _devcfg in all_configs.items():
3804 if _devcfg.get('uuid') == dev_uuid:
3805 updated_dev_config.update(_devcfg)
3806 updated_dev_config['id'] = _devid
3807 return updated_dev_config
3809 return updated_dev_config
3811 def get_dev_xenapi_config(self, dev_class, dev_uuid):
3812 config = self.get_dev_config_by_uuid(dev_class, dev_uuid)
3813 if not config:
3814 return {}
3816 config['VM'] = self.get_uuid()
3818 if dev_class == 'vif':
3819 if not config.has_key('name'):
3820 config['name'] = config.get('vifname', '')
3821 if not config.has_key('MAC'):
3822 config['MAC'] = config.get('mac', '')
3823 if not config.has_key('type'):
3824 config['type'] = 'paravirtualised'
3825 devid = config.get('id')
3826 if not config.has_key('device'):
3827 if devid != None:
3828 config['device'] = 'eth%s' % devid
3829 else:
3830 config['device'] = ''
3832 if not config.has_key('network'):
3833 try:
3834 bridge = config.get('bridge', None)
3835 if bridge is None:
3836 from xen.util import Brctl
3837 if_to_br = dict([(i,b)
3838 for (b,ifs) in Brctl.get_state().items()
3839 for i in ifs])
3840 vifname = "vif%s.%s" % (self.getDomid(),
3841 config.get('id'))
3842 bridge = if_to_br.get(vifname, None)
3843 config['network'] = \
3844 XendNode.instance().bridge_to_network(
3845 bridge).get_uuid()
3846 except Exception:
3847 log.exception('bridge_to_network')
3848 # Ignore this for now -- it may happen if the device
3849 # has been specified using the legacy methods, but at
3850 # some point we're going to have to figure out how to
3851 # handle that properly.
3853 config['MTU'] = 1500 # TODO
3855 if self._stateGet() not in (XEN_API_VM_POWER_STATE_HALTED,):
3856 xennode = XendNode.instance()
3857 rx_bps, tx_bps = xennode.get_vif_util(self.domid, devid)
3858 config['io_read_kbs'] = rx_bps/1024
3859 config['io_write_kbs'] = tx_bps/1024
3860 rx, tx = xennode.get_vif_stat(self.domid, devid)
3861 config['io_total_read_kbs'] = rx/1024
3862 config['io_total_write_kbs'] = tx/1024
3863 else:
3864 config['io_read_kbs'] = 0.0
3865 config['io_write_kbs'] = 0.0
3866 config['io_total_read_kbs'] = 0.0
3867 config['io_total_write_kbs'] = 0.0
3869 config['security_label'] = config.get('security_label', '')
3871 if dev_class == 'vbd':
3873 if self._stateGet() not in (XEN_API_VM_POWER_STATE_HALTED,):
3874 controller = self.getDeviceController(dev_class)
3875 devid, _1, _2 = controller.getDeviceDetails(config)
3876 xennode = XendNode.instance()
3877 rd_blkps, wr_blkps = xennode.get_vbd_util(self.domid, devid)
3878 config['io_read_kbs'] = rd_blkps
3879 config['io_write_kbs'] = wr_blkps
3880 else:
3881 config['io_read_kbs'] = 0.0
3882 config['io_write_kbs'] = 0.0
3884 config['VDI'] = config.get('VDI', '')
3885 config['device'] = config.get('dev', '')
3886 if config['device'].startswith('ioemu:'):
3887 _, vbd_device = config['device'].split(':', 1)
3888 config['device'] = vbd_device
3889 if ':' in config['device']:
3890 vbd_name, vbd_type = config['device'].split(':', 1)
3891 config['device'] = vbd_name
3892 if vbd_type == 'cdrom':
3893 config['type'] = XEN_API_VBD_TYPE[0]
3894 else:
3895 config['type'] = XEN_API_VBD_TYPE[1]
3897 config['driver'] = 'paravirtualised' # TODO
3898 config['image'] = config.get('uname', '')
3900 if config.get('mode', 'r') == 'r':
3901 config['mode'] = 'RO'
3902 else:
3903 config['mode'] = 'RW'
3905 if dev_class == 'vtpm':
3906 if not config.has_key('type'):
3907 config['type'] = 'paravirtualised' # TODO
3908 if not config.has_key('backend'):
3909 config['backend'] = "00000000-0000-0000-0000-000000000000"
3911 return config
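# Illustrative result for a running vif (hypothetical values, shape per
# the assignments above):
#   {'VM': '<vm-uuid>', 'name': 'vif1.0', 'MAC': '00:16:3e:xx:xx:xx',
#    'type': 'paravirtualised', 'device': 'eth0', 'MTU': 1500,
#    'io_read_kbs': 12.3, 'io_write_kbs': 4.5, 'security_label': '', ...}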
3913 def get_dev_property(self, dev_class, dev_uuid, field):
3914 config = self.get_dev_xenapi_config(dev_class, dev_uuid)
3915 try:
3916 return config[field]
3917 except KeyError:
3918 raise XendError('Invalid property for device: %s' % field)
3920 def set_dev_property(self, dev_class, dev_uuid, field, value):
3921 self.info['devices'][dev_uuid][1][field] = value
3923 def get_vcpus_util(self):
3924 vcpu_util = {}
3925 xennode = XendNode.instance()
3926 if 'VCPUs_max' in self.info and self.domid != None:
3927 for i in range(0, self.info['VCPUs_max']):
3928 util = xennode.get_vcpu_util(self.domid, i)
3929 vcpu_util[str(i)] = util
3931 return vcpu_util
3933 def get_consoles(self):
3934 return self.info.get('console_refs', [])
3936 def get_vifs(self):
3937 return self.info.get('vif_refs', [])
3939 def get_vbds(self):
3940 return self.info.get('vbd_refs', [])
3942 def get_vtpms(self):
3943 return self.info.get('vtpm_refs', [])
3945 def get_dpcis(self):
3946 return XendDPCI.get_by_VM(self.info.get('uuid'))
3948 def get_dscsis(self):
3949 return XendDSCSI.get_by_VM(self.info.get('uuid'))
3951 def get_dscsi_HBAs(self):
3952 return XendDSCSI_HBA.get_by_VM(self.info.get('uuid'))
3954 def create_vbd(self, xenapi_vbd, vdi_image_path):
3955 """Create a VBD using a VDI from XendStorageRepository.
3957 @param xenapi_vbd: vbd struct from the Xen API
3958 @param vdi_image_path: VDI UUID
3959 @rtype: string
3960 @return: uuid of the device
3961 """
3962 xenapi_vbd['image'] = vdi_image_path
3963 if vdi_image_path.startswith('tap'):
3964 dev_uuid = self.info.device_add('tap2', cfg_xenapi = xenapi_vbd)
3965 else:
3966 dev_uuid = self.info.device_add('vbd', cfg_xenapi = xenapi_vbd)
3968 if not dev_uuid:
3969 raise XendError('Failed to create device')
3971 if self._stateGet() in (XEN_API_VM_POWER_STATE_RUNNING,
3972 XEN_API_VM_POWER_STATE_PAUSED):
3973 _, config = self.info['devices'][dev_uuid]
3975 if vdi_image_path.startswith('tap'):
3976 dev_control = self.getDeviceController('tap2')
3977 else:
3978 dev_control = self.getDeviceController('vbd')
3980 try:
3981 devid = dev_control.createDevice(config)
3982 dev_type = self.getBlockDeviceClass(devid)
3983 self._waitForDevice(dev_type, devid)
3984 self.info.device_update(dev_uuid,
3985 cfg_xenapi = {'devid': devid})
3986 except Exception, exn:
3987 log.exception(exn)
3988 del self.info['devices'][dev_uuid]
3989 self.info['vbd_refs'].remove(dev_uuid)
3990 raise
3992 return dev_uuid
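# Hedged usage sketch (hypothetical values; field names per the Xen API
# VBD struct consumed above):
#   vbd = {'device': 'xvdb', 'mode': 'RW', 'VDI': '<vdi-uuid>'}
#   dev_uuid = dominfo.create_vbd(vbd, 'file:/var/lib/images/disk.img')
# A vdi_image_path starting with 'tap' selects the tap2 controller
# instead of plain 'vbd'.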
3994 def create_phantom_vbd_with_vdi(self, xenapi_vbd, vdi_image_path):
3995 """Create a VBD using a VDI from XendStorageRepository.
3997 @param xenapi_vbd: vbd struct from the Xen API
3998 @param vdi_image_path: VDI UUID
3999 @rtype: string
4000 @return: devid of the created phantom device
4001 """
4002 xenapi_vbd['image'] = vdi_image_path
4003 dev_uuid = self.info.phantom_device_add('tap', cfg_xenapi = xenapi_vbd)
4004 if not dev_uuid:
4005 raise XendError('Failed to create device')
4007 if self._stateGet() == XEN_API_VM_POWER_STATE_RUNNING:
4008 _, config = self.info['devices'][dev_uuid]
4009 config['devid'] = self.getDeviceController('tap').createDevice(config)
4011 return config['devid']
4013 def create_vif(self, xenapi_vif):
4014 """Create VIF device from the passed struct in Xen API format.
4016 @param xenapi_vif: Xen API VIF Struct.
4017 @rtype: string
4018 @return: UUID
4019 """
4020 dev_uuid = self.info.device_add('vif', cfg_xenapi = xenapi_vif)
4021 if not dev_uuid:
4022 raise XendError('Failed to create device')
4024 if self._stateGet() in (XEN_API_VM_POWER_STATE_RUNNING,
4025 XEN_API_VM_POWER_STATE_PAUSED):
4027 _, config = self.info['devices'][dev_uuid]
4028 dev_control = self.getDeviceController('vif')
4030 try:
4031 devid = dev_control.createDevice(config)
4032 dev_control.waitForDevice(devid)
4033 self.info.device_update(dev_uuid,
4034 cfg_xenapi = {'devid': devid})
4035 except Exception, exn:
4036 log.exception(exn)
4037 del self.info['devices'][dev_uuid]
4038 self.info['vif_refs'].remove(dev_uuid)
4039 raise
4041 return dev_uuid
4043 def create_vtpm(self, xenapi_vtpm):
4044 """Create a VTPM device from the passed struct in Xen API format.
4046 @return: uuid of the device
4047 @rtype: string
4048 """
4050 if self._stateGet() not in (DOM_STATE_HALTED,):
4051 raise VmError("Can only add vTPM to a halted domain.")
4052 if self.get_vtpms() != []:
4053 raise VmError('Domain already has a vTPM.')
4054 dev_uuid = self.info.device_add('vtpm', cfg_xenapi = xenapi_vtpm)
4055 if not dev_uuid:
4056 raise XendError('Failed to create device')
4058 return dev_uuid
4060 def create_console(self, xenapi_console):
4061 """ Create a console device from a Xen API struct.
4063 @return: uuid of device
4064 @rtype: string
4065 """
4066 if self._stateGet() not in (DOM_STATE_HALTED,):
4067 raise VmError("Can only add console to a halted domain.")
4069 dev_uuid = self.info.device_add('console', cfg_xenapi = xenapi_console)
4070 if not dev_uuid:
4071 raise XendError('Failed to create device')
4073 return dev_uuid
4075 def set_console_other_config(self, console_uuid, other_config):
4076 self.info.console_update(console_uuid, 'other_config', other_config)
4078 def create_dpci(self, xenapi_pci):
4079 """Create pci device from the passed struct in Xen API format.
4081 @param xenapi_pci: DPCI struct from Xen API
4082 @rtype: string
4084 @return: UUID of the created DPCI
4086 """
4088 dpci_uuid = uuid.createString()
4090 dpci_opts = []
4091 opts_dict = xenapi_pci.get('options')
4092 for k in opts_dict.keys():
4093 dpci_opts.append([k, opts_dict[k]])
4094 opts_sxp = pci_opts_list_to_sxp(dpci_opts)
4096 # Convert xenapi to sxp
4097 ppci = XendAPIStore.get(xenapi_pci.get('PPCI'), 'PPCI')
4099 dev_sxp = ['dev',
4100 ['domain', '0x%02x' % ppci.get_domain()],
4101 ['bus', '0x%02x' % ppci.get_bus()],
4102 ['slot', '0x%02x' % ppci.get_slot()],
4103 ['func', '0x%1x' % ppci.get_func()],
4104 ['vdevfn', '0x%02x' % xenapi_pci.get('hotplug_slot')],
4105 ['key', xenapi_pci['key']],
4106 ['uuid', dpci_uuid]]
4107 dev_sxp = sxp.merge(dev_sxp, opts_sxp)
4109 target_pci_sxp = ['pci', dev_sxp, ['state', 'Initialising'] ]
4111 if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:
4113 old_pci_sxp = self._getDeviceInfo_pci(0)
4115 if old_pci_sxp is None:
4116 dev_uuid = self.info.device_add('pci', cfg_sxp = target_pci_sxp)
4117 if not dev_uuid:
4118 raise XendError('Failed to create device')
4120 else:
4121 new_pci_sxp = ['pci']
4122 for existing_dev in sxp.children(old_pci_sxp, 'dev'):
4123 new_pci_sxp.append(existing_dev)
4124 new_pci_sxp.append(sxp.child0(target_pci_sxp, 'dev'))
4126 dev_uuid = sxp.child_value(old_pci_sxp, 'uuid')
4127 self.info.device_update(dev_uuid, new_pci_sxp)
4129 xen.xend.XendDomain.instance().managed_config_save(self)
4131 else:
4132 try:
4133 self.device_configure(target_pci_sxp)
4135 except Exception, exn:
4136 raise XendError('Failed to create device')
4138 return dpci_uuid
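# Illustrative sxp built above for a hypothetical PPCI at 0000:02:00.1
# with hotplug_slot 0x08:
#   ['pci',
#    ['dev', ['domain', '0x00'], ['bus', '0x02'], ['slot', '0x00'],
#     ['func', '0x1'], ['vdevfn', '0x08'], ['key', ...],
#     ['uuid', <dpci-uuid>], <options>],
#    ['state', 'Initialising']]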
4140 def create_dscsi(self, xenapi_dscsi):
4141 """Create scsi device from the passed struct in Xen API format.
4143 @param xenapi_dscsi: DSCSI struct from Xen API
4144 @rtype: string
4145 @return: UUID
4146 """
4148 dscsi_uuid = uuid.createString()
4150 # Convert xenapi to sxp
4151 pscsi = XendAPIStore.get(xenapi_dscsi.get('PSCSI'), 'PSCSI')
4152 devid = int(xenapi_dscsi.get('virtual_HCTL').split(':')[0])
4153 target_vscsi_sxp = \
4154 ['vscsi',
4155 ['dev',
4156 ['devid', devid],
4157 ['p-devname', pscsi.get_dev_name()],
4158 ['p-dev', pscsi.get_physical_HCTL()],
4159 ['v-dev', xenapi_dscsi.get('virtual_HCTL')],
4160 ['state', xenbusState['Initialising']],
4161 ['uuid', dscsi_uuid]
4162 ],
4163 ['feature-host', 0]
4164 ]
4166 if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:
4168 cur_vscsi_sxp = self._getDeviceInfo_vscsi(devid)
4170 if cur_vscsi_sxp is None:
4171 dev_uuid = self.info.device_add('vscsi', cfg_sxp = target_vscsi_sxp)
4172 if not dev_uuid:
4173 raise XendError('Failed to create device')
4175 else:
4176 new_vscsi_sxp = ['vscsi', ['feature-host', 0]]
4177 for existing_dev in sxp.children(cur_vscsi_sxp, 'dev'):
4178 new_vscsi_sxp.append(existing_dev)
4179 new_vscsi_sxp.append(sxp.child0(target_vscsi_sxp, 'dev'))
4181 dev_uuid = sxp.child_value(cur_vscsi_sxp, 'uuid')
4182 self.info.device_update(dev_uuid, new_vscsi_sxp)
4184 xen.xend.XendDomain.instance().managed_config_save(self)
4186 else:
4187 try:
4188 self.device_configure(target_vscsi_sxp)
4189 except Exception, exn:
4190 log.exception('create_dscsi: %s', exn)
4191 raise XendError('Failed to create device')
4193 return dscsi_uuid
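# Note: virtual_HCTL is a 'host:channel:target:lun' string; the devid
# above is its host part, e.g. a hypothetical '0:0:1:0' gives devid 0.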
4195 def create_dscsi_HBA(self, xenapi_dscsi):
4196 """Create scsi devices from the passed struct in Xen API format.
4198 @param xenapi_dscsi: DSCSI_HBA struct from Xen API
4199 @rtype: string
4200 @return: UUID
4201 """
4203 dscsi_HBA_uuid = uuid.createString()
4205 # Convert xenapi to sxp
4206 feature_host = xenapi_dscsi.get('assignment_mode', 'HOST') == 'HOST' and 1 or 0
4207 target_vscsi_sxp = \
4208 ['vscsi',
4209 ['feature-host', feature_host],
4210 ['uuid', dscsi_HBA_uuid],
4211 ]
4212 pscsi_HBA = XendAPIStore.get(xenapi_dscsi.get('PSCSI_HBA'), 'PSCSI_HBA')
4213 devid = pscsi_HBA.get_physical_host()
4214 for pscsi_uuid in pscsi_HBA.get_PSCSIs():
4215 pscsi = XendAPIStore.get(pscsi_uuid, 'PSCSI')
4216 pscsi_HCTL = pscsi.get_physical_HCTL()
4217 dscsi_uuid = uuid.createString()
4218 dev = \
4219 ['dev',
4220 ['devid', devid],
4221 ['p-devname', pscsi.get_dev_name()],
4222 ['p-dev', pscsi_HCTL],
4223 ['v-dev', pscsi_HCTL],
4224 ['state', xenbusState['Initialising']],
4225 ['uuid', dscsi_uuid]
4226 ]
4227 target_vscsi_sxp.append(dev)
4229 if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:
4230 if not self.info.device_add('vscsi', cfg_sxp = target_vscsi_sxp):
4231 raise XendError('Failed to create device')
4232 xen.xend.XendDomain.instance().managed_config_save(self)
4233 else:
4234 try:
4235 self.device_configure(target_vscsi_sxp)
4236 except Exception, exn:
4237 log.exception('create_dscsi_HBA: %s', exn)
4238 raise XendError('Failed to create device')
4240 return dscsi_HBA_uuid
4243 def change_vdi_of_vbd(self, xenapi_vbd, vdi_image_path):
4244 """Change current VDI with the new VDI.
4246 @param xenapi_vbd: vbd struct from the Xen API
4247 @param vdi_image_path: path of VDI
4248 """
4249 dev_uuid = xenapi_vbd['uuid']
4250 if dev_uuid not in self.info['devices']:
4251 raise XendError('Device does not exist')
4253 # Convert xenapi to sxp
4254 if vdi_image_path.startswith('tap'):
4255 dev_class = 'tap'
4256 else:
4257 dev_class = 'vbd'
4258 dev_sxp = [
4259 dev_class,
4260 ['uuid', dev_uuid],
4261 ['uname', vdi_image_path],
4262 ['dev', '%s:cdrom' % xenapi_vbd['device']],
4263 ['mode', 'r'],
4264 ['VDI', xenapi_vbd['VDI']]
4265 ]
4267 if self._stateGet() in (XEN_API_VM_POWER_STATE_RUNNING,
4268 XEN_API_VM_POWER_STATE_PAUSED):
4269 self.device_configure(dev_sxp)
4270 else:
4271 self.info.device_update(dev_uuid, dev_sxp)
4274 def destroy_device_by_uuid(self, dev_type, dev_uuid):
4275 if dev_uuid not in self.info['devices']:
4276 raise XendError('Device does not exist')
4278 try:
4279 if self._stateGet() in (XEN_API_VM_POWER_STATE_RUNNING,
4280 XEN_API_VM_POWER_STATE_PAUSED):
4281 _, config = self.info['devices'][dev_uuid]
4282 devid = config.get('devid')
4283 if devid != None:
4284 self.getDeviceController(dev_type).destroyDevice(devid, force = False)
4285 else:
4286 raise XendError('Unable to get devid for device: %s:%s' %
4287 (dev_type, dev_uuid))
4288 finally:
4289 del self.info['devices'][dev_uuid]
4290 self.info['%s_refs' % dev_type].remove(dev_uuid)
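# Note: the finally clause above drops the bookkeeping entries even when
# destroyDevice() raises, so a failed teardown cannot leave a stale
# reference behind in self.info.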
4292 def destroy_vbd(self, dev_uuid):
4293 self.destroy_device_by_uuid('vbd', dev_uuid)
4295 def destroy_vif(self, dev_uuid):
4296 self.destroy_device_by_uuid('vif', dev_uuid)
4298 def destroy_vtpm(self, dev_uuid):
4299 self.destroy_device_by_uuid('vtpm', dev_uuid)
4301 def destroy_dpci(self, dev_uuid):
4303 dpci = XendAPIStore.get(dev_uuid, 'DPCI')
4304 ppci = XendAPIStore.get(dpci.get_PPCI(), 'PPCI')
4306 old_pci_sxp = self._getDeviceInfo_pci(0)
4307 dev_uuid = sxp.child_value(old_pci_sxp, 'uuid')
4308 target_dev = None
4309 new_pci_sxp = ['pci']
4310 for dev in sxp.children(old_pci_sxp, 'dev'):
4311 pci_dev = {}
4312 pci_dev['domain'] = sxp.child_value(dev, 'domain')
4313 pci_dev['bus'] = sxp.child_value(dev, 'bus')
4314 pci_dev['slot'] = sxp.child_value(dev, 'slot')
4315 pci_dev['func'] = sxp.child_value(dev, 'func')
4316 if ppci.get_name() == pci_dict_to_bdf_str(pci_dev):
4317 target_dev = dev
4318 else:
4319 new_pci_sxp.append(dev)
4321 if target_dev is None:
4322 raise XendError('Failed to destroy device')
4324 target_pci_sxp = ['pci', target_dev, ['state', 'Closing']]
4326 if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:
4328 self.info.device_update(dev_uuid, new_pci_sxp)
4329 if len(sxp.children(new_pci_sxp, 'dev')) == 0:
4330 del self.info['devices'][dev_uuid]
4331 xen.xend.XendDomain.instance().managed_config_save(self)
4333 else:
4334 try:
4335 self.device_configure(target_pci_sxp)
4337 except Exception, exn:
4338 raise XendError('Failed to destroy device')
4340 def destroy_dscsi(self, dev_uuid):
4341 dscsi = XendAPIStore.get(dev_uuid, 'DSCSI')
4342 devid = dscsi.get_virtual_host()
4343 vHCTL = dscsi.get_virtual_HCTL()
4344 cur_vscsi_sxp = self._getDeviceInfo_vscsi(devid)
4345 dev_uuid = sxp.child_value(cur_vscsi_sxp, 'uuid')
4347 target_dev = None
4348 new_vscsi_sxp = ['vscsi', ['feature-host', 0]]
4349 for dev in sxp.children(cur_vscsi_sxp, 'dev'):
4350 if vHCTL == sxp.child_value(dev, 'v-dev'):
4351 target_dev = dev
4352 else:
4353 new_vscsi_sxp.append(dev)
4355 if target_dev is None:
4356 raise XendError('Failed to destroy device')
4358 target_dev.append(['state', xenbusState['Closing']])
4359 target_vscsi_sxp = ['vscsi', target_dev, ['feature-host', 0]]
4361 if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:
4363 self.info.device_update(dev_uuid, new_vscsi_sxp)
4364 if len(sxp.children(new_vscsi_sxp, 'dev')) == 0:
4365 del self.info['devices'][dev_uuid]
4366 xen.xend.XendDomain.instance().managed_config_save(self)
4368 else:
4369 try:
4370 self.device_configure(target_vscsi_sxp)
4371 except Exception, exn:
4372 log.exception('destroy_dscsi: %s', exn)
4373 raise XendError('Failed to destroy device')
4375 def destroy_dscsi_HBA(self, dev_uuid):
4376 dscsi_HBA = XendAPIStore.get(dev_uuid, 'DSCSI_HBA')
4377 devid = dscsi_HBA.get_virtual_host()
4378 cur_vscsi_sxp = self._getDeviceInfo_vscsi(devid)
4379 feature_host = sxp.child_value(cur_vscsi_sxp, 'feature-host')
4381 if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:
4382 new_vscsi_sxp = ['vscsi', ['feature-host', feature_host]]
4383 self.info.device_update(dev_uuid, new_vscsi_sxp)
4384 del self.info['devices'][dev_uuid]
4385 xen.xend.XendDomain.instance().managed_config_save(self)
4386 else:
4387 # If feature_host is 1, all devices are destroyed by just
4388 # one reconfiguration.
4389 # If feature_host is 0, we should reconfigure all devices
4390 # one-by-one to destroy all devices.
4391 # See reconfigureDevice@VSCSIController.
4392 for dev in sxp.children(cur_vscsi_sxp, 'dev'):
4393 target_vscsi_sxp = [
4394 'vscsi',
4395 dev + [['state', xenbusState['Closing']]],
4396 ['feature-host', feature_host]
4397 ]
4398 try:
4399 self.device_configure(target_vscsi_sxp)
4400 except Exception, exn:
4401 log.exception('destroy_dscsi_HBA: %s', exn)
4402 raise XendError('Failed to destroy device')
4403 if feature_host:
4404 break
4406 def destroy_xapi_instances(self):
4407 """Destroy Xen-API instances stored in XendAPIStore.
4408 """
4409 # Xen-API classes based on XendBase have their instances stored
4410 # in XendAPIStore. Clean up these instances here, if they are supposed
4411 # to be destroyed when the parent domain is dead.
4413 # Most of the virtual devices (vif, vbd, vfb, etc) are not based on
4414 # XendBase and there's no need to remove them from XendAPIStore.
4416 from xen.xend import XendDomain
4417 if XendDomain.instance().is_valid_vm(self.info.get('uuid')):
4418 # domain still exists.
4419 return
4421 # Destroy the VMMetrics instance.
4422 if XendAPIStore.get(self.metrics.get_uuid(), self.metrics.getClass()) \
4423 is not None:
4424 self.metrics.destroy()
4426 # Destroy DPCI instances.
4427 for dpci_uuid in XendDPCI.get_by_VM(self.info.get('uuid')):
4428 XendAPIStore.deregister(dpci_uuid, "DPCI")
4430 # Destroy DSCSI instances.
4431 for dscsi_uuid in XendDSCSI.get_by_VM(self.info.get('uuid')):
4432 XendAPIStore.deregister(dscsi_uuid, "DSCSI")
4434 # Destroy DSCSI_HBA instances.
4435 for dscsi_HBA_uuid in XendDSCSI_HBA.get_by_VM(self.info.get('uuid')):
4436 XendAPIStore.deregister(dscsi_HBA_uuid, "DSCSI_HBA")
4438 def has_device(self, dev_class, dev_uuid):
4439 return (dev_uuid in self.info['%s_refs' % dev_class.lower()])
4441 def __str__(self):
4442 return '<domain id=%s name=%s memory=%s state=%s>' % \
4443 (str(self.domid), self.info['name_label'],
4444 str(self.info['memory_dynamic_max']), DOM_STATES[self._stateGet()])
4446 __repr__ = __str__