
view tools/python/xen/xend/XendDomainInfo.py @ 20858:36c40adffe12

xend: NUMA: fix division by zero on unpopulated nodes

Nodes without memory are currently disabled by also moving the
physical cores connected to them to other nodes. This leads to nodes
without CPUs and thus to a division by zero in the node allocation
algorithm. The attached patch fixes this by checking for zero before
the division, which fixes domain creation on boxes with memory-less nodes.

Signed-off-by: Andre Przywara <andre.przywara@amd.com>
author Keir Fraser <keir.fraser@citrix.com>
date Sun Jan 17 18:01:08 2010 +0000 (2010-01-17)
parents aaf34d74b622
children 7a8cee80597e
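The patched allocation code lives further down XendDomainInfo.py than this
excerpt reaches, so the hunk itself is not shown below. As a minimal sketch
of the pattern the fix describes (hypothetical names: candidates,
node_to_cpu, node_load), skip nodes whose CPU list is empty instead of
dividing by its length:

    def best_node(candidates, node_to_cpu, node_load):
        best, best_score = None, None
        for n in candidates:
            ncpus = len(node_to_cpu[n])
            if ncpus == 0:
                continue    # memory-less node whose cores were moved: avoid x/0
            score = float(node_load[n]) / ncpus
            if best_score is None or score < best_score:
                best, best_score = n, score
        return best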
line source
1 #===========================================================================
2 # This library is free software; you can redistribute it and/or
3 # modify it under the terms of version 2.1 of the GNU Lesser General Public
4 # License as published by the Free Software Foundation.
5 #
6 # This library is distributed in the hope that it will be useful,
7 # but WITHOUT ANY WARRANTY; without even the implied warranty of
8 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
9 # Lesser General Public License for more details.
10 #
11 # You should have received a copy of the GNU Lesser General Public
12 # License along with this library; if not, write to the Free Software
13 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
14 #============================================================================
15 # Copyright (C) 2004, 2005 Mike Wray <mike.wray@hp.com>
16 # Copyright (C) 2005-2007 XenSource Ltd
17 #============================================================================
19 """Representation of a single domain.
20 Includes support for domain construction, using
21 open-ended configurations.
23 Author: Mike Wray <mike.wray@hp.com>
25 """
27 import logging
28 import time
29 import threading
30 import thread
31 import re
32 import copy
33 import os
34 import stat
35 import traceback
36 from types import StringTypes
38 import xen.lowlevel.xc
39 from xen.util import asserts, auxbin
40 from xen.util.blkif import blkdev_uname_to_file, blkdev_uname_to_taptype
41 import xen.util.xsm.xsm as security
42 from xen.util import xsconstants
43 from xen.util import mkdir
44 from xen.util.pci import serialise_pci_opts, pci_opts_list_to_sxp, \
45 append_default_pci_opts, \
46 pci_dict_to_bdf_str, pci_dict_to_xc_str, \
47 pci_convert_sxp_to_dict, pci_convert_dict_to_sxp, \
48 pci_dict_cmp, PCI_DEVFN, PCI_SLOT, PCI_FUNC, parse_hex
50 from xen.xend import balloon, sxp, uuid, image, arch
51 from xen.xend import XendOptions, XendNode, XendConfig
53 from xen.xend.XendConfig import scrub_password
54 from xen.xend.XendBootloader import bootloader, bootloader_tidy
55 from xen.xend.XendError import XendError, VmError
56 from xen.xend.XendDevices import XendDevices
57 from xen.xend.XendTask import XendTask
58 from xen.xend.xenstore.xstransact import xstransact, complete
59 from xen.xend.xenstore.xsutil import GetDomainPath, IntroduceDomain, SetTarget, ResumeDomain
60 from xen.xend.xenstore.xswatch import xswatch
61 from xen.xend.XendConstants import *
62 from xen.xend.XendAPIConstants import *
63 from xen.xend.server.DevConstants import xenbusState
64 from xen.xend.server.BlktapController import TAPDISK_DEVICE, parseDeviceString
66 from xen.xend.XendVMMetrics import XendVMMetrics
68 from xen.xend import XendAPIStore
69 from xen.xend.XendPPCI import XendPPCI
70 from xen.xend.XendDPCI import XendDPCI
71 from xen.xend.XendPSCSI import XendPSCSI
72 from xen.xend.XendDSCSI import XendDSCSI, XendDSCSI_HBA
74 MIGRATE_TIMEOUT = 30.0
75 BOOTLOADER_LOOPBACK_DEVICE = '/dev/xvdp'
77 xc = xen.lowlevel.xc.xc()
78 xoptions = XendOptions.instance()
80 log = logging.getLogger("xend.XendDomainInfo")
81 #log.setLevel(logging.TRACE)
84 def create(config):
85 """Creates and start a VM using the supplied configuration.
87 @param config: A configuration object involving lists of tuples.
88 @type config: list of lists, e.g. ['vm', ['image', 'xen.gz']]
90 @rtype: XendDomainInfo
91 @return: An up and running XendDomainInfo instance
92 @raise VmError: Invalid configuration or failure to start.
93 """
94 from xen.xend import XendDomain
95 domconfig = XendConfig.XendConfig(sxp_obj = config)
96 othervm = XendDomain.instance().domain_lookup_nr(domconfig["name_label"])
97 if othervm is None or othervm.domid is None:
98 othervm = XendDomain.instance().domain_lookup_nr(domconfig["uuid"])
99 if othervm is not None and othervm.domid is not None:
100 raise VmError("Domain '%s' already exists with ID '%d'" % (domconfig["name_label"], othervm.domid))
101 log.debug("XendDomainInfo.create(%s)", scrub_password(config))
102 vm = XendDomainInfo(domconfig)
103 try:
104 vm.start()
105 except:
106 log.exception('Domain construction failed')
107 vm.destroy()
108 raise
110 return vm
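# Hedged usage sketch for create(): the config is SXP parsed into nested
# lists; a real config carries many more fields than this illustrative
# minimum ('demo' and the kernel path are made-up values).
#
#     cfg = ['vm',
#            ['name', 'demo'],
#            ['memory', 256],
#            ['image', ['linux', ['kernel', '/boot/vmlinuz-xen']]]]
#     vm = create(cfg)    # an up and running XendDomainInfo, or VmError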
112 def create_from_dict(config_dict):
113 """Creates and start a VM using the supplied configuration.
115 @param config_dict: An configuration dictionary.
117 @rtype: XendDomainInfo
118 @return: An up and running XendDomainInfo instance
119 @raise VmError: Invalid configuration or failure to start.
120 """
122 log.debug("XendDomainInfo.create_from_dict(%s)",
123 scrub_password(config_dict))
124 vm = XendDomainInfo(XendConfig.XendConfig(xapi = config_dict))
125 try:
126 vm.start()
127 except:
128 log.exception('Domain construction failed')
129 vm.destroy()
130 raise
131 return vm
133 def recreate(info, priv):
134 """Create the VM object for an existing domain. The domain must not
135 be dying, as the paths in the store should already have been removed,
136 and asking us to recreate them causes problems.
138 @param info: Parsed configuration
139 @type info: Dictionary
140 @param priv: Is a privileged domain (Dom 0)
141 @type priv: bool
143 @rtype: XendDomainInfo
144 @return: An up and running XendDomainInfo instance
145 @raise VmError: Invalid configuration.
146 @raise XendError: Errors with configuration.
147 """
149 log.debug("XendDomainInfo.recreate(%s)", scrub_password(info))
151 assert not info['dying']
153 xeninfo = XendConfig.XendConfig(dominfo = info)
154 xeninfo['is_control_domain'] = priv
155 xeninfo['is_a_template'] = False
156 xeninfo['auto_power_on'] = False
157 domid = xeninfo['domid']
158 uuid1 = uuid.fromString(xeninfo['uuid'])
159 needs_reinitialising = False
161 dompath = GetDomainPath(domid)
162 if not dompath:
163 raise XendError('No domain path in store for existing '
164 'domain %d' % domid)
166 log.info("Recreating domain %d, UUID %s. at %s" %
167 (domid, xeninfo['uuid'], dompath))
169 # need to verify the path and uuid if not Domain-0
170 # if the required uuid and vm aren't set, then that means
171 # we need to recreate the dom with our own values
172 #
173 # NOTE: this is probably not desirable, really we should just
174 # abort or ignore, but there may be cases where xenstore's
175 # entry disappears (eg. xenstore-rm /)
176 #
177 try:
178 vmpath = xstransact.Read(dompath, "vm")
179 if not vmpath:
180 if not priv:
181 log.warn('/local/domain/%d/vm is missing. recreate is '
182 'confused, trying our best to recover' % domid)
183 needs_reinitialising = True
184 raise XendError('reinit')
186 uuid2_str = xstransact.Read(vmpath, "uuid")
187 if not uuid2_str:
188 log.warn('%s/uuid/ is missing. recreate is confused, '
189 'trying our best to recover' % vmpath)
190 needs_reinitialising = True
191 raise XendError('reinit')
193 uuid2 = uuid.fromString(uuid2_str)
194 if uuid1 != uuid2:
195 log.warn('UUID in /vm does not match the UUID in /dom/%d. '
196 'Trying our best to recover' % domid)
197 needs_reinitialising = True
198 except XendError:
199 pass # our best shot at 'goto' in python :)
201 vm = XendDomainInfo(xeninfo, domid, dompath, augment = True, priv = priv,
202 vmpath = vmpath)
204 if needs_reinitialising:
205 vm._recreateDom()
206 vm._removeVm()
207 vm._storeVmDetails()
208 vm._storeDomDetails()
210 vm.image = image.create(vm, vm.info)
211 vm.image.recreate()
213 vm._registerWatches()
214 vm.refreshShutdown(xeninfo)
216 # register the domain in the list
217 from xen.xend import XendDomain
218 XendDomain.instance().add_domain(vm)
220 return vm
223 def restore(config):
224 """Create a domain and a VM object to do a restore.
226 @param config: Domain SXP configuration
227 @type config: list of lists. (see C{create})
229 @rtype: XendDomainInfo
230 @return: An up and running XendDomainInfo instance
231 @raise VmError: Invalid configuration or failure to start.
232 @raise XendError: Errors with configuration.
233 """
235 log.debug("XendDomainInfo.restore(%s)", scrub_password(config))
236 vm = XendDomainInfo(XendConfig.XendConfig(sxp_obj = config),
237 resume = True)
238 try:
239 vm.resume()
240 return vm
241 except:
242 vm.destroy()
243 raise
245 def createDormant(domconfig):
246 """Create a dormant/inactive XenDomainInfo without creating VM.
247 This is for creating instances of persistent domains that are not
248 yet start.
250 @param domconfig: Parsed configuration
251 @type domconfig: XendConfig object
253 @rtype: XendDomainInfo
254 @return: A dormant XendDomainInfo instance
255 @raise XendError: Errors with configuration.
256 """
258 log.debug("XendDomainInfo.createDormant(%s)", scrub_password(domconfig))
260 # domid does not make sense for non-running domains.
261 domconfig.pop('domid', None)
262 vm = XendDomainInfo(domconfig)
263 return vm
265 def domain_by_name(name):
266 """Get domain by name
268 @param name: Name of the domain
269 @type name: string
270 @return: XendDomainInfo or None
271 """
272 from xen.xend import XendDomain
273 return XendDomain.instance().domain_lookup_by_name_nr(name)
276 def shutdown_reason(code):
277 """Get a shutdown reason from a code.
279 @param code: shutdown code
280 @type code: int
281 @return: shutdown reason
282 @rtype: string
283 """
284 return DOMAIN_SHUTDOWN_REASONS.get(code, "?")
286 def dom_get(dom):
287 """Get info from xen for an existing domain.
289 @param dom: domain id
290 @type dom: int
291 @return: info or None
292 @rtype: dictionary
293 """
294 try:
295 domlist = xc.domain_getinfo(dom, 1)
296 if domlist and dom == domlist[0]['domid']:
297 return domlist[0]
298 except Exception, err:
299 # ignore missing domain
300 log.trace("domain_getinfo(%d) failed, ignoring: %s", dom, str(err))
301 return None
303 from xen.xend.server.pciif import parse_pci_name, PciDevice,\
304 get_assigned_pci_devices, get_all_assigned_pci_devices
307 def do_FLR(domid, is_hvm):
308 dev_str_list = get_assigned_pci_devices(domid)
310 for dev_str in dev_str_list:
311 try:
312 dev = PciDevice(parse_pci_name(dev_str))
313 except Exception, e:
314 raise VmError("pci: failed to locate device and "+
315 "parse it's resources - "+str(e))
316 dev.do_FLR(is_hvm, xoptions.get_pci_dev_assign_strict_check())
318 class XendDomainInfo:
319 """An object represents a domain.
321 @TODO: try to unify dom and domid, they mean the same thing, but
322 xc refers to it as dom, and everywhere else, including
323 xenstore it is domid. The best way is to change xc's
324 python interface.
326 @ivar info: Parsed configuration
327 @type info: dictionary
328 @ivar domid: Domain ID (if VM has started)
329 @type domid: int or None
330 @ivar guest_bitsize: the bitsize of guest
331 @type guest_bitsize: int or None
332 @ivar alloc_mem: the memory the domain allocated when booting
333 @type alloc_mem: int or None
334 @ivar vmpath: XenStore path to this VM.
335 @type vmpath: string
336 @ivar dompath: XenStore path to this Domain.
337 @type dompath: string
338 @ivar image: Reference to the VM Image.
339 @type image: xen.xend.image.ImageHandler
340 @ivar store_port: event channel to xenstored
341 @type store_port: int
342 @ivar console_port: event channel to xenconsoled
343 @type console_port: int
344 @ivar store_mfn: xenstored mfn
345 @type store_mfn: int
346 @ivar console_mfn: xenconsoled mfn
347 @type console_mfn: int
348 @ivar notes: OS image notes
349 @type notes: dictionary
350 @ivar vmWatch: reference to a watch on the xenstored vmpath
351 @type vmWatch: xen.xend.xenstore.xswatch
352 @ivar shutdownWatch: reference to watch on the xenstored domain shutdown
353 @type shutdownWatch: xen.xend.xenstore.xswatch
354 @ivar shutdownStartTime: UNIX Time when domain started shutting down.
355 @type shutdownStartTime: float or None
356 @ivar restart_in_progress: Is a domain restart thread running?
357 @type restart_in_progress: bool
358 # @ivar state: Domain state
359 # @type state: enum(DOM_STATE_HALTED, DOM_STATE_RUNNING, ...)
360 @ivar state_updated: lock for self.state
361 @type state_updated: threading.Condition
362 @ivar refresh_shutdown_lock: lock for polling shutdown state
363 @type refresh_shutdown_lock: threading.Condition
364 @ivar _deviceControllers: device controller cache for this domain
365 @type _deviceControllers: dict 'string' to DevControllers
366 """
368 def __init__(self, info, domid = None, dompath = None, augment = False,
369 priv = False, resume = False, vmpath = None):
370 """Constructor for a domain
372 @param info: parsed configuration
373 @type info: dictionary
374 @keyword domid: Set initial domain id (if any)
375 @type domid: int
376 @keyword dompath: Set initial dompath (if any)
377 @type dompath: string
378 @keyword augment: Augment given info with xenstored VM info
379 @type augment: bool
380 @keyword priv: Is a privileged domain (Dom 0)
381 @type priv: bool
382 @keyword resume: Is this domain being resumed?
383 @type resume: bool
384 """
386 self.info = info
387 if domid is None:
388 self.domid = self.info.get('domid')
389 else:
390 self.domid = domid
391 self.guest_bitsize = None
392 self.alloc_mem = None
394 #REMOVE: uuid is now generated in XendConfig
395 #if not self._infoIsSet('uuid'):
396 # self.info['uuid'] = uuid.toString(uuid.create())
398 # Find a unique /vm/<uuid>/<integer> path if not specified.
399 # This avoids conflict between pre-/post-migrate domains when doing
400 # localhost relocation.
401 self.vmpath = vmpath
402 i = 0
403 while self.vmpath is None:
404 self.vmpath = XS_VMROOT + self.info['uuid']
405 if i != 0:
406 self.vmpath = self.vmpath + '-' + str(i)
407 try:
408 if self._readVm("uuid"):
409 self.vmpath = None
410 i = i + 1
411 except:
412 pass
414 self.dompath = dompath
416 self.image = None
417 self.store_port = None
418 self.store_mfn = None
419 self.console_port = None
420 self.console_mfn = None
422 self.native_protocol = None
424 self.vmWatch = None
425 self.shutdownWatch = None
426 self.shutdownStartTime = None
427 self._resume = resume
428 self.restart_in_progress = False
430 self.state_updated = threading.Condition()
431 self.refresh_shutdown_lock = threading.Condition()
432 self._stateSet(DOM_STATE_HALTED)
434 self._deviceControllers = {}
436 for state in DOM_STATES_OLD:
437 self.info[state] = 0
439 if augment:
440 self._augmentInfo(priv)
442 self._checkName(self.info['name_label'])
444 self.metrics = XendVMMetrics(uuid.createString(), self)
447 #
448 # Public functions available through XMLRPC
449 #
452 def start(self, is_managed = False):
453 """Attempts to start the VM by do the appropriate
454 initialisation if it not started.
455 """
456 from xen.xend import XendDomain
458 if self._stateGet() in (XEN_API_VM_POWER_STATE_HALTED, XEN_API_VM_POWER_STATE_SUSPENDED, XEN_API_VM_POWER_STATE_CRASHED):
459 try:
460 XendTask.log_progress(0, 30, self._constructDomain)
461 XendTask.log_progress(31, 60, self._initDomain)
463 XendTask.log_progress(61, 70, self._storeVmDetails)
464 XendTask.log_progress(71, 80, self._storeDomDetails)
465 XendTask.log_progress(81, 90, self._registerWatches)
466 XendTask.log_progress(91, 100, self.refreshShutdown)
468 xendomains = XendDomain.instance()
470 # save running configuration if XendDomains believe domain is
471 # persistent
472 if is_managed:
473 xendomains.managed_config_save(self)
474 except:
475 log.exception('VM start failed')
476 self.destroy()
477 raise
478 else:
479 raise XendError('VM already running')
481 def resume(self):
482 """Resumes a domain that has come back from suspension."""
483 state = self._stateGet()
484 if state in (DOM_STATE_SUSPENDED, DOM_STATE_HALTED):
485 try:
486 self._constructDomain()
488 try:
489 self._setCPUAffinity()
490 except:
491 # usually a CPU we want to set affinity to does not exist
492 # we just ignore it so that the domain can still be restored
493 log.warn("Cannot restore CPU affinity")
495 self._setSchedParams()
496 self._storeVmDetails()
497 self._createChannels()
498 self._createDevices()
499 self._storeDomDetails()
500 self._endRestore()
501 except:
502 log.exception('VM resume failed')
503 self.destroy()
504 raise
505 else:
506 raise XendError('VM is not suspended; it is %s'
507 % XEN_API_VM_POWER_STATE[state])
509 def shutdown(self, reason):
510 """Shutdown a domain by signalling this via xenstored."""
511 log.debug('XendDomainInfo.shutdown(%s)', reason)
512 if self._stateGet() in (DOM_STATE_SHUTDOWN, DOM_STATE_HALTED,):
513 raise XendError('Domain cannot be shut down')
515 if self.domid == 0:
516 raise XendError('Domain 0 cannot be shut down')
518 if reason not in DOMAIN_SHUTDOWN_REASONS.values():
519 raise XendError('Invalid reason: %s' % reason)
520 self.storeDom("control/shutdown", reason)
522 # HVM domain shuts itself down only if it has PV drivers
523 if self.info.is_hvm():
524 hvm_pvdrv = xc.hvm_get_param(self.domid, HVM_PARAM_CALLBACK_IRQ)
525 hvm_s_state = xc.hvm_get_param(self.domid, HVM_PARAM_ACPI_S_STATE)
526 if not hvm_pvdrv or hvm_s_state != 0:
527 code = REVERSE_DOMAIN_SHUTDOWN_REASONS[reason]
528 log.info("HVM save:remote shutdown dom %d!", self.domid)
529 xc.domain_shutdown(self.domid, code)
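# Hedged usage sketch for shutdown(): 'reason' must be one of the values in
# DOMAIN_SHUTDOWN_REASONS, e.g. 'poweroff' or 'reboot'. For a PV guest (or
# an HVM guest with PV drivers) the request travels via xenstore only:
#
#     vm = domain_by_name('demo')    # 'demo' is an illustrative name
#     vm.shutdown('poweroff')
#
# An HVM guest without a callback IRQ is shut down through
# xc.domain_shutdown() with the matching reason code instead.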
531 def pause(self):
532 """Pause domain
534 @raise XendError: Failed pausing a domain
535 """
536 try:
537 if self.domid:
538 # get all blktap2 devices
539 dev = xstransact.List(self.vmpath + '/device/tap2')
540 for x in dev:
541 path = self.getDeviceController('tap2').readBackend(x, 'params')
542 if path and path.startswith(TAPDISK_DEVICE):
543 try:
544 _minor, _dev, ctrl = parseDeviceString(path)
545 #pause the disk
546 f = open(ctrl + '/pause', 'w')
547 f.write('pause')
548 f.close()
549 except:
550 pass
551 except Exception, ex:
552 log.warn('Could not pause blktap disk.')
554 try:
555 xc.domain_pause(self.domid)
556 self._stateSet(DOM_STATE_PAUSED)
557 except Exception, ex:
558 log.exception(ex)
559 raise XendError("Domain unable to be paused: %s" % str(ex))
561 def unpause(self):
562 """Unpause domain
564 @raise XendError: Failed unpausing a domain
565 """
566 try:
567 if self.domid:
568 dev = xstransact.List(self.vmpath + '/device/tap2')
569 for x in dev:
570 path = self.getDeviceController('tap2').readBackend(x, 'params')
571 if path and path.startswith(TAPDISK_DEVICE):
572 try:
573 #Figure out the sysfs path.
574 _minor, _dev, ctrl = parseDeviceString(path)
575 #unpause the disk
576 if os.path.exists(ctrl + '/resume'):
577 f = open(ctrl + '/resume', 'w')
578 f.write('resume')
579 f.close()
580 except:
581 pass
583 except Exception, ex:
584 log.warn('Could not unpause blktap disk: %s' % str(ex))
586 try:
587 xc.domain_unpause(self.domid)
588 self._stateSet(DOM_STATE_RUNNING)
589 except Exception, ex:
590 log.exception(ex)
591 raise XendError("Domain unable to be unpaused: %s" % str(ex))
593 def send_sysrq(self, key):
594 """ Send a Sysrq equivalent key via xenstored."""
595 if self._stateGet() not in (DOM_STATE_RUNNING, DOM_STATE_PAUSED):
596 raise XendError("Domain '%s' is not started" % self.info['name_label'])
598 asserts.isCharConvertible(key)
599 self.storeDom("control/sysrq", '%c' % key)
601 def pci_device_configure_boot(self):
603 if not self.info.is_hvm():
604 return
606 devid = '0'
607 first = True
608 dev_info = self._getDeviceInfo_pci(devid)
609 if dev_info is None:
610 return
612 # get the virtual slot info from xenstore
613 dev_uuid = sxp.child_value(dev_info, 'uuid')
614 pci_conf = self.info['devices'][dev_uuid][1]
615 pci_devs = pci_conf['devs']
617 # Keep a set of keys that are done rather than
618 # just iterating through set(map(..., pci_devs))
619 # to preserve any order information present.
620 done = set()
621 for key in map(lambda x: x['key'], pci_devs):
622 if key in done:
623 continue
624 done |= set([key])
625 dev = filter(lambda x: x['key'] == key, pci_devs)
627 head_dev = dev.pop()
628 dev_sxp = pci_convert_dict_to_sxp(head_dev, 'Initialising',
629 'Booting')
630 self.pci_device_configure(dev_sxp, first_dev = first)
631 first = False
633 # That is all for single-function virtual devices
634 if len(dev) == 0:
635 continue
637 if int(head_dev['vdevfn'], 16) & AUTO_PHP_SLOT:
638 new_dev_info = self._getDeviceInfo_pci(devid)
639 if new_dev_info is None:
640 continue
641 new_dev_uuid = sxp.child_value(new_dev_info, 'uuid')
642 new_pci_conf = self.info['devices'][new_dev_uuid][1]
643 new_pci_devs = new_pci_conf['devs']
645 new_head_dev = filter(lambda x: pci_dict_cmp(x, head_dev),
646 new_pci_devs)[0]
648 if int(new_head_dev['vdevfn'], 16) & AUTO_PHP_SLOT:
649 continue
651 vdevfn = PCI_SLOT(int(new_head_dev['vdevfn'], 16))
652 new_dev = []
653 for i in dev:
654 i['vdevfn'] = '0x%02x' % \
655 PCI_DEVFN(vdevfn,
656 PCI_FUNC(int(i['vdevfn'], 16)))
657 new_dev.append(i)
659 dev = new_dev
661 for i in dev:
662 dev_sxp = pci_convert_dict_to_sxp(i, 'Initialising', 'Booting')
663 self.pci_device_configure(dev_sxp)
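# Worked example of the vdevfn arithmetic above (standard PCI devfn
# encoding: slot in bits 7:3, function in bits 2:0). For vdevfn '0x1a':
#
#     PCI_SLOT(0x1a)  == 3       # 0x1a >> 3
#     PCI_FUNC(0x1a)  == 2       # 0x1a & 7
#     PCI_DEVFN(3, 2) == 0x1a    # rebuilt devfn
#
# so a sibling function whose old vdevfn carried function number 1 moves
# into slot 3 as '0x%02x' % PCI_DEVFN(3, 1) == '0x19'.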
665 def hvm_pci_device_create(self, dev_config):
666 log.debug("XendDomainInfo.hvm_pci_device_create: %s"
667 % scrub_password(dev_config))
669 if not self.info.is_hvm():
670 raise VmError("hvm_pci_device_create called on non-HVM guest")
672 #all the PCI devs share one conf node
673 devid = '0'
675 new_dev = dev_config['devs'][0]
676 dev_info = self._getDeviceInfo_pci(devid)#from self.info['devices']
678 # check for conflicts before triggering the hotplug event
679 if dev_info is not None:
680 dev_uuid = sxp.child_value(dev_info, 'uuid')
681 pci_conf = self.info['devices'][dev_uuid][1]
682 pci_devs = pci_conf['devs']
683 for x in pci_devs:
684 if (int(x['vdevfn'], 16) == int(new_dev['vdevfn'], 16) and
685 not int(x['vdevfn'], 16) & AUTO_PHP_SLOT):
686 raise VmError("vdevfn %s already have a device." %
687 (new_dev['vdevfn']))
689 if (pci_dict_cmp(x, new_dev)):
690 raise VmError("device is already inserted")
692 # Test whether the devices can be assigned.
693 self.pci_dev_check_attachability_and_do_FLR(new_dev)
695 return self.hvm_pci_device_insert_dev(new_dev)
697 def pci_dev_check_assignability_and_do_FLR(self, config):
698 """ In the case of static device assignment(i.e., the 'pci' string in
699 guest config file), we check if the device(s) specified in the 'pci'
700 can be assigned to guest or not; if yes, we do_FLR the device(s).
701 """
702 pci_dev_ctrl = self.getDeviceController('pci')
703 return pci_dev_ctrl.dev_check_assignability_and_do_FLR(config)
705 def pci_dev_check_attachability_and_do_FLR(self, new_dev):
706 """ In the case of dynamic device assignment(i.e., xm pci-attach), we
707 check if the device can be attached to guest or not; if yes, we do_FLR
708 the device.
709 """
711 # Test whether the devices can be assigned
713 pci_name = pci_dict_to_bdf_str(new_dev)
714 _all_assigned_pci_devices = get_all_assigned_pci_devices(self.domid)
715 if pci_name in _all_assigned_pci_devices:
716 raise VmError("failed to assign device %s that has"
717 " already been assigned to other domain." % pci_name)
719 # Test whether the device is owned by pciback or pci-stub.
720 try:
721 pci_device = PciDevice(new_dev)
722 except Exception, e:
723 raise VmError("pci: failed to locate device and "+
724 "parse its resources - "+str(e))
725 if pci_device.driver != 'pciback' and pci_device.driver != 'pci-stub':
726 raise VmError("pci: device %s is owned by neither pciback "
727 "nor pci-stub" % pci_device.name)
729 strict_check = xoptions.get_pci_dev_assign_strict_check()
730 # Check non-page-aligned MMIO BAR.
731 if pci_device.has_non_page_aligned_bar and strict_check:
732 raise VmError("pci: %s: non-page-aligned MMIO BAR found." % \
733 pci_device.name)
735 # A PV guest needs fewer checks.
736 if not self.info.is_hvm():
737 # try to do FLR for PV guest
738 pci_device.do_FLR(self.info.is_hvm(), strict_check)
739 return
741 if not strict_check:
742 return
744 # Check if there is an intermediate PCIe switch between the device
745 # and the Root Complex.
746 if pci_device.is_behind_switch_lacking_acs():
747 err_msg = 'pci: to avoid a potential security issue, %s is not'+\
748 ' allowed to be assigned to a guest, since it is behind'+\
749 ' a PCIe switch that does not support or enable ACS.'
750 raise VmError(err_msg % pci_device.name)
752 # Check the co-assignment.
753 # To pci-attach a device D to domN, we should ensure that each of D's
754 # co-assigned devices either hasn't been assigned, or has been
755 # assigned to domN.
756 coassignment_list = pci_device.find_coassigned_devices()
757 pci_device.devs_check_driver(coassignment_list)
758 assigned_pci_device_str_list = self._get_assigned_pci_devices()
759 for pci_str in coassignment_list:
760 if not (pci_str in _all_assigned_pci_devices):
761 continue
762 if not pci_str in assigned_pci_device_str_list:
763 raise VmError(("pci: failed to pci-attach %s to domain %s" + \
764 " because one of its co-assignment device %s has been" + \
765 " assigned to other domain." \
766 )% (pci_device.name, self.info['name_label'], pci_str))
768 # try to do FLR for HVM guest
769 pci_device.do_FLR(self.info.is_hvm(), strict_check)
771 def hvm_pci_device_insert(self, dev_config):
772 log.debug("XendDomainInfo.hvm_pci_device_insert: %s"
773 % scrub_password(dev_config))
775 if not self.info.is_hvm():
776 raise VmError("hvm_pci_device_create called on non-HVM guest")
778 new_dev = dev_config['devs'][0]
780 return self.hvm_pci_device_insert_dev(new_dev)
782 def hvm_pci_device_insert_dev(self, new_dev):
783 log.debug("XendDomainInfo.hvm_pci_device_insert_dev: %s"
784 % scrub_password(new_dev))
786 if self.domid is not None:
787 opts = ''
788 optslist = []
789 pci_defopts = []
790 if 'pci_msitranslate' in self.info['platform']:
791 pci_defopts.append(['msitranslate',
792 str(self.info['platform']['pci_msitranslate'])])
793 if 'pci_power_mgmt' in self.info['platform']:
794 pci_defopts.append(['power_mgmt',
795 str(self.info['platform']['pci_power_mgmt'])])
796 if new_dev.has_key('opts'):
797 optslist += new_dev['opts']
799 if optslist or pci_defopts:
800 opts = ',' + serialise_pci_opts(
801 append_default_pci_opts(optslist, pci_defopts))
803 bdf_str = "%s@%02x%s" % (pci_dict_to_bdf_str(new_dev),
804 int(new_dev['vdevfn'], 16), opts)
805 log.debug("XendDomainInfo.hvm_pci_device_insert_dev: %s" % bdf_str)
806 bdf = xc.assign_device(self.domid, pci_dict_to_xc_str(new_dev))
807 if bdf > 0:
808 raise VmError("Failed to assign device to IOMMU (%s)" % bdf_str)
809 log.debug("pci: assign device %s" % bdf_str)
810 self.image.signalDeviceModel('pci-ins', 'pci-inserted', bdf_str)
812 vdevfn = xstransact.Read("/local/domain/0/device-model/%i/parameter"
813 % self.getDomid())
814 try:
815 vdevfn_int = int(vdevfn, 16)
816 except ValueError:
817 raise VmError(("Cannot pass-through PCI function '%s'. " +
818 "Device model reported an error: %s") %
819 (bdf_str, vdevfn))
820 else:
821 vdevfn = new_dev['vdevfn']
823 return vdevfn
826 def device_create(self, dev_config):
827 """Create a new device.
829 @param dev_config: device configuration
830 @type dev_config: SXP object (parsed config)
831 """
832 log.debug("XendDomainInfo.device_create: %s" % scrub_password(dev_config))
833 dev_type = sxp.name(dev_config)
834 dev_uuid = self.info.device_add(dev_type, cfg_sxp = dev_config)
835 dev_config_dict = self.info['devices'][dev_uuid][1]
836 log.debug("XendDomainInfo.device_create: %s" % scrub_password(dev_config_dict))
838 if dev_type == 'vif':
839 for x in dev_config:
840 if x != 'vif' and x[0] == 'mac':
841 if not re.match('^([0-9a-f]{2}:){5}[0-9a-f]{2}$', x[1], re.I):
842 log.error("Virtual network interface creation error - invalid MAC Address entered: %s", x[1])
843 raise VmError("Cannot create a new virtual network interface - MAC address is not valid!");
845 if self.domid is not None:
846 try:
847 dev_config_dict['devid'] = devid = \
848 self._createDevice(dev_type, dev_config_dict)
849 if dev_type == 'tap2':
850 # createDevice may create a blktap1 device if blktap2 is not
851 # installed or if the blktap driver is not supported in
852 # blktap1
853 dev_type = self.getBlockDeviceClass(devid)
854 self._waitForDevice(dev_type, devid)
855 except VmError, ex:
856 del self.info['devices'][dev_uuid]
857 if dev_type == 'pci':
858 for dev in dev_config_dict['devs']:
859 XendAPIStore.deregister(dev['uuid'], 'DPCI')
860 elif dev_type == 'vscsi':
861 for dev in dev_config_dict['devs']:
862 XendAPIStore.deregister(dev['uuid'], 'DSCSI')
863 elif dev_type == 'tap' or dev_type == 'tap2':
864 self.info['vbd_refs'].remove(dev_uuid)
865 else:
866 self.info['%s_refs' % dev_type].remove(dev_uuid)
867 raise ex
868 else:
869 devid = None
871 xen.xend.XendDomain.instance().managed_config_save(self)
872 return self.getDeviceController(dev_type).sxpr(devid)
875 def pci_device_configure(self, dev_sxp, devid = 0, first_dev = False):
876 """Configure an existing pci device.
878 @param dev_sxp: device configuration
879 @type dev_sxp: SXP object (parsed config)
880 @param devid: device id
881 @type devid: int
882 @return: Returns True if successfully updated device
883 @rtype: boolean
884 """
885 log.debug("XendDomainInfo.pci_device_configure: %s"
886 % scrub_password(dev_sxp))
888 dev_class = sxp.name(dev_sxp)
890 if dev_class != 'pci':
891 return False
893 pci_state = sxp.child_value(dev_sxp, 'state')
894 pci_sub_state = sxp.child_value(dev_sxp, 'sub_state')
895 existing_dev_info = self._getDeviceInfo_pci(devid)
897 if existing_dev_info is None and pci_state != 'Initialising':
898 raise XendError("Cannot detach when pci platform does not exist")
900 pci_dev = sxp.children(dev_sxp, 'dev')[0]
901 dev_config = pci_convert_sxp_to_dict(dev_sxp)
902 dev = dev_config['devs'][0]
904 stubdomid = self.getStubdomDomid()
905 # Do HVM specific processing
906 if self.info.is_hvm():
907 from xen.xend import XendDomain
908 if pci_state == 'Initialising':
909 if stubdomid is not None :
910 XendDomain.instance().domain_lookup(stubdomid).pci_device_configure(dev_sxp[:])
912 # HVM PCI device attachment
913 if pci_sub_state == 'Booting':
914 vdevfn = self.hvm_pci_device_insert(dev_config)
915 else:
916 vdevfn = self.hvm_pci_device_create(dev_config)
917 # Update vdevfn
918 dev['vdevfn'] = vdevfn
919 for n in sxp.children(pci_dev):
920 if(n[0] == 'vdevfn'):
921 n[1] = vdevfn
922 else:
923 # HVM PCI device detachment
924 existing_dev_uuid = sxp.child_value(existing_dev_info, 'uuid')
925 existing_pci_conf = self.info['devices'][existing_dev_uuid][1]
926 existing_pci_devs = existing_pci_conf['devs']
927 new_devs = filter(lambda x: pci_dict_cmp(x, dev),
928 existing_pci_devs)
929 if len(new_devs) < 0:
930 raise VmError("Device %s is not connected" %
931 pci_dict_to_bdf_str(dev))
932 new_dev = new_devs[0]
933 # Only tell qemu-dm to unplug function 0.
934 # When unplugging a function, all functions in the
935 # same vslot must be unplugged, and function 0 must
936 # be one of the functions present when a vslot is
937 # hot-plugged. Telling qemu-dm to unplug function 0
938 # also tells it to unplug all other functions in the
939 # same vslot.
940 if (PCI_FUNC(int(new_dev['vdevfn'], 16)) == 0):
941 self.hvm_destroyPCIDevice(new_dev)
942 if stubdomid is not None :
943 XendDomain.instance().domain_lookup(stubdomid).pci_device_configure(dev_sxp[:])
944 # Update vdevfn
945 dev['vdevfn'] = new_dev['vdevfn']
946 for n in sxp.children(pci_dev):
947 if(n[0] == 'vdevfn'):
948 n[1] = new_dev['vdevfn']
949 else:
950 # Do PV specific checking
951 if pci_state == 'Initialising':
952 # PV PCI device attachment
953 self.pci_dev_check_attachability_and_do_FLR(dev)
955 # If pci platform does not exist, create and exit.
956 if existing_dev_info is None :
957 self.device_create(dev_sxp)
958 return True
960 if first_dev is True :
961 existing_dev_uuid = sxp.child_value(existing_dev_info, 'uuid')
962 existing_pci_conf = self.info['devices'][existing_dev_uuid][1]
963 devid = self._createDevice('pci', existing_pci_conf)
964 self.info['devices'][existing_dev_uuid][1]['devid'] = devid
966 if self.domid is not None:
967 # use DevController.reconfigureDevice to change device config
968 dev_control = self.getDeviceController(dev_class)
969 dev_uuid = dev_control.reconfigureDevice(devid, dev_config)
970 if not self.info.is_hvm() and not self.info.is_stubdom():
971 # in PV case, wait until backend state becomes connected.
972 dev_control.waitForDevice_reconfigure(devid)
973 num_devs = dev_control.cleanupDevice(devid)
975 # update XendConfig with new device info
976 if dev_uuid:
977 new_dev_sxp = dev_control.configuration(devid)
978 self.info.device_update(dev_uuid, new_dev_sxp)
980 # If there is no device left, destroy pci and remove config.
981 if num_devs == 0:
982 if self.info.is_hvm():
983 self.destroyDevice('pci', devid, True)
984 else:
985 self.destroyDevice('pci', devid)
986 del self.info['devices'][dev_uuid]
987 else:
988 new_dev_sxp = ['pci']
989 for cur_dev in sxp.children(existing_dev_info, 'dev'):
990 if pci_state == 'Closing':
991 if int(dev['domain'], 16) == int(sxp.child_value(cur_dev, 'domain'), 16) and \
992 int(dev['bus'], 16) == int(sxp.child_value(cur_dev, 'bus'), 16) and \
993 int(dev['slot'], 16) == int(sxp.child_value(cur_dev, 'slot'), 16) and \
994 int(dev['func'], 16) == int(sxp.child_value(cur_dev, 'func'), 16):
995 continue
996 new_dev_sxp.append(cur_dev)
998 if pci_state == 'Initialising' and pci_sub_state != 'Booting':
999 for new_dev in sxp.children(dev_sxp, 'dev'):
1000 new_dev_sxp.append(new_dev)
1002 dev_uuid = sxp.child_value(existing_dev_info, 'uuid')
1003 self.info.device_update(dev_uuid, new_dev_sxp)
1005 # If there is no device left, remove config.
1006 if len(sxp.children(new_dev_sxp, 'dev')) == 0:
1007 del self.info['devices'][dev_uuid]
1009 xen.xend.XendDomain.instance().managed_config_save(self)
1011 return True
1013 def vscsi_device_configure(self, dev_sxp):
1014 """Configure an existing vscsi device.
1015 quoted pci function
1016 """
1017 def _is_vscsi_defined(dev_info, p_devs = None, v_devs = None):
1018 if not dev_info:
1019 return False
1020 for dev in sxp.children(dev_info, 'dev'):
1021 if p_devs is not None:
1022 if sxp.child_value(dev, 'p-dev') in p_devs:
1023 return True
1024 if v_devs is not None:
1025 if sxp.child_value(dev, 'v-dev') in v_devs:
1026 return True
1027 return False
1029 def _vscsi_be(be):
1030 be_xdi = xen.xend.XendDomain.instance().domain_lookup_nr(be)
1031 if be_xdi is not None:
1032 be_domid = be_xdi.getDomid()
1033 if be_domid is not None:
1034 return str(be_domid)
1035 return str(be)
1037 dev_class = sxp.name(dev_sxp)
1038 if dev_class != 'vscsi':
1039 return False
1041 dev_config = self.info.vscsi_convert_sxp_to_dict(dev_sxp)
1042 devs = dev_config['devs']
1043 v_devs = [d['v-dev'] for d in devs]
1044 state = devs[0]['state']
1045 req_devid = int(devs[0]['devid'])
1046 cur_dev_sxp = self._getDeviceInfo_vscsi(req_devid)
1048 if state == xenbusState['Initialising']:
1049 # new create
1050 # If the requested devid does not exist, create it and exit.
1051 p_devs = [d['p-dev'] for d in devs]
1052 for dev_type, dev_info in self.info.all_devices_sxpr():
1053 if dev_type != 'vscsi':
1054 continue
1055 if _is_vscsi_defined(dev_info, p_devs = p_devs):
1056 raise XendError('The physical device "%s" is already defined' % \
1057 p_devs[0])
1058 if cur_dev_sxp is None:
1059 self.device_create(dev_sxp)
1060 return True
1062 if _is_vscsi_defined(cur_dev_sxp, v_devs = v_devs):
1063 raise XendError('The virtual device "%s" is already defined' % \
1064 v_devs[0])
1066 if int(dev_config['feature-host']) != \
1067 int(sxp.child_value(cur_dev_sxp, 'feature-host')):
1068 raise XendError('The physical device "%s" cannot define '
1069 'because mode is different' % devs[0]['p-dev'])
1071 new_be = dev_config.get('backend', None)
1072 if new_be is not None:
1073 cur_be = sxp.child_value(cur_dev_sxp, 'backend', None)
1074 if cur_be is None:
1075 cur_be = xen.xend.XendDomain.DOM0_ID
1076 new_be_dom = _vscsi_be(new_be)
1077 cur_be_dom = _vscsi_be(cur_be)
1078 if new_be_dom != cur_be_dom:
1079 raise XendError('The physical device "%s" cannot define '
1080 'because backend is different' % devs[0]['p-dev'])
1082 elif state == xenbusState['Closing']:
1083 if not _is_vscsi_defined(cur_dev_sxp, v_devs = v_devs):
1084 raise XendError("Cannot detach vscsi device does not exist")
1086 if self.domid is not None:
1087 # use DevController.reconfigureDevice to change device config
1088 dev_control = self.getDeviceController(dev_class)
1089 dev_uuid = dev_control.reconfigureDevice(req_devid, dev_config)
1090 dev_control.waitForDevice_reconfigure(req_devid)
1091 num_devs = dev_control.cleanupDevice(req_devid)
1093 # update XendConfig with new device info
1094 if dev_uuid:
1095 new_dev_sxp = dev_control.configuration(req_devid)
1096 self.info.device_update(dev_uuid, new_dev_sxp)
1098 # If there is no device left, destroy vscsi and remove config.
1099 if num_devs == 0:
1100 self.destroyDevice('vscsi', req_devid)
1101 del self.info['devices'][dev_uuid]
1103 else:
1104 new_dev_sxp = ['vscsi']
1105 cur_mode = sxp.children(cur_dev_sxp, 'feature-host')[0]
1106 new_dev_sxp.append(cur_mode)
1107 try:
1108 cur_be = sxp.children(cur_dev_sxp, 'backend')[0]
1109 new_dev_sxp.append(cur_be)
1110 except IndexError:
1111 pass
1113 for cur_dev in sxp.children(cur_dev_sxp, 'dev'):
1114 if state == xenbusState['Closing']:
1115 if int(cur_mode[1]) == 1:
1116 continue
1117 if sxp.child_value(cur_dev, 'v-dev') in v_devs:
1118 continue
1119 new_dev_sxp.append(cur_dev)
1121 if state == xenbusState['Initialising']:
1122 for new_dev in sxp.children(dev_sxp, 'dev'):
1123 new_dev_sxp.append(new_dev)
1125 dev_uuid = sxp.child_value(cur_dev_sxp, 'uuid')
1126 self.info.device_update(dev_uuid, new_dev_sxp)
1128 # If there is only 'vscsi' in new_dev_sxp, remove the config.
1129 if len(sxp.children(new_dev_sxp, 'dev')) == 0:
1130 del self.info['devices'][dev_uuid]
1132 xen.xend.XendDomain.instance().managed_config_save(self)
1134 return True
1136 def vusb_device_configure(self, dev_sxp, devid):
1137 """Configure a virtual root port.
1138 """
1139 dev_class = sxp.name(dev_sxp)
1140 if dev_class != 'vusb':
1141 return False
1143 dev_config = {}
1144 ports = sxp.child(dev_sxp, 'port')
1145 for port in ports[1:]:
1146 try:
1147 num, bus = port
1148 dev_config['port-%i' % int(num)] = str(bus)
1149 except TypeError:
1150 pass
1152 dev_control = self.getDeviceController(dev_class)
1153 dev_control.reconfigureDevice(devid, dev_config)
1155 return True
1157 def device_configure(self, dev_sxp, devid = None):
1158 """Configure an existing device.
1160 @param dev_sxp: device configuration
1161 @type dev_sxp: SXP object (parsed config)
1162 @param devid: device id
1163 @type devid: int
1164 @return: Returns True if successfully updated device
1165 @rtype: boolean
1166 """
1168 # convert device sxp to a dict
1169 dev_class = sxp.name(dev_sxp)
1170 dev_config = {}
1172 if dev_class == 'pci':
1173 return self.pci_device_configure(dev_sxp)
1175 if dev_class == 'vscsi':
1176 return self.vscsi_device_configure(dev_sxp)
1178 if dev_class == 'vusb':
1179 return self.vusb_device_configure(dev_sxp, devid)
1181 for opt_val in dev_sxp[1:]:
1182 try:
1183 dev_config[opt_val[0]] = opt_val[1]
1184 except IndexError:
1185 pass
1187 dev_control = self.getDeviceController(dev_class)
1188 if devid is None:
1189 dev = dev_config.get('dev', '')
1190 if not dev:
1191 raise VmError('Block device must have virtual details specified')
1192 if 'ioemu:' in dev:
1193 (_, dev) = dev.split(':', 1)
1194 try:
1195 (dev, _) = dev.split(':', 1) # Remove ":disk" or ":cdrom"
1196 except ValueError:
1197 pass
1198 devid = dev_control.convertToDeviceNumber(dev)
1199 dev_info = self._getDeviceInfo_vbd(devid)
1200 if dev_info is None:
1201 raise VmError("Device %s not connected" % devid)
1202 dev_uuid = sxp.child_value(dev_info, 'uuid')
1204 if self.domid is not None:
1205 # use DevController.reconfigureDevice to change device config
1206 dev_control.reconfigureDevice(devid, dev_config)
1207 else:
1208 (_, new_b, new_f) = dev_control.getDeviceDetails(dev_config)
1209 if (new_f['device-type'] == 'cdrom' and
1210 sxp.child_value(dev_info, 'dev').endswith(':cdrom') and
1211 new_b['mode'] == 'r' and
1212 sxp.child_value(dev_info, 'mode') == 'r'):
1213 pass
1214 else:
1215 raise VmError('Refusing to reconfigure device %s:%d to %s' %
1216 (dev_class, devid, dev_config))
1218 # update XendConfig with new device info
1219 self.info.device_update(dev_uuid, dev_sxp)
1220 xen.xend.XendDomain.instance().managed_config_save(self)
1222 return True
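# Worked example of the device-name normalisation above:
#
#     'ioemu:hda:disk'  ->  split(':', 1)  ->  'hda:disk'
#                       ->  split(':', 1)  ->  'hda'
#
# after which convertToDeviceNumber('hda') yields the devid used to look
# up the vbd. A plain name such as 'xvdb' skips the first split, and the
# second split's ValueError is deliberately ignored.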
1224 def waitForDevices(self):
1225 """Wait for this domain's configured devices to connect.
1227 @raise VmError: if any device fails to initialise.
1228 """
1229 for devclass in XendDevices.valid_devices():
1230 self.getDeviceController(devclass).waitForDevices()
1232 def hvm_destroyPCIDevice(self, pci_dev):
1233 log.debug("hvm_destroyPCIDevice: %s", pci_dev)
1235 if not self.info.is_hvm():
1236 raise VmError("hvm_destroyPCIDevice called on non-HVM guest")
1238 # Check the co-assignment.
1239 # To pci-detach a device D from domN, we should ensure: for each DD in
1240 # the list of D's co-assigned devices, DD is not assigned (to domN).
1242 from xen.xend.server.pciif import PciDevice
1243 try:
1244 pci_device = PciDevice(pci_dev)
1245 except Exception, e:
1246 raise VmError("pci: failed to locate device and "+
1247 "parse its resources - "+str(e))
1248 coassignment_list = pci_device.find_coassigned_devices()
1249 coassignment_list.remove(pci_device.name)
1250 assigned_pci_device_str_list = self._get_assigned_pci_devices()
1251 for pci_str in coassignment_list:
1252 if xoptions.get_pci_dev_assign_strict_check() and \
1253 pci_str in assigned_pci_device_str_list:
1254 raise VmError(("pci: failed to pci-detach %s from domain %s" + \
1255 " because one of its co-assignment device %s is still " + \
1256 " assigned to the domain." \
1257 )% (pci_device.name, self.info['name_label'], pci_str))
1260 bdf_str = pci_dict_to_bdf_str(pci_dev)
1261 log.info("hvm_destroyPCIDevice:%s:%s!", pci_dev, bdf_str)
1262 if self.domid is not None:
1263 self.image.signalDeviceModel('pci-rem', 'pci-removed', bdf_str)
1265 return 0
1267 def destroyDevice(self, deviceClass, devid, force = False, rm_cfg = False):
1268 log.debug("XendDomainInfo.destroyDevice: deviceClass = %s, device = %s",
1269 deviceClass, devid)
1271 if rm_cfg:
1272 # Convert devid to device number. A device number is
1273 # needed to remove its configuration.
1274 dev = self.getDeviceController(deviceClass).convertToDeviceNumber(devid)
1276 # Save current sxprs. A device number and a backend
1277 # path are needed to remove its configuration but sxprs
1278 # do not have those after calling destroyDevice.
1279 sxprs = self.getDeviceSxprs(deviceClass)
1281 rc = None
1282 if self.domid is not None:
1284 #new blktap implementation may need a sysfs write after everything is torn down.
1285 if deviceClass == 'tap2':
1286 dev = self.getDeviceController(deviceClass).convertToDeviceNumber(devid)
1287 path = self.getDeviceController(deviceClass).readBackend(dev, 'params')
1288 frontpath = self.getDeviceController(deviceClass).frontendPath(dev)
1289 backpath = xstransact.Read(frontpath, "backend")
1290 thread.start_new_thread(self.getDeviceController(deviceClass).finishDeviceCleanup, (backpath, path))
1292 rc = self.getDeviceController(deviceClass).destroyDevice(devid, force)
1293 if not force and rm_cfg:
1294 # The backend path, other than the device itself,
1295 # has to be passed because its accompanied frontend
1296 # path may be void until its removal is actually
1297 # issued. It is probable because destroyDevice is
1298 # issued first.
1299 for dev_num, dev_info in sxprs:
1300 dev_num = int(dev_num)
1301 if dev_num == dev:
1302 for x in dev_info:
1303 if x[0] == 'backend':
1304 backend = x[1]
1305 break
1306 break
1307 self._waitForDevice_destroy(deviceClass, devid, backend)
1309 if rm_cfg and deviceClass != "vif2":
1310 if deviceClass == 'vif':
1311 if self.domid is not None:
1312 mac = ''
1313 for dev_num, dev_info in sxprs:
1314 dev_num = int(dev_num)
1315 if dev_num == dev:
1316 for x in dev_info:
1317 if x[0] == 'mac':
1318 mac = x[1]
1319 break
1320 break
1321 dev_info = self._getDeviceInfo_vif(mac)
1322 else:
1323 _, dev_info = sxprs[dev]
1324 else: # 'vbd' or 'tap' or 'tap2'
1325 dev_info = self._getDeviceInfo_vbd(dev)
1326 # To remove the UUID of the device from refs,
1327 # deviceClass must be always 'vbd'.
1328 deviceClass = 'vbd'
1329 if dev_info is None:
1330 raise XendError("Device %s is not defined" % devid)
1332 dev_uuid = sxp.child_value(dev_info, 'uuid')
1333 del self.info['devices'][dev_uuid]
1334 self.info['%s_refs' % deviceClass].remove(dev_uuid)
1335 xen.xend.XendDomain.instance().managed_config_save(self)
1337 return rc
1339 def getDeviceSxprs(self, deviceClass):
1340 if deviceClass == 'pci':
1341 dev_info = self._getDeviceInfo_pci('0')#from self.info['devices']
1342 if dev_info is None:
1343 return []
1344 dev_uuid = sxp.child_value(dev_info, 'uuid')
1345 pci_devs = self.info['devices'][dev_uuid][1]['devs']
1346 return pci_devs
1347 if self._stateGet() in (DOM_STATE_RUNNING, DOM_STATE_PAUSED, DOM_STATE_CRASHED):
1348 return self.getDeviceController(deviceClass).sxprs()
1349 else:
1350 sxprs = []
1351 dev_num = 0
1352 for dev_type, dev_info in self.info.all_devices_sxpr():
1353 if (deviceClass == 'vbd' and dev_type not in ['vbd', 'tap', 'tap2']) or \
1354 (deviceClass != 'vbd' and dev_type != deviceClass):
1355 continue
1357 if deviceClass == 'vscsi':
1358 vscsi_devs = ['devs', []]
1359 for vscsi_dev in sxp.children(dev_info, 'dev'):
1360 vscsi_dev.append(['frontstate', None])
1361 vscsi_devs[1].append(vscsi_dev)
1362 dev_num = int(sxp.child_value(vscsi_dev, 'devid'))
1363 vscsi_mode = sxp.children(dev_info, 'feature-host')[0]
1364 sxprs.append([dev_num, [vscsi_devs, vscsi_mode]])
1365 elif deviceClass == 'vbd':
1366 dev = sxp.child_value(dev_info, 'dev')
1367 if 'ioemu:' in dev:
1368 (_, dev) = dev.split(':', 1)
1369 try:
1370 (dev_name, _) = dev.split(':', 1) # Remove ":disk" or ":cdrom"
1371 except ValueError:
1372 dev_name = dev
1373 dev_num = self.getDeviceController('vbd').convertToDeviceNumber(dev_name)
1374 sxprs.append([dev_num, dev_info])
1375 else:
1376 sxprs.append([dev_num, dev_info])
1377 dev_num += 1
1378 return sxprs
1380 def getBlockDeviceClass(self, devid):
1381 # if the domain is running we can get the device class from xenstore.
1382 # This is more accurate, as blktap1 devices show up as blktap2 devices
1383 # in the config.
1384 if self._stateGet() in (DOM_STATE_RUNNING, DOM_STATE_PAUSED, DOM_STATE_CRASHED):
1385 # All block devices have a vbd frontend, so we know the frontend path
1386 dev = self.getDeviceController('vbd').convertToDeviceNumber(devid)
1387 frontendPath = "%s/device/vbd/%s" % (self.dompath, dev)
1388 for devclass in XendDevices.valid_devices():
1389 for dev in xstransact.List("%s/device/%s" % (self.vmpath, devclass)):
1390 devFrontendPath = xstransact.Read("%s/device/%s/%s/frontend" % (self.vmpath, devclass, dev))
1391 if frontendPath == devFrontendPath:
1392 return devclass
1394 else: # the domain is not active so we must get the device class
1395 # from the config
1396 # To get a device number from the devid,
1397 # we temporarily use the device controller of VBD.
1398 dev = self.getDeviceController('vbd').convertToDeviceNumber(devid)
1399 dev_info = self._getDeviceInfo_vbd(dev)
1400 if dev_info:
1401 return dev_info[0]
1403 def _getDeviceInfo_vif(self, mac):
1404 for dev_type, dev_info in self.info.all_devices_sxpr():
1405 if dev_type != 'vif':
1406 continue
1407 if mac == sxp.child_value(dev_info, 'mac'):
1408 return dev_info
1410 def _getDeviceInfo_vbd(self, devid):
1411 for dev_type, dev_info in self.info.all_devices_sxpr():
1412 if dev_type != 'vbd' and dev_type != 'tap' and dev_type != 'tap2':
1413 continue
1414 dev = sxp.child_value(dev_info, 'dev')
1415 dev = dev.split(':')[0]
1416 dev = self.getDeviceController(dev_type).convertToDeviceNumber(dev)
1417 if devid == dev:
1418 return dev_info
1420 def _getDeviceInfo_pci(self, devid):
1421 for dev_type, dev_info in self.info.all_devices_sxpr():
1422 if dev_type != 'pci':
1423 continue
1424 return dev_info
1425 return None
1427 def _getDeviceInfo_vscsi(self, devid):
1428 devid = int(devid)
1429 for dev_type, dev_info in self.info.all_devices_sxpr():
1430 if dev_type != 'vscsi':
1431 continue
1432 devs = sxp.children(dev_info, 'dev')
1433 if devid == int(sxp.child_value(devs[0], 'devid')):
1434 return dev_info
1435 return None
1437 def _getDeviceInfo_vusb(self, devid):
1438 for dev_type, dev_info in self.info.all_devices_sxpr():
1439 if dev_type != 'vusb':
1440 continue
1441 return dev_info
1442 return None
1444 def _get_assigned_pci_devices(self, devid = 0):
1445 if self.domid is not None:
1446 return get_assigned_pci_devices(self.domid)
1448 dev_info = self._getDeviceInfo_pci(devid)
1449 if dev_info is None:
1450 return []
1451 dev_uuid = sxp.child_value(dev_info, 'uuid')
1452 pci_conf = self.info['devices'][dev_uuid][1]
1453 return map(pci_dict_to_bdf_str, pci_conf['devs'])
1455 def setMemoryTarget(self, target):
1456 """Set the memory target of this domain.
1457 @param target: In MiB.
1458 """
1459 log.debug("Setting memory target of domain %s (%s) to %d MiB.",
1460 self.info['name_label'], str(self.domid), target)
1462 MiB = 1024 * 1024
1463 memory_cur = self.get_memory_dynamic_max() / MiB
1465 if self.domid == 0:
1466 dom0_min_mem = xoptions.get_dom0_min_mem()
1467 if target < memory_cur and dom0_min_mem > target:
1468 raise XendError("memory_dynamic_max too small")
1470 self._safe_set_memory('memory_dynamic_min', target * MiB)
1471 self._safe_set_memory('memory_dynamic_max', target * MiB)
1473 if self.domid >= 0:
1474 if target > memory_cur:
1475 balloon.free((target - memory_cur) * 1024, self)
1476 self.storeVm("memory", target)
1477 self.storeDom("memory/target", target << 10)
1478 xc.domain_set_target_mem(self.domid,
1479 (target * 1024))
1480 xen.xend.XendDomain.instance().managed_config_save(self)
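# Worked example of the unit handling above, for target = 512 (MiB):
#
#     memory_dynamic_min/max, stored in bytes:  512 * MiB  == 536870912
#     /vm 'memory' key, in MiB:                 512
#     memory/target in xenstore, in KiB:        512 << 10  == 524288
#     xc.domain_set_target_mem(), in KiB:       512 * 1024 == 524288
#
# balloon.free() is asked for (target - memory_cur) * 1024 KiB only when
# the domain is growing.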
1482 def setMemoryMaximum(self, limit):
1483 """Set the maximum memory limit of this domain
1484 @param limit: In MiB.
1485 """
1486 log.debug("Setting memory maximum of domain %s (%s) to %d MiB.",
1487 self.info['name_label'], str(self.domid), limit)
1489 maxmem_cur = self.get_memory_static_max()
1490 MiB = 1024 * 1024
1491 self._safe_set_memory('memory_static_max', limit * MiB)
1493 if self.domid >= 0:
1494 maxmem = int(limit) * 1024
1495 try:
1496 return xc.domain_setmaxmem(self.domid, maxmem)
1497 except Exception, ex:
1498 self._safe_set_memory('memory_static_max', maxmem_cur)
1499 raise XendError(str(ex))
1500 xen.xend.XendDomain.instance().managed_config_save(self)
1503 def getVCPUInfo(self):
1504 try:
1505 # We include the domain name and ID, to help xm.
1506 sxpr = ['domain',
1507 ['domid', self.domid],
1508 ['name', self.info['name_label']],
1509 ['vcpu_count', self.info['VCPUs_max']]]
1511 for i in range(0, self.info['VCPUs_max']):
1512 if self.domid is not None:
1513 info = xc.vcpu_getinfo(self.domid, i)
1515 sxpr.append(['vcpu',
1516 ['number', i],
1517 ['online', info['online']],
1518 ['blocked', info['blocked']],
1519 ['running', info['running']],
1520 ['cpu_time', info['cpu_time'] / 1e9],
1521 ['cpu', info['cpu']],
1522 ['cpumap', info['cpumap']]])
1523 else:
1524 sxpr.append(['vcpu',
1525 ['number', i],
1526 ['online', 0],
1527 ['blocked', 0],
1528 ['running', 0],
1529 ['cpu_time', 0.0],
1530 ['cpu', -1],
1531 ['cpumap', self.info['cpus'][i] and \
1532 self.info['cpus'][i] or range(64)]])
1534 return sxpr
1536 except RuntimeError, exn:
1537 raise XendError(str(exn))
1540 def getDomInfo(self):
1541 return dom_get(self.domid)
1544 # internal functions ... TODO: re-categorise
1547 def _augmentInfo(self, priv):
1548 """Augment self.info, as given to us through L{recreate}, with
1549 values taken from the store. This recovers those values known
1550 to xend but not to the hypervisor.
1551 """
1552 augment_entries = XendConfig.LEGACY_XENSTORE_VM_PARAMS[:]
1553 if priv:
1554 augment_entries.remove('memory')
1555 augment_entries.remove('maxmem')
1556 augment_entries.remove('vcpus')
1557 augment_entries.remove('vcpu_avail')
1559 vm_config = self._readVMDetails([(k, XendConfig.LEGACY_CFG_TYPES[k])
1560 for k in augment_entries])
1562 # make returned lists into a dictionary
1563 vm_config = dict(zip(augment_entries, vm_config))
1565 for arg in augment_entries:
1566 val = vm_config[arg]
1567 if val is not None:
1568 if arg in XendConfig.LEGACY_CFG_TO_XENAPI_CFG:
1569 xapiarg = XendConfig.LEGACY_CFG_TO_XENAPI_CFG[arg]
1570 self.info[xapiarg] = val
1571 elif arg == "memory":
1572 self.info["static_memory_min"] = val
1573 elif arg == "maxmem":
1574 self.info["static_memory_max"] = val
1575 else:
1576 self.info[arg] = val
1578 # read CPU Affinity
1579 self.info['cpus'] = []
1580 vcpus_info = self.getVCPUInfo()
1581 for vcpu_info in sxp.children(vcpus_info, 'vcpu'):
1582 self.info['cpus'].append(sxp.child_value(vcpu_info, 'cpumap'))
1584 # For dom0, we ignore any stored value for the vcpus fields, and
1585 # read the current value from Xen instead. This allows boot-time
1586 # settings to take precedence over any entries in the store.
1587 if priv:
1588 xeninfo = dom_get(self.domid)
1589 self.info['VCPUs_max'] = xeninfo['online_vcpus']
1590 self.info['vcpu_avail'] = (1 << xeninfo['online_vcpus']) - 1
1592 # read image value
1593 image_sxp = self._readVm('image')
1594 if image_sxp:
1595 self.info.update_with_image_sxp(sxp.from_string(image_sxp))
1597 # read devices
1598 devices = []
1599 for devclass in XendDevices.valid_devices():
1600 devconfig = self.getDeviceController(devclass).configurations()
1601 if devconfig:
1602 devices.extend(devconfig)
1604 if not self.info['devices'] and devices is not None:
1605 for device in devices:
1606 self.info.device_add(device[0], cfg_sxp = device)
1608 self._update_consoles()
1610 def _update_consoles(self, transaction = None):
1611 if self.domid is None or self.domid == 0:
1612 return
1614 # Update VT100 port if it exists
1615 if transaction is None:
1616 self.console_port = self.readDom('console/port')
1617 else:
1618 self.console_port = self.readDomTxn(transaction, 'console/port')
1619 if self.console_port is not None:
1620 serial_consoles = self.info.console_get_all('vt100')
1621 if not serial_consoles:
1622 cfg = self.info.console_add('vt100', self.console_port)
1623 self._createDevice('console', cfg)
1624 else:
1625 console_uuid = serial_consoles[0].get('uuid')
1626 self.info.console_update(console_uuid, 'location',
1627 self.console_port)
1630 # Update VNC port if it exists and write to xenstore
1631 if transaction is None:
1632 vnc_port = self.readDom('console/vnc-port')
1633 else:
1634 vnc_port = self.readDomTxn(transaction, 'console/vnc-port')
1635 if vnc_port is not None:
1636 for dev_uuid, (dev_type, dev_info) in self.info['devices'].items():
1637 if dev_type == 'vfb':
1638 old_location = dev_info.get('location')
1639 listen_host = dev_info.get('vnclisten', \
1640 XendOptions.instance().get_vnclisten_address())
1641 new_location = '%s:%s' % (listen_host, str(vnc_port))
1642 if old_location == new_location:
1643 break
1645 dev_info['location'] = new_location
1646 self.info.device_update(dev_uuid, cfg_xenapi = dev_info)
1647 vfb_ctrl = self.getDeviceController('vfb')
1648 vfb_ctrl.reconfigureDevice(0, dev_info)
1649 break
1652 # Function to update xenstore /vm/*
1655 def _readVm(self, *args):
1656 return xstransact.Read(self.vmpath, *args)
1658 def _writeVm(self, *args):
1659 return xstransact.Write(self.vmpath, *args)
1661 def _removeVm(self, *args):
1662 return xstransact.Remove(self.vmpath, *args)
1664 def _gatherVm(self, *args):
1665 return xstransact.Gather(self.vmpath, *args)
1667 def _listRecursiveVm(self, *args):
1668 return xstransact.ListRecursive(self.vmpath, *args)
1670 def storeVm(self, *args):
1671 return xstransact.Store(self.vmpath, *args)
1673 def permissionsVm(self, *args):
1674 return xstransact.SetPermissions(self.vmpath, *args)
1677 # Function to update xenstore /dom/*
1680 def readDom(self, *args):
1681 return xstransact.Read(self.dompath, *args)
1683 def gatherDom(self, *args):
1684 return xstransact.Gather(self.dompath, *args)
1686 def _writeDom(self, *args):
1687 return xstransact.Write(self.dompath, *args)
1689 def _removeDom(self, *args):
1690 return xstransact.Remove(self.dompath, *args)
1692 def storeDom(self, *args):
1693 return xstransact.Store(self.dompath, *args)
1696 def readDomTxn(self, transaction, *args):
1697 paths = map(lambda x: self.dompath + "/" + x, args)
1698 return transaction.read(*paths)
1700 def gatherDomTxn(self, transaction, *args):
1701 paths = map(lambda x: self.dompath + "/" + x, args)
1702 return transaction.gather(*paths)
1704 def _writeDomTxn(self, transaction, *args):
1705 paths = map(lambda x: self.dompath + "/" + x, args)
1706 return transaction.write(*paths)
1708 def _removeDomTxn(self, transaction, *args):
1709 paths = map(lambda x: self.dompath + "/" + x, args)
1710 return transaction.remove(*paths)
1712 def storeDomTxn(self, transaction, *args):
1713 paths = map(lambda x: self.dompath + "/" + x, args)
1714 return transaction.store(*paths)
1717 def _recreateDom(self):
1718 complete(self.dompath, lambda t: self._recreateDomFunc(t))
1720 def _recreateDomFunc(self, t):
1721 t.remove()
1722 t.mkdir()
1723 t.set_permissions({'dom' : self.domid, 'read' : True})
1724 t.write('vm', self.vmpath)
1725 # NB. Solaris guests use guest/ and hvmpv/ xenstore directories
1726 # XCP Windows paravirtualized guests use data/
1727 for i in [ 'device', 'control', 'error', 'memory', 'guest', \
1728 'hvmpv', 'data' ]:
1729 t.mkdir(i)
1730 t.set_permissions(i, {'dom' : self.domid})
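# Illustrative sketch of the xenstore layout this produces (paths assumed
# from the writes above, <dompath> being e.g. /local/domain/<domid>):
#
#   <dompath>/vm -> /vm/<uuid>
#   <dompath>/{device,control,error,memory,guest,hvmpv,data}
#
# where each subdirectory is owned by the domain itself, so the guest can
# read and write its own entries there.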
1732 def _storeDomDetails(self):
1733 to_store = {
1734 'domid': str(self.domid),
1735 'vm': self.vmpath,
1736 'name': self.info['name_label'],
1737 'console/limit': str(xoptions.get_console_limit() * 1024),
1738 'memory/target': str(self.info['memory_dynamic_max'] / 1024),
1739 'description': str(self.info['description']),
1740 }
1742 def f(n, v):
1743 if v is not None:
1744 if type(v) == bool:
1745 to_store[n] = v and "1" or "0"
1746 else:
1747 to_store[n] = str(v)
1749 # Figure out if we need to tell xenconsoled to ignore this guest's
1750 # console - device model will handle console if it is running
1751 constype = "ioemu"
1752 if 'device_model' not in self.info['platform']:
1753 constype = "xenconsoled"
1755 f('console/port', self.console_port)
1756 f('console/ring-ref', self.console_mfn)
1757 f('console/type', constype)
1758 f('store/port', self.store_port)
1759 f('store/ring-ref', self.store_mfn)
1761 if arch.type == "x86":
1762 f('control/platform-feature-multiprocessor-suspend', True)
1764 # elfnotes
1765 for n, v in self.info.get_notes().iteritems():
1766 n = n.lower().replace('_', '-')
1767 if n == 'features':
1768 for v in v.split('|'):
1769 v = v.replace('_', '-')
1770 if v.startswith('!'):
1771 f('image/%s/%s' % (n, v[1:]), False)
1772 else:
1773 f('image/%s/%s' % (n, v), True)
1774 else:
1775 f('image/%s' % n, v)
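# Illustrative example (note values assumed): an ELF FEATURES note such as
# 'writable_page_tables|!auto_translated_physmap' ends up as two keys:
#
#   image/features/writable-page-tables    = "1"
#   image/features/auto-translated-physmap = "0"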
1777 if self.info.has_key('security_label'):
1778 f('security_label', self.info['security_label'])
1780 to_store.update(self._vcpuDomDetails())
1782 log.debug("Storing domain details: %s", scrub_password(to_store))
1784 self._writeDom(to_store)
1786 def _vcpuDomDetails(self):
1787 def availability(n):
1788 if self.info['vcpu_avail'] & (1 << n):
1789 return 'online'
1790 else:
1791 return 'offline'
1793 result = {}
1794 for v in range(0, self.info['VCPUs_max']):
1795 result["cpu/%d/availability" % v] = availability(v)
1796 return result
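# Illustrative example (values assumed): with VCPUs_max == 3 and
# vcpu_avail == 0b101, this returns
#
#   {'cpu/0/availability': 'online',
#    'cpu/1/availability': 'offline',
#    'cpu/2/availability': 'online'}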
1799 # xenstore watches
1802 def _registerWatches(self):
1803 """Register a watch on this VM's entries in the store, and the
1804 domain's control/shutdown node, so that when they are changed
1805 externally, we keep up to date. This should only be called by {@link
1806 #create}, {@link #recreate}, or {@link #restore}, once the domain's
1807 details have been written, but before the new instance is returned."""
1808 self.vmWatch = xswatch(self.vmpath, self._storeChanged)
1809 self.shutdownWatch = xswatch(self.dompath + '/control/shutdown',
1810 self._handleShutdownWatch)
1812 def _storeChanged(self, _):
1813 log.trace("XendDomainInfo.storeChanged")
1815 changed = False
1817 # Check whether values in the configuration have
1818 # changed in Xenstore.
1820 cfg_vm = ['name', 'on_poweroff', 'on_reboot', 'on_crash',
1821 'rtc/timeoffset']
1823 vm_details = self._readVMDetails([(k,XendConfig.LEGACY_CFG_TYPES[k])
1824 for k in cfg_vm])
1826 # convert two lists into a python dictionary
1827 vm_details = dict(zip(cfg_vm, vm_details))
1829 for arg, val in vm_details.items():
1830 if arg in XendConfig.LEGACY_CFG_TO_XENAPI_CFG:
1831 xapiarg = XendConfig.LEGACY_CFG_TO_XENAPI_CFG[arg]
1832 if val is not None and val != self.info[xapiarg]:
1833 self.info[xapiarg] = val
1834 changed = True
1835 elif arg == "memory":
1836 if val is not None and val != self.info["static_memory_min"]:
1837 self.info["static_memory_min"] = val
1838 changed = True
1839 elif arg == "maxmem":
1840 if val is not None and val != self.info["static_memory_max"]:
1841 self.info["static_memory_max"] = val
1842 changed = True
1844 # Check whether image definition has been updated
1845 image_sxp = self._readVm('image')
1846 if image_sxp and image_sxp != sxp.to_string(self.info.image_sxpr()):
1847 self.info.update_with_image_sxp(sxp.from_string(image_sxp))
1848 changed = True
1850 # Update the rtc_timeoffset to be preserved across reboot.
1851 # NB. No need to update xenstore domain section.
1852 val = int(vm_details.get("rtc/timeoffset", 0))
1853 self.info["platform"]["rtc_timeoffset"] = val
1855 if changed:
1856 # Update the domain section of the store, as this contains some
1857 # parameters derived from the VM configuration.
1858 self.refresh_shutdown_lock.acquire()
1859 try:
1860 state = self._stateGet()
1861 if state not in (DOM_STATE_SHUTDOWN, DOM_STATE_HALTED,):
1862 self._storeDomDetails()
1863 finally:
1864 self.refresh_shutdown_lock.release()
1866 return 1
1868 def _handleShutdownWatch(self, _):
1869 log.debug('XendDomainInfo.handleShutdownWatch')
1871 reason = self.readDom('control/shutdown')
1873 if reason and reason != 'suspend':
1874 sst = self.readDom('xend/shutdown_start_time')
1875 now = time.time()
1876 if sst:
1877 self.shutdownStartTime = float(sst)
1878 timeout = float(sst) + SHUTDOWN_TIMEOUT - now
1879 else:
1880 self.shutdownStartTime = now
1881 self.storeDom('xend/shutdown_start_time', now)
1882 timeout = SHUTDOWN_TIMEOUT
1884 log.trace(
1885 "Scheduling refreshShutdown on domain %d in %ds.",
1886 self.domid, timeout)
1887 threading.Timer(timeout, self.refreshShutdown).start()
1889 return True
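# Worked example (numbers assumed): if the shutdown request was first seen
# 10s ago (sst == now - 10) and SHUTDOWN_TIMEOUT is 30, then
# timeout == sst + 30 - now == 20, so refreshShutdown fires 30 seconds
# after the request was originally stored, not 30 seconds from now.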
1893 # Public Attributes for the VM
1897 def getDomid(self):
1898 return self.domid
1900 def getStubdomDomid(self):
1901 dom_list = xstransact.List('/local/domain')
1902 for d in dom_list:
1903 target = xstransact.Read('/local/domain/' + d + '/target')
1904 if target is not None and int(target) == self.domid:
1905 return int(d)
1906 return None
1908 def setName(self, name, to_store = True):
1909 self._checkName(name)
1910 self.info['name_label'] = name
1911 if to_store:
1912 self.storeVm("name", name)
1914 def getName(self):
1915 return self.info['name_label']
1917 def getDomainPath(self):
1918 return self.dompath
1920 def getShutdownReason(self):
1921 return self.readDom('control/shutdown')
1923 def getStorePort(self):
1924 """For use only by image.py and XendCheckpoint.py."""
1925 return self.store_port
1927 def getConsolePort(self):
1928 """For use only by image.py and XendCheckpoint.py"""
1929 return self.console_port
1931 def getFeatures(self):
1932 """For use only by image.py."""
1933 return self.info['features']
1935 def getVCpuCount(self):
1936 return self.info['VCPUs_max']
1938 def getVCpuAvail(self):
1939 return self.info['vcpu_avail']
1941 def setVCpuCount(self, vcpus):
1942 def vcpus_valid(n):
1943 if vcpus <= 0:
1944 raise XendError('Zero or less VCPUs is invalid')
1945 if self.domid >= 0 and vcpus > self.info['VCPUs_max']:
1946 raise XendError('Cannot set vcpus greater than max vcpus on running domain')
1947 vcpus_valid(vcpus)
1949 self.info['vcpu_avail'] = (1 << vcpus) - 1
1950 if self.domid >= 0:
1951 self.storeVm('vcpu_avail', self.info['vcpu_avail'])
1952 self._writeDom(self._vcpuDomDetails())
1953 self.info['VCPUs_live'] = vcpus
1954 else:
1955 if self.info['VCPUs_max'] > vcpus:
1956 # decreasing
1957 del self.info['cpus'][vcpus:]
1958 elif self.info['VCPUs_max'] < vcpus:
1959 # increasing
1960 for c in range(self.info['VCPUs_max'], vcpus):
1961 self.info['cpus'].append(list())
1962 self.info['VCPUs_max'] = vcpus
1963 xen.xend.XendDomain.instance().managed_config_save(self)
1964 log.info("Set VCPU count on domain %s to %d", self.info['name_label'],
1965 vcpus)
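# Illustrative note: on a stopped domain only the saved config changes;
# e.g. shrinking VCPUs_max from 4 to 2 truncates self.info['cpus'] to its
# first two pin lists, while growing from 2 back to 4 appends two empty
# pin lists before VCPUs_max is updated and the config is re-saved.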
1967 def getMemoryTarget(self):
1968 """Get this domain's target memory size, in KB."""
1969 return self.info['memory_dynamic_max'] / 1024
1971 def getMemoryMaximum(self):
1972 """Get this domain's maximum memory size, in KB."""
1973 # remember, info now stores memory in bytes
1974 return self.info['memory_static_max'] / 1024
1976 def getResume(self):
1977 return str(self._resume)
1979 def setResume(self, isresume):
1980 self._resume = isresume
1982 def getCpus(self):
1983 return self.info['cpus']
1985 def setCpus(self, cpumap):
1986 self.info['cpus'] = cpumap
1988 def getCap(self):
1989 return self.info['vcpus_params']['cap']
1991 def setCap(self, cpu_cap):
1992 self.info['vcpus_params']['cap'] = cpu_cap
1994 def getWeight(self):
1995 return self.info['vcpus_params']['weight']
1997 def setWeight(self, cpu_weight):
1998 self.info['vcpus_params']['weight'] = cpu_weight
2000 def getRestartCount(self):
2001 return self._readVm('xend/restart_count')
2003 def refreshShutdown(self, xeninfo = None):
2004 """ Checks the domain for whether a shutdown is required.
2006 Called from XendDomainInfo and also image.py for HVM images.
2007 """
2009 # If set at the end of this method, a restart is required, with the
2010 # given reason. This restart has to be done out of the scope of
2011 # refresh_shutdown_lock.
2012 restart_reason = None
2014 self.refresh_shutdown_lock.acquire()
2015 try:
2016 if xeninfo is None:
2017 xeninfo = dom_get(self.domid)
2018 if xeninfo is None:
2019 # The domain no longer exists. This will occur if we have
2020 # scheduled a timer to check for shutdown timeouts and the
2021 # shutdown succeeded. It will also occur if someone
2022 # destroys a domain beneath us. We clean up the domain,
2023 # just in case, but we can't clean up the VM, because that
2024 # VM may have migrated to a different domain on this
2025 # machine.
2026 self.cleanupDomain()
2027 self._stateSet(DOM_STATE_HALTED)
2028 return
2030 if xeninfo['dying']:
2031 # Dying means that a domain has been destroyed, but has not
2032 # yet been cleaned up by Xen. This state could persist
2033 # indefinitely if, for example, another domain has some of its
2034 # pages mapped. We might like to diagnose this problem in the
2035 # future, but for now all we do is make sure that it's not us
2036 # holding the pages, by calling cleanupDomain. We can't
2037 # clean up the VM, as above.
2038 self.cleanupDomain()
2039 self._stateSet(DOM_STATE_SHUTDOWN)
2040 return
2042 elif xeninfo['crashed']:
2043 if self.readDom('xend/shutdown_completed'):
2044 # We've seen this shutdown already, but we are preserving
2045 # the domain for debugging. Leave it alone.
2046 return
2048 log.warn('Domain has crashed: name=%s id=%d.',
2049 self.info['name_label'], self.domid)
2050 self._writeVm(LAST_SHUTDOWN_REASON, 'crash')
2052 restart_reason = 'crash'
2053 self._stateSet(DOM_STATE_HALTED)
2055 elif xeninfo['shutdown']:
2056 self._stateSet(DOM_STATE_SHUTDOWN)
2057 if self.readDom('xend/shutdown_completed'):
2058 # We've seen this shutdown already, but we are preserving
2059 # the domain for debugging. Leave it alone.
2060 return
2062 else:
2063 reason = shutdown_reason(xeninfo['shutdown_reason'])
2065 log.info('Domain has shutdown: name=%s id=%d reason=%s.',
2066 self.info['name_label'], self.domid, reason)
2067 self._writeVm(LAST_SHUTDOWN_REASON, reason)
2069 self._clearRestart()
2071 if reason == 'suspend':
2072 self._stateSet(DOM_STATE_SUSPENDED)
2073 # Don't destroy the domain. XendCheckpoint will do
2074 # this once it has finished. However, stop watching
2075 # the VM path now, otherwise we will end up with one
2076 # watch for the old domain, and one for the new.
2077 self._unwatchVm()
2078 elif reason in ('poweroff', 'reboot'):
2079 restart_reason = reason
2080 else:
2081 self.destroy()
2083 elif self.dompath is None:
2084 # We have yet to manage to call introduceDomain on this
2085 # domain. This can happen if a restore is in progress, or has
2086 # failed. Ignore this domain.
2087 pass
2088 else:
2089 # Domain is alive. If we are shutting it down, log a message
2090 # if it seems unresponsive.
2091 if xeninfo['paused']:
2092 self._stateSet(DOM_STATE_PAUSED)
2093 else:
2094 self._stateSet(DOM_STATE_RUNNING)
2096 if self.shutdownStartTime:
2097 timeout = (SHUTDOWN_TIMEOUT - time.time() +
2098 self.shutdownStartTime)
2099 if (timeout < 0 and not self.readDom('xend/unresponsive')):
2100 log.info(
2101 "Domain shutdown timeout expired: name=%s id=%s",
2102 self.info['name_label'], self.domid)
2103 self.storeDom('xend/unresponsive', 'True')
2104 finally:
2105 self.refresh_shutdown_lock.release()
2107 if restart_reason and not self.restart_in_progress:
2108 self.restart_in_progress = True
2109 threading.Thread(target = self._maybeRestart,
2110 args = (restart_reason,)).start()
2114 # Restart functions - handling whether we come back up on shutdown.
2117 def _clearRestart(self):
2118 self._removeDom("xend/shutdown_start_time")
2120 def _maybeDumpCore(self, reason):
2121 if reason == 'crash':
2122 if xoptions.get_enable_dump() or self.get_on_crash() \
2123 in ['coredump_and_destroy', 'coredump_and_restart']:
2124 try:
2125 self.dumpCore()
2126 except XendError:
2127 # This error has been logged -- there's nothing more
2128 # we can do in this context.
2129 pass
2131 def _maybeRestart(self, reason):
2132 # Before taking configured action, dump core if configured to do so.
2134 self._maybeDumpCore(reason)
2136 # Dispatch to the correct method based upon the configured on_{reason}
2137 # behaviour.
2138 actions = {"destroy" : self.destroy,
2139 "restart" : self._restart,
2140 "preserve" : self._preserve,
2141 "rename-restart" : self._renameRestart,
2142 "coredump-destroy" : self.destroy,
2143 "coredump-restart" : self._restart}
2145 action_conf = {
2146 'poweroff': 'actions_after_shutdown',
2147 'reboot': 'actions_after_reboot',
2148 'crash': 'actions_after_crash',
2149 }
2151 action_target = self.info.get(action_conf.get(reason))
2152 func = actions.get(action_target, None)
2153 if func and callable(func):
2154 func()
2155 else:
2156 self.destroy() # default to destroy
2158 def _renameRestart(self):
2159 self._restart(True)
2161 def _restart(self, rename = False):
2162 """Restart the domain after it has exited.
2164 @param rename True if the old domain is to be renamed and preserved,
2165 False if it is to be destroyed.
2166 """
2167 from xen.xend import XendDomain
2169 if self._readVm(RESTART_IN_PROGRESS):
2170 log.error('Xend failed during restart of domain %s. '
2171 'Refusing to restart to avoid loops.',
2172 str(self.domid))
2173 self.destroy()
2174 return
2176 old_domid = self.domid
2177 self._writeVm(RESTART_IN_PROGRESS, 'True')
2179 elapse = time.time() - self.info['start_time']
2180 if elapse < MINIMUM_RESTART_TIME:
2181 log.error('VM %s restarting too fast (Elapsed time: %f seconds). '
2182 'Refusing to restart to avoid loops.',
2183 self.info['name_label'], elapse)
2184 self.destroy()
2185 return
2187 prev_vm_xend = self._listRecursiveVm('xend')
2188 new_dom_info = self.info
2189 try:
2190 if rename:
2191 new_dom_info = self._preserveForRestart()
2192 else:
2193 self._unwatchVm()
2194 self.destroy()
2196 # new_dom's VM will be the same as this domain's VM, except where
2197 # the rename flag has instructed us to call preserveForRestart.
2198 # In that case, it is important that we remove the
2199 # RESTART_IN_PROGRESS node from the new domain, not the old one,
2200 # once the new one is available.
2202 new_dom = None
2203 try:
2204 new_dom = XendDomain.instance().domain_create_from_dict(
2205 new_dom_info)
2206 for x in prev_vm_xend[0][1]:
2207 new_dom._writeVm('xend/%s' % x[0], x[1])
2208 new_dom.waitForDevices()
2209 new_dom.unpause()
2210 rst_cnt = new_dom._readVm('xend/restart_count')
2211 rst_cnt = int(rst_cnt) + 1
2212 new_dom._writeVm('xend/restart_count', str(rst_cnt))
2213 new_dom._removeVm(RESTART_IN_PROGRESS)
2214 except:
2215 if new_dom:
2216 new_dom._removeVm(RESTART_IN_PROGRESS)
2217 new_dom.destroy()
2218 else:
2219 self._removeVm(RESTART_IN_PROGRESS)
2220 raise
2221 except:
2222 log.exception('Failed to restart domain %s.', str(old_domid))
2224 def _preserveForRestart(self):
2225 """Preserve a domain that has been shut down, by giving it a new UUID,
2226 cloning the VM details, and giving it a new name. This allows us to
2227 keep this domain for debugging, but restart a new one in its place
2228 preserving the restart semantics (name and UUID preserved).
2229 """
2231 new_uuid = uuid.createString()
2232 new_name = 'Domain-%s' % new_uuid
2233 log.info("Renaming dead domain %s (%d, %s) to %s (%s).",
2234 self.info['name_label'], self.domid, self.info['uuid'],
2235 new_name, new_uuid)
2236 self._unwatchVm()
2237 self._releaseDevices()
2238 # Remove existing vm node in xenstore
2239 self._removeVm()
2240 new_dom_info = self.info.copy()
2241 new_dom_info['name_label'] = self.info['name_label']
2242 new_dom_info['uuid'] = self.info['uuid']
2243 self.info['name_label'] = new_name
2244 self.info['uuid'] = new_uuid
2245 self.vmpath = XS_VMROOT + new_uuid
2246 # Write out new vm node to xenstore
2247 self._storeVmDetails()
2248 self._preserve()
2249 return new_dom_info
2252 def _preserve(self):
2253 log.info("Preserving dead domain %s (%d).", self.info['name_label'],
2254 self.domid)
2255 self._unwatchVm()
2256 self.storeDom('xend/shutdown_completed', 'True')
2257 self._stateSet(DOM_STATE_HALTED)
2260 # Debugging ..
2263 def dumpCore(self, corefile = None):
2264 """Create a core dump for this domain.
2266 @raise: XendError if core dumping failed.
2267 """
2269 if not corefile:
2270 # To prohibit directory traversal
2271 based_name = os.path.basename(self.info['name_label'])
2273 coredir = "/var/xen/dump/%s" % (based_name)
2274 if not os.path.exists(coredir):
2275 try:
2276 mkdir.parents(coredir, stat.S_IRWXU)
2277 except Exception, ex:
2278 log.error("Cannot create directory: %s" % str(ex))
2280 if not os.path.isdir(coredir):
2281 # Use former directory to dump core
2282 coredir = '/var/xen/dump'
2284 this_time = time.strftime("%Y-%m%d-%H%M.%S", time.localtime())
2285 corefile = "%s/%s-%s.%s.core" % (coredir, this_time,
2286 self.info['name_label'], self.domid)
2288 if os.path.isdir(corefile):
2289 raise XendError("Cannot dump core in a directory: %s" %
2290 corefile)
2292 try:
2293 try:
2294 self._writeVm(DUMPCORE_IN_PROGRESS, 'True')
2295 xc.domain_dumpcore(self.domid, corefile)
2296 except RuntimeError, ex:
2297 corefile_incomp = corefile+'-incomplete'
2298 try:
2299 os.rename(corefile, corefile_incomp)
2300 except:
2301 pass
2303 log.error("core dump failed: id = %s name = %s: %s",
2304 self.domid, self.info['name_label'], str(ex))
2305 raise XendError("Failed to dump core: %s" % str(ex))
2306 finally:
2307 self._removeVm(DUMPCORE_IN_PROGRESS)
2310 # Device creation/deletion functions
2313 def _createDevice(self, deviceClass, devConfig):
2314 return self.getDeviceController(deviceClass).createDevice(devConfig)
2316 def _waitForDevice(self, deviceClass, devid):
2317 return self.getDeviceController(deviceClass).waitForDevice(devid)
2319 def _waitForDeviceUUID(self, dev_uuid):
2320 deviceClass, config = self.info['devices'].get(dev_uuid)
2321 self._waitForDevice(deviceClass, config['devid'])
2323 def _waitForDevice_destroy(self, deviceClass, devid, backpath):
2324 return self.getDeviceController(deviceClass).waitForDevice_destroy(
2325 devid, backpath)
2327 def _reconfigureDevice(self, deviceClass, devid, devconfig):
2328 return self.getDeviceController(deviceClass).reconfigureDevice(
2329 devid, devconfig)
2331 def _createDevices(self):
2332 """Create the devices for a vm.
2334 @raise: VmError for invalid devices
2335 """
2336 if self.image:
2337 self.image.prepareEnvironment()
2339 vscsi_uuidlist = {}
2340 vscsi_devidlist = []
2341 ordered_refs = self.info.ordered_device_refs()
2342 for dev_uuid in ordered_refs:
2343 devclass, config = self.info['devices'][dev_uuid]
2344 if devclass in XendDevices.valid_devices() and devclass != 'vscsi':
2345 log.info("createDevice: %s : %s" % (devclass, scrub_password(config)))
2346 dev_uuid = config.get('uuid')
2348 if devclass == 'pci':
2349 self.pci_dev_check_assignability_and_do_FLR(config)
2351 if devclass != 'pci' or not self.info.is_hvm() :
2352 devid = self._createDevice(devclass, config)
2354 # store devid in XendConfig for caching reasons
2355 if dev_uuid in self.info['devices']:
2356 self.info['devices'][dev_uuid][1]['devid'] = devid
2358 elif devclass == 'vscsi':
2359 vscsi_config = config.get('devs', [])[0]
2360 devid = vscsi_config.get('devid', '')
2361 dev_uuid = config.get('uuid')
2362 vscsi_uuidlist[devid] = dev_uuid
2363 vscsi_devidlist.append(devid)
2365 # The devids must be sorted so the guest sees its /dev/sdxx devices in order.
2366 if len(vscsi_uuidlist) > 0:
2367 vscsi_devidlist.sort()
2368 for vscsiid in vscsi_devidlist:
2369 dev_uuid = vscsi_uuidlist[vscsiid]
2370 devclass, config = self.info['devices'][dev_uuid]
2371 log.info("createDevice: %s : %s" % (devclass, scrub_password(config)))
2372 dev_uuid = config.get('uuid')
2373 devid = self._createDevice(devclass, config)
2374 # store devid in XendConfig for caching reasons
2375 if dev_uuid in self.info['devices']:
2376 self.info['devices'][dev_uuid][1]['devid'] = devid
2379 if self.image:
2380 self.image.createDeviceModel()
2382 # If we have pass-through devices, we need the virtual PCI slot info from qemu.
2383 self.pci_device_configure_boot()
2385 def _releaseDevices(self, suspend = False):
2386 """Release all domain's devices. Nothrow guarantee."""
2387 if self.image:
2388 try:
2389 log.debug("Destroying device model")
2390 self.image.destroyDeviceModel()
2391 except Exception, e:
2392 log.exception("Device model destroy failed %s" % str(e))
2393 else:
2394 log.debug("No device model")
2396 log.debug("Releasing devices")
2397 t = xstransact("%s/device" % self.vmpath)
2398 try:
2399 for devclass in XendDevices.valid_devices():
2400 for dev in t.list(devclass):
2401 try:
2402 log.debug("Removing %s", dev)
2403 self.destroyDevice(devclass, dev, False)
2404 except:
2405 # Log and swallow any exceptions in removal --
2406 # there's nothing more we can do.
2407 log.exception("Device release failed: %s; %s; %s",
2408 self.info['name_label'],
2409 devclass, dev)
2410 finally:
2411 t.abort()
2413 def getDeviceController(self, name):
2414 """Get the device controller for this domain, and if it
2415 doesn't exist, create it.
2417 @param name: device class name
2418 @type name: string
2419 @rtype: subclass of DevController
2420 """
2421 if name not in self._deviceControllers:
2422 devController = XendDevices.make_controller(name, self)
2423 if not devController:
2424 raise XendError("Unknown device type: %s" % name)
2425 self._deviceControllers[name] = devController
2427 return self._deviceControllers[name]
2430 # Migration functions (public)
2433 def testMigrateDevices(self, network, dst):
2434 """ Notify all device about intention of migration
2435 @raise: XendError for a device that cannot be migrated
2436 """
2437 for (n, c) in self.info.all_devices_sxpr():
2438 rc = self.migrateDevice(n, c, network, dst, DEV_MIGRATE_TEST, self.getName())
2439 if rc != 0:
2440 raise XendError("Device of type '%s' refuses migration." % n)
2442 def migrateDevices(self, network, dst, step, domName=''):
2443 """Notify the devices about migration
2444 """
2445 ctr = 0
2446 try:
2447 for (dev_type, dev_conf) in self.info.all_devices_sxpr():
2448 self.migrateDevice(dev_type, dev_conf, network, dst,
2449 step, domName)
2450 ctr = ctr + 1
2451 except:
2452 for dev_type, dev_conf in self.info.all_devices_sxpr():
2453 if ctr == 0:
2454 step = step - 1
2455 ctr = ctr - 1
2456 self._recoverMigrateDevice(dev_type, dev_conf, network,
2457 dst, step, domName)
2458 raise
2460 def migrateDevice(self, deviceClass, deviceConfig, network, dst,
2461 step, domName=''):
2462 return self.getDeviceController(deviceClass).migrate(deviceConfig,
2463 network, dst, step, domName)
2465 def _recoverMigrateDevice(self, deviceClass, deviceConfig, network,
2466 dst, step, domName=''):
2467 return self.getDeviceController(deviceClass).recover_migrate(
2468 deviceConfig, network, dst, step, domName)
2470 def setChangeHomeServer(self, chs):
2471 if chs is not None:
2472 self.info['change_home_server'] = bool(chs)
2473 else:
2474 if self.info.has_key('change_home_server'):
2475 del self.info['change_home_server']
2478 ## private:
2480 def _constructDomain(self):
2481 """Construct the domain.
2483 @raise: VmError on error
2484 """
2486 log.debug('XendDomainInfo.constructDomain')
2488 self.shutdownStartTime = None
2489 self.restart_in_progress = False
2491 hap = 0
2492 hvm = self.info.is_hvm()
2493 if hvm:
2494 hap = self.info.is_hap()
2495 info = xc.xeninfo()
2496 if 'hvm' not in info['xen_caps']:
2497 raise VmError("HVM guest support is unavailable: is VT/AMD-V "
2498 "supported by your CPU and enabled in your "
2499 "BIOS?")
2501 # Hack to pre-reserve some memory for initial domain creation.
2502 # There is an implicit memory overhead for any domain creation. This
2503 # overhead is greater for some types of domain than others. For
2504 # example, an x86 HVM domain will have a default shadow-pagetable
2505 # allocation of 1MB. We free up 4MB here to be on the safe side.
2506 # 2MB memory allocation was not enough in some cases, so it's 4MB now
2507 balloon.free(4*1024, self) # 4MB should be plenty
2509 ssidref = 0
2510 if security.on() == xsconstants.XS_POLICY_USE:
2511 ssidref = security.calc_dom_ssidref_from_info(self.info)
2512 if security.has_authorization(ssidref) == False:
2513 raise VmError("VM is not authorized to run.")
2515 s3_integrity = 0
2516 if self.info.has_key('s3_integrity'):
2517 s3_integrity = self.info['s3_integrity']
2519 oos = self.info['platform'].get('oos', 1)
2520 oos_off = 1 - int(oos)
2522 flags = (int(hvm) << 0) | (int(hap) << 1) | (int(s3_integrity) << 2) | (int(oos_off) << 3)
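# Worked example (values assumed): an HVM guest with HAP enabled,
# s3_integrity == 0 and oos == 1 (so oos_off == 0) yields
#
#   flags == (1 << 0) | (1 << 1) | (0 << 2) | (0 << 3) == 3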
2524 try:
2525 self.domid = xc.domain_create(
2526 domid = 0,
2527 ssidref = ssidref,
2528 handle = uuid.fromString(self.info['uuid']),
2529 flags = flags,
2530 target = self.info.target())
2531 except Exception, e:
2532 # may get here if due to ACM the operation is not permitted
2533 if security.on() == xsconstants.XS_POLICY_ACM:
2534 raise VmError('Domain in conflict set with running domain?')
2535 log.exception(e)
2537 if not self.domid or self.domid < 0:
2538 failmsg = 'Creating domain failed: name=%s' % self.info['name_label']
2539 if self.domid:
2540 failmsg += ', error=%i' % int(self.domid)
2541 raise VmError(failmsg)
2543 self.dompath = GetDomainPath(self.domid)
2545 self._recreateDom()
2547 # Set TSC mode of domain
2548 tsc_mode = self.info["platform"].get("tsc_mode")
2549 if arch.type == "x86" and tsc_mode is not None:
2550 xc.domain_set_tsc_info(self.domid, int(tsc_mode))
2552 # Set timer configuration of domain
2553 timer_mode = self.info["platform"].get("timer_mode")
2554 if hvm and timer_mode is not None:
2555 xc.hvm_set_param(self.domid, HVM_PARAM_TIMER_MODE,
2556 long(timer_mode))
2558 # Set Viridian interface configuration of domain
2559 viridian = self.info["platform"].get("viridian")
2560 if arch.type == "x86" and hvm and viridian is not None:
2561 xc.hvm_set_param(self.domid, HVM_PARAM_VIRIDIAN, long(viridian))
2563 # If nomigrate is set, disable migration
2564 nomigrate = self.info["platform"].get("nomigrate")
2565 if nomigrate is not None and long(nomigrate) != 0:
2566 xc.domain_disable_migrate(self.domid)
2568 # Optionally enable virtual HPET
2569 hpet = self.info["platform"].get("hpet")
2570 if hvm and hpet is not None:
2571 xc.hvm_set_param(self.domid, HVM_PARAM_HPET_ENABLED,
2572 long(hpet))
2574 # Optionally enable periodic vpt aligning
2575 vpt_align = self.info["platform"].get("vpt_align")
2576 if hvm and vpt_align is not None:
2577 xc.hvm_set_param(self.domid, HVM_PARAM_VPT_ALIGN,
2578 long(vpt_align))
2580 # Set maximum number of vcpus in domain
2581 xc.domain_max_vcpus(self.domid, int(self.info['VCPUs_max']))
2583 # Check for cpu_{cap|weight} validity for credit scheduler
2584 if XendNode.instance().xenschedinfo() == 'credit':
2585 cap = self.getCap()
2586 weight = self.getWeight()
2588 assert type(weight) == int
2589 assert type(cap) == int
2591 if weight < 1 or weight > 65535:
2592 raise VmError("Cpu weight out of range, valid values are within range from 1 to 65535")
2594 if cap < 0 or cap > self.getVCpuCount() * 100:
2595 raise VmError("Cpu cap out of range, valid range is from 0 to %s for specified number of vcpus" %
2596 (self.getVCpuCount() * 100))
2598 # Test whether the devices can be assigned with VT-d
2599 self.info.update_platform_pci()
2600 pci = self.info["platform"].get("pci")
2601 pci_str = ''
2602 if pci and len(pci) > 0:
2603 pci = map(lambda x: x[0:4], pci) # strip options
2604 pci_str = str(pci)
2606 # This test is done for both pv and hvm guests.
2607 for p in pci:
2608 pci_name = '%04x:%02x:%02x.%x' % \
2609 (parse_hex(p[0]), parse_hex(p[1]), parse_hex(p[2]), parse_hex(p[3]))
2610 try:
2611 pci_device = PciDevice(parse_pci_name(pci_name))
2612 except Exception, e:
2613 raise VmError("pci: failed to locate device and "+
2614 "parse its resources - "+str(e))
2615 if pci_device.driver != 'pciback' and pci_device.driver != 'pci-stub':
2616 raise VmError(("pci: PCI Backend and pci-stub don't own device %s")\
2617 % pci_device.name)
2618 if pci_name in get_all_assigned_pci_devices():
2619 raise VmError("failed to assign device %s that has"
2620 " already been assigned to other domain." % pci_name)
2622 if hvm and pci_str != '':
2623 bdf = xc.test_assign_device(0, pci_str)
2624 if bdf != 0:
2625 if bdf == -1:
2626 raise VmError("failed to assign device: maybe the platform"
2627 " doesn't support VT-d, or VT-d isn't enabled"
2628 " properly?")
2629 bus = (bdf >> 16) & 0xff
2630 devfn = (bdf >> 8) & 0xff
2631 dev = (devfn >> 3) & 0x1f
2632 func = devfn & 0x7
2633 raise VmError("failed to assign device %02x:%02x.%x: maybe it has"
2634 " already been assigned to other domain, or maybe"
2635 " it doesn't exist." % (bus, dev, func))
2637 # register the domain in the list
2638 from xen.xend import XendDomain
2639 XendDomain.instance().add_domain(self)
2641 def _introduceDomain(self):
2642 assert self.domid is not None
2643 assert self.store_mfn is not None
2644 assert self.store_port is not None
2646 try:
2647 IntroduceDomain(self.domid, self.store_mfn, self.store_port)
2648 except RuntimeError, exn:
2649 raise XendError(str(exn))
2651 def _setTarget(self, target):
2652 assert self.domid is not None
2654 try:
2655 SetTarget(self.domid, target)
2656 self.storeDom('target', target)
2657 except RuntimeError, exn:
2658 raise XendError(str(exn))
2661 def _setCPUAffinity(self):
2662 """ Repin domain vcpus if a restricted cpus list is provided.
2663 Returns the chosen node number.
2664 """
2666 def has_cpus():
2667 if self.info['cpus'] is not None:
2668 for c in self.info['cpus']:
2669 if c:
2670 return True
2671 return False
2673 def has_cpumap():
2674 if self.info.has_key('vcpus_params'):
2675 for k, v in self.info['vcpus_params'].items():
2676 if k.startswith('cpumap'):
2677 return True
2678 return False
2680 index = 0
2681 if has_cpumap():
2682 for v in range(0, self.info['VCPUs_max']):
2683 if self.info['vcpus_params'].has_key('cpumap%i' % v):
2684 cpumask = map(int, self.info['vcpus_params']['cpumap%i' % v].split(','))
2685 xc.vcpu_setaffinity(self.domid, v, cpumask)
2686 elif has_cpus():
2687 for v in range(0, self.info['VCPUs_max']):
2688 if self.info['cpus'][v]:
2689 xc.vcpu_setaffinity(self.domid, v, self.info['cpus'][v])
2690 else:
2691 def find_relaxed_node(node_list):
2693 nr_nodes = info['nr_nodes']
2694 if node_list is None:
2695 node_list = range(0, nr_nodes)
2696 nodeload = [0]
2697 nodeload = nodeload * nr_nodes
2698 from xen.xend import XendDomain
2699 doms = XendDomain.instance().list('all')
2700 for dom in filter (lambda d: d.domid != self.domid, doms):
2701 cpuinfo = dom.getVCPUInfo()
2702 for vcpu in sxp.children(cpuinfo, 'vcpu'):
2703 if sxp.child_value(vcpu, 'online') == 0: continue
2704 cpumap = list(sxp.child_value(vcpu,'cpumap'))
2705 for i in range(0, nr_nodes):
2706 node_cpumask = info['node_to_cpu'][i]
2707 for j in node_cpumask:
2708 if j in cpumap:
2709 nodeload[i] += 1
2710 break
2711 for i in range(0, nr_nodes):
2712 if len(info['node_to_cpu'][i]) == 0:
2713 nodeload[i] += 8
2714 else:
2715 nodeload[i] = int(nodeload[i] * 16 / len(info['node_to_cpu'][i]))
2716 if i not in node_list:
2717 nodeload[i] += 8
2718 return map(lambda x: x[0], sorted(enumerate(nodeload), key=lambda x:x[1]))
2720 info = xc.physinfo()
2721 if info['nr_nodes'] > 1:
2722 node_memory_list = info['node_to_memory']
2723 needmem = self.image.getRequiredAvailableMemory(self.info['memory_dynamic_max']) / 1024
2724 candidate_node_list = []
2725 for i in range(0, info['nr_nodes']):
2726 if node_memory_list[i] >= needmem and len(info['node_to_cpu'][i]) > 0:
2727 candidate_node_list.append(i)
2728 best_node = find_relaxed_node(candidate_node_list)[0]
2729 cpumask = info['node_to_cpu'][best_node]
2730 best_nodes = find_relaxed_node(filter(lambda x: x != best_node, range(0,info['nr_nodes'])))
2731 for node_idx in best_nodes:
2732 if len(cpumask) >= self.info['VCPUs_max']:
2733 break
2734 cpumask = cpumask + info['node_to_cpu'][node_idx]
2735 log.debug("allocating additional NUMA node %d", node_idx)
2736 for v in range(0, self.info['VCPUs_max']):
2737 xc.vcpu_setaffinity(self.domid, v, cpumask)
2738 return index
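# Worked example (loads assumed): with node0 having 4 cpus and 6 busy
# vcpus, and node1 having 2 cpus and 1 busy vcpu, find_relaxed_node scores
#
#   nodeload[0] = int(6 * 16 / 4) == 24
#   nodeload[1] = int(1 * 16 / 2) == 8
#
# so node1 is preferred. A node with no cpus skips the division entirely
# (the division by zero this changeset fixes) and instead takes a flat +8
# penalty; a populated node outside node_list gets +8 added after scaling.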
2740 def _freeDMAmemory(self, node):
2742 # If we are PV and have PCI devices the guest will
2743 # turn on a SWIOTLB. The SWIOTLB _MUST_ be located in the DMA32
2744 # zone (under 4GB). To do so, we need to balloon down Dom0 to where
2745 # there is enough (64MB) memory under the 4GB mark. This ballooning
2746 # might take more memory out than just 64MB, though :-(
2747 if not self.info.is_pv_and_has_pci():
2748 return
2750 retries = 2000
2751 ask_for_mem = 0
2752 need_mem = 0
2753 try:
2754 while (retries > 0):
2755 physinfo = xc.physinfo()
2756 free_mem = physinfo['free_memory']
2757 nr_nodes = physinfo['nr_nodes']
2758 node_to_dma32_mem = physinfo['node_to_dma32_mem']
2759 if (node > nr_nodes):
2760 return
2761 # An extra 2MB on top of the 64MB seems to do the trick.
2762 need_mem = 64 * 1024 + 2048 - node_to_dma32_mem[node]
2763 # Our starting point. We ask just for the difference needed
2764 # to have an extra 64MB under 4GB.
2765 ask_for_mem = max(need_mem, ask_for_mem)
2766 if (need_mem > 0):
2767 log.debug('_freeDMAmemory (%d) Need %dKiB DMA memory. '
2768 'Asking for %dKiB', retries, need_mem,
2769 ask_for_mem)
2771 balloon.free(ask_for_mem, self)
2772 ask_for_mem = ask_for_mem + 2048
2773 else:
2774 # OK. We got enough DMA memory.
2775 break
2776 retries = retries - 1
2777 except:
2778 # This is best-effort after all.
2779 need_mem = max(1, need_mem)
2782 if (need_mem > 0):
2783 log.warn('We tried our best to balloon down DMA memory to '
2784 'accommodate your PV guest. We need %dKiB extra memory.',
2785 need_mem)
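# Worked example (numbers assumed): if node 0 currently has 40960 KiB
# (40MB) of DMA32 memory, then
#
#   need_mem == 64 * 1024 + 2048 - 40960 == 26624 KiB
#
# so the loop asks balloon.free() for at least that much, adding another
# 2048 KiB to the request on every retry until the node reports 66MB
# below the 4GB mark.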
2787 def _setSchedParams(self):
2788 if XendNode.instance().xenschedinfo() == 'credit':
2789 from xen.xend import XendDomain
2790 XendDomain.instance().domain_sched_credit_set(self.getDomid(),
2791 self.getWeight(),
2792 self.getCap())
2794 def _initDomain(self):
2795 log.debug('XendDomainInfo.initDomain: %s %s',
2796 self.domid,
2797 self.info['vcpus_params']['weight'])
2799 self._configureBootloader()
2801 try:
2802 self.image = image.create(self, self.info)
2804 # repin domain vcpus if a restricted cpus list is provided
2805 # this is done prior to memory allocation to aid in memory
2806 # distribution for NUMA systems.
2807 node = self._setCPUAffinity()
2809 # Set scheduling parameters.
2810 self._setSchedParams()
2812 # Use architecture- and image-specific calculations to determine
2813 # the various headrooms necessary, given the raw configured
2814 # values. maxmem, memory, and shadow are all in KiB.
2815 # but memory_static_max etc are all stored in bytes now.
2816 memory = self.image.getRequiredAvailableMemory(
2817 self.info['memory_dynamic_max'] / 1024)
2818 maxmem = self.image.getRequiredAvailableMemory(
2819 self.info['memory_static_max'] / 1024)
2820 shadow = self.image.getRequiredShadowMemory(
2821 self.info['shadow_memory'] * 1024,
2822 self.info['memory_static_max'] / 1024)
2824 log.debug("_initDomain:shadow_memory=0x%x, memory_static_max=0x%x, memory_static_min=0x%x.", self.info['shadow_memory'], self.info['memory_static_max'], self.info['memory_static_min'],)
2825 # Round shadow up to a multiple of a MiB, as shadow_mem_control
2826 # takes MiB and we must not round down and end up under-providing.
2827 shadow = ((shadow + 1023) / 1024) * 1024
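# Worked example (value assumed): integer division rounds up to the next
# MiB multiple, e.g. shadow == 1536 KiB gives
# ((1536 + 1023) / 1024) * 1024 == 2 * 1024 == 2048 KiB.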
2829 # set memory limit
2830 xc.domain_setmaxmem(self.domid, maxmem)
2832 vtd_mem = 0
2833 info = xc.physinfo()
2834 if 'hvm_directio' in info['virt_caps']:
2835 # Reserve 1 page per MiB of RAM for separate VT-d page table.
2836 vtd_mem = 4 * (self.info['memory_static_max'] / 1024 / 1024)
2837 # Round vtd_mem up to a multiple of a MiB.
2838 vtd_mem = ((vtd_mem + 1023) / 1024) * 1024
2840 self.guest_bitsize = self.image.getBitSize()
2841 # Make sure there's enough RAM available for the domain
2842 balloon.free(memory + shadow + vtd_mem, self)
2844 # Set up the shadow memory
2845 shadow_cur = xc.shadow_mem_control(self.domid, shadow / 1024)
2846 self.info['shadow_memory'] = shadow_cur
2848 # machine address size
2849 if self.info.has_key('machine_address_size'):
2850 log.debug("_initDomain: setting maximum machine address size %d" % self.info['machine_address_size'])
2851 xc.domain_set_machine_address_size(self.domid, self.info['machine_address_size'])
2853 if self.info.has_key('suppress_spurious_page_faults') and self.info['suppress_spurious_page_faults']:
2854 log.debug("_initDomain: suppressing spurious page faults")
2855 xc.domain_suppress_spurious_page_faults(self.domid)
2857 self._createChannels()
2859 channel_details = self.image.createImage()
2861 self.store_mfn = channel_details['store_mfn']
2862 if 'console_mfn' in channel_details:
2863 self.console_mfn = channel_details['console_mfn']
2864 if 'notes' in channel_details:
2865 self.info.set_notes(channel_details['notes'])
2866 if 'native_protocol' in channel_details:
2867 self.native_protocol = channel_details['native_protocol']
2869 self._introduceDomain()
2870 if self.info.target():
2871 self._setTarget(self.info.target())
2873 self._freeDMAmemory(node)
2875 self._createDevices()
2877 self.image.cleanupTmpImages()
2879 self.info['start_time'] = time.time()
2881 self._stateSet(DOM_STATE_RUNNING)
2882 except VmError, exn:
2883 log.exception("XendDomainInfo.initDomain: exception occurred")
2884 if self.image:
2885 self.image.cleanupTmpImages()
2886 raise exn
2887 except RuntimeError, exn:
2888 log.exception("XendDomainInfo.initDomain: exception occurred")
2889 if self.image:
2890 self.image.cleanupTmpImages()
2891 raise VmError(str(exn))
2894 def cleanupDomain(self):
2895 """Cleanup domain resources; release devices. Idempotent. Nothrow
2896 guarantee."""
2898 self.refresh_shutdown_lock.acquire()
2899 try:
2900 self.unwatchShutdown()
2901 self._releaseDevices()
2902 bootloader_tidy(self)
2904 if self.image:
2905 self.image = None
2907 try:
2908 self._removeDom()
2909 except:
2910 log.exception("Removing domain path failed.")
2912 self._stateSet(DOM_STATE_HALTED)
2913 self.domid = None # Do not push into _stateSet()!
2914 finally:
2915 self.refresh_shutdown_lock.release()
2918 def unwatchShutdown(self):
2919 """Remove the watch on the domain's control/shutdown node, if any.
2920 Idempotent. Nothrow guarantee. Expects to be protected by the
2921 refresh_shutdown_lock."""
2923 try:
2924 try:
2925 if self.shutdownWatch:
2926 self.shutdownWatch.unwatch()
2927 finally:
2928 self.shutdownWatch = None
2929 except:
2930 log.exception("Unwatching control/shutdown failed.")
2932 def waitForShutdown(self):
2933 self.state_updated.acquire()
2934 try:
2935 while self._stateGet() in (DOM_STATE_RUNNING,DOM_STATE_PAUSED):
2936 self.state_updated.wait(timeout=1.0)
2937 finally:
2938 self.state_updated.release()
2940 def waitForSuspend(self):
2941 """Wait for the guest to respond to a suspend request by
2942 shutting down. If the guest hasn't re-written control/shutdown
2943 after a certain amount of time, it's obviously not listening and
2944 won't suspend, so we give up. HVM guests with no PV drivers
2945 should already be shut down.
2946 """
2947 state = "suspend"
2948 nr_tries = 60
2950 self.state_updated.acquire()
2951 try:
2952 while self._stateGet() in (DOM_STATE_RUNNING,DOM_STATE_PAUSED):
2953 self.state_updated.wait(1.0)
2954 if state == "suspend":
2955 if nr_tries == 0:
2956 msg = ('Timeout waiting for domain %s to suspend'
2957 % self.domid)
2958 self._writeDom('control/shutdown', '')
2959 raise XendError(msg)
2960 state = self.readDom('control/shutdown')
2961 nr_tries -= 1
2962 finally:
2963 self.state_updated.release()
2966 # TODO: recategorise - called from XendCheckpoint
2969 def completeRestore(self, store_mfn, console_mfn):
2971 log.debug("XendDomainInfo.completeRestore")
2973 self.store_mfn = store_mfn
2974 self.console_mfn = console_mfn
2976 self._introduceDomain()
2977 self.image = image.create(self, self.info)
2978 if self.image:
2979 self.image.createDeviceModel(True)
2980 self._storeDomDetails()
2981 self._registerWatches()
2982 self.refreshShutdown()
2984 log.debug("XendDomainInfo.completeRestore done")
2987 def _endRestore(self):
2988 self.setResume(False)
2991 # VM Destroy
2994 def _prepare_phantom_paths(self):
2995 # get associated devices to destroy
2996 # build list of phantom devices to be removed after normal devices
2997 plist = []
2998 if self.domid is not None:
2999 t = xstransact("%s/device/vbd" % GetDomainPath(self.domid))
3000 try:
3001 for dev in t.list():
3002 backend_phantom_vbd = xstransact.Read("%s/device/vbd/%s/phantom_vbd" \
3003 % (self.dompath, dev))
3004 if backend_phantom_vbd is not None:
3005 frontend_phantom_vbd = xstransact.Read("%s/frontend" \
3006 % backend_phantom_vbd)
3007 plist.append(backend_phantom_vbd)
3008 plist.append(frontend_phantom_vbd)
3009 finally:
3010 t.abort()
3011 return plist
3013 def _cleanup_phantom_devs(self, plist):
3014 # remove phantom devices
3015 if not plist == []:
3016 time.sleep(2)
3017 for paths in plist:
3018 if paths.find('backend') != -1:
3019 # Modify online status /before/ updating state (latter is watched by
3020 # drivers, so this ordering avoids a race).
3021 xstransact.Write(paths, 'online', "0")
3022 xstransact.Write(paths, 'state', str(xenbusState['Closing']))
3023 # force
3024 xstransact.Remove(paths)
3026 def destroy(self):
3027 """Cleanup VM and destroy domain. Nothrow guarantee."""
3029 if self.domid is None:
3030 return
3031 from xen.xend import XendDomain
3032 log.debug("XendDomainInfo.destroy: domid=%s", str(self.domid))
3034 paths = self._prepare_phantom_paths()
3036 if self.dompath is not None:
3037 try:
3038 xc.domain_destroy_hook(self.domid)
3039 xc.domain_pause(self.domid)
3040 do_FLR(self.domid, self.info.is_hvm())
3041 xc.domain_destroy(self.domid)
3042 for state in DOM_STATES_OLD:
3043 self.info[state] = 0
3044 self._stateSet(DOM_STATE_HALTED)
3045 except:
3046 log.exception("XendDomainInfo.destroy: domain destruction failed.")
3048 XendDomain.instance().remove_domain(self)
3049 self.cleanupDomain()
3051 if self.info.is_hvm() or self.guest_bitsize != 32:
3052 if self.alloc_mem:
3053 import MemoryPool
3054 log.debug("%s KiB need to be returned to the memory pool" % self.alloc_mem)
3055 MemoryPool.instance().increase_memory(self.alloc_mem)
3057 self._cleanup_phantom_devs(paths)
3058 self._cleanupVm()
3060 if ("transient" in self.info["other_config"] and \
3061 bool(self.info["other_config"]["transient"])) or \
3062 ("change_home_server" in self.info and \
3063 bool(self.info["change_home_server"])):
3064 XendDomain.instance().domain_delete_by_dominfo(self)
3067 def resetDomain(self):
3068 log.debug("XendDomainInfo.resetDomain(%s)", str(self.domid))
3070 old_domid = self.domid
3071 prev_vm_xend = self._listRecursiveVm('xend')
3072 new_dom_info = self.info
3073 try:
3074 self._unwatchVm()
3075 self.destroy()
3077 new_dom = None
3078 try:
3079 from xen.xend import XendDomain
3080 new_dom_info['domid'] = None
3081 new_dom = XendDomain.instance().domain_create_from_dict(
3082 new_dom_info)
3083 for x in prev_vm_xend[0][1]:
3084 new_dom._writeVm('xend/%s' % x[0], x[1])
3085 new_dom.waitForDevices()
3086 new_dom.unpause()
3087 except:
3088 if new_dom:
3089 new_dom.destroy()
3090 raise
3091 except:
3092 log.exception('Failed to reset domain %s.', str(old_domid))
3095 def resumeDomain(self):
3096 log.debug("XendDomainInfo.resumeDomain(%s)", str(self.domid))
3098 # resume a suspended domain (e.g. after live checkpoint, or after
3099 # a later error during save or migrate); checks that the domain
3100 # is currently suspended first, so it is safe to call from anywhere
3102 xeninfo = dom_get(self.domid)
3103 if xeninfo is None:
3104 return
3105 if not xeninfo['shutdown']:
3106 return
3107 reason = shutdown_reason(xeninfo['shutdown_reason'])
3108 if reason != 'suspend':
3109 return
3111 try:
3112 # could also fetch a parsed note from xenstore
3113 fast = self.info.get_notes().get('SUSPEND_CANCEL') and 1 or 0
3114 if not fast:
3115 self._releaseDevices()
3116 self.testDeviceComplete()
3117 self.testvifsComplete()
3118 log.debug("XendDomainInfo.resumeDomain: devices released")
3120 self._resetChannels()
3122 self._removeDom('control/shutdown')
3123 self._removeDom('device-misc/vif/nextDeviceID')
3125 self._createChannels()
3126 self._introduceDomain()
3127 self._storeDomDetails()
3129 self._createDevices()
3130 log.debug("XendDomainInfo.resumeDomain: devices created")
3132 xc.domain_resume(self.domid, fast)
3133 ResumeDomain(self.domid)
3134 except:
3135 log.exception("XendDomainInfo.resume: xc.domain_resume failed on domain %s." % (str(self.domid)))
3136 self.image.resumeDeviceModel()
3137 log.debug("XendDomainInfo.resumeDomain: completed")
3141 # Channels for xenstore and console
3144 def _createChannels(self):
3145 """Create the channels to the domain.
3146 """
3147 self.store_port = self._createChannel()
3148 self.console_port = self._createChannel()
3151 def _createChannel(self):
3152 """Create an event channel to the domain.
3153 """
3154 try:
3155 if self.domid is not None:
3156 return xc.evtchn_alloc_unbound(domid = self.domid,
3157 remote_dom = 0)
3158 except:
3159 log.exception("Exception in alloc_unbound(%s)", str(self.domid))
3160 raise
3162 def _resetChannels(self):
3163 """Reset all event channels in the domain.
3164 """
3165 try:
3167 if self.domid is not None:
3167 return xc.evtchn_reset(dom = self.domid)
3168 except:
3169 log.exception("Exception in evtchn_reset(%s)", str(self.domid))
3170 raise
3174 # Bootloader configuration
3177 def _configureBootloader(self):
3178 """Run the bootloader if we're configured to do so."""
3180 blexec = self.info['PV_bootloader']
3181 bootloader_args = self.info['PV_bootloader_args']
3182 kernel = self.info['PV_kernel']
3183 ramdisk = self.info['PV_ramdisk']
3184 args = self.info['PV_args']
3185 boot = self.info['HVM_boot_policy']
3187 if boot:
3188 # HVM booting.
3189 pass
3190 elif not blexec and kernel:
3191 # Boot from dom0. Nothing left to do -- the kernel and ramdisk
3192 # will be picked up by image.py.
3193 pass
3194 else:
3195 # Boot using bootloader
3196 if not blexec or blexec == 'pygrub':
3197 blexec = auxbin.pathTo('pygrub')
3199 blcfg = None
3200 disks = [x for x in self.info['vbd_refs']
3201 if self.info['devices'][x][1]['bootable']]
3203 if not disks:
3204 msg = "Had a bootloader specified, but no disks are bootable"
3205 log.error(msg)
3206 raise VmError(msg)
3208 devinfo = self.info['devices'][disks[0]]
3209 devtype = devinfo[0]
3210 disk = devinfo[1]['uname']
3212 fn = blkdev_uname_to_file(disk)
3214 # If this is a drbd volume, check if we need to activate it
3215 if disk.find(":") != -1:
3216 (disktype, diskname) = disk.split(':', 1)
3217 if disktype == 'drbd':
3218 (drbdadmstdin, drbdadmstdout) = os.popen2(["/sbin/drbdadm", "state", diskname])
3219 (state, junk) = drbdadmstdout.readline().split('/', 1)
3220 if state == 'Secondary':
3221 os.system('/sbin/drbdadm primary ' + diskname)
3223 taptype = blkdev_uname_to_taptype(disk)
3224 mounted = devtype in ['tap', 'tap2'] and taptype != 'aio' and taptype != 'sync' and not os.stat(fn).st_rdev
3225 if mounted:
3226 # This is a file, not a device. pygrub can cope with a
3227 # file if it's raw, but if it's QCOW or other such formats
3228 # used through blktap, then we need to mount it first.
3230 log.info("Mounting %s on %s." %
3231 (fn, BOOTLOADER_LOOPBACK_DEVICE))
3233 vbd = {
3234 'mode': 'RO',
3235 'device': BOOTLOADER_LOOPBACK_DEVICE,
3236 }
3238 from xen.xend import XendDomain
3239 dom0 = XendDomain.instance().privilegedDomain()
3240 dom0._waitForDeviceUUID(dom0.create_vbd(vbd, disk))
3241 fn = BOOTLOADER_LOOPBACK_DEVICE
3243 try:
3244 blcfg = bootloader(blexec, fn, self, False,
3245 bootloader_args, kernel, ramdisk, args)
3246 finally:
3247 if mounted:
3248 log.info("Unmounting %s from %s." %
3249 (fn, BOOTLOADER_LOOPBACK_DEVICE))
3251 dom0.destroyDevice('tap', BOOTLOADER_LOOPBACK_DEVICE)
3253 if blcfg is None:
3254 msg = "Had a bootloader specified, but can't find disk"
3255 log.error(msg)
3256 raise VmError(msg)
3258 self.info.update_with_image_sxp(blcfg, True)
3262 # VM Functions
3265 def _readVMDetails(self, params):
3266 """Read the specified parameters from the store.
3267 """
3268 try:
3269 return self._gatherVm(*params)
3270 except ValueError:
3271 # One of the int/float entries in params has a corresponding store
3272 # entry that is invalid. We recover, because older versions of
3273 # Xend may have put the entry there (memory/target, for example),
3274 # but this is in general a bad situation to have reached.
3275 log.exception(
3276 "Store corrupted at %s! Domain %d's configuration may be "
3277 "affected.", self.vmpath, self.domid)
3278 return []
3280 def _cleanupVm(self):
3281 """Cleanup VM resources. Idempotent. Nothrow guarantee."""
3283 self._unwatchVm()
3285 try:
3286 self._removeVm()
3287 except:
3288 log.exception("Removing VM path failed.")
3291 def checkLiveMigrateMemory(self):
3292 """ Make sure there's enough memory to migrate this domain """
3293 overhead_kb = 0
3294 if arch.type == "x86":
3295 # 1MB per vcpu plus 4Kib/Mib of RAM. This is higher than
3296 # the minimum that Xen would allocate if no value were given.
3297 overhead_kb = self.info['VCPUs_max'] * 1024 + \
3298 (self.info['memory_static_max'] / 1024 / 1024) * 4
3299 overhead_kb = ((overhead_kb + 1023) / 1024) * 1024
3300 # The domain might already have some shadow memory
3301 overhead_kb -= xc.shadow_mem_control(self.domid) * 1024
3302 if overhead_kb > 0:
3303 balloon.free(overhead_kb, self)
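# Worked example (values assumed): a 2-vcpu guest with a 1 GiB
# memory_static_max and no shadow memory allocated yet needs
#
#   overhead_kb == 2 * 1024 + 1024 * 4 == 6144 KiB
#
# (already a 1024 multiple after rounding), so 6 MiB are ballooned free
# before the migration is allowed to start.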
3305 def _unwatchVm(self):
3306 """Remove the watch on the VM path, if any. Idempotent. Nothrow
3307 guarantee."""
3308 try:
3309 try:
3310 if self.vmWatch:
3311 self.vmWatch.unwatch()
3312 finally:
3313 self.vmWatch = None
3314 except:
3315 log.exception("Unwatching VM path failed.")
3317 def testDeviceComplete(self):
3318 """ For Block IO migration safety we must ensure that
3319 the devices have shut down correctly, i.e. all blocks are
3320 flushed to disk.
3321 """
3322 start = time.time()
3323 while True:
3324 test = 0
3325 diff = time.time() - start
3326 vbds = self.getDeviceController('vbd').deviceIDs()
3327 taps = self.getDeviceController('tap').deviceIDs()
3328 tap2s = self.getDeviceController('tap2').deviceIDs()
3329 for i in vbds + taps + tap2s:
3330 test = 1
3331 log.info("Dev %s still active, looping...", i)
3332 time.sleep(0.1)
3334 if test == 0:
3335 break
3336 if diff >= MIGRATE_TIMEOUT:
3337 log.info("Dev still active but hit max loop timeout")
3338 break
3340 def testvifsComplete(self):
3341 """ In case vifs are released and then created for the same
3342 domain, we need to wait for the devices to shut down.
3343 """
3344 start = time.time()
3345 while True:
3346 test = 0
3347 diff = time.time() - start
3348 for i in self.getDeviceController('vif').deviceIDs():
3349 test = 1
3350 log.info("Dev %s still active, looping...", i)
3351 time.sleep(0.1)
3353 if test == 0:
3354 break
3355 if diff >= MIGRATE_TIMEOUT:
3356 log.info("Dev still active but hit max loop timeout")
3357 break
3359 def _storeVmDetails(self):
3360 to_store = {}
3362 for key in XendConfig.LEGACY_XENSTORE_VM_PARAMS:
3363 info_key = XendConfig.LEGACY_CFG_TO_XENAPI_CFG.get(key, key)
3364 if self._infoIsSet(info_key):
3365 to_store[key] = str(self.info[info_key])
3367 if self._infoIsSet("static_memory_min"):
3368 to_store["memory"] = str(self.info["static_memory_min"])
3369 if self._infoIsSet("static_memory_max"):
3370 to_store["maxmem"] = str(self.info["static_memory_max"])
3372 image_sxpr = self.info.image_sxpr()
3373 if image_sxpr:
3374 to_store['image'] = sxp.to_string(image_sxpr)
3376 if not self._readVm('xend/restart_count'):
3377 to_store['xend/restart_count'] = str(0)
3379 log.debug("Storing VM details: %s", scrub_password(to_store))
3381 self._writeVm(to_store)
3382 self._setVmPermissions()
3384 def _setVmPermissions(self):
3385 """Allow the guest domain to read its UUID. We don't allow it to
3386 access any other entry, for security."""
3387 xstransact.SetPermissions('%s/uuid' % self.vmpath,
3388 { 'dom' : self.domid,
3389 'read' : True,
3390 'write' : False })
3393 # Utility functions
3396 def __getattr__(self, name):
3397 if name == "state":
3398 log.warn("Somebody tried to read XendDomainInfo.state... should use _stateGet()!")
3399 log.warn("".join(traceback.format_stack()))
3400 return self._stateGet()
3401 else:
3402 raise AttributeError(name)
3404 def __setattr__(self, name, value):
3405 if name == "state":
3406 log.warn("Somebody tried to set XendDomainInfo.state... should use _stateSet()!")
3407 log.warn("".join(traceback.format_stack()))
3408 self._stateSet(value)
3409 else:
3410 self.__dict__[name] = value
3412 def _stateSet(self, state):
3413 self.state_updated.acquire()
3414 try:
3415 # TODO Not sure this is correct...
3416 # _stateGet is live now. Why not fire event
3417 # even when it hasn't changed?
3418 if self._stateGet() != state:
3419 self.state_updated.notifyAll()
3420 import XendAPI
3421 XendAPI.event_dispatch('mod', 'VM', self.info['uuid'],
3422 'power_state')
3423 finally:
3424 self.state_updated.release()
3426 def _stateGet(self):
3427 # Let's try to reconstitute the state from xc.
3428 # First, fetch the domain info from xc -
3429 # this tells us whether the domain
3430 # still exists.
3431 info = dom_get(self.getDomid())
3432 if info is None or info['shutdown']:
3433 # We are either HALTED or SUSPENDED
3434 # check saved image exists
3435 from xen.xend import XendDomain
3436 managed_config_path = \
3437 XendDomain.instance()._managed_check_point_path( \
3438 self.get_uuid())
3439 if os.path.exists(managed_config_path):
3440 return XEN_API_VM_POWER_STATE_SUSPENDED
3441 else:
3442 return XEN_API_VM_POWER_STATE_HALTED
3443 elif info['crashed']:
3444 # Crashed
3445 return XEN_API_VM_POWER_STATE_CRASHED
3446 else:
3447 # We are either RUNNING or PAUSED
3448 if info['paused']:
3449 return XEN_API_VM_POWER_STATE_PAUSED
3450 else:
3451 return XEN_API_VM_POWER_STATE_RUNNING
3453 def _infoIsSet(self, name):
3454 return name in self.info and self.info[name] is not None
3456 def _checkName(self, name):
3457 """Check if a vm name is valid. Valid names contain alphabetic
3458 characters, digits, or characters in '_-.:/+'.
3459 The same name cannot be used for more than one vm at the same time.
3461 @param name: name
3462 @raise: VmError if invalid
3463 """
3464 from xen.xend import XendDomain
3466 if name is None or name == '':
3467 raise VmError('Missing VM Name')
3469 if not re.search(r'^[A-Za-z0-9_\-\.\:\/\+]+$', name):
3470 raise VmError('Invalid VM Name')
3472 dom = XendDomain.instance().domain_lookup_nr(name)
3473 if dom and dom.info['uuid'] != self.info['uuid']:
3474 raise VmError("VM name '%s' already exists%s" %
3475 (name,
3476 dom.domid is not None and
3477 (" as domain %s" % str(dom.domid)) or ""))
3480 def update(self, info = None, refresh = True, transaction = None):
3481 """Update with info from xc.domain_getinfo().
3482 """
3483 log.trace("XendDomainInfo.update(%s) on domain %s", info,
3484 str(self.domid))
3486 if not info:
3487 info = dom_get(self.domid)
3488 if not info:
3489 return
3491 if info["maxmem_kb"] < 0:
3492 info["maxmem_kb"] = XendNode.instance() \
3493 .physinfo_dict()['total_memory'] * 1024
3495 # make sure state is reset for info
3496 # TODO: we should eventually get rid of old_dom_states
3498 self.info.update_config(info)
3499 self._update_consoles(transaction)
3501 if refresh:
3502 self.refreshShutdown(info)
3504 log.trace("XendDomainInfo.update done on domain %s: %s",
3505 str(self.domid), self.info)
3507 def sxpr(self, ignore_store = False, legacy_only = True):
3508 result = self.info.to_sxp(domain = self,
3509 ignore_devices = ignore_store,
3510 legacy_only = legacy_only)
3512 return result
3514 # Xen API
3515 # ----------------------------------------------------------------
3517 def get_uuid(self):
3518 dom_uuid = self.info.get('uuid')
3519 if not dom_uuid: # if it doesn't exist, make one up
3520 dom_uuid = uuid.createString()
3521 self.info['uuid'] = dom_uuid
3522 return dom_uuid
3524 def get_memory_static_max(self):
3525 return self.info.get('memory_static_max', 0)
3526 def get_memory_static_min(self):
3527 return self.info.get('memory_static_min', 0)
3528 def get_memory_dynamic_max(self):
3529 return self.info.get('memory_dynamic_max', 0)
3530 def get_memory_dynamic_min(self):
3531 return self.info.get('memory_dynamic_min', 0)
3533 # only update memory-related config values if they maintain sanity
3534 def _safe_set_memory(self, key, newval):
3535 oldval = self.info.get(key, 0)
3536 try:
3537 self.info[key] = newval
3538 self.info._memory_sanity_check()
3539 except Exception, ex:
3540 self.info[key] = oldval
3541 raise
3543 def set_memory_static_max(self, val):
3544 self._safe_set_memory('memory_static_max', val)
3545 def set_memory_static_min(self, val):
3546 self._safe_set_memory('memory_static_min', val)
3547 def set_memory_dynamic_max(self, val):
3548 self._safe_set_memory('memory_dynamic_max', val)
3549 def set_memory_dynamic_min(self, val):
3550 self._safe_set_memory('memory_dynamic_min', val)
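# Rough usage sketch (assumed): the setters above roll back before the
# exception propagates, so a failed update leaves self.info untouched:
#
#   try:
#       vm.set_memory_dynamic_max(new_val)  # presumed insane value
#   except Exception:
#       pass  # the previous value has already been restored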
3552 def get_vcpus_params(self):
3553 if self.getDomid() is None:
3554 return self.info['vcpus_params']
3556 retval = xc.sched_credit_domain_get(self.getDomid())
3557 return retval
3558 def get_power_state(self):
3559 return XEN_API_VM_POWER_STATE[self._stateGet()]
3560 def get_platform(self):
3561 return self.info.get('platform', {})
3562 def get_pci_bus(self):
3563 return self.info.get('pci_bus', '')
3564 def get_tools_version(self):
3565 return self.info.get('tools_version', {})
3566 def get_metrics(self):
3567 return self.metrics.get_uuid()
3570 def get_security_label(self, xspol=None):
3571 import xen.util.xsm.xsm as security
3572 label = security.get_security_label(self, xspol)
3573 return label
3575 def set_security_label(self, seclab, old_seclab, xspol=None,
3576 xspol_old=None):
3577 """
3578 Set the security label of a domain from its old to
3579 a new value.
3580 @param seclab New security label formatted in the form
3581 <policy type>:<policy name>:<vm label>
3582 @param old_seclab The current security label that the
3583 VM must have.
3584 @param xspol An optional policy under which this
3585 update should be done. If not given,
3586 then the current active policy is used.
3587 @param xspol_old The old policy; only to be passed during
3588 the updating of a policy
3589 @return Returns return code, a string with errors from
3590 the hypervisor's operation, old label of the
3591 domain
3592 """
3593 rc = 0
3594 errors = ""
3595 old_label = ""
3596 new_ssidref = 0
3597 domid = self.getDomid()
3598 res_labels = None
3599 is_policy_update = (xspol_old != None)
3601 from xen.xend.XendXSPolicyAdmin import XSPolicyAdminInstance
3603 state = self._stateGet()
3604 # Relabel only HALTED, RUNNING, PAUSED or SUSPENDED domains
3605 if domid != 0 and \
3606 state not in \
3607 [ DOM_STATE_HALTED, DOM_STATE_RUNNING, DOM_STATE_PAUSED, \
3608 DOM_STATE_SUSPENDED ]:
3609 log.warn("Relabeling domain not possible in state '%s'" %
3610 DOM_STATES[state])
3611 return (-xsconstants.XSERR_VM_WRONG_STATE, "", "", 0)
3613 # Remove security label. Works only for halted or suspended domains
3614 if not seclab or seclab == "":
3615 if state not in [ DOM_STATE_HALTED, DOM_STATE_SUSPENDED ]:
3616 return (-xsconstants.XSERR_VM_WRONG_STATE, "", "", 0)
3618 if self.info.has_key('security_label'):
3619 old_label = self.info['security_label']
3620 # Check label against expected one.
3621 if old_label != old_seclab:
3622 return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)
3623 del self.info['security_label']
3624 xen.xend.XendDomain.instance().managed_config_save(self)
3625 return (xsconstants.XSERR_SUCCESS, "", "", 0)
3627 tmp = seclab.split(":")
3628 if len(tmp) != 3:
3629 return (-xsconstants.XSERR_BAD_LABEL_FORMAT, "", "", 0)
3630 typ, policy, label = tmp
3632 poladmin = XSPolicyAdminInstance()
3633 if not xspol:
3634 xspol = poladmin.get_policy_by_name(policy)
3636 try:
3637 xen.xend.XendDomain.instance().policy_lock.acquire_writer()
3639 if state in [ DOM_STATE_RUNNING, DOM_STATE_PAUSED ]:
3640 # If the domain is running or paused, try to relabel it in the hypervisor
3641 if not xspol:
3642 return (-xsconstants.XSERR_POLICY_NOT_LOADED, "", "", 0)
3644 if typ != xspol.get_type_name() or \
3645 policy != xspol.get_name():
3646 return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)
3648 if typ == xsconstants.ACM_POLICY_ID:
3649 new_ssidref = xspol.vmlabel_to_ssidref(label)
3650 if new_ssidref == xsconstants.INVALID_SSIDREF:
3651 return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)
3653 # Check that all used resources are accessible under the
3654 # new label
3655 if not is_policy_update and \
3656 not security.resources_compatible_with_vmlabel(xspol,
3657 self, label):
3658 return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)
3660 # Check label against expected one. Can only do this
3661 # if the policy hasn't changed underneath in the meantime
3662 if xspol_old == None:
3663 old_label = self.get_security_label()
3664 if old_label != old_seclab:
3665 log.info("old_label != old_seclab: %s != %s" %
3666 (old_label, old_seclab))
3667 return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)
3669 # relabel domain in the hypervisor
3670 rc, errors = security.relabel_domains([[domid, new_ssidref]])
3671 log.info("rc from relabeling in HV: %d" % rc)
3672 else:
3673 return (-xsconstants.XSERR_POLICY_TYPE_UNSUPPORTED, "", "", 0)
3675 if rc == 0:
3676 # HALTED, RUNNING or PAUSED
3677 if domid == 0:
3678 if xspol:
3679 self.info['security_label'] = seclab
3680 ssidref = poladmin.set_domain0_bootlabel(xspol, label)
3681 else:
3682 return (-xsconstants.XSERR_POLICY_NOT_LOADED, "", "", 0)
3683 else:
3684 if self.info.has_key('security_label'):
3685 old_label = self.info['security_label']
3686 # Check label against expected one, unless wildcard
3687 if old_label != old_seclab:
3688 return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)
3690 self.info['security_label'] = seclab
3692 try:
3693 xen.xend.XendDomain.instance().managed_config_save(self)
3694 except:
3695 pass
3696 return (rc, errors, old_label, new_ssidref)
3697 finally:
3698 xen.xend.XendDomain.instance().policy_lock.release()
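# Illustrative call with a hypothetical ACM policy and labels, using the
# "<policy type>:<policy name>:<vm label>" format documented above:
#
#   rc, errors, old_label, ssidref = \
#       vm.set_security_label('ACM:example_policy:green',
#                             'ACM:example_policy:red')
#
# Passing an empty seclab instead removes the label, which the code above
# only permits while the domain is halted or suspended.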
3700 def get_on_shutdown(self):
3701 after_shutdown = self.info.get('actions_after_shutdown')
3702 if not after_shutdown or after_shutdown not in XEN_API_ON_NORMAL_EXIT:
3703 return XEN_API_ON_NORMAL_EXIT[-1]
3704 return after_shutdown
3706 def get_on_reboot(self):
3707 after_reboot = self.info.get('actions_after_reboot')
3708 if not after_reboot or after_reboot not in XEN_API_ON_NORMAL_EXIT:
3709 return XEN_API_ON_NORMAL_EXIT[-1]
3710 return after_reboot
3712 def get_on_suspend(self):
3713 # TODO: not supported
3714 after_suspend = self.info.get('actions_after_suspend')
3715 if not after_suspend or after_suspend not in XEN_API_ON_NORMAL_EXIT:
3716 return XEN_API_ON_NORMAL_EXIT[-1]
3717 return after_suspend
3719 def get_on_crash(self):
3720 after_crash = self.info.get('actions_after_crash')
3721 if not after_crash or after_crash not in \
3722 XEN_API_ON_CRASH_BEHAVIOUR + restart_modes:
3723 return XEN_API_ON_CRASH_BEHAVIOUR[0]
3724 return XEN_API_ON_CRASH_BEHAVIOUR_FILTER[after_crash]
3726 def get_dev_config_by_uuid(self, dev_class, dev_uuid):
3727 """ Get's a device configuration either from XendConfig or
3728 from the DevController.
3730 @param dev_class: device class, either 'vbd' or 'vif'
3731 @param dev_uuid: device UUID
3733 @rtype: dictionary
3734 """
3735 dev_type, dev_config = self.info['devices'].get(dev_uuid, (None, None))
3737 # shortcut if the domain isn't started because
3738 # the devcontrollers will have no better information
3739 # than XendConfig.
3740 if self._stateGet() in (XEN_API_VM_POWER_STATE_HALTED,
3741 XEN_API_VM_POWER_STATE_SUSPENDED):
3742 if dev_config:
3743 return copy.deepcopy(dev_config)
3744 return None
3746 # instead of using dev_class, we use the dev_type
3747 # that is from XendConfig.
3748 controller = self.getDeviceController(dev_type)
3749 if not controller:
3750 return None
3752 all_configs = controller.getAllDeviceConfigurations()
3753 if not all_configs:
3754 return None
3756 updated_dev_config = copy.deepcopy(dev_config)
3757 for _devid, _devcfg in all_configs.items():
3758 if _devcfg.get('uuid') == dev_uuid:
3759 updated_dev_config.update(_devcfg)
3760 updated_dev_config['id'] = _devid
3761 return updated_dev_config
3763 return updated_dev_config
3765 def get_dev_xenapi_config(self, dev_class, dev_uuid):
3766 config = self.get_dev_config_by_uuid(dev_class, dev_uuid)
3767 if not config:
3768 return {}
3770 config['VM'] = self.get_uuid()
3772 if dev_class == 'vif':
3773 if not config.has_key('name'):
3774 config['name'] = config.get('vifname', '')
3775 if not config.has_key('MAC'):
3776 config['MAC'] = config.get('mac', '')
3777 if not config.has_key('type'):
3778 config['type'] = 'paravirtualised'
3779 if not config.has_key('device'):
3780 devid = config.get('id')
3781 if devid != None:
3782 config['device'] = 'eth%s' % devid
3783 else:
3784 config['device'] = ''
3786 if not config.has_key('network'):
3787 try:
3788 bridge = config.get('bridge', None)
3789 if bridge is None:
3790 from xen.util import Brctl
3791 if_to_br = dict([(i,b)
3792 for (b,ifs) in Brctl.get_state().items()
3793 for i in ifs])
3794 vifname = "vif%s.%s" % (self.getDomid(),
3795 config.get('id'))
3796 bridge = if_to_br.get(vifname, None)
3797 config['network'] = \
3798 XendNode.instance().bridge_to_network(
3799 bridge).get_uuid()
3800 except Exception:
3801 log.exception('bridge_to_network')
3802 # Ignore this for now -- it may happen if the device
3803 # has been specified using the legacy methods, but at
3804 # some point we're going to have to figure out how to
3805 # handle that properly.
3807 config['MTU'] = 1500 # TODO
3809 if self._stateGet() not in (XEN_API_VM_POWER_STATE_HALTED,):
3810 xennode = XendNode.instance()
3811 rx_bps, tx_bps = xennode.get_vif_util(self.domid, devid)
3812 config['io_read_kbs'] = rx_bps/1024
3813 config['io_write_kbs'] = tx_bps/1024
3814 rx, tx = xennode.get_vif_stat(self.domid, devid)
3815 config['io_total_read_kbs'] = rx/1024
3816 config['io_total_write_kbs'] = tx/1024
3817 else:
3818 config['io_read_kbs'] = 0.0
3819 config['io_write_kbs'] = 0.0
3820 config['io_total_read_kbs'] = 0.0
3821 config['io_total_write_kbs'] = 0.0
3823 config['security_label'] = config.get('security_label', '')
3825 if dev_class == 'vbd':
3827 if self._stateGet() not in (XEN_API_VM_POWER_STATE_HALTED,):
3828 controller = self.getDeviceController(dev_class)
3829 devid, _1, _2 = controller.getDeviceDetails(config)
3830 xennode = XendNode.instance()
3831 rd_blkps, wr_blkps = xennode.get_vbd_util(self.domid, devid)
3832 config['io_read_kbs'] = rd_blkps
3833 config['io_write_kbs'] = wr_blkps
3834 else:
3835 config['io_read_kbs'] = 0.0
3836 config['io_write_kbs'] = 0.0
3838 config['VDI'] = config.get('VDI', '')
3839 config['device'] = config.get('dev', '')
3840 if config['device'].startswith('ioemu:'):
3841 _, vbd_device = config['device'].split(':', 1)
3842 config['device'] = vbd_device
3843 if ':' in config['device']:
3844 vbd_name, vbd_type = config['device'].split(':', 1)
3845 config['device'] = vbd_name
3846 if vbd_type == 'cdrom':
3847 config['type'] = XEN_API_VBD_TYPE[0]
3848 else:
3849 config['type'] = XEN_API_VBD_TYPE[1]
3851 config['driver'] = 'paravirtualised' # TODO
3852 config['image'] = config.get('uname', '')
3854 if config.get('mode', 'r') == 'r':
3855 config['mode'] = 'RO'
3856 else:
3857 config['mode'] = 'RW'
3859 if dev_class == 'vtpm':
3860 if not config.has_key('type'):
3861 config['type'] = 'paravirtualised' # TODO
3862 if not config.has_key('backend'):
3863 config['backend'] = "00000000-0000-0000-0000-000000000000"
3865 return config
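# Illustrative shape of the dictionary returned for a vif on a halted
# domain (all values made up):
#
#   {'VM': '<vm-uuid>', 'name': '', 'MAC': '00:16:3e:00:00:01',
#    'type': 'paravirtualised', 'device': 'eth0', 'MTU': 1500,
#    'io_read_kbs': 0.0, 'io_write_kbs': 0.0,
#    'io_total_read_kbs': 0.0, 'io_total_write_kbs': 0.0,
#    'security_label': ''}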
3867 def get_dev_property(self, dev_class, dev_uuid, field):
3868 config = self.get_dev_xenapi_config(dev_class, dev_uuid)
3869 try:
3870 return config[field]
3871 except KeyError:
3872 raise XendError('Invalid property for device: %s' % field)
3874 def set_dev_property(self, dev_class, dev_uuid, field, value):
3875 self.info['devices'][dev_uuid][1][field] = value
3877 def get_vcpus_util(self):
3878 vcpu_util = {}
3879 xennode = XendNode.instance()
3880 if 'VCPUs_max' in self.info and self.domid != None:
3881 for i in range(0, self.info['VCPUs_max']):
3882 util = xennode.get_vcpu_util(self.domid, i)
3883 vcpu_util[str(i)] = util
3885 return vcpu_util
3887 def get_consoles(self):
3888 return self.info.get('console_refs', [])
3890 def get_vifs(self):
3891 return self.info.get('vif_refs', [])
3893 def get_vbds(self):
3894 return self.info.get('vbd_refs', [])
3896 def get_vtpms(self):
3897 return self.info.get('vtpm_refs', [])
3899 def get_dpcis(self):
3900 return XendDPCI.get_by_VM(self.info.get('uuid'))
3902 def get_dscsis(self):
3903 return XendDSCSI.get_by_VM(self.info.get('uuid'))
3905 def get_dscsi_HBAs(self):
3906 return XendDSCSI_HBA.get_by_VM(self.info.get('uuid'))
3908 def create_vbd(self, xenapi_vbd, vdi_image_path):
3909 """Create a VBD using a VDI from XendStorageRepository.
3911 @param xenapi_vbd: vbd struct from the Xen API
3912 @param vdi_image_path: path of the VDI image
3913 @rtype: string
3914 @return: uuid of the device
3915 """
3916 xenapi_vbd['image'] = vdi_image_path
3917 if vdi_image_path.startswith('tap'):
3918 dev_uuid = self.info.device_add('tap2', cfg_xenapi = xenapi_vbd)
3919 else:
3920 dev_uuid = self.info.device_add('vbd', cfg_xenapi = xenapi_vbd)
3922 if not dev_uuid:
3923 raise XendError('Failed to create device')
3925 if self._stateGet() in (XEN_API_VM_POWER_STATE_RUNNING,
3926 XEN_API_VM_POWER_STATE_PAUSED):
3927 _, config = self.info['devices'][dev_uuid]
3929 if vdi_image_path.startswith('tap'):
3930 dev_control = self.getDeviceController('tap2')
3931 else:
3932 dev_control = self.getDeviceController('vbd')
3934 try:
3935 devid = dev_control.createDevice(config)
3936 dev_type = self.getBlockDeviceClass(devid)
3937 self._waitForDevice(dev_type, devid)
3938 self.info.device_update(dev_uuid,
3939 cfg_xenapi = {'devid': devid})
3940 except Exception, exn:
3941 log.exception(exn)
3942 del self.info['devices'][dev_uuid]
3943 self.info['vbd_refs'].remove(dev_uuid)
3944 raise
3946 return dev_uuid
3948 def create_phantom_vbd_with_vdi(self, xenapi_vbd, vdi_image_path):
3949 """Create a VBD using a VDI from XendStorageRepository.
3951 @param xenapi_vbd: vbd struct from the Xen API
3952 @param vdi_image_path: path of the VDI image
3953 @rtype: integer
3954 @return: devid of the phantom device
3955 """
3956 xenapi_vbd['image'] = vdi_image_path
3957 dev_uuid = self.info.phantom_device_add('tap', cfg_xenapi = xenapi_vbd)
3958 if not dev_uuid:
3959 raise XendError('Failed to create device')
3961 if self._stateGet() == XEN_API_VM_POWER_STATE_RUNNING:
3962 _, config = self.info['devices'][dev_uuid]
3963 config['devid'] = self.getDeviceController('tap').createDevice(config)
3965 return config['devid']
3967 def create_vif(self, xenapi_vif):
3968 """Create VIF device from the passed struct in Xen API format.
3970 @param xenapi_vif: Xen API VIF Struct.
3971 @rtype: string
3972 @return: UUID
3973 """
3974 dev_uuid = self.info.device_add('vif', cfg_xenapi = xenapi_vif)
3975 if not dev_uuid:
3976 raise XendError('Failed to create device')
3978 if self._stateGet() in (XEN_API_VM_POWER_STATE_RUNNING,
3979 XEN_API_VM_POWER_STATE_PAUSED):
3981 _, config = self.info['devices'][dev_uuid]
3982 dev_control = self.getDeviceController('vif')
3984 try:
3985 devid = dev_control.createDevice(config)
3986 dev_control.waitForDevice(devid)
3987 self.info.device_update(dev_uuid,
3988 cfg_xenapi = {'devid': devid})
3989 except Exception, exn:
3990 log.exception(exn)
3991 del self.info['devices'][dev_uuid]
3992 self.info['vif_refs'].remove(dev_uuid)
3993 raise
3995 return dev_uuid
3997 def create_vtpm(self, xenapi_vtpm):
3998 """Create a VTPM device from the passed struct in Xen API format.
4000 @return: uuid of the device
4001 @rtype: string
4002 """
4004 if self._stateGet() not in (DOM_STATE_HALTED,):
4005 raise VmError("Can only add vTPM to a halted domain.")
4006 if self.get_vtpms() != []:
4007 raise VmError('Domain already has a vTPM.')
4008 dev_uuid = self.info.device_add('vtpm', cfg_xenapi = xenapi_vtpm)
4009 if not dev_uuid:
4010 raise XendError('Failed to create device')
4012 return dev_uuid
4014 def create_console(self, xenapi_console):
4015 """ Create a console device from a Xen API struct.
4017 @return: uuid of device
4018 @rtype: string
4019 """
4020 if self._stateGet() not in (DOM_STATE_HALTED,):
4021 raise VmError("Can only add console to a halted domain.")
4023 dev_uuid = self.info.device_add('console', cfg_xenapi = xenapi_console)
4024 if not dev_uuid:
4025 raise XendError('Failed to create device')
4027 return dev_uuid
4029 def set_console_other_config(self, console_uuid, other_config):
4030 self.info.console_update(console_uuid, 'other_config', other_config)
4032 def create_dpci(self, xenapi_pci):
4033 """Create pci device from the passed struct in Xen API format.
4035 @param xenapi_pci: DPCI struct from Xen API
4036 @rtype: string
4037 @return: UUID of the newly created DPCI
4040 """
4042 dpci_uuid = uuid.createString()
4044 dpci_opts = []
4045 opts_dict = xenapi_pci.get('options')
4046 for k in opts_dict.keys():
4047 dpci_opts.append([k, opts_dict[k]])
4048 opts_sxp = pci_opts_list_to_sxp(dpci_opts)
4050 # Convert xenapi to sxp
4051 ppci = XendAPIStore.get(xenapi_pci.get('PPCI'), 'PPCI')
4053 dev_sxp = ['dev',
4054 ['domain', '0x%02x' % ppci.get_domain()],
4055 ['bus', '0x%02x' % ppci.get_bus()],
4056 ['slot', '0x%02x' % ppci.get_slot()],
4057 ['func', '0x%1x' % ppci.get_func()],
4058 ['vdevfn', '0x%02x' % xenapi_pci.get('hotplug_slot')],
4059 ['key', xenapi_pci['key']],
4060 ['uuid', dpci_uuid]]
4061 dev_sxp = sxp.merge(dev_sxp, opts_sxp)
4063 target_pci_sxp = ['pci', dev_sxp, ['state', 'Initialising'] ]
4065 if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:
4067 old_pci_sxp = self._getDeviceInfo_pci(0)
4069 if old_pci_sxp is None:
4070 dev_uuid = self.info.device_add('pci', cfg_sxp = target_pci_sxp)
4071 if not dev_uuid:
4072 raise XendError('Failed to create device')
4074 else:
4075 new_pci_sxp = ['pci']
4076 for existing_dev in sxp.children(old_pci_sxp, 'dev'):
4077 new_pci_sxp.append(existing_dev)
4078 new_pci_sxp.append(sxp.child0(target_pci_sxp, 'dev'))
4080 dev_uuid = sxp.child_value(old_pci_sxp, 'uuid')
4081 self.info.device_update(dev_uuid, new_pci_sxp)
4083 xen.xend.XendDomain.instance().managed_config_save(self)
4085 else:
4086 try:
4087 self.device_configure(target_pci_sxp)
4089 except Exception, exn:
4090 raise XendError('Failed to create device')
4092 return dpci_uuid
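# For reference, the target_pci_sxp built above for a hypothetical device
# 0000:00:1d.0 in hotplug slot 0x08 (key/uuid elided) looks like:
#
#   ['pci',
#    ['dev', ['domain', '0x00'], ['bus', '0x00'], ['slot', '0x1d'],
#            ['func', '0x0'], ['vdevfn', '0x08'],
#            ['key', '...'], ['uuid', '...']],
#    ['state', 'Initialising']]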
4094 def create_dscsi(self, xenapi_dscsi):
4095 """Create scsi device from the passed struct in Xen API format.
4097 @param xenapi_dscsi: DSCSI struct from Xen API
4098 @rtype: string
4099 @return: UUID
4100 """
4102 dscsi_uuid = uuid.createString()
4104 # Convert xenapi to sxp
4105 pscsi = XendAPIStore.get(xenapi_dscsi.get('PSCSI'), 'PSCSI')
4106 devid = int(xenapi_dscsi.get('virtual_HCTL').split(':')[0])
4107 target_vscsi_sxp = \
4108 ['vscsi',
4109 ['dev',
4110 ['devid', devid],
4111 ['p-devname', pscsi.get_dev_name()],
4112 ['p-dev', pscsi.get_physical_HCTL()],
4113 ['v-dev', xenapi_dscsi.get('virtual_HCTL')],
4114 ['state', xenbusState['Initialising']],
4115 ['uuid', dscsi_uuid]
4116 ],
4117 ['feature-host', 0]
4118 ]
4120 if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:
4122 cur_vscsi_sxp = self._getDeviceInfo_vscsi(devid)
4124 if cur_vscsi_sxp is None:
4125 dev_uuid = self.info.device_add('vscsi', cfg_sxp = target_vscsi_sxp)
4126 if not dev_uuid:
4127 raise XendError('Failed to create device')
4129 else:
4130 new_vscsi_sxp = ['vscsi', ['feature-host', 0]]
4131 for existing_dev in sxp.children(cur_vscsi_sxp, 'dev'):
4132 new_vscsi_sxp.append(existing_dev)
4133 new_vscsi_sxp.append(sxp.child0(target_vscsi_sxp, 'dev'))
4135 dev_uuid = sxp.child_value(cur_vscsi_sxp, 'uuid')
4136 self.info.device_update(dev_uuid, new_vscsi_sxp)
4138 xen.xend.XendDomain.instance().managed_config_save(self)
4140 else:
4141 try:
4142 self.device_configure(target_vscsi_sxp)
4143 except Exception, exn:
4144 log.exception('create_dscsi: %s', exn)
4145 raise XendError('Failed to create device')
4147 return dscsi_uuid
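# Illustrative target_vscsi_sxp as assembled above, for a hypothetical
# physical device sdb at HCTL 1:0:0:0 mapped to virtual HCTL 0:0:0:0
# (so devid 0; xenbusState['Initialising'] shown as its numeric value):
#
#   ['vscsi',
#    ['dev', ['devid', 0], ['p-devname', 'sdb'], ['p-dev', '1:0:0:0'],
#            ['v-dev', '0:0:0:0'], ['state', 1], ['uuid', '...']],
#    ['feature-host', 0]]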
4149 def create_dscsi_HBA(self, xenapi_dscsi):
4150 """Create scsi devices from the passed struct in Xen API format.
4152 @param xenapi_dscsi: DSCSI_HBA struct from Xen API
4153 @rtype: string
4154 @return: UUID
4155 """
4157 dscsi_HBA_uuid = uuid.createString()
4159 # Convert xenapi to sxp
4160 feature_host = xenapi_dscsi.get('assignment_mode', 'HOST') == 'HOST' and 1 or 0
4161 target_vscsi_sxp = \
4162 ['vscsi',
4163 ['feature-host', feature_host],
4164 ['uuid', dscsi_HBA_uuid],
4165 ]
4166 pscsi_HBA = XendAPIStore.get(xenapi_dscsi.get('PSCSI_HBA'), 'PSCSI_HBA')
4167 devid = pscsi_HBA.get_physical_host()
4168 for pscsi_uuid in pscsi_HBA.get_PSCSIs():
4169 pscsi = XendAPIStore.get(pscsi_uuid, 'PSCSI')
4170 pscsi_HCTL = pscsi.get_physical_HCTL()
4171 dscsi_uuid = uuid.createString()
4172 dev = \
4173 ['dev',
4174 ['devid', devid],
4175 ['p-devname', pscsi.get_dev_name()],
4176 ['p-dev', pscsi_HCTL],
4177 ['v-dev', pscsi_HCTL],
4178 ['state', xenbusState['Initialising']],
4179 ['uuid', dscsi_uuid]
4180 ]
4181 target_vscsi_sxp.append(dev)
4183 if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:
4184 if not self.info.device_add('vscsi', cfg_sxp = target_vscsi_sxp):
4185 raise XendError('Failed to create device')
4186 xen.xend.XendDomain.instance().managed_config_save(self)
4187 else:
4188 try:
4189 self.device_configure(target_vscsi_sxp)
4190 except Exception, exn:
4191 log.exception('create_dscsi_HBA: %s', exn)
4192 raise XendError('Failed to create device')
4194 return dscsi_HBA_uuid
4197 def change_vdi_of_vbd(self, xenapi_vbd, vdi_image_path):
4198 """Change current VDI with the new VDI.
4200 @param xenapi_vbd: vbd struct from the Xen API
4201 @param vdi_image_path: path of VDI
4202 """
4203 dev_uuid = xenapi_vbd['uuid']
4204 if dev_uuid not in self.info['devices']:
4205 raise XendError('Device does not exist')
4207 # Convert xenapi to sxp
4208 if vdi_image_path.startswith('tap'):
4209 dev_class = 'tap'
4210 else:
4211 dev_class = 'vbd'
4212 dev_sxp = [
4213 dev_class,
4214 ['uuid', dev_uuid],
4215 ['uname', vdi_image_path],
4216 ['dev', '%s:cdrom' % xenapi_vbd['device']],
4217 ['mode', 'r'],
4218 ['VDI', xenapi_vbd['VDI']]
4219 ]
4221 if self._stateGet() in (XEN_API_VM_POWER_STATE_RUNNING,
4222 XEN_API_VM_POWER_STATE_PAUSED):
4223 self.device_configure(dev_sxp)
4224 else:
4225 self.info.device_update(dev_uuid, dev_sxp)
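# E.g. (hypothetical paths and uuids): swapping the CD image of hdc for a
# tap-backed ISO produces a reconfiguration request like:
#
#   ['tap', ['uuid', '<dev-uuid>'], ['uname', 'tap:aio:/iso/new.iso'],
#           ['dev', 'hdc:cdrom'], ['mode', 'r'], ['VDI', '<vdi-uuid>']]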
4228 def destroy_device_by_uuid(self, dev_type, dev_uuid):
4229 if dev_uuid not in self.info['devices']:
4230 raise XendError('Device does not exist')
4232 try:
4233 if self._stateGet() in (XEN_API_VM_POWER_STATE_RUNNING,
4234 XEN_API_VM_POWER_STATE_PAUSED):
4235 _, config = self.info['devices'][dev_uuid]
4236 devid = config.get('devid')
4237 if devid != None:
4238 self.getDeviceController(dev_type).destroyDevice(devid, force = False)
4239 else:
4240 raise XendError('Unable to get devid for device: %s:%s' %
4241 (dev_type, dev_uuid))
4242 finally:
4243 del self.info['devices'][dev_uuid]
4244 self.info['%s_refs' % dev_type].remove(dev_uuid)
4246 def destroy_vbd(self, dev_uuid):
4247 self.destroy_device_by_uuid('vbd', dev_uuid)
4249 def destroy_vif(self, dev_uuid):
4250 self.destroy_device_by_uuid('vif', dev_uuid)
4252 def destroy_vtpm(self, dev_uuid):
4253 self.destroy_device_by_uuid('vtpm', dev_uuid)
4255 def destroy_dpci(self, dev_uuid):
4257 dpci = XendAPIStore.get(dev_uuid, 'DPCI')
4258 ppci = XendAPIStore.get(dpci.get_PPCI(), 'PPCI')
4260 old_pci_sxp = self._getDeviceInfo_pci(0)
4261 dev_uuid = sxp.child_value(old_pci_sxp, 'uuid')
4262 target_dev = None
4263 new_pci_sxp = ['pci']
4264 for dev in sxp.children(old_pci_sxp, 'dev'):
4265 pci_dev = {}
4266 pci_dev['domain'] = sxp.child_value(dev, 'domain')
4267 pci_dev['bus'] = sxp.child_value(dev, 'bus')
4268 pci_dev['slot'] = sxp.child_value(dev, 'slot')
4269 pci_dev['func'] = sxp.child_value(dev, 'func')
4270 if ppci.get_name() == pci_dict_to_bdf_str(pci_dev):
4271 target_dev = dev
4272 else:
4273 new_pci_sxp.append(dev)
4275 if target_dev is None:
4276 raise XendError('Failed to destroy device')
4278 target_pci_sxp = ['pci', target_dev, ['state', 'Closing']]
4280 if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:
4282 self.info.device_update(dev_uuid, new_pci_sxp)
4283 if len(sxp.children(new_pci_sxp, 'dev')) == 0:
4284 del self.info['devices'][dev_uuid]
4285 xen.xend.XendDomain.instance().managed_config_save(self)
4287 else:
4288 try:
4289 self.device_configure(target_pci_sxp)
4291 except Exception, exn:
4292 raise XendError('Failed to destroy device')
4294 def destroy_dscsi(self, dev_uuid):
4295 dscsi = XendAPIStore.get(dev_uuid, 'DSCSI')
4296 devid = dscsi.get_virtual_host()
4297 vHCTL = dscsi.get_virtual_HCTL()
4298 cur_vscsi_sxp = self._getDeviceInfo_vscsi(devid)
4299 dev_uuid = sxp.child_value(cur_vscsi_sxp, 'uuid')
4301 target_dev = None
4302 new_vscsi_sxp = ['vscsi', ['feature-host', 0]]
4303 for dev in sxp.children(cur_vscsi_sxp, 'dev'):
4304 if vHCTL == sxp.child_value(dev, 'v-dev'):
4305 target_dev = dev
4306 else:
4307 new_vscsi_sxp.append(dev)
4309 if target_dev is None:
4310 raise XendError('Failed to destroy device')
4312 target_dev.append(['state', xenbusState['Closing']])
4313 target_vscsi_sxp = ['vscsi', target_dev, ['feature-host', 0]]
4315 if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:
4317 self.info.device_update(dev_uuid, new_vscsi_sxp)
4318 if len(sxp.children(new_vscsi_sxp, 'dev')) == 0:
4319 del self.info['devices'][dev_uuid]
4320 xen.xend.XendDomain.instance().managed_config_save(self)
4322 else:
4323 try:
4324 self.device_configure(target_vscsi_sxp)
4325 except Exception, exn:
4326 log.exception('destroy_dscsi: %s', exn)
4327 raise XendError('Failed to destroy device')
4329 def destroy_dscsi_HBA(self, dev_uuid):
4330 dscsi_HBA = XendAPIStore.get(dev_uuid, 'DSCSI_HBA')
4331 devid = dscsi_HBA.get_virtual_host()
4332 cur_vscsi_sxp = self._getDeviceInfo_vscsi(devid)
4333 feature_host = sxp.child_value(cur_vscsi_sxp, 'feature-host')
4335 if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:
4336 new_vscsi_sxp = ['vscsi', ['feature-host', feature_host]]
4337 self.info.device_update(dev_uuid, new_vscsi_sxp)
4338 del self.info['devices'][dev_uuid]
4339 xen.xend.XendDomain.instance().managed_config_save(self)
4340 else:
4341 # If feature_host is 1, all devices are destroyed by just
4342 # one reconfiguration.
4343 # If feature_host is 0, we should reconfigure all devices
4344 # one-by-one to destroy all devices.
4345 # See reconfigureDevice@VSCSIController.
4346 for dev in sxp.children(cur_vscsi_sxp, 'dev'):
4347 target_vscsi_sxp = [
4348 'vscsi',
4349 dev + [['state', xenbusState['Closing']]],
4350 ['feature-host', feature_host]
4351 ]
4352 try:
4353 self.device_configure(target_vscsi_sxp)
4354 except Exception, exn:
4355 log.exception('destroy_dscsi_HBA: %s', exn)
4356 raise XendError('Failed to destroy device')
4357 if feature_host:
4358 break
4360 def destroy_xapi_instances(self):
4361 """Destroy Xen-API instances stored in XendAPIStore.
4362 """
4363 # Xen-API classes based on XendBase have their instances stored
4364 # in XendAPIStore. Cleanup these instances here, if they are supposed
4365 # to be destroyed when the parent domain is dead.
4367 # Most of the virtual devices (vif, vbd, vfb, etc) are not based on
4368 # XendBase and there's no need to remove them from XendAPIStore.
4370 from xen.xend import XendDomain
4371 if XendDomain.instance().is_valid_vm(self.info.get('uuid')):
4372 # domain still exists.
4373 return
4375 # Destroy the VMMetrics instance.
4376 if XendAPIStore.get(self.metrics.get_uuid(), self.metrics.getClass()) \
4377 is not None:
4378 self.metrics.destroy()
4380 # Destroy DPCI instances.
4381 for dpci_uuid in XendDPCI.get_by_VM(self.info.get('uuid')):
4382 XendAPIStore.deregister(dpci_uuid, "DPCI")
4384 # Destroy DSCSI instances.
4385 for dscsi_uuid in XendDSCSI.get_by_VM(self.info.get('uuid')):
4386 XendAPIStore.deregister(dscsi_uuid, "DSCSI")
4388 # Destroy DSCSI_HBA instances.
4389 for dscsi_HBA_uuid in XendDSCSI_HBA.get_by_VM(self.info.get('uuid')):
4390 XendAPIStore.deregister(dscsi_HBA_uuid, "DSCSI_HBA")
4392 def has_device(self, dev_class, dev_uuid):
4393 return (dev_uuid in self.info['%s_refs' % dev_class.lower()])
4395 def __str__(self):
4396 return '<domain id=%s name=%s memory=%s state=%s>' % \
4397 (str(self.domid), self.info['name_label'],
4398 str(self.info['memory_dynamic_max']), DOM_STATES[self._stateGet()])
4400 __repr__ = __str__