debuggers.hg

view tools/python/xen/xend/XendDomainInfo.py @ 20961:d28a351f0589

xend: Enlarge the memory balloon size for domain creation since shadow
pre-allocation size has changed from 1M to 4M.

Signed-off-by: Dongxiao Xu <dongxiao.xu@intel.com>
author Keir Fraser <keir.fraser@citrix.com>
date Wed Feb 10 09:07:48 2010 +0000 (2010-02-10)
parents 2a775968c7a1
children 351a34c2fd48
line source
1 #===========================================================================
2 # This library is free software; you can redistribute it and/or
3 # modify it under the terms of version 2.1 of the GNU Lesser General Public
4 # License as published by the Free Software Foundation.
5 #
6 # This library is distributed in the hope that it will be useful,
7 # but WITHOUT ANY WARRANTY; without even the implied warranty of
8 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
9 # Lesser General Public License for more details.
10 #
11 # You should have received a copy of the GNU Lesser General Public
12 # License along with this library; if not, write to the Free Software
13 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
14 #============================================================================
15 # Copyright (C) 2004, 2005 Mike Wray <mike.wray@hp.com>
16 # Copyright (C) 2005-2007 XenSource Ltd
17 #============================================================================
19 """Representation of a single domain.
20 Includes support for domain construction, using
21 open-ended configurations.
23 Author: Mike Wray <mike.wray@hp.com>
25 """
27 import logging
28 import time
29 import threading
30 import thread
31 import re
32 import copy
33 import os
34 import stat
35 import traceback
36 from types import StringTypes
38 import xen.lowlevel.xc
39 from xen.util import asserts, auxbin
40 from xen.util.blkif import blkdev_uname_to_file, blkdev_uname_to_taptype
41 import xen.util.xsm.xsm as security
42 from xen.util import xsconstants
43 from xen.util import mkdir
44 from xen.util.pci import serialise_pci_opts, pci_opts_list_to_sxp, \
45 append_default_pci_opts, \
46 pci_dict_to_bdf_str, pci_dict_to_xc_str, \
47 pci_convert_sxp_to_dict, pci_convert_dict_to_sxp, \
48 pci_dict_cmp, PCI_DEVFN, PCI_SLOT, PCI_FUNC, parse_hex
50 from xen.xend import balloon, sxp, uuid, image, arch
51 from xen.xend import XendOptions, XendNode, XendConfig
53 from xen.xend.XendConfig import scrub_password
54 from xen.xend.XendBootloader import bootloader, bootloader_tidy
55 from xen.xend.XendError import XendError, VmError
56 from xen.xend.XendDevices import XendDevices
57 from xen.xend.XendTask import XendTask
58 from xen.xend.xenstore.xstransact import xstransact, complete
59 from xen.xend.xenstore.xsutil import GetDomainPath, IntroduceDomain, SetTarget, ResumeDomain
60 from xen.xend.xenstore.xswatch import xswatch
61 from xen.xend.XendConstants import *
62 from xen.xend.XendAPIConstants import *
63 from xen.xend.server.DevConstants import xenbusState
64 from xen.xend.server.BlktapController import TAPDISK_DEVICE, parseDeviceString
66 from xen.xend.XendVMMetrics import XendVMMetrics
68 from xen.xend import XendAPIStore
69 from xen.xend.XendPPCI import XendPPCI
70 from xen.xend.XendDPCI import XendDPCI
71 from xen.xend.XendPSCSI import XendPSCSI
72 from xen.xend.XendDSCSI import XendDSCSI, XendDSCSI_HBA
74 MIGRATE_TIMEOUT = 30.0
75 BOOTLOADER_LOOPBACK_DEVICE = '/dev/xvdp'
77 xc = xen.lowlevel.xc.xc()
78 xoptions = XendOptions.instance()
80 log = logging.getLogger("xend.XendDomainInfo")
81 #log.setLevel(logging.TRACE)
84 def create(config):
85 """Creates and start a VM using the supplied configuration.
87 @param config: A configuration object involving lists of tuples.
88 @type config: list of lists, eg ['vm', ['image', 'xen.gz']]
90 @rtype: XendDomainInfo
91 @return: An up and running XendDomainInfo instance
92 @raise VmError: Invalid configuration or failure to start.
93 """
94 from xen.xend import XendDomain
95 domconfig = XendConfig.XendConfig(sxp_obj = config)
96 othervm = XendDomain.instance().domain_lookup_nr(domconfig["name_label"])
97 if othervm is None or othervm.domid is None:
98 othervm = XendDomain.instance().domain_lookup_nr(domconfig["uuid"])
99 if othervm is not None and othervm.domid is not None:
100 raise VmError("Domain '%s' already exists with ID '%d'" % (domconfig["name_label"], othervm.domid))
101 log.debug("XendDomainInfo.create(%s)", scrub_password(config))
102 vm = XendDomainInfo(domconfig)
103 try:
104 vm.start()
105 except:
106 log.exception('Domain construction failed')
107 vm.destroy()
108 raise
110 return vm
112 def create_from_dict(config_dict):
113 """Creates and start a VM using the supplied configuration.
115 @param config_dict: An configuration dictionary.
117 @rtype: XendDomainInfo
118 @return: An up and running XendDomainInfo instance
119 @raise VmError: Invalid configuration or failure to start.
120 """
122 log.debug("XendDomainInfo.create_from_dict(%s)",
123 scrub_password(config_dict))
124 vm = XendDomainInfo(XendConfig.XendConfig(xapi = config_dict))
125 try:
126 vm.start()
127 except:
128 log.exception('Domain construction failed')
129 vm.destroy()
130 raise
131 return vm
133 def recreate(info, priv):
134 """Create the VM object for an existing domain. The domain must not
135 be dying, as the paths in the store should already have been removed,
136 and asking us to recreate them causes problems.
138 @param xeninfo: Parsed configuration
139 @type xeninfo: Dictionary
140 @param priv: Is a privileged domain (Dom 0)
141 @type priv: bool
143 @rtype: XendDomainInfo
144 @return: A up and running XendDomainInfo instance
145 @raise VmError: Invalid configuration.
146 @raise XendError: Errors with configuration.
147 """
149 log.debug("XendDomainInfo.recreate(%s)", scrub_password(info))
151 assert not info['dying']
153 xeninfo = XendConfig.XendConfig(dominfo = info)
154 xeninfo['is_control_domain'] = priv
155 xeninfo['is_a_template'] = False
156 xeninfo['auto_power_on'] = False
157 domid = xeninfo['domid']
158 uuid1 = uuid.fromString(xeninfo['uuid'])
159 needs_reinitialising = False
161 dompath = GetDomainPath(domid)
162 if not dompath:
163 raise XendError('No domain path in store for existing '
164 'domain %d' % domid)
166 log.info("Recreating domain %d, UUID %s. at %s" %
167 (domid, xeninfo['uuid'], dompath))
169 # need to verify the path and uuid if not Domain-0
170 # if the required uuid and vm aren't set, then that means
171 # we need to recreate the dom with our own values
172 #
173 # NOTE: this is probably not desirable, really we should just
174 # abort or ignore, but there may be cases where xenstore's
175 # entry disappears (eg. xenstore-rm /)
176 #
177 try:
178 vmpath = xstransact.Read(dompath, "vm")
179 if not vmpath:
180 if not priv:
181 log.warn('/local/domain/%d/vm is missing. recreate is '
182 'confused, trying our best to recover' % domid)
183 needs_reinitialising = True
184 raise XendError('reinit')
186 uuid2_str = xstransact.Read(vmpath, "uuid")
187 if not uuid2_str:
188 log.warn('%s/uuid/ is missing. recreate is confused, '
189 'trying our best to recover' % vmpath)
190 needs_reinitialising = True
191 raise XendError('reinit')
193 uuid2 = uuid.fromString(uuid2_str)
194 if uuid1 != uuid2:
195 log.warn('UUID in /vm does not match the UUID in /dom/%d.'
196 'Trying out best to recover' % domid)
197 needs_reinitialising = True
198 except XendError:
199 pass # our best shot at 'goto' in python :)
201 vm = XendDomainInfo(xeninfo, domid, dompath, augment = True, priv = priv,
202 vmpath = vmpath)
204 if needs_reinitialising:
205 vm._recreateDom()
206 vm._removeVm()
207 vm._storeVmDetails()
208 vm._storeDomDetails()
210 vm.image = image.create(vm, vm.info)
211 vm.image.recreate()
213 vm._registerWatches()
214 vm.refreshShutdown(xeninfo)
216 # register the domain in the list
217 from xen.xend import XendDomain
218 XendDomain.instance().add_domain(vm)
220 return vm
223 def restore(config):
224 """Create a domain and a VM object to do a restore.
226 @param config: Domain SXP configuration
227 @type config: list of lists. (see C{create})
229 @rtype: XendDomainInfo
230 @return: A up and running XendDomainInfo instance
231 @raise VmError: Invalid configuration or failure to start.
232 @raise XendError: Errors with configuration.
233 """
235 log.debug("XendDomainInfo.restore(%s)", scrub_password(config))
236 vm = XendDomainInfo(XendConfig.XendConfig(sxp_obj = config),
237 resume = True)
238 try:
239 vm.resume()
240 return vm
241 except:
242 vm.destroy()
243 raise
245 def createDormant(domconfig):
246 """Create a dormant/inactive XenDomainInfo without creating VM.
247 This is for creating instances of persistent domains that are not
248 yet start.
250 @param domconfig: Parsed configuration
251 @type domconfig: XendConfig object
253 @rtype: XendDomainInfo
254 @return: A up and running XendDomainInfo instance
255 @raise XendError: Errors with configuration.
256 """
258 log.debug("XendDomainInfo.createDormant(%s)", scrub_password(domconfig))
260 # domid does not make sense for non-running domains.
261 domconfig.pop('domid', None)
262 vm = XendDomainInfo(domconfig)
263 return vm
265 def domain_by_name(name):
266 """Get domain by name
268 @params name: Name of the domain
269 @type name: string
270 @return: XendDomainInfo or None
271 """
272 from xen.xend import XendDomain
273 return XendDomain.instance().domain_lookup_by_name_nr(name)
276 def shutdown_reason(code):
277 """Get a shutdown reason from a code.
279 @param code: shutdown code
280 @type code: int
281 @return: shutdown reason
282 @rtype: string
283 """
284 return DOMAIN_SHUTDOWN_REASONS.get(code, "?")
286 def dom_get(dom):
287 """Get info from xen for an existing domain.
289 @param dom: domain id
290 @type dom: int
291 @return: info or None
292 @rtype: dictionary
293 """
294 try:
295 domlist = xc.domain_getinfo(dom, 1)
296 if domlist and dom == domlist[0]['domid']:
297 return domlist[0]
298 except Exception, err:
299 # ignore missing domain
300 log.trace("domain_getinfo(%d) failed, ignoring: %s", dom, str(err))
301 return None
303 from xen.xend.server.pciif import parse_pci_name, PciDevice,\
304 get_assigned_pci_devices, get_all_assigned_pci_devices
307 def do_FLR(domid, is_hvm):
308 dev_str_list = get_assigned_pci_devices(domid)
310 for dev_str in dev_str_list:
311 try:
312 dev = PciDevice(parse_pci_name(dev_str))
313 except Exception, e:
314 raise VmError("pci: failed to locate device and "+
315 "parse it's resources - "+str(e))
316 dev.do_FLR(is_hvm, xoptions.get_pci_dev_assign_strict_check())
318 class XendDomainInfo:
319 """An object represents a domain.
321 @TODO: try to unify dom and domid, they mean the same thing, but
322 xc refers to it as dom, and everywhere else, including
323 xenstore it is domid. The best way is to change xc's
324 python interface.
326 @ivar info: Parsed configuration
327 @type info: dictionary
328 @ivar domid: Domain ID (if VM has started)
329 @type domid: int or None
330 @ivar guest_bitsize: the bitsize of guest
331 @type guest_bitsize: int or None
332 @ivar alloc_mem: the memory domain allocated when booting
333 @type alloc_mem: int or None
334 @ivar vmpath: XenStore path to this VM.
335 @type vmpath: string
336 @ivar dompath: XenStore path to this Domain.
337 @type dompath: string
338 @ivar image: Reference to the VM Image.
339 @type image: xen.xend.image.ImageHandler
340 @ivar store_port: event channel to xenstored
341 @type store_port: int
342 @ivar console_port: event channel to xenconsoled
343 @type console_port: int
344 @ivar store_mfn: xenstored mfn
345 @type store_mfn: int
346 @ivar console_mfn: xenconsoled mfn
347 @type console_mfn: int
348 @ivar notes: OS image notes
349 @type notes: dictionary
350 @ivar vmWatch: reference to a watch on the xenstored vmpath
351 @type vmWatch: xen.xend.xenstore.xswatch
352 @ivar shutdownWatch: reference to watch on the xenstored domain shutdown
353 @type shutdownWatch: xen.xend.xenstore.xswatch
354 @ivar shutdownStartTime: UNIX Time when domain started shutting down.
355 @type shutdownStartTime: float or None
356 @ivar restart_in_progress: Is a domain restart thread running?
357 @type restart_in_progress: bool
358 # @ivar state: Domain state
359 # @type state: enum(DOM_STATE_HALTED, DOM_STATE_RUNNING, ...)
360 @ivar state_updated: lock for self.state
361 @type state_updated: threading.Condition
362 @ivar refresh_shutdown_lock: lock for polling shutdown state
363 @type refresh_shutdown_lock: threading.Condition
364 @ivar _deviceControllers: device controller cache for this domain
365 @type _deviceControllers: dict 'string' to DevControllers
366 """
368 def __init__(self, info, domid = None, dompath = None, augment = False,
369 priv = False, resume = False, vmpath = None):
370 """Constructor for a domain
372 @param info: parsed configuration
373 @type info: dictionary
374 @keyword domid: Set initial domain id (if any)
375 @type domid: int
376 @keyword dompath: Set initial dompath (if any)
377 @type dompath: string
378 @keyword augment: Augment given info with xenstored VM info
379 @type augment: bool
380 @keyword priv: Is a privileged domain (Dom 0)
381 @type priv: bool
382 @keyword resume: Is this domain being resumed?
383 @type resume: bool
384 """
386 self.info = info
387 if domid == None:
388 self.domid = self.info.get('domid')
389 else:
390 self.domid = domid
391 self.guest_bitsize = None
392 self.alloc_mem = None
394 maxmem = self.info.get('memory_static_max', 0)
395 memory = self.info.get('memory_dynamic_max', 0)
397 if maxmem > memory:
398 self.pod_enabled = True
399 else:
400 self.pod_enabled = False
402 #REMOVE: uuid is now generated in XendConfig
403 #if not self._infoIsSet('uuid'):
404 # self.info['uuid'] = uuid.toString(uuid.create())
406 # Find a unique /vm/<uuid>/<integer> path if not specified.
407 # This avoids conflict between pre-/post-migrate domains when doing
408 # localhost relocation.
409 self.vmpath = vmpath
410 i = 0
411 while self.vmpath == None:
412 self.vmpath = XS_VMROOT + self.info['uuid']
413 if i != 0:
414 self.vmpath = self.vmpath + '-' + str(i)
415 try:
416 if self._readVm("uuid"):
417 self.vmpath = None
418 i = i + 1
419 except:
420 pass
422 self.dompath = dompath
424 self.image = None
425 self.store_port = None
426 self.store_mfn = None
427 self.console_port = None
428 self.console_mfn = None
430 self.native_protocol = None
432 self.vmWatch = None
433 self.shutdownWatch = None
434 self.shutdownStartTime = None
435 self._resume = resume
436 self.restart_in_progress = False
438 self.state_updated = threading.Condition()
439 self.refresh_shutdown_lock = threading.Condition()
440 self._stateSet(DOM_STATE_HALTED)
442 self._deviceControllers = {}
444 for state in DOM_STATES_OLD:
445 self.info[state] = 0
447 if augment:
448 self._augmentInfo(priv)
450 self._checkName(self.info['name_label'])
452 self.metrics = XendVMMetrics(uuid.createString(), self)
455 #
456 # Public functions available through XMLRPC
457 #
460 def start(self, is_managed = False):
461 """Attempts to start the VM by do the appropriate
462 initialisation if it not started.
463 """
464 from xen.xend import XendDomain
466 if self._stateGet() in (XEN_API_VM_POWER_STATE_HALTED, XEN_API_VM_POWER_STATE_SUSPENDED, XEN_API_VM_POWER_STATE_CRASHED):
467 try:
468 XendTask.log_progress(0, 30, self._constructDomain)
469 XendTask.log_progress(31, 60, self._initDomain)
471 XendTask.log_progress(61, 70, self._storeVmDetails)
472 XendTask.log_progress(71, 80, self._storeDomDetails)
473 XendTask.log_progress(81, 90, self._registerWatches)
474 XendTask.log_progress(91, 100, self.refreshShutdown)
476 xendomains = XendDomain.instance()
478 # save running configuration if XendDomains believe domain is
479 # persistent
480 if is_managed:
481 xendomains.managed_config_save(self)
482 except:
483 log.exception('VM start failed')
484 self.destroy()
485 raise
486 else:
487 raise XendError('VM already running')
489 def resume(self):
490 """Resumes a domain that has come back from suspension."""
491 state = self._stateGet()
492 if state in (DOM_STATE_SUSPENDED, DOM_STATE_HALTED):
493 try:
494 self._constructDomain()
496 try:
497 self._setCPUAffinity()
498 except:
499 # usually a CPU we want to set affinity to does not exist
500 # we just ignore it so that the domain can still be restored
501 log.warn("Cannot restore CPU affinity")
503 self._setSchedParams()
504 self._storeVmDetails()
505 self._createChannels()
506 self._createDevices()
507 self._storeDomDetails()
508 self._endRestore()
509 except:
510 log.exception('VM resume failed')
511 self.destroy()
512 raise
513 else:
514 raise XendError('VM is not suspended; it is %s'
515 % XEN_API_VM_POWER_STATE[state])
517 def shutdown(self, reason):
518 """Shutdown a domain by signalling this via xenstored."""
519 log.debug('XendDomainInfo.shutdown(%s)', reason)
520 if self._stateGet() in (DOM_STATE_SHUTDOWN, DOM_STATE_HALTED,):
521 raise XendError('Domain cannot be shutdown')
523 if self.domid == 0:
524 raise XendError('Domain 0 cannot be shutdown')
526 if reason not in DOMAIN_SHUTDOWN_REASONS.values():
527 raise XendError('Invalid reason: %s' % reason)
528 self.storeDom("control/shutdown", reason)
530 # HVM domain shuts itself down only if it has PV drivers
531 if self.info.is_hvm():
532 hvm_pvdrv = xc.hvm_get_param(self.domid, HVM_PARAM_CALLBACK_IRQ)
533 hvm_s_state = xc.hvm_get_param(self.domid, HVM_PARAM_ACPI_S_STATE)
534 if not hvm_pvdrv or hvm_s_state != 0:
535 code = REVERSE_DOMAIN_SHUTDOWN_REASONS[reason]
536 log.info("HVM save:remote shutdown dom %d!", self.domid)
537 xc.domain_shutdown(self.domid, code)
539 def pause(self):
540 """Pause domain
542 @raise XendError: Failed pausing a domain
543 """
544 try:
545 if(self.domid):
546 # get all blktap2 devices
547 dev = xstransact.List(self.vmpath + '/device/tap2')
548 for x in dev:
549 path = self.getDeviceController('tap2').readBackend(x, 'params')
550 if path and path.startswith(TAPDISK_DEVICE):
551 try:
552 _minor, _dev, ctrl = parseDeviceString(path)
553 #pause the disk
554 f = open(ctrl + '/pause', 'w')
555 f.write('pause');
556 f.close()
557 except:
558 pass
559 except Exception, ex:
560 log.warn('Could not pause blktap disk.');
562 try:
563 xc.domain_pause(self.domid)
564 self._stateSet(DOM_STATE_PAUSED)
565 except Exception, ex:
566 log.exception(ex)
567 raise XendError("Domain unable to be paused: %s" % str(ex))
569 def unpause(self):
570 """Unpause domain
572 @raise XendError: Failed unpausing a domain
573 """
574 try:
575 if(self.domid):
576 dev = xstransact.List(self.vmpath + '/device/tap2')
577 for x in dev:
578 path = self.getDeviceController('tap2').readBackend(x, 'params')
579 if path and path.startswith(TAPDISK_DEVICE):
580 try:
581 #Figure out the sysfs path.
582 _minor, _dev, ctrl = parseDeviceString(path)
583 #unpause the disk
584 if(os.path.exists(ctrl + '/resume')):
585 f = open(ctrl + '/resume', 'w');
586 f.write('resume');
587 f.close();
588 except:
589 pass
591 except Exception, ex:
592 log.warn('Could not unpause blktap disk: %s' % str(ex));
594 try:
595 xc.domain_unpause(self.domid)
596 self._stateSet(DOM_STATE_RUNNING)
597 except Exception, ex:
598 log.exception(ex)
599 raise XendError("Domain unable to be unpaused: %s" % str(ex))
601 def send_sysrq(self, key):
602 """ Send a Sysrq equivalent key via xenstored."""
603 if self._stateGet() not in (DOM_STATE_RUNNING, DOM_STATE_PAUSED):
604 raise XendError("Domain '%s' is not started" % self.info['name_label'])
606 asserts.isCharConvertible(key)
607 self.storeDom("control/sysrq", '%c' % key)
609 def pci_device_configure_boot(self):
611 if not self.info.is_hvm():
612 return
614 devid = '0'
615 first = True
616 dev_info = self._getDeviceInfo_pci(devid)
617 if dev_info is None:
618 return
620 # get the virtual slot info from xenstore
621 dev_uuid = sxp.child_value(dev_info, 'uuid')
622 pci_conf = self.info['devices'][dev_uuid][1]
623 pci_devs = pci_conf['devs']
625 # Keep a set of keys that are done rather than
626 # just itterating through set(map(..., pci_devs))
627 # to preserve any order information present.
628 done = set()
629 for key in map(lambda x: x['key'], pci_devs):
630 if key in done:
631 continue
632 done |= set([key])
633 dev = filter(lambda x: x['key'] == key, pci_devs)
635 head_dev = dev.pop()
636 dev_sxp = pci_convert_dict_to_sxp(head_dev, 'Initialising',
637 'Booting')
638 self.pci_device_configure(dev_sxp, first_dev = first)
639 first = False
641 # That is all for single-function virtual devices
642 if len(dev) == 0:
643 continue
645 if int(head_dev['vdevfn'], 16) & AUTO_PHP_SLOT:
646 new_dev_info = self._getDeviceInfo_pci(devid)
647 if new_dev_info is None:
648 continue
649 new_dev_uuid = sxp.child_value(new_dev_info, 'uuid')
650 new_pci_conf = self.info['devices'][new_dev_uuid][1]
651 new_pci_devs = new_pci_conf['devs']
653 new_head_dev = filter(lambda x: pci_dict_cmp(x, head_dev),
654 new_pci_devs)[0]
656 if int(new_head_dev['vdevfn'], 16) & AUTO_PHP_SLOT:
657 continue
659 vdevfn = PCI_SLOT(int(new_head_dev['vdevfn'], 16))
660 new_dev = []
661 for i in dev:
662 i['vdevfn'] = '0x%02x' % \
663 PCI_DEVFN(vdevfn,
664 PCI_FUNC(int(i['vdevfn'], 16)))
665 new_dev.append(i)
667 dev = new_dev
669 for i in dev:
670 dev_sxp = pci_convert_dict_to_sxp(i, 'Initialising', 'Booting')
671 self.pci_device_configure(dev_sxp)
673 def hvm_pci_device_create(self, dev_config):
674 log.debug("XendDomainInfo.hvm_pci_device_create: %s"
675 % scrub_password(dev_config))
677 if not self.info.is_hvm():
678 raise VmError("hvm_pci_device_create called on non-HVM guest")
680 #all the PCI devs share one conf node
681 devid = '0'
683 new_dev = dev_config['devs'][0]
684 dev_info = self._getDeviceInfo_pci(devid)#from self.info['devices']
686 #check conflict before trigger hotplug event
687 if dev_info is not None:
688 dev_uuid = sxp.child_value(dev_info, 'uuid')
689 pci_conf = self.info['devices'][dev_uuid][1]
690 pci_devs = pci_conf['devs']
691 for x in pci_devs:
692 if (int(x['vdevfn'], 16) == int(new_dev['vdevfn'], 16) and
693 not int(x['vdevfn'], 16) & AUTO_PHP_SLOT):
694 raise VmError("vdevfn %s already have a device." %
695 (new_dev['vdevfn']))
697 if (pci_dict_cmp(x, new_dev)):
698 raise VmError("device is already inserted")
700 # Test whether the devices can be assigned.
701 self.pci_dev_check_attachability_and_do_FLR(new_dev)
703 return self.hvm_pci_device_insert_dev(new_dev)
705 def iommu_check_pod_mode(self):
706 """ Disallow PCI device assignment if pod is enabled. """
707 if self.pod_enabled:
708 raise VmError("failed to assign device since pod is enabled")
710 def pci_dev_check_assignability_and_do_FLR(self, config):
711 """ In the case of static device assignment(i.e., the 'pci' string in
712 guest config file), we check if the device(s) specified in the 'pci'
713 can be assigned to guest or not; if yes, we do_FLR the device(s).
714 """
716 self.iommu_check_pod_mode()
717 pci_dev_ctrl = self.getDeviceController('pci')
718 return pci_dev_ctrl.dev_check_assignability_and_do_FLR(config)
720 def pci_dev_check_attachability_and_do_FLR(self, new_dev):
721 """ In the case of dynamic device assignment(i.e., xm pci-attach), we
722 check if the device can be attached to guest or not; if yes, we do_FLR
723 the device.
724 """
726 self.iommu_check_pod_mode()
728 # Test whether the devices can be assigned
730 pci_name = pci_dict_to_bdf_str(new_dev)
731 _all_assigned_pci_devices = get_all_assigned_pci_devices(self.domid)
732 if pci_name in _all_assigned_pci_devices:
733 raise VmError("failed to assign device %s that has"
734 " already been assigned to other domain." % pci_name)
736 # Test whether the device is owned by pciback or pci-stub.
737 try:
738 pci_device = PciDevice(new_dev)
739 except Exception, e:
740 raise VmError("pci: failed to locate device and "+
741 "parse its resources - "+str(e))
742 if pci_device.driver!='pciback' and pci_device.driver!='pci-stub':
743 raise VmError(("pci: PCI Backend and pci-stub don't own device %s")\
744 %pci_device.name)
746 strict_check = xoptions.get_pci_dev_assign_strict_check()
747 # Check non-page-aligned MMIO BAR.
748 if pci_device.has_non_page_aligned_bar and strict_check:
749 raise VmError("pci: %s: non-page-aligned MMIO BAR found." % \
750 pci_device.name)
752 # PV guest has less checkings.
753 if not self.info.is_hvm():
754 # try to do FLR for PV guest
755 pci_device.do_FLR(self.info.is_hvm(), strict_check)
756 return
758 if not strict_check:
759 return
761 # Check if there is intermediate PCIe switch bewteen the device and
762 # Root Complex.
763 if pci_device.is_behind_switch_lacking_acs():
764 err_msg = 'pci: to avoid potential security issue, %s is not'+\
765 ' allowed to be assigned to guest since it is behind'+\
766 ' PCIe switch that does not support or enable ACS.'
767 raise VmError(err_msg % pci_device.name)
769 # Check the co-assignment.
770 # To pci-attach a device D to domN, we should ensure each of D's
771 # co-assignment devices hasn't been assigned, or has been assigned to
772 # domN.
773 coassignment_list = pci_device.find_coassigned_devices()
774 pci_device.devs_check_driver(coassignment_list)
775 assigned_pci_device_str_list = self._get_assigned_pci_devices()
776 for pci_str in coassignment_list:
777 if not (pci_str in _all_assigned_pci_devices):
778 continue
779 if not pci_str in assigned_pci_device_str_list:
780 raise VmError(("pci: failed to pci-attach %s to domain %s" + \
781 " because one of its co-assignment device %s has been" + \
782 " assigned to other domain." \
783 )% (pci_device.name, self.info['name_label'], pci_str))
785 # try to do FLR for HVM guest
786 pci_device.do_FLR(self.info.is_hvm(), strict_check)
788 def hvm_pci_device_insert(self, dev_config):
789 log.debug("XendDomainInfo.hvm_pci_device_insert: %s"
790 % scrub_password(dev_config))
792 if not self.info.is_hvm():
793 raise VmError("hvm_pci_device_create called on non-HVM guest")
795 new_dev = dev_config['devs'][0]
797 return self.hvm_pci_device_insert_dev(new_dev)
799 def hvm_pci_device_insert_dev(self, new_dev):
800 log.debug("XendDomainInfo.hvm_pci_device_insert_dev: %s"
801 % scrub_password(new_dev))
803 if self.domid is not None:
804 opts = ''
805 optslist = []
806 pci_defopts = []
807 if 'pci_msitranslate' in self.info['platform']:
808 pci_defopts.append(['msitranslate',
809 str(self.info['platform']['pci_msitranslate'])])
810 if 'pci_power_mgmt' in self.info['platform']:
811 pci_defopts.append(['power_mgmt',
812 str(self.info['platform']['pci_power_mgmt'])])
813 if new_dev.has_key('opts'):
814 optslist += new_dev['opts']
816 if optslist or pci_defopts:
817 opts = ',' + serialise_pci_opts(
818 append_default_pci_opts(optslist, pci_defopts))
820 bdf_str = "%s@%02x%s" % (pci_dict_to_bdf_str(new_dev),
821 int(new_dev['vdevfn'], 16), opts)
822 log.debug("XendDomainInfo.hvm_pci_device_insert_dev: %s" % bdf_str)
823 bdf = xc.assign_device(self.domid, pci_dict_to_xc_str(new_dev))
824 if bdf > 0:
825 raise VmError("Failed to assign device to IOMMU (%s)" % bdf_str)
826 log.debug("pci: assign device %s" % bdf_str)
827 self.image.signalDeviceModel('pci-ins', 'pci-inserted', bdf_str)
829 vdevfn = xstransact.Read("/local/domain/0/device-model/%i/parameter"
830 % self.getDomid())
831 try:
832 vdevfn_int = int(vdevfn, 16)
833 except ValueError:
834 raise VmError(("Cannot pass-through PCI function '%s'. " +
835 "Device model reported an error: %s") %
836 (bdf_str, vdevfn))
837 else:
838 vdevfn = new_dev['vdevfn']
840 return vdevfn
843 def device_create(self, dev_config):
844 """Create a new device.
846 @param dev_config: device configuration
847 @type dev_config: SXP object (parsed config)
848 """
849 log.debug("XendDomainInfo.device_create: %s" % scrub_password(dev_config))
850 dev_type = sxp.name(dev_config)
851 dev_uuid = self.info.device_add(dev_type, cfg_sxp = dev_config)
852 dev_config_dict = self.info['devices'][dev_uuid][1]
853 log.debug("XendDomainInfo.device_create: %s" % scrub_password(dev_config_dict))
855 if dev_type == 'vif':
856 for x in dev_config:
857 if x != 'vif' and x[0] == 'mac':
858 if not re.match('^([0-9a-f]{2}:){5}[0-9a-f]{2}$', x[1], re.I):
859 log.error("Virtual network interface creation error - invalid MAC Address entered: %s", x[1])
860 raise VmError("Cannot create a new virtual network interface - MAC address is not valid!");
862 if self.domid is not None:
863 try:
864 dev_config_dict['devid'] = devid = \
865 self._createDevice(dev_type, dev_config_dict)
866 if dev_type == 'tap2':
867 # createDevice may create a blktap1 device if blktap2 is not
868 # installed or if the blktap driver is not supported in
869 # blktap1
870 dev_type = self.getBlockDeviceClass(devid)
871 self._waitForDevice(dev_type, devid)
872 except VmError, ex:
873 del self.info['devices'][dev_uuid]
874 if dev_type == 'pci':
875 for dev in dev_config_dict['devs']:
876 XendAPIStore.deregister(dev['uuid'], 'DPCI')
877 elif dev_type == 'vscsi':
878 for dev in dev_config_dict['devs']:
879 XendAPIStore.deregister(dev['uuid'], 'DSCSI')
880 elif dev_type == 'tap' or dev_type == 'tap2':
881 self.info['vbd_refs'].remove(dev_uuid)
882 else:
883 self.info['%s_refs' % dev_type].remove(dev_uuid)
884 raise ex
885 else:
886 devid = None
888 xen.xend.XendDomain.instance().managed_config_save(self)
889 return self.getDeviceController(dev_type).sxpr(devid)
892 def pci_device_configure(self, dev_sxp, devid = 0, first_dev = False):
893 """Configure an existing pci device.
895 @param dev_sxp: device configuration
896 @type dev_sxp: SXP object (parsed config)
897 @param devid: device id
898 @type devid: int
899 @return: Returns True if successfully updated device
900 @rtype: boolean
901 """
902 log.debug("XendDomainInfo.pci_device_configure: %s"
903 % scrub_password(dev_sxp))
905 dev_class = sxp.name(dev_sxp)
907 if dev_class != 'pci':
908 return False
910 pci_state = sxp.child_value(dev_sxp, 'state')
911 pci_sub_state = sxp.child_value(dev_sxp, 'sub_state')
912 existing_dev_info = self._getDeviceInfo_pci(devid)
914 if existing_dev_info is None and pci_state != 'Initialising':
915 raise XendError("Cannot detach when pci platform does not exist")
917 pci_dev = sxp.children(dev_sxp, 'dev')[0]
918 dev_config = pci_convert_sxp_to_dict(dev_sxp)
919 dev = dev_config['devs'][0]
921 stubdomid = self.getStubdomDomid()
922 # Do HVM specific processing
923 if self.info.is_hvm():
924 from xen.xend import XendDomain
925 if pci_state == 'Initialising':
926 if stubdomid is not None :
927 XendDomain.instance().domain_lookup(stubdomid).pci_device_configure(dev_sxp[:])
929 # HVM PCI device attachment
930 if pci_sub_state == 'Booting':
931 vdevfn = self.hvm_pci_device_insert(dev_config)
932 else:
933 vdevfn = self.hvm_pci_device_create(dev_config)
934 # Update vdevfn
935 dev['vdevfn'] = vdevfn
936 for n in sxp.children(pci_dev):
937 if(n[0] == 'vdevfn'):
938 n[1] = vdevfn
939 else:
940 # HVM PCI device detachment
941 existing_dev_uuid = sxp.child_value(existing_dev_info, 'uuid')
942 existing_pci_conf = self.info['devices'][existing_dev_uuid][1]
943 existing_pci_devs = existing_pci_conf['devs']
944 new_devs = filter(lambda x: pci_dict_cmp(x, dev),
945 existing_pci_devs)
946 if len(new_devs) < 0:
947 raise VmError("Device %s is not connected" %
948 pci_dict_to_bdf_str(dev))
949 new_dev = new_devs[0]
950 # Only tell qemu-dm to unplug function 0.
951 # When unplugging a function, all functions in the
952 # same vslot must be unplugged, and function 0 must
953 # be one of the functions present when a vslot is
954 # hot-plugged. Telling qemu-dm to unplug function 0
955 # also tells it to unplug all other functions in the
956 # same vslot.
957 if (PCI_FUNC(int(new_dev['vdevfn'], 16)) == 0):
958 self.hvm_destroyPCIDevice(new_dev)
959 if stubdomid is not None :
960 XendDomain.instance().domain_lookup(stubdomid).pci_device_configure(dev_sxp[:])
961 # Update vdevfn
962 dev['vdevfn'] = new_dev['vdevfn']
963 for n in sxp.children(pci_dev):
964 if(n[0] == 'vdevfn'):
965 n[1] = new_dev['vdevfn']
966 else:
967 # Do PV specific checking
968 if pci_state == 'Initialising':
969 # PV PCI device attachment
970 self.pci_dev_check_attachability_and_do_FLR(dev)
972 # If pci platform does not exist, create and exit.
973 if existing_dev_info is None :
974 self.device_create(dev_sxp)
975 return True
977 if first_dev is True :
978 existing_dev_uuid = sxp.child_value(existing_dev_info, 'uuid')
979 existing_pci_conf = self.info['devices'][existing_dev_uuid][1]
980 devid = self._createDevice('pci', existing_pci_conf)
981 self.info['devices'][existing_dev_uuid][1]['devid'] = devid
983 if self.domid is not None:
984 # use DevController.reconfigureDevice to change device config
985 dev_control = self.getDeviceController(dev_class)
986 dev_uuid = dev_control.reconfigureDevice(devid, dev_config)
987 if not self.info.is_hvm() and not self.info.is_stubdom():
988 # in PV case, wait until backend state becomes connected.
989 dev_control.waitForDevice_reconfigure(devid)
990 num_devs = dev_control.cleanupDevice(devid)
992 # update XendConfig with new device info
993 if dev_uuid:
994 new_dev_sxp = dev_control.configuration(devid)
995 self.info.device_update(dev_uuid, new_dev_sxp)
997 # If there is no device left, destroy pci and remove config.
998 if num_devs == 0:
999 if self.info.is_hvm():
1000 self.destroyDevice('pci', devid, True)
1001 else:
1002 self.destroyDevice('pci', devid)
1003 del self.info['devices'][dev_uuid]
1004 else:
1005 new_dev_sxp = ['pci']
1006 for cur_dev in sxp.children(existing_dev_info, 'dev'):
1007 if pci_state == 'Closing':
1008 if int(dev['domain'], 16) == int(sxp.child_value(cur_dev, 'domain'), 16) and \
1009 int(dev['bus'], 16) == int(sxp.child_value(cur_dev, 'bus'), 16) and \
1010 int(dev['slot'], 16) == int(sxp.child_value(cur_dev, 'slot'), 16) and \
1011 int(dev['func'], 16) == int(sxp.child_value(cur_dev, 'func'), 16):
1012 continue
1013 new_dev_sxp.append(cur_dev)
1015 if pci_state == 'Initialising' and pci_sub_state != 'Booting':
1016 for new_dev in sxp.children(dev_sxp, 'dev'):
1017 new_dev_sxp.append(new_dev)
1019 dev_uuid = sxp.child_value(existing_dev_info, 'uuid')
1020 self.info.device_update(dev_uuid, new_dev_sxp)
1022 # If there is no device left, remove config.
1023 if len(sxp.children(new_dev_sxp, 'dev')) == 0:
1024 del self.info['devices'][dev_uuid]
1026 xen.xend.XendDomain.instance().managed_config_save(self)
1028 return True
1030 def vscsi_device_configure(self, dev_sxp):
1031 """Configure an existing vscsi device.
1032 quoted pci funciton
1033 """
1034 def _is_vscsi_defined(dev_info, p_devs = None, v_devs = None):
1035 if not dev_info:
1036 return False
1037 for dev in sxp.children(dev_info, 'dev'):
1038 if p_devs is not None:
1039 if sxp.child_value(dev, 'p-dev') in p_devs:
1040 return True
1041 if v_devs is not None:
1042 if sxp.child_value(dev, 'v-dev') in v_devs:
1043 return True
1044 return False
1046 def _vscsi_be(be):
1047 be_xdi = xen.xend.XendDomain.instance().domain_lookup_nr(be)
1048 if be_xdi is not None:
1049 be_domid = be_xdi.getDomid()
1050 if be_domid is not None:
1051 return str(be_domid)
1052 return str(be)
1054 dev_class = sxp.name(dev_sxp)
1055 if dev_class != 'vscsi':
1056 return False
1058 dev_config = self.info.vscsi_convert_sxp_to_dict(dev_sxp)
1059 devs = dev_config['devs']
1060 v_devs = [d['v-dev'] for d in devs]
1061 state = devs[0]['state']
1062 req_devid = int(devs[0]['devid'])
1063 cur_dev_sxp = self._getDeviceInfo_vscsi(req_devid)
1065 if state == xenbusState['Initialising']:
1066 # new create
1067 # If request devid does not exist, create and exit.
1068 p_devs = [d['p-dev'] for d in devs]
1069 for dev_type, dev_info in self.info.all_devices_sxpr():
1070 if dev_type != 'vscsi':
1071 continue
1072 if _is_vscsi_defined(dev_info, p_devs = p_devs):
1073 raise XendError('The physical device "%s" is already defined' % \
1074 p_devs[0])
1075 if cur_dev_sxp is None:
1076 self.device_create(dev_sxp)
1077 return True
1079 if _is_vscsi_defined(cur_dev_sxp, v_devs = v_devs):
1080 raise XendError('The virtual device "%s" is already defined' % \
1081 v_devs[0])
1083 if int(dev_config['feature-host']) != \
1084 int(sxp.child_value(cur_dev_sxp, 'feature-host')):
1085 raise XendError('The physical device "%s" cannot define '
1086 'because mode is different' % devs[0]['p-dev'])
1088 new_be = dev_config.get('backend', None)
1089 if new_be is not None:
1090 cur_be = sxp.child_value(cur_dev_sxp, 'backend', None)
1091 if cur_be is None:
1092 cur_be = xen.xend.XendDomain.DOM0_ID
1093 new_be_dom = _vscsi_be(new_be)
1094 cur_be_dom = _vscsi_be(cur_be)
1095 if new_be_dom != cur_be_dom:
1096 raise XendError('The physical device "%s" cannot define '
1097 'because backend is different' % devs[0]['p-dev'])
1099 elif state == xenbusState['Closing']:
1100 if not _is_vscsi_defined(cur_dev_sxp, v_devs = v_devs):
1101 raise XendError("Cannot detach vscsi device does not exist")
1103 if self.domid is not None:
1104 # use DevController.reconfigureDevice to change device config
1105 dev_control = self.getDeviceController(dev_class)
1106 dev_uuid = dev_control.reconfigureDevice(req_devid, dev_config)
1107 dev_control.waitForDevice_reconfigure(req_devid)
1108 num_devs = dev_control.cleanupDevice(req_devid)
1110 # update XendConfig with new device info
1111 if dev_uuid:
1112 new_dev_sxp = dev_control.configuration(req_devid)
1113 self.info.device_update(dev_uuid, new_dev_sxp)
1115 # If there is no device left, destroy vscsi and remove config.
1116 if num_devs == 0:
1117 self.destroyDevice('vscsi', req_devid)
1118 del self.info['devices'][dev_uuid]
1120 else:
1121 new_dev_sxp = ['vscsi']
1122 cur_mode = sxp.children(cur_dev_sxp, 'feature-host')[0]
1123 new_dev_sxp.append(cur_mode)
1124 try:
1125 cur_be = sxp.children(cur_dev_sxp, 'backend')[0]
1126 new_dev_sxp.append(cur_be)
1127 except IndexError:
1128 pass
1130 for cur_dev in sxp.children(cur_dev_sxp, 'dev'):
1131 if state == xenbusState['Closing']:
1132 if int(cur_mode[1]) == 1:
1133 continue
1134 if sxp.child_value(cur_dev, 'v-dev') in v_devs:
1135 continue
1136 new_dev_sxp.append(cur_dev)
1138 if state == xenbusState['Initialising']:
1139 for new_dev in sxp.children(dev_sxp, 'dev'):
1140 new_dev_sxp.append(new_dev)
1142 dev_uuid = sxp.child_value(cur_dev_sxp, 'uuid')
1143 self.info.device_update(dev_uuid, new_dev_sxp)
1145 # If there is only 'vscsi' in new_dev_sxp, remove the config.
1146 if len(sxp.children(new_dev_sxp, 'dev')) == 0:
1147 del self.info['devices'][dev_uuid]
1149 xen.xend.XendDomain.instance().managed_config_save(self)
1151 return True
1153 def vusb_device_configure(self, dev_sxp, devid):
1154 """Configure a virtual root port.
1155 """
1156 dev_class = sxp.name(dev_sxp)
1157 if dev_class != 'vusb':
1158 return False
1160 dev_config = {}
1161 ports = sxp.child(dev_sxp, 'port')
1162 for port in ports[1:]:
1163 try:
1164 num, bus = port
1165 dev_config['port-%i' % int(num)] = str(bus)
1166 except TypeError:
1167 pass
1169 dev_control = self.getDeviceController(dev_class)
1170 dev_control.reconfigureDevice(devid, dev_config)
1172 return True
1174 def device_configure(self, dev_sxp, devid = None):
1175 """Configure an existing device.
1177 @param dev_config: device configuration
1178 @type dev_config: SXP object (parsed config)
1179 @param devid: device id
1180 @type devid: int
1181 @return: Returns True if successfully updated device
1182 @rtype: boolean
1183 """
1185 # convert device sxp to a dict
1186 dev_class = sxp.name(dev_sxp)
1187 dev_config = {}
1189 if dev_class == 'pci':
1190 return self.pci_device_configure(dev_sxp)
1192 if dev_class == 'vscsi':
1193 return self.vscsi_device_configure(dev_sxp)
1195 if dev_class == 'vusb':
1196 return self.vusb_device_configure(dev_sxp, devid)
1198 for opt_val in dev_sxp[1:]:
1199 try:
1200 dev_config[opt_val[0]] = opt_val[1]
1201 except IndexError:
1202 pass
1204 dev_control = self.getDeviceController(dev_class)
1205 if devid is None:
1206 dev = dev_config.get('dev', '')
1207 if not dev:
1208 raise VmError('Block device must have virtual details specified')
1209 if 'ioemu:' in dev:
1210 (_, dev) = dev.split(':', 1)
1211 try:
1212 (dev, _) = dev.split(':', 1) # Remove ":disk" or ":cdrom"
1213 except ValueError:
1214 pass
1215 devid = dev_control.convertToDeviceNumber(dev)
1216 dev_info = self._getDeviceInfo_vbd(devid)
1217 if dev_info is None:
1218 raise VmError("Device %s not connected" % devid)
1219 dev_uuid = sxp.child_value(dev_info, 'uuid')
1221 if self.domid is not None:
1222 # use DevController.reconfigureDevice to change device config
1223 dev_control.reconfigureDevice(devid, dev_config)
1224 else:
1225 (_, new_b, new_f) = dev_control.getDeviceDetails(dev_config)
1226 if (new_f['device-type'] == 'cdrom' and
1227 sxp.child_value(dev_info, 'dev').endswith(':cdrom') and
1228 new_b['mode'] == 'r' and
1229 sxp.child_value(dev_info, 'mode') == 'r'):
1230 pass
1231 else:
1232 raise VmError('Refusing to reconfigure device %s:%d to %s' %
1233 (dev_class, devid, dev_config))
1235 # update XendConfig with new device info
1236 self.info.device_update(dev_uuid, dev_sxp)
1237 xen.xend.XendDomain.instance().managed_config_save(self)
1239 return True
1241 def waitForDevices(self):
1242 """Wait for this domain's configured devices to connect.
1244 @raise VmError: if any device fails to initialise.
1245 """
1246 for devclass in XendDevices.valid_devices():
1247 self.getDeviceController(devclass).waitForDevices()
1249 def hvm_destroyPCIDevice(self, pci_dev):
1250 log.debug("hvm_destroyPCIDevice: %s", pci_dev)
1252 if not self.info.is_hvm():
1253 raise VmError("hvm_destroyPCIDevice called on non-HVM guest")
1255 # Check the co-assignment.
1256 # To pci-detach a device D from domN, we should ensure: for each DD in the
1257 # list of D's co-assignment devices, DD is not assigned (to domN).
1259 from xen.xend.server.pciif import PciDevice
1260 try:
1261 pci_device = PciDevice(pci_dev)
1262 except Exception, e:
1263 raise VmError("pci: failed to locate device and "+
1264 "parse its resources - "+str(e))
1265 coassignment_list = pci_device.find_coassigned_devices()
1266 coassignment_list.remove(pci_device.name)
1267 assigned_pci_device_str_list = self._get_assigned_pci_devices()
1268 for pci_str in coassignment_list:
1269 if xoptions.get_pci_dev_assign_strict_check() and \
1270 pci_str in assigned_pci_device_str_list:
1271 raise VmError(("pci: failed to pci-detach %s from domain %s" + \
1272 " because one of its co-assignment device %s is still " + \
1273 " assigned to the domain." \
1274 )% (pci_device.name, self.info['name_label'], pci_str))
1277 bdf_str = pci_dict_to_bdf_str(pci_dev)
1278 log.info("hvm_destroyPCIDevice:%s:%s!", pci_dev, bdf_str)
1279 if self.domid is not None:
1280 self.image.signalDeviceModel('pci-rem', 'pci-removed', bdf_str)
1282 return 0
1284 def destroyDevice(self, deviceClass, devid, force = False, rm_cfg = False):
1285 log.debug("XendDomainInfo.destroyDevice: deviceClass = %s, device = %s",
1286 deviceClass, devid)
1288 if rm_cfg:
1289 # Convert devid to device number. A device number is
1290 # needed to remove its configuration.
1291 dev = self.getDeviceController(deviceClass).convertToDeviceNumber(devid)
1293 # Save current sxprs. A device number and a backend
1294 # path are needed to remove its configuration but sxprs
1295 # do not have those after calling destroyDevice.
1296 sxprs = self.getDeviceSxprs(deviceClass)
1298 rc = None
1299 if self.domid is not None:
1301 #new blktap implementation may need a sysfs write after everything is torn down.
1302 if deviceClass == 'tap2':
1303 dev = self.getDeviceController(deviceClass).convertToDeviceNumber(devid)
1304 path = self.getDeviceController(deviceClass).readBackend(dev, 'params')
1305 frontpath = self.getDeviceController(deviceClass).frontendPath(dev)
1306 backpath = xstransact.Read(frontpath, "backend")
1307 thread.start_new_thread(self.getDeviceController(deviceClass).finishDeviceCleanup, (backpath, path))
1309 rc = self.getDeviceController(deviceClass).destroyDevice(devid, force)
1310 if not force and rm_cfg:
1311 # The backend path, other than the device itself,
1312 # has to be passed because its accompanied frontend
1313 # path may be void until its removal is actually
1314 # issued. It is probable because destroyDevice is
1315 # issued first.
1316 for dev_num, dev_info in sxprs:
1317 dev_num = int(dev_num)
1318 if dev_num == dev:
1319 for x in dev_info:
1320 if x[0] == 'backend':
1321 backend = x[1]
1322 break
1323 break
1324 self._waitForDevice_destroy(deviceClass, devid, backend)
1326 if rm_cfg and deviceClass != "vif2":
1327 if deviceClass == 'vif':
1328 if self.domid is not None:
1329 mac = ''
1330 for dev_num, dev_info in sxprs:
1331 dev_num = int(dev_num)
1332 if dev_num == dev:
1333 for x in dev_info:
1334 if x[0] == 'mac':
1335 mac = x[1]
1336 break
1337 break
1338 dev_info = self._getDeviceInfo_vif(mac)
1339 else:
1340 _, dev_info = sxprs[dev]
1341 else: # 'vbd' or 'tap' or 'tap2'
1342 dev_info = self._getDeviceInfo_vbd(dev)
1343 # To remove the UUID of the device from refs,
1344 # deviceClass must be always 'vbd'.
1345 deviceClass = 'vbd'
1346 if dev_info is None:
1347 raise XendError("Device %s is not defined" % devid)
1349 dev_uuid = sxp.child_value(dev_info, 'uuid')
1350 del self.info['devices'][dev_uuid]
1351 self.info['%s_refs' % deviceClass].remove(dev_uuid)
1352 xen.xend.XendDomain.instance().managed_config_save(self)
1354 return rc
1356 def getDeviceSxprs(self, deviceClass):
1357 if deviceClass == 'pci':
1358 dev_info = self._getDeviceInfo_pci('0')#from self.info['devices']
1359 if dev_info is None:
1360 return []
1361 dev_uuid = sxp.child_value(dev_info, 'uuid')
1362 pci_devs = self.info['devices'][dev_uuid][1]['devs']
1363 return pci_devs
1364 if self._stateGet() in (DOM_STATE_RUNNING, DOM_STATE_PAUSED, DOM_STATE_CRASHED):
1365 return self.getDeviceController(deviceClass).sxprs()
1366 else:
1367 sxprs = []
1368 dev_num = 0
1369 for dev_type, dev_info in self.info.all_devices_sxpr():
1370 if (deviceClass == 'vbd' and dev_type not in ['vbd', 'tap', 'tap2']) or \
1371 (deviceClass != 'vbd' and dev_type != deviceClass):
1372 continue
1374 if deviceClass == 'vscsi':
1375 vscsi_devs = ['devs', []]
1376 for vscsi_dev in sxp.children(dev_info, 'dev'):
1377 vscsi_dev.append(['frontstate', None])
1378 vscsi_devs[1].append(vscsi_dev)
1379 dev_num = int(sxp.child_value(vscsi_dev, 'devid'))
1380 vscsi_mode = sxp.children(dev_info, 'feature-host')[0]
1381 sxprs.append([dev_num, [vscsi_devs, vscsi_mode]])
1382 elif deviceClass == 'vbd':
1383 dev = sxp.child_value(dev_info, 'dev')
1384 if 'ioemu:' in dev:
1385 (_, dev) = dev.split(':', 1)
1386 try:
1387 (dev_name, _) = dev.split(':', 1) # Remove ":disk" or ":cdrom"
1388 except ValueError:
1389 dev_name = dev
1390 dev_num = self.getDeviceController('vbd').convertToDeviceNumber(dev_name)
1391 sxprs.append([dev_num, dev_info])
1392 else:
1393 sxprs.append([dev_num, dev_info])
1394 dev_num += 1
1395 return sxprs
1397 def getBlockDeviceClass(self, devid):
1398 # if the domain is running we can get the device class from xenstore.
1399 # This is more accurate, as blktap1 devices show up as blktap2 devices
1400 # in the config.
1401 if self._stateGet() in (DOM_STATE_RUNNING, DOM_STATE_PAUSED, DOM_STATE_CRASHED):
1402 # All block devices have a vbd frontend, so we know the frontend path
1403 dev = self.getDeviceController('vbd').convertToDeviceNumber(devid)
1404 frontendPath = "%s/device/vbd/%s" % (self.dompath, dev)
1405 for devclass in XendDevices.valid_devices():
1406 for dev in xstransact.List("%s/device/%s" % (self.vmpath, devclass)):
1407 devFrontendPath = xstransact.Read("%s/device/%s/%s/frontend" % (self.vmpath, devclass, dev))
1408 if frontendPath == devFrontendPath:
1409 return devclass
1411 else: # the domain is not active so we must get the device class
1412 # from the config
1413 # To get a device number from the devid,
1414 # we temporarily use the device controller of VBD.
1415 dev = self.getDeviceController('vbd').convertToDeviceNumber(devid)
1416 dev_info = self._getDeviceInfo_vbd(dev)
1417 if dev_info:
1418 return dev_info[0]
1420 def _getDeviceInfo_vif(self, mac):
1421 for dev_type, dev_info in self.info.all_devices_sxpr():
1422 if dev_type != 'vif':
1423 continue
1424 if mac == sxp.child_value(dev_info, 'mac'):
1425 return dev_info
1427 def _getDeviceInfo_vbd(self, devid):
1428 for dev_type, dev_info in self.info.all_devices_sxpr():
1429 if dev_type != 'vbd' and dev_type != 'tap' and dev_type != 'tap2':
1430 continue
1431 dev = sxp.child_value(dev_info, 'dev')
1432 dev = dev.split(':')[0]
1433 dev = self.getDeviceController(dev_type).convertToDeviceNumber(dev)
1434 if devid == dev:
1435 return dev_info
1437 def _getDeviceInfo_pci(self, devid):
1438 for dev_type, dev_info in self.info.all_devices_sxpr():
1439 if dev_type != 'pci':
1440 continue
1441 return dev_info
1442 return None
1444 def _getDeviceInfo_vscsi(self, devid):
1445 devid = int(devid)
1446 for dev_type, dev_info in self.info.all_devices_sxpr():
1447 if dev_type != 'vscsi':
1448 continue
1449 devs = sxp.children(dev_info, 'dev')
1450 if devid == int(sxp.child_value(devs[0], 'devid')):
1451 return dev_info
1452 return None
1454 def _getDeviceInfo_vusb(self, devid):
1455 for dev_type, dev_info in self.info.all_devices_sxpr():
1456 if dev_type != 'vusb':
1457 continue
1458 return dev_info
1459 return None
1461 def _get_assigned_pci_devices(self, devid = 0):
1462 if self.domid is not None:
1463 return get_assigned_pci_devices(self.domid)
1465 dev_info = self._getDeviceInfo_pci(devid)
1466 if dev_info is None:
1467 return []
1468 dev_uuid = sxp.child_value(dev_info, 'uuid')
1469 pci_conf = self.info['devices'][dev_uuid][1]
1470 return map(pci_dict_to_bdf_str, pci_conf['devs'])
1472 def setMemoryTarget(self, target):
1473 """Set the memory target of this domain.
1474 @param target: In MiB.
1475 """
1476 log.debug("Setting memory target of domain %s (%s) to %d MiB.",
1477 self.info['name_label'], str(self.domid), target)
1479 MiB = 1024 * 1024
1480 memory_cur = self.get_memory_dynamic_max() / MiB
1482 if self.domid == 0:
1483 dom0_min_mem = xoptions.get_dom0_min_mem()
1484 if target < memory_cur and dom0_min_mem > target:
1485 raise XendError("memory_dynamic_max too small")
1487 self._safe_set_memory('memory_dynamic_min', target * MiB)
1488 self._safe_set_memory('memory_dynamic_max', target * MiB)
1490 if self.domid >= 0:
1491 if target > memory_cur:
1492 balloon.free((target - memory_cur) * 1024, self)
1493 self.storeVm("memory", target)
1494 self.storeDom("memory/target", target << 10)
1495 xc.domain_set_target_mem(self.domid,
1496 (target * 1024))
1497 xen.xend.XendDomain.instance().managed_config_save(self)
1499 def setMemoryMaximum(self, limit):
1500 """Set the maximum memory limit of this domain
1501 @param limit: In MiB.
1502 """
1503 log.debug("Setting memory maximum of domain %s (%s) to %d MiB.",
1504 self.info['name_label'], str(self.domid), limit)
1506 maxmem_cur = self.get_memory_static_max()
1507 MiB = 1024 * 1024
1508 self._safe_set_memory('memory_static_max', limit * MiB)
1510 if self.domid >= 0:
1511 maxmem = int(limit) * 1024
1512 try:
1513 return xc.domain_setmaxmem(self.domid, maxmem)
1514 except Exception, ex:
1515 self._safe_set_memory('memory_static_max', maxmem_cur)
1516 raise XendError(str(ex))
1517 xen.xend.XendDomain.instance().managed_config_save(self)
1520 def getVCPUInfo(self):
1521 try:
1522 # We include the domain name and ID, to help xm.
1523 sxpr = ['domain',
1524 ['domid', self.domid],
1525 ['name', self.info['name_label']],
1526 ['vcpu_count', self.info['VCPUs_max']]]
1528 for i in range(0, self.info['VCPUs_max']):
1529 if self.domid is not None:
1530 info = xc.vcpu_getinfo(self.domid, i)
1532 sxpr.append(['vcpu',
1533 ['number', i],
1534 ['online', info['online']],
1535 ['blocked', info['blocked']],
1536 ['running', info['running']],
1537 ['cpu_time', info['cpu_time'] / 1e9],
1538 ['cpu', info['cpu']],
1539 ['cpumap', info['cpumap']]])
1540 else:
1541 sxpr.append(['vcpu',
1542 ['number', i],
1543 ['online', 0],
1544 ['blocked', 0],
1545 ['running', 0],
1546 ['cpu_time', 0.0],
1547 ['cpu', -1],
1548 ['cpumap', self.info['cpus'][i] and \
1549 self.info['cpus'][i] or range(64)]])
1551 return sxpr
1553 except RuntimeError, exn:
1554 raise XendError(str(exn))
1557 def getDomInfo(self):
1558 return dom_get(self.domid)
1561 # internal functions ... TODO: re-categorised
1564 def _augmentInfo(self, priv):
1565 """Augment self.info, as given to us through L{recreate}, with
1566 values taken from the store. This recovers those values known
1567 to xend but not to the hypervisor.
1568 """
1569 augment_entries = XendConfig.LEGACY_XENSTORE_VM_PARAMS[:]
1570 if priv:
1571 augment_entries.remove('memory')
1572 augment_entries.remove('maxmem')
1573 augment_entries.remove('vcpus')
1574 augment_entries.remove('vcpu_avail')
1576 vm_config = self._readVMDetails([(k, XendConfig.LEGACY_CFG_TYPES[k])
1577 for k in augment_entries])
1579 # make returned lists into a dictionary
1580 vm_config = dict(zip(augment_entries, vm_config))
1582 for arg in augment_entries:
1583 val = vm_config[arg]
1584 if val != None:
1585 if arg in XendConfig.LEGACY_CFG_TO_XENAPI_CFG:
1586 xapiarg = XendConfig.LEGACY_CFG_TO_XENAPI_CFG[arg]
1587 self.info[xapiarg] = val
1588 elif arg == "memory":
1589 self.info["static_memory_min"] = val
1590 elif arg == "maxmem":
1591 self.info["static_memory_max"] = val
1592 else:
1593 self.info[arg] = val
1595 # read CPU Affinity
1596 self.info['cpus'] = []
1597 vcpus_info = self.getVCPUInfo()
1598 for vcpu_info in sxp.children(vcpus_info, 'vcpu'):
1599 self.info['cpus'].append(sxp.child_value(vcpu_info, 'cpumap'))
1601 # For dom0, we ignore any stored value for the vcpus fields, and
1602 # read the current value from Xen instead. This allows boot-time
1603 # settings to take precedence over any entries in the store.
1604 if priv:
1605 xeninfo = dom_get(self.domid)
1606 self.info['VCPUs_max'] = xeninfo['online_vcpus']
1607 self.info['vcpu_avail'] = (1 << xeninfo['online_vcpus']) - 1
1609 # read image value
1610 image_sxp = self._readVm('image')
1611 if image_sxp:
1612 self.info.update_with_image_sxp(sxp.from_string(image_sxp))
1614 # read devices
1615 devices = []
1616 for devclass in XendDevices.valid_devices():
1617 devconfig = self.getDeviceController(devclass).configurations()
1618 if devconfig:
1619 devices.extend(devconfig)
1621 if not self.info['devices'] and devices is not None:
1622 for device in devices:
1623 self.info.device_add(device[0], cfg_sxp = device)
1625 self._update_consoles()
1627 def _update_consoles(self, transaction = None):
1628 if self.domid == None or self.domid == 0:
1629 return
1631 # Update VT100 port if it exists
1632 if transaction is None:
1633 self.console_port = self.readDom('console/port')
1634 else:
1635 self.console_port = self.readDomTxn(transaction, 'console/port')
1636 if self.console_port is not None:
1637 serial_consoles = self.info.console_get_all('vt100')
1638 if not serial_consoles:
1639 cfg = self.info.console_add('vt100', self.console_port)
1640 self._createDevice('console', cfg)
1641 else:
1642 console_uuid = serial_consoles[0].get('uuid')
1643 self.info.console_update(console_uuid, 'location',
1644 self.console_port)
1647 # Update VNC port if it exists and write to xenstore
1648 if transaction is None:
1649 vnc_port = self.readDom('console/vnc-port')
1650 else:
1651 vnc_port = self.readDomTxn(transaction, 'console/vnc-port')
1652 if vnc_port is not None:
1653 for dev_uuid, (dev_type, dev_info) in self.info['devices'].items():
1654 if dev_type == 'vfb':
1655 old_location = dev_info.get('location')
1656 listen_host = dev_info.get('vnclisten', \
1657 XendOptions.instance().get_vnclisten_address())
1658 new_location = '%s:%s' % (listen_host, str(vnc_port))
1659 if old_location == new_location:
1660 break
1662 dev_info['location'] = new_location
1663 self.info.device_update(dev_uuid, cfg_xenapi = dev_info)
1664 vfb_ctrl = self.getDeviceController('vfb')
1665 vfb_ctrl.reconfigureDevice(0, dev_info)
1666 break
1669 # Function to update xenstore /vm/*
1672 def _readVm(self, *args):
1673 return xstransact.Read(self.vmpath, *args)
1675 def _writeVm(self, *args):
1676 return xstransact.Write(self.vmpath, *args)
1678 def _removeVm(self, *args):
1679 return xstransact.Remove(self.vmpath, *args)
1681 def _gatherVm(self, *args):
1682 return xstransact.Gather(self.vmpath, *args)
1684 def _listRecursiveVm(self, *args):
1685 return xstransact.ListRecursive(self.vmpath, *args)
1687 def storeVm(self, *args):
1688 return xstransact.Store(self.vmpath, *args)
1690 def permissionsVm(self, *args):
1691 return xstransact.SetPermissions(self.vmpath, *args)
1694 # Function to update xenstore /dom/*
1697 def readDom(self, *args):
1698 return xstransact.Read(self.dompath, *args)
1700 def gatherDom(self, *args):
1701 return xstransact.Gather(self.dompath, *args)
1703 def _writeDom(self, *args):
1704 return xstransact.Write(self.dompath, *args)
1706 def _removeDom(self, *args):
1707 return xstransact.Remove(self.dompath, *args)
1709 def storeDom(self, *args):
1710 return xstransact.Store(self.dompath, *args)
1713 def readDomTxn(self, transaction, *args):
1714 paths = map(lambda x: self.dompath + "/" + x, args)
1715 return transaction.read(*paths)
1717 def gatherDomTxn(self, transaction, *args):
1718 paths = map(lambda x: self.dompath + "/" + x, args)
1719 return transaction.gather(*paths)
1721 def _writeDomTxn(self, transaction, *args):
1722 paths = map(lambda x: self.dompath + "/" + x, args)
1723 return transaction.write(*paths)
1725 def _removeDomTxn(self, transaction, *args):
1726 paths = map(lambda x: self.dompath + "/" + x, args)
1727 return transaction.remove(*paths)
1729 def storeDomTxn(self, transaction, *args):
1730 paths = map(lambda x: self.dompath + "/" + x, args)
1731 return transaction.store(*paths)
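# Editor's note (illustrative sketch, not part of the original source): each
# *Txn helper simply prefixes the relative keys with this domain's xenstore
# path before delegating to the transaction. Assuming a hypothetical
# dompath of '/local/domain/3':
#   >>> map(lambda x: '/local/domain/3' + "/" + x, ['console/port', 'name'])
#   ['/local/domain/3/console/port', '/local/domain/3/name']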
1734 def _recreateDom(self):
1735 complete(self.dompath, lambda t: self._recreateDomFunc(t))
1737 def _recreateDomFunc(self, t):
1738 t.remove()
1739 t.mkdir()
1740 t.set_permissions({'dom' : self.domid, 'read' : True})
1741 t.write('vm', self.vmpath)
1742 # NB. Solaris guests use guest/ and hvmpv/ xenstore directories
1743 # XCP Windows paravirtualized guests use data/
1744 for i in [ 'device', 'control', 'error', 'memory', 'guest', \
1745 'hvmpv', 'data' ]:
1746 t.mkdir(i)
1747 t.set_permissions(i, {'dom' : self.domid})
1749 def _storeDomDetails(self):
1750 to_store = {
1751 'domid': str(self.domid),
1752 'vm': self.vmpath,
1753 'name': self.info['name_label'],
1754 'console/limit': str(xoptions.get_console_limit() * 1024),
1755 'memory/target': str(self.info['memory_dynamic_max'] / 1024),
1756 'description': str(self.info['description']),
1757 }
1759 def f(n, v):
1760 if v is not None:
1761 if type(v) == bool:
1762 to_store[n] = v and "1" or "0"
1763 else:
1764 to_store[n] = str(v)
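# Editor's note (illustrative sketch, not part of the original source): the
# Python 2 "v and '1' or '0'" idiom above maps booleans to the strings that
# xenstore expects:
#   >>> True and "1" or "0"
#   '1'
#   >>> False and "1" or "0"
#   '0'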
1766 # Figure out if we need to tell xenconsoled to ignore this guest's
1767 # console - device model will handle console if it is running
1768 constype = "ioemu"
1769 if 'device_model' not in self.info['platform']:
1770 constype = "xenconsoled"
1772 f('console/port', self.console_port)
1773 f('console/ring-ref', self.console_mfn)
1774 f('console/type', constype)
1775 f('store/port', self.store_port)
1776 f('store/ring-ref', self.store_mfn)
1778 if arch.type == "x86":
1779 f('control/platform-feature-multiprocessor-suspend', True)
1781 # elfnotes
1782 for n, v in self.info.get_notes().iteritems():
1783 n = n.lower().replace('_', '-')
1784 if n == 'features':
1785 for v in v.split('|'):
1786 v = v.replace('_', '-')
1787 if v.startswith('!'):
1788 f('image/%s/%s' % (n, v[1:]), False)
1789 else:
1790 f('image/%s/%s' % (n, v), True)
1791 else:
1792 f('image/%s' % n, v)
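# Editor's note (illustrative sketch, not part of the original source): a
# FEATURES elfnote of, say, 'writable_page_tables|!auto_translated_physmap'
# would be written out as two keys (underscores become hyphens, and a '!'
# prefix negates the feature):
#   image/features/writable-page-tables = "1"
#   image/features/auto-translated-physmap = "0"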
1794 if self.info.has_key('security_label'):
1795 f('security_label', self.info['security_label'])
1797 to_store.update(self._vcpuDomDetails())
1799 log.debug("Storing domain details: %s", scrub_password(to_store))
1801 self._writeDom(to_store)
1803 def _vcpuDomDetails(self):
1804 def availability(n):
1805 if self.info['vcpu_avail'] & (1 << n):
1806 return 'online'
1807 else:
1808 return 'offline'
1810 result = {}
1811 for v in range(0, self.info['VCPUs_max']):
1812 result["cpu/%d/availability" % v] = availability(v)
1813 return result
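# Editor's note (illustrative sketch, not part of the original source): with
# VCPUs_max == 4 and vcpu_avail == 0b0101 (== 5), the map returned above is
#   {'cpu/0/availability': 'online',
#    'cpu/1/availability': 'offline',
#    'cpu/2/availability': 'online',
#    'cpu/3/availability': 'offline'}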
1816 # xenstore watches
1819 def _registerWatches(self):
1820 """Register a watch on this VM's entries in the store, and the
1821 domain's control/shutdown node, so that when they are changed
1822 externally, we keep up to date. This should only be called by {@link
1823 #create}, {@link #recreate}, or {@link #restore}, once the domain's
1824 details have been written, but before the new instance is returned."""
1825 self.vmWatch = xswatch(self.vmpath, self._storeChanged)
1826 self.shutdownWatch = xswatch(self.dompath + '/control/shutdown',
1827 self._handleShutdownWatch)
1829 def _storeChanged(self, _):
1830 log.trace("XendDomainInfo.storeChanged")
1832 changed = False
1834 # Check whether values in the configuration have
1835 # changed in Xenstore.
1837 cfg_vm = ['name', 'on_poweroff', 'on_reboot', 'on_crash',
1838 'rtc/timeoffset']
1840 vm_details = self._readVMDetails([(k,XendConfig.LEGACY_CFG_TYPES[k])
1841 for k in cfg_vm])
1843 # Convert the two parallel lists into a Python dictionary.
1844 vm_details = dict(zip(cfg_vm, vm_details))
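# Editor's note (illustrative sketch, not part of the original source):
# dict(zip(...)) pairs the key list with the value list positionally, e.g.
#   >>> dict(zip(['name', 'on_reboot'], ['vm1', 'restart'])) \
#   ...     == {'name': 'vm1', 'on_reboot': 'restart'}
#   True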
1846 for arg, val in vm_details.items():
1847 if arg in XendConfig.LEGACY_CFG_TO_XENAPI_CFG:
1848 xapiarg = XendConfig.LEGACY_CFG_TO_XENAPI_CFG[arg]
1849 if val != None and val != self.info[xapiarg]:
1850 self.info[xapiarg] = val
1851 changed = True
1852 elif arg == "memory":
1853 if val != None and val != self.info["static_memory_min"]:
1854 self.info["static_memory_min"] = val
1855 changed = True
1856 elif arg == "maxmem":
1857 if val != None and val != self.info["static_memory_max"]:
1858 self.info["static_memory_max"] = val
1859 changed = True
1861 # Check whether image definition has been updated
1862 image_sxp = self._readVm('image')
1863 if image_sxp and image_sxp != sxp.to_string(self.info.image_sxpr()):
1864 self.info.update_with_image_sxp(sxp.from_string(image_sxp))
1865 changed = True
1867 # Update the rtc_timeoffset to be preserved across reboot.
1868 # NB. No need to update xenstore domain section.
1869 val = int(vm_details.get("rtc/timeoffset", 0))
1870 self.info["platform"]["rtc_timeoffset"] = val
1872 if changed:
1873 # Update the domain section of the store, as this contains some
1874 # parameters derived from the VM configuration.
1875 self.refresh_shutdown_lock.acquire()
1876 try:
1877 state = self._stateGet()
1878 if state not in (DOM_STATE_SHUTDOWN, DOM_STATE_HALTED,):
1879 self._storeDomDetails()
1880 finally:
1881 self.refresh_shutdown_lock.release()
1883 return 1
1885 def _handleShutdownWatch(self, _):
1886 log.debug('XendDomainInfo.handleShutdownWatch')
1888 reason = self.readDom('control/shutdown')
1890 if reason and reason != 'suspend':
1891 sst = self.readDom('xend/shutdown_start_time')
1892 now = time.time()
1893 if sst:
1894 self.shutdownStartTime = float(sst)
1895 timeout = float(sst) + SHUTDOWN_TIMEOUT - now
1896 else:
1897 self.shutdownStartTime = now
1898 self.storeDom('xend/shutdown_start_time', now)
1899 timeout = SHUTDOWN_TIMEOUT
1901 log.trace(
1902 "Scheduling refreshShutdown on domain %d in %ds.",
1903 self.domid, timeout)
1904 threading.Timer(timeout, self.refreshShutdown).start()
1906 return True
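# Editor's note (illustrative sketch, not part of the original source;
# SHUTDOWN_TIMEOUT comes from XendConstants and its value of 120s here is
# assumed for the example). If a shutdown request was first recorded 40s
# ago, the timer fires in float(sst) + 120 - now == 80 seconds; on the
# first request no start time exists yet, so the full 120s timeout is used.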
1910 # Public Attributes for the VM
1914 def getDomid(self):
1915 return self.domid
1917 def getStubdomDomid(self):
1918 dom_list = xstransact.List('/local/domain')
1919 for d in dom_list:
1920 target = xstransact.Read('/local/domain/' + d + '/target')
1921 if target is not None and int(target) == self.domid:
1922 return int(d)
1923 return None
1925 def setName(self, name, to_store = True):
1926 self._checkName(name)
1927 self.info['name_label'] = name
1928 if to_store:
1929 self.storeVm("name", name)
1931 def getName(self):
1932 return self.info['name_label']
1934 def getDomainPath(self):
1935 return self.dompath
1937 def getShutdownReason(self):
1938 return self.readDom('control/shutdown')
1940 def getStorePort(self):
1941 """For use only by image.py and XendCheckpoint.py."""
1942 return self.store_port
1944 def getConsolePort(self):
1945 """For use only by image.py and XendCheckpoint.py"""
1946 return self.console_port
1948 def getFeatures(self):
1949 """For use only by image.py."""
1950 return self.info['features']
1952 def getVCpuCount(self):
1953 return self.info['VCPUs_max']
1955 def getVCpuAvail(self):
1956 return self.info['vcpu_avail']
1958 def setVCpuCount(self, vcpus):
1959 def vcpus_valid(n):
1960 if vcpus <= 0:
1961 raise XendError('A VCPU count of zero or less is invalid')
1962 if self.domid >= 0 and vcpus > self.info['VCPUs_max']:
1963 raise XendError('Cannot set vcpus greater than max vcpus on running domain')
1964 vcpus_valid(vcpus)
1966 self.info['vcpu_avail'] = (1 << vcpus) - 1
1967 if self.domid >= 0:
1968 self.storeVm('vcpu_avail', self.info['vcpu_avail'])
1969 self._writeDom(self._vcpuDomDetails())
1970 self.info['VCPUs_live'] = vcpus
1971 else:
1972 if self.info['VCPUs_max'] > vcpus:
1973 # decreasing
1974 del self.info['cpus'][vcpus:]
1975 elif self.info['VCPUs_max'] < vcpus:
1976 # increasing
1977 for c in range(self.info['VCPUs_max'], vcpus):
1978 self.info['cpus'].append(list())
1979 self.info['VCPUs_max'] = vcpus
1980 xen.xend.XendDomain.instance().managed_config_save(self)
1981 log.info("Set VCPU count on domain %s to %d", self.info['name_label'],
1982 vcpus)
1984 def getMemoryTarget(self):
1985 """Get this domain's target memory size, in KB."""
1986 return self.info['memory_dynamic_max'] / 1024
1988 def getMemoryMaximum(self):
1989 """Get this domain's maximum memory size, in KB."""
1990 # remember, info now stores memory in bytes
1991 return self.info['memory_static_max'] / 1024
1993 def getResume(self):
1994 return str(self._resume)
1996 def setResume(self, isresume):
1997 self._resume = isresume
1999 def getCpus(self):
2000 return self.info['cpus']
2002 def setCpus(self, cpumap):
2003 self.info['cpus'] = cpumap
2005 def getCap(self):
2006 return self.info['vcpus_params']['cap']
2008 def setCap(self, cpu_cap):
2009 self.info['vcpus_params']['cap'] = cpu_cap
2011 def getWeight(self):
2012 return self.info['vcpus_params']['weight']
2014 def setWeight(self, cpu_weight):
2015 self.info['vcpus_params']['weight'] = cpu_weight
2017 def getRestartCount(self):
2018 return self._readVm('xend/restart_count')
2020 def refreshShutdown(self, xeninfo = None):
2021 """ Checks the domain for whether a shutdown is required.
2023 Called from XendDomainInfo and also image.py for HVM images.
2024 """
2026 # If set at the end of this method, a restart is required, with the
2027 # given reason. This restart has to be done out of the scope of
2028 # refresh_shutdown_lock.
2029 restart_reason = None
2031 self.refresh_shutdown_lock.acquire()
2032 try:
2033 if xeninfo is None:
2034 xeninfo = dom_get(self.domid)
2035 if xeninfo is None:
2036 # The domain no longer exists. This will occur if we have
2037 # scheduled a timer to check for shutdown timeouts and the
2038 # shutdown succeeded. It will also occur if someone
2039 # destroys a domain beneath us. We clean up the domain,
2040 # just in case, but we can't clean up the VM, because that
2041 # VM may have migrated to a different domain on this
2042 # machine.
2043 self.cleanupDomain()
2044 self._stateSet(DOM_STATE_HALTED)
2045 return
2047 if xeninfo['dying']:
2048 # Dying means that a domain has been destroyed, but has not
2049 # yet been cleaned up by Xen. This state could persist
2050 # indefinitely if, for example, another domain has some of its
2051 # pages mapped. We might like to diagnose this problem in the
2052 # future, but for now all we do is make sure that it's not us
2053 # holding the pages, by calling cleanupDomain. We can't
2054 # clean up the VM, as above.
2055 self.cleanupDomain()
2056 self._stateSet(DOM_STATE_SHUTDOWN)
2057 return
2059 elif xeninfo['crashed']:
2060 if self.readDom('xend/shutdown_completed'):
2061 # We've seen this shutdown already, but we are preserving
2062 # the domain for debugging. Leave it alone.
2063 return
2065 log.warn('Domain has crashed: name=%s id=%d.',
2066 self.info['name_label'], self.domid)
2067 self._writeVm(LAST_SHUTDOWN_REASON, 'crash')
2069 restart_reason = 'crash'
2070 self._stateSet(DOM_STATE_HALTED)
2072 elif xeninfo['shutdown']:
2073 self._stateSet(DOM_STATE_SHUTDOWN)
2074 if self.readDom('xend/shutdown_completed'):
2075 # We've seen this shutdown already, but we are preserving
2076 # the domain for debugging. Leave it alone.
2077 return
2079 else:
2080 reason = shutdown_reason(xeninfo['shutdown_reason'])
2082 log.info('Domain has shutdown: name=%s id=%d reason=%s.',
2083 self.info['name_label'], self.domid, reason)
2084 self._writeVm(LAST_SHUTDOWN_REASON, reason)
2086 self._clearRestart()
2088 if reason == 'suspend':
2089 self._stateSet(DOM_STATE_SUSPENDED)
2090 # Don't destroy the domain. XendCheckpoint will do
2091 # this once it has finished. However, stop watching
2092 # the VM path now, otherwise we will end up with one
2093 # watch for the old domain, and one for the new.
2094 self._unwatchVm()
2095 elif reason in ('poweroff', 'reboot'):
2096 restart_reason = reason
2097 else:
2098 self.destroy()
2100 elif self.dompath is None:
2101 # We have yet to manage to call introduceDomain on this
2102 # domain. This can happen if a restore is in progress, or has
2103 # failed. Ignore this domain.
2104 pass
2105 else:
2106 # Domain is alive. If we are shutting it down, log a message
2107 # if it seems unresponsive.
2108 if xeninfo['paused']:
2109 self._stateSet(DOM_STATE_PAUSED)
2110 else:
2111 self._stateSet(DOM_STATE_RUNNING)
2113 if self.shutdownStartTime:
2114 timeout = (SHUTDOWN_TIMEOUT - time.time() +
2115 self.shutdownStartTime)
2116 if (timeout < 0 and not self.readDom('xend/unresponsive')):
2117 log.info(
2118 "Domain shutdown timeout expired: name=%s id=%s",
2119 self.info['name_label'], self.domid)
2120 self.storeDom('xend/unresponsive', 'True')
2121 finally:
2122 self.refresh_shutdown_lock.release()
2124 if restart_reason and not self.restart_in_progress:
2125 self.restart_in_progress = True
2126 threading.Thread(target = self._maybeRestart,
2127 args = (restart_reason,)).start()
2131 # Restart functions - handling whether we come back up on shutdown.
2134 def _clearRestart(self):
2135 self._removeDom("xend/shutdown_start_time")
2137 def _maybeDumpCore(self, reason):
2138 if reason == 'crash':
2139 if xoptions.get_enable_dump() or self.get_on_crash() \
2140 in ['coredump_and_destroy', 'coredump_and_restart']:
2141 try:
2142 self.dumpCore()
2143 except XendError:
2144 # This error has been logged -- there's nothing more
2145 # we can do in this context.
2146 pass
2148 def _maybeRestart(self, reason):
2149 # Before taking configured action, dump core if configured to do so.
2151 self._maybeDumpCore(reason)
2153 # Dispatch to the correct method based upon the configured on_{reason}
2154 # behaviour.
2155 actions = {"destroy" : self.destroy,
2156 "restart" : self._restart,
2157 "preserve" : self._preserve,
2158 "rename-restart" : self._renameRestart,
2159 "coredump-destroy" : self.destroy,
2160 "coredump-restart" : self._restart}
2162 action_conf = {
2163 'poweroff': 'actions_after_shutdown',
2164 'reboot': 'actions_after_reboot',
2165 'crash': 'actions_after_crash',
2166 }
2168 action_target = self.info.get(action_conf.get(reason))
2169 func = actions.get(action_target, None)
2170 if func and callable(func):
2171 func()
2172 else:
2173 self.destroy() # default to destroy
2175 def _renameRestart(self):
2176 self._restart(True)
2178 def _restart(self, rename = False):
2179 """Restart the domain after it has exited.
2181 @param rename True if the old domain is to be renamed and preserved,
2182 False if it is to be destroyed.
2183 """
2184 from xen.xend import XendDomain
2186 if self._readVm(RESTART_IN_PROGRESS):
2187 log.error('Xend failed during restart of domain %s. '
2188 'Refusing to restart to avoid loops.',
2189 str(self.domid))
2190 self.destroy()
2191 return
2193 old_domid = self.domid
2194 self._writeVm(RESTART_IN_PROGRESS, 'True')
2196 elapse = time.time() - self.info['start_time']
2197 if elapse < MINIMUM_RESTART_TIME:
2198 log.error('VM %s restarting too fast (Elapsed time: %f seconds). '
2199 'Refusing to restart to avoid loops.',
2200 self.info['name_label'], elapse)
2201 self.destroy()
2202 return
2204 prev_vm_xend = self._listRecursiveVm('xend')
2205 new_dom_info = self.info
2206 try:
2207 if rename:
2208 new_dom_info = self._preserveForRestart()
2209 else:
2210 self._unwatchVm()
2211 self.destroy()
2213 # new_dom's VM will be the same as this domain's VM, except where
2214 # the rename flag has instructed us to call preserveForRestart.
2215 # In that case, it is important that we remove the
2216 # RESTART_IN_PROGRESS node from the new domain, not the old one,
2217 # once the new one is available.
2219 new_dom = None
2220 try:
2221 new_dom = XendDomain.instance().domain_create_from_dict(
2222 new_dom_info)
2223 for x in prev_vm_xend[0][1]:
2224 new_dom._writeVm('xend/%s' % x[0], x[1])
2225 new_dom.waitForDevices()
2226 new_dom.unpause()
2227 rst_cnt = new_dom._readVm('xend/restart_count')
2228 rst_cnt = int(rst_cnt) + 1
2229 new_dom._writeVm('xend/restart_count', str(rst_cnt))
2230 new_dom._removeVm(RESTART_IN_PROGRESS)
2231 except:
2232 if new_dom:
2233 new_dom._removeVm(RESTART_IN_PROGRESS)
2234 new_dom.destroy()
2235 else:
2236 self._removeVm(RESTART_IN_PROGRESS)
2237 raise
2238 except:
2239 log.exception('Failed to restart domain %s.', str(old_domid))
2241 def _preserveForRestart(self):
2242 """Preserve a domain that has been shut down, by giving it a new UUID,
2243 cloning the VM details, and giving it a new name. This allows us to
2244 keep this domain for debugging, but restart a new one in its place
2245 preserving the restart semantics (name and UUID preserved).
2246 """
2248 new_uuid = uuid.createString()
2249 new_name = 'Domain-%s' % new_uuid
2250 log.info("Renaming dead domain %s (%d, %s) to %s (%s).",
2251 self.info['name_label'], self.domid, self.info['uuid'],
2252 new_name, new_uuid)
2253 self._unwatchVm()
2254 self._releaseDevices()
2255 # Remove existing vm node in xenstore
2256 self._removeVm()
2257 new_dom_info = self.info.copy()
2258 new_dom_info['name_label'] = self.info['name_label']
2259 new_dom_info['uuid'] = self.info['uuid']
2260 self.info['name_label'] = new_name
2261 self.info['uuid'] = new_uuid
2262 self.vmpath = XS_VMROOT + new_uuid
2263 # Write out new vm node to xenstore
2264 self._storeVmDetails()
2265 self._preserve()
2266 return new_dom_info
2269 def _preserve(self):
2270 log.info("Preserving dead domain %s (%d).", self.info['name_label'],
2271 self.domid)
2272 self._unwatchVm()
2273 self.storeDom('xend/shutdown_completed', 'True')
2274 self._stateSet(DOM_STATE_HALTED)
2277 # Debugging ..
2280 def dumpCore(self, corefile = None):
2281 """Create a core dump for this domain.
2283 @raise: XendError if core dumping failed.
2284 """
2286 if not corefile:
2287 # To prohibit directory traversal
2288 based_name = os.path.basename(self.info['name_label'])
2290 coredir = "/var/xen/dump/%s" % (based_name)
2291 if not os.path.exists(coredir):
2292 try:
2293 mkdir.parents(coredir, stat.S_IRWXU)
2294 except Exception, ex:
2295 log.error("Cannot create directory: %s" % str(ex))
2297 if not os.path.isdir(coredir):
2298 # Use former directory to dump core
2299 coredir = '/var/xen/dump'
2301 this_time = time.strftime("%Y-%m%d-%H%M.%S", time.localtime())
2302 corefile = "%s/%s-%s.%s.core" % (coredir, this_time,
2303 self.info['name_label'], self.domid)
2305 if os.path.isdir(corefile):
2306 raise XendError("Cannot dump core in a directory: %s" %
2307 corefile)
2309 try:
2310 try:
2311 self._writeVm(DUMPCORE_IN_PROGRESS, 'True')
2312 xc.domain_dumpcore(self.domid, corefile)
2313 except RuntimeError, ex:
2314 corefile_incomp = corefile+'-incomplete'
2315 try:
2316 os.rename(corefile, corefile_incomp)
2317 except:
2318 pass
2320 log.error("core dump failed: id = %s name = %s: %s",
2321 self.domid, self.info['name_label'], str(ex))
2322 raise XendError("Failed to dump core: %s" % str(ex))
2323 finally:
2324 self._removeVm(DUMPCORE_IN_PROGRESS)
2327 # Device creation/deletion functions
2330 def _createDevice(self, deviceClass, devConfig):
2331 return self.getDeviceController(deviceClass).createDevice(devConfig)
2333 def _waitForDevice(self, deviceClass, devid):
2334 return self.getDeviceController(deviceClass).waitForDevice(devid)
2336 def _waitForDeviceUUID(self, dev_uuid):
2337 deviceClass, config = self.info['devices'].get(dev_uuid)
2338 self._waitForDevice(deviceClass, config['devid'])
2340 def _waitForDevice_destroy(self, deviceClass, devid, backpath):
2341 return self.getDeviceController(deviceClass).waitForDevice_destroy(
2342 devid, backpath)
2344 def _reconfigureDevice(self, deviceClass, devid, devconfig):
2345 return self.getDeviceController(deviceClass).reconfigureDevice(
2346 devid, devconfig)
2348 def _createDevices(self):
2349 """Create the devices for a vm.
2351 @raise: VmError for invalid devices
2352 """
2353 if self.image:
2354 self.image.prepareEnvironment()
2356 vscsi_uuidlist = {}
2357 vscsi_devidlist = []
2358 ordered_refs = self.info.ordered_device_refs()
2359 for dev_uuid in ordered_refs:
2360 devclass, config = self.info['devices'][dev_uuid]
2361 if devclass in XendDevices.valid_devices() and devclass != 'vscsi':
2362 log.info("createDevice: %s : %s" % (devclass, scrub_password(config)))
2363 dev_uuid = config.get('uuid')
2365 if devclass == 'pci':
2366 self.pci_dev_check_assignability_and_do_FLR(config)
2368 if devclass != 'pci' or not self.info.is_hvm() :
2369 devid = self._createDevice(devclass, config)
2371 # store devid in XendConfig for caching reasons
2372 if dev_uuid in self.info['devices']:
2373 self.info['devices'][dev_uuid][1]['devid'] = devid
2375 elif devclass == 'vscsi':
2376 vscsi_config = config.get('devs', [])[0]
2377 devid = vscsi_config.get('devid', '')
2378 dev_uuid = config.get('uuid')
2379 vscsi_uuidlist[devid] = dev_uuid
2380 vscsi_devidlist.append(devid)
2382 # The devids must be sorted so that /dev/sdxx ordering in the guest is stable.
2383 if len(vscsi_uuidlist) > 0:
2384 vscsi_devidlist.sort()
2385 for vscsiid in vscsi_devidlist:
2386 dev_uuid = vscsi_uuidlist[vscsiid]
2387 devclass, config = self.info['devices'][dev_uuid]
2388 log.info("createDevice: %s : %s" % (devclass, scrub_password(config)))
2389 dev_uuid = config.get('uuid')
2390 devid = self._createDevice(devclass, config)
2391 # store devid in XendConfig for caching reasons
2392 if dev_uuid in self.info['devices']:
2393 self.info['devices'][dev_uuid][1]['devid'] = devid
2396 if self.image:
2397 self.image.createDeviceModel()
2399 # If there are pass-through devices, we need the virtual PCI slot info from qemu.
2400 self.pci_device_configure_boot()
2402 def _releaseDevices(self, suspend = False):
2403 """Release all domain's devices. Nothrow guarantee."""
2404 if self.image:
2405 try:
2406 log.debug("Destroying device model")
2407 self.image.destroyDeviceModel()
2408 except Exception, e:
2409 log.exception("Device model destroy failed %s" % str(e))
2410 else:
2411 log.debug("No device model")
2413 log.debug("Releasing devices")
2414 t = xstransact("%s/device" % self.vmpath)
2415 try:
2416 for devclass in XendDevices.valid_devices():
2417 for dev in t.list(devclass):
2418 try:
2419 log.debug("Removing %s", dev)
2420 self.destroyDevice(devclass, dev, False)
2421 except:
2422 # Log and swallow any exceptions in removal --
2423 # there's nothing more we can do.
2424 log.exception("Device release failed: %s; %s; %s",
2425 self.info['name_label'],
2426 devclass, dev)
2427 finally:
2428 t.abort()
2430 def getDeviceController(self, name):
2431 """Get the device controller for this domain, and if it
2432 doesn't exist, create it.
2434 @param name: device class name
2435 @type name: string
2436 @rtype: subclass of DevController
2437 """
2438 if name not in self._deviceControllers:
2439 devController = XendDevices.make_controller(name, self)
2440 if not devController:
2441 raise XendError("Unknown device type: %s" % name)
2442 self._deviceControllers[name] = devController
2444 return self._deviceControllers[name]
2447 # Migration functions (public)
2450 def testMigrateDevices(self, network, dst):
2451 """ Notify all device about intention of migration
2452 @raise: XendError for a device that cannot be migrated
2453 """
2454 for (n, c) in self.info.all_devices_sxpr():
2455 rc = self.migrateDevice(n, c, network, dst, DEV_MIGRATE_TEST, self.getName())
2456 if rc != 0:
2457 raise XendError("Device of type '%s' refuses migration." % n)
2459 def migrateDevices(self, network, dst, step, domName=''):
2460 """Notify the devices about migration
2461 """
2462 ctr = 0
2463 try:
2464 for (dev_type, dev_conf) in self.info.all_devices_sxpr():
2465 self.migrateDevice(dev_type, dev_conf, network, dst,
2466 step, domName)
2467 ctr = ctr + 1
2468 except:
2469 for dev_type, dev_conf in self.info.all_devices_sxpr():
2470 if ctr == 0:
2471 step = step - 1
2472 ctr = ctr - 1
2473 self._recoverMigrateDevice(dev_type, dev_conf, network,
2474 dst, step, domName)
2475 raise
2477 def migrateDevice(self, deviceClass, deviceConfig, network, dst,
2478 step, domName=''):
2479 return self.getDeviceController(deviceClass).migrate(deviceConfig,
2480 network, dst, step, domName)
2482 def _recoverMigrateDevice(self, deviceClass, deviceConfig, network,
2483 dst, step, domName=''):
2484 return self.getDeviceController(deviceClass).recover_migrate(
2485 deviceConfig, network, dst, step, domName)
2487 def setChangeHomeServer(self, chs):
2488 if chs is not None:
2489 self.info['change_home_server'] = bool(chs)
2490 else:
2491 if self.info.has_key('change_home_server'):
2492 del self.info['change_home_server']
2495 ## private:
2497 def _constructDomain(self):
2498 """Construct the domain.
2500 @raise: VmError on error
2501 """
2503 log.debug('XendDomainInfo.constructDomain')
2505 self.shutdownStartTime = None
2506 self.restart_in_progress = False
2508 hap = 0
2509 hvm = self.info.is_hvm()
2510 if hvm:
2511 hap = self.info.is_hap()
2512 info = xc.xeninfo()
2513 if 'hvm' not in info['xen_caps']:
2514 raise VmError("HVM guest support is unavailable: is VT/AMD-V "
2515 "supported by your CPU and enabled in your "
2516 "BIOS?")
2518 # Hack to pre-reserve some memory for initial domain creation.
2519 # There is an implicit memory overhead for any domain creation. This
2520 # overhead is greater for some types of domain than others. For
2521 # example, an x86 HVM domain will have a default shadow-pagetable
2522 # allocation of 4MB. We free up 16MB here to be on the safe side.
2523 balloon.free(16*1024, self) # 16MB should be plenty
2525 ssidref = 0
2526 if security.on() == xsconstants.XS_POLICY_USE:
2527 ssidref = security.calc_dom_ssidref_from_info(self.info)
2528 if security.has_authorization(ssidref) == False:
2529 raise VmError("VM is not authorized to run.")
2531 s3_integrity = 0
2532 if self.info.has_key('s3_integrity'):
2533 s3_integrity = self.info['s3_integrity']
2535 oos = self.info['platform'].get('oos', 1)
2536 oos_off = 1 - int(oos)
2538 flags = (int(hvm) << 0) | (int(hap) << 1) | (int(s3_integrity) << 2) | (int(oos_off) << 3)
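# Editor's note (illustrative sketch, not part of the original source): the
# expression above packs four booleans into the domain-creation flags. For
# an HVM guest with HAP, s3_integrity == 0 and oos == 1 (so oos_off == 0):
#   >>> (1 << 0) | (1 << 1) | (0 << 2) | (0 << 3)
#   3
# i.e. only the HVM and HAP bits are set.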
2540 try:
2541 self.domid = xc.domain_create(
2542 domid = 0,
2543 ssidref = ssidref,
2544 handle = uuid.fromString(self.info['uuid']),
2545 flags = flags,
2546 target = self.info.target())
2547 except Exception, e:
2548 # We may get here if the operation is not permitted due to ACM policy.
2549 if security.on() == xsconstants.XS_POLICY_ACM:
2550 raise VmError('Domain in conflict set with running domain?')
2551 log.exception(e)
2553 if not self.domid or self.domid < 0:
2554 failmsg = 'Creating domain failed: name=%s' % self.info['name_label']
2555 if self.domid:
2556 failmsg += ', error=%i' % int(self.domid)
2557 raise VmError(failmsg)
2559 self.dompath = GetDomainPath(self.domid)
2561 self._recreateDom()
2563 # Set TSC mode of domain
2564 tsc_mode = self.info["platform"].get("tsc_mode")
2565 if arch.type == "x86" and tsc_mode is not None:
2566 xc.domain_set_tsc_info(self.domid, int(tsc_mode))
2568 # Set timer configuration of domain
2569 timer_mode = self.info["platform"].get("timer_mode")
2570 if hvm and timer_mode is not None:
2571 xc.hvm_set_param(self.domid, HVM_PARAM_TIMER_MODE,
2572 long(timer_mode))
2574 # Set Viridian interface configuration of domain
2575 viridian = self.info["platform"].get("viridian")
2576 if arch.type == "x86" and hvm and viridian is not None:
2577 xc.hvm_set_param(self.domid, HVM_PARAM_VIRIDIAN, long(viridian))
2579 # If nomigrate is set, disable migration
2580 nomigrate = self.info["platform"].get("nomigrate")
2581 if nomigrate is not None and long(nomigrate) != 0:
2582 xc.domain_disable_migrate(self.domid)
2584 # Optionally enable virtual HPET
2585 hpet = self.info["platform"].get("hpet")
2586 if hvm and hpet is not None:
2587 xc.hvm_set_param(self.domid, HVM_PARAM_HPET_ENABLED,
2588 long(hpet))
2590 # Optionally enable periodic vpt aligning
2591 vpt_align = self.info["platform"].get("vpt_align")
2592 if hvm and vpt_align is not None:
2593 xc.hvm_set_param(self.domid, HVM_PARAM_VPT_ALIGN,
2594 long(vpt_align))
2596 # Set maximum number of vcpus in domain
2597 xc.domain_max_vcpus(self.domid, int(self.info['VCPUs_max']))
2599 # Check for cpu_{cap|weight} validity for credit scheduler
2600 if XendNode.instance().xenschedinfo() == 'credit':
2601 cap = self.getCap()
2602 weight = self.getWeight()
2604 assert type(weight) == int
2605 assert type(cap) == int
2607 if weight < 1 or weight > 65535:
2608 raise VmError("Cpu weight out of range, valid values are within range from 1 to 65535")
2610 if cap < 0 or cap > self.getVCpuCount() * 100:
2611 raise VmError("Cpu cap out of range, valid range is from 0 to %s for specified number of vcpus" %
2612 (self.getVCpuCount() * 100))
2614 # Test whether the devices can be assigned with VT-d
2615 self.info.update_platform_pci()
2616 pci = self.info["platform"].get("pci")
2617 pci_str = ''
2618 if pci and len(pci) > 0:
2619 pci = map(lambda x: x[0:4], pci) # strip options
2620 pci_str = str(pci)
2622 # This test is done for both PV and HVM guests.
2623 for p in pci:
2624 pci_name = '%04x:%02x:%02x.%x' % \
2625 (parse_hex(p[0]), parse_hex(p[1]), parse_hex(p[2]), parse_hex(p[3]))
2626 try:
2627 pci_device = PciDevice(parse_pci_name(pci_name))
2628 except Exception, e:
2629 raise VmError("pci: failed to locate device and "+
2630 "parse its resources - "+str(e))
2631 if pci_device.driver != 'pciback' and pci_device.driver != 'pci-stub':
2632 raise VmError("pci: neither pciback nor pci-stub owns device %s"
2633 % pci_device.name)
2634 if pci_name in get_all_assigned_pci_devices():
2635 raise VmError("failed to assign device %s that has"
2636 " already been assigned to another domain." % pci_name)
2638 if hvm and pci_str != '':
2639 bdf = xc.test_assign_device(0, pci_str)
2640 if bdf != 0:
2641 if bdf == -1:
2642 raise VmError("failed to assign device: maybe the platform"
2643 " doesn't support VT-d, or VT-d isn't enabled"
2644 " properly?")
2645 bus = (bdf >> 16) & 0xff
2646 devfn = (bdf >> 8) & 0xff
2647 dev = (devfn >> 3) & 0x1f
2648 func = devfn & 0x7
2649 raise VmError("failed to assign device %02x:%02x.%x: maybe it has"
2650 " already been assigned to another domain, or maybe"
2651 " it doesn't exist." % (bus, dev, func))
2653 # register the domain in the list
2654 from xen.xend import XendDomain
2655 XendDomain.instance().add_domain(self)
2657 def _introduceDomain(self):
2658 assert self.domid is not None
2659 assert self.store_mfn is not None
2660 assert self.store_port is not None
2662 try:
2663 IntroduceDomain(self.domid, self.store_mfn, self.store_port)
2664 except RuntimeError, exn:
2665 raise XendError(str(exn))
2667 def _setTarget(self, target):
2668 assert self.domid is not None
2670 try:
2671 SetTarget(self.domid, target)
2672 self.storeDom('target', target)
2673 except RuntimeError, exn:
2674 raise XendError(str(exn))
2677 def _setCPUAffinity(self):
2678 """ Repin domain vcpus if a restricted cpus list is provided.
2679 Returns the chosen node number.
2680 """
2682 def has_cpus():
2683 if self.info['cpus'] is not None:
2684 for c in self.info['cpus']:
2685 if c:
2686 return True
2687 return False
2689 def has_cpumap():
2690 if self.info.has_key('vcpus_params'):
2691 for k, v in self.info['vcpus_params'].items():
2692 if k.startswith('cpumap'):
2693 return True
2694 return False
2696 index = 0
2697 if has_cpumap():
2698 for v in range(0, self.info['VCPUs_max']):
2699 if self.info['vcpus_params'].has_key('cpumap%i' % v):
2700 cpumask = map(int, self.info['vcpus_params']['cpumap%i' % v].split(','))
2701 xc.vcpu_setaffinity(self.domid, v, cpumask)
2702 elif has_cpus():
2703 for v in range(0, self.info['VCPUs_max']):
2704 if self.info['cpus'][v]:
2705 xc.vcpu_setaffinity(self.domid, v, self.info['cpus'][v])
2706 else:
2707 def find_relaxed_node(node_list):
2709 nr_nodes = info['max_node_id']+1
2710 if node_list is None:
2711 node_list = range(0, nr_nodes)
2712 nodeload = [0] * nr_nodes
2714 from xen.xend import XendDomain
2715 doms = XendDomain.instance().list('all')
2716 for dom in filter (lambda d: d.domid != self.domid, doms):
2717 cpuinfo = dom.getVCPUInfo()
2718 for vcpu in sxp.children(cpuinfo, 'vcpu'):
2719 if sxp.child_value(vcpu, 'online') == 0: continue
2720 cpumap = list(sxp.child_value(vcpu,'cpumap'))
2721 for i in range(0, nr_nodes):
2722 node_cpumask = info['node_to_cpu'][i]
2723 for j in node_cpumask:
2724 if j in cpumap:
2725 nodeload[i] += 1
2726 break
2727 for i in range(0, nr_nodes):
2728 if len(info['node_to_cpu'][i]) == 0:
2729 nodeload[i] += 8
2730 else:
2731 nodeload[i] = int(nodeload[i] * 16 / len(info['node_to_cpu'][i]))
2732 if i not in node_list:
2733 nodeload[i] += 8
2734 return map(lambda x: x[0], sorted(enumerate(nodeload), key=lambda x:x[1]))
2736 info = xc.physinfo()
2737 if info['nr_nodes'] > 1:
2738 node_memory_list = info['node_to_memory']
2739 needmem = self.image.getRequiredAvailableMemory(self.info['memory_dynamic_max']) / 1024
2740 candidate_node_list = []
2741 for i in range(0, info['max_node_id']+1):
2742 if node_memory_list[i] >= needmem and len(info['node_to_cpu'][i]) > 0:
2743 candidate_node_list.append(i)
2744 best_node = find_relaxed_node(candidate_node_list)[0]
2745 cpumask = info['node_to_cpu'][best_node]
2746 best_nodes = find_relaxed_node(filter(lambda x: x != best_node, range(0,info['max_node_id']+1)))
2747 for node_idx in best_nodes:
2748 if len(cpumask) >= self.info['VCPUs_max']:
2749 break
2750 cpumask = cpumask + info['node_to_cpu'][node_idx]
2751 log.debug("allocating additional NUMA node %d", node_idx)
2752 for v in range(0, self.info['VCPUs_max']):
2753 xc.vcpu_setaffinity(self.domid, v, cpumask)
2754 return index
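# Editor's note (illustrative sketch, not part of the original source): the
# scoring in find_relaxed_node() normalises each node's VCPU load by its
# CPU count and penalises excluded or CPU-less nodes by 8. For example, a
# node with 8 CPUs touched by 4 busy VCPUs scores int(4 * 16 / 8) == 8, so
# an otherwise idle 8-CPU node (score 0) sorts ahead of it as the most
# relaxed candidate for placing this domain.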
2756 def _freeDMAmemory(self, node):
2758 # If we are PV and have PCI devices, the guest will
2759 # turn on a SWIOTLB. The SWIOTLB _MUST_ be located in the DMA32
2760 # zone (under 4GB). To do so, we need to balloon down Dom0 so that
2761 # there is enough (64MB) memory under the 4GB mark. This ballooning
2762 # might take more memory out than just 64MB though :-(
2763 if not self.info.is_pv_and_has_pci():
2764 return
2766 retries = 2000
2767 ask_for_mem = 0
2768 need_mem = 0
2769 try:
2770 while (retries > 0):
2771 physinfo = xc.physinfo()
2772 free_mem = physinfo['free_memory']
2773 max_node_id = physinfo['max_node_id']
2774 node_to_dma32_mem = physinfo['node_to_dma32_mem']
2775 if (node > max_node_id):
2776 return
2777 # Extra 2MB above 64MB seems to do the trick.
2778 need_mem = 64 * 1024 + 2048 - node_to_dma32_mem[node]
2779 # ask_for_mem is our starting point. We ask just for the
2780 # difference needed to have an extra 64MB under 4GB.
2781 ask_for_mem = max(need_mem, ask_for_mem)
2782 if (need_mem > 0):
2783 log.debug('_freeDMAmemory (%d) Need %dKiB DMA memory. '
2784 'Asking for %dKiB', retries, need_mem,
2785 ask_for_mem)
2787 balloon.free(ask_for_mem, self)
2788 ask_for_mem = ask_for_mem + 2048
2789 else:
2790 # OK. We got enough DMA memory.
2791 break
2792 retries = retries - 1
2793 except:
2794 # This is best-effort after all.
2795 need_mem = max(1, need_mem)
2798 if (need_mem > 0):
2799 log.warn('We tried our best to balloon down DMA memory to '
2800 'accommodate your PV guest. We need %dKiB extra memory.',
2801 need_mem)
2803 def _setSchedParams(self):
2804 if XendNode.instance().xenschedinfo() == 'credit':
2805 from xen.xend import XendDomain
2806 XendDomain.instance().domain_sched_credit_set(self.getDomid(),
2807 self.getWeight(),
2808 self.getCap())
2810 def _initDomain(self):
2811 log.debug('XendDomainInfo.initDomain: %s %s',
2812 self.domid,
2813 self.info['vcpus_params']['weight'])
2815 self._configureBootloader()
2817 try:
2818 self.image = image.create(self, self.info)
2820 # repin domain vcpus if a restricted cpus list is provided
2821 # this is done prior to memory allocation to aid in memory
2822 # distribution for NUMA systems.
2823 node = self._setCPUAffinity()
2825 # Set scheduling parameters.
2826 self._setSchedParams()
2828 # Use architecture- and image-specific calculations to determine
2829 # the various headrooms necessary, given the raw configured
2830 # values. maxmem, memory, and shadow are all in KiB,
2831 # but memory_static_max etc. are now all stored in bytes.
2832 memory = self.image.getRequiredAvailableMemory(
2833 self.info['memory_dynamic_max'] / 1024)
2834 maxmem = self.image.getRequiredAvailableMemory(
2835 self.info['memory_static_max'] / 1024)
2836 shadow = self.image.getRequiredShadowMemory(
2837 self.info['shadow_memory'] * 1024,
2838 self.info['memory_static_max'] / 1024)
2840 log.debug("_initDomain:shadow_memory=0x%x, memory_static_max=0x%x, memory_static_min=0x%x.", self.info['shadow_memory'], self.info['memory_static_max'], self.info['memory_static_min'],)
2841 # Round shadow up to a multiple of a MiB, as shadow_mem_control
2842 # takes MiB and we must not round down and end up under-providing.
2843 shadow = ((shadow + 1023) / 1024) * 1024
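# Editor's note (illustrative sketch, not part of the original source): this
# integer arithmetic rounds a KiB value up to the next MiB boundary:
#   >>> shadow = 4097
#   >>> ((shadow + 1023) / 1024) * 1024
#   5120
# 4097 KiB becomes 5 MiB, while an exact multiple such as 4096 is unchanged.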
2845 # set memory limit
2846 xc.domain_setmaxmem(self.domid, maxmem)
2848 vtd_mem = 0
2849 info = xc.physinfo()
2850 if 'hvm_directio' in info['virt_caps']:
2851 # Reserve 1 page per MiB of RAM for separate VT-d page table.
2852 vtd_mem = 4 * (self.info['memory_static_max'] / 1024 / 1024)
2853 # Round vtd_mem up to a multiple of a MiB.
2854 vtd_mem = ((vtd_mem + 1023) / 1024) * 1024
2856 self.guest_bitsize = self.image.getBitSize()
2857 # Make sure there's enough RAM available for the domain
2858 balloon.free(memory + shadow + vtd_mem, self)
2860 # Set up the shadow memory
2861 shadow_cur = xc.shadow_mem_control(self.domid, shadow / 1024)
2862 self.info['shadow_memory'] = shadow_cur
2864 # machine address size
2865 if self.info.has_key('machine_address_size'):
2866 log.debug("_initDomain: setting maximum machine address size %d" % self.info['machine_address_size'])
2867 xc.domain_set_machine_address_size(self.domid, self.info['machine_address_size'])
2869 if self.info.has_key('suppress_spurious_page_faults') and self.info['suppress_spurious_page_faults']:
2870 log.debug("_initDomain: suppressing spurious page faults")
2871 xc.domain_suppress_spurious_page_faults(self.domid)
2873 self._createChannels()
2875 channel_details = self.image.createImage()
2877 self.store_mfn = channel_details['store_mfn']
2878 if 'console_mfn' in channel_details:
2879 self.console_mfn = channel_details['console_mfn']
2880 if 'notes' in channel_details:
2881 self.info.set_notes(channel_details['notes'])
2882 if 'native_protocol' in channel_details:
2883 self.native_protocol = channel_details['native_protocol']
2885 self._introduceDomain()
2886 if self.info.target():
2887 self._setTarget(self.info.target())
2889 self._freeDMAmemory(node)
2891 self._createDevices()
2893 self.image.cleanupTmpImages()
2895 self.info['start_time'] = time.time()
2897 self._stateSet(DOM_STATE_RUNNING)
2898 except VmError, exn:
2899 log.exception("XendDomainInfo.initDomain: exception occurred")
2900 if self.image:
2901 self.image.cleanupTmpImages()
2902 raise exn
2903 except RuntimeError, exn:
2904 log.exception("XendDomainInfo.initDomain: exception occurred")
2905 if self.image:
2906 self.image.cleanupTmpImages()
2907 raise VmError(str(exn))
2910 def cleanupDomain(self):
2911 """Cleanup domain resources; release devices. Idempotent. Nothrow
2912 guarantee."""
2914 self.refresh_shutdown_lock.acquire()
2915 try:
2916 self.unwatchShutdown()
2917 self._releaseDevices()
2918 bootloader_tidy(self)
2920 if self.image:
2921 self.image = None
2923 try:
2924 self._removeDom()
2925 except:
2926 log.exception("Removing domain path failed.")
2928 self._stateSet(DOM_STATE_HALTED)
2929 self.domid = None # Do not push into _stateSet()!
2930 finally:
2931 self.refresh_shutdown_lock.release()
2934 def unwatchShutdown(self):
2935 """Remove the watch on the domain's control/shutdown node, if any.
2936 Idempotent. Nothrow guarantee. Expects to be protected by the
2937 refresh_shutdown_lock."""
2939 try:
2940 try:
2941 if self.shutdownWatch:
2942 self.shutdownWatch.unwatch()
2943 finally:
2944 self.shutdownWatch = None
2945 except:
2946 log.exception("Unwatching control/shutdown failed.")
2948 def waitForShutdown(self):
2949 self.state_updated.acquire()
2950 try:
2951 while self._stateGet() in (DOM_STATE_RUNNING,DOM_STATE_PAUSED):
2952 self.state_updated.wait(timeout=1.0)
2953 finally:
2954 self.state_updated.release()
2956 def waitForSuspend(self):
2957 """Wait for the guest to respond to a suspend request by
2958 shutting down. If the guest hasn't re-written control/shutdown
2959 after a certain amount of time, it's obviously not listening and
2960 won't suspend, so we give up. HVM guests with no PV drivers
2961 should already be shut down.
2962 """
2963 state = "suspend"
2964 nr_tries = 60
2966 self.state_updated.acquire()
2967 try:
2968 while self._stateGet() in (DOM_STATE_RUNNING,DOM_STATE_PAUSED):
2969 self.state_updated.wait(1.0)
2970 if state == "suspend":
2971 if nr_tries == 0:
2972 msg = ('Timeout waiting for domain %s to suspend'
2973 % self.domid)
2974 self._writeDom('control/shutdown', '')
2975 raise XendError(msg)
2976 state = self.readDom('control/shutdown')
2977 nr_tries -= 1
2978 finally:
2979 self.state_updated.release()
2982 # TODO: recategorise - called from XendCheckpoint
2985 def completeRestore(self, store_mfn, console_mfn):
2987 log.debug("XendDomainInfo.completeRestore")
2989 self.store_mfn = store_mfn
2990 self.console_mfn = console_mfn
2992 self._introduceDomain()
2993 self.image = image.create(self, self.info)
2994 if self.image:
2995 self.image.createDeviceModel(True)
2996 self._storeDomDetails()
2997 self._registerWatches()
2998 self.refreshShutdown()
3000 log.debug("XendDomainInfo.completeRestore done")
3003 def _endRestore(self):
3004 self.setResume(False)
3007 # VM Destroy
3010 def _prepare_phantom_paths(self):
3011 # get associated devices to destroy
3012 # build list of phantom devices to be removed after normal devices
3013 plist = []
3014 if self.domid is not None:
3015 t = xstransact("%s/device/vbd" % GetDomainPath(self.domid))
3016 try:
3017 for dev in t.list():
3018 backend_phantom_vbd = xstransact.Read("%s/device/vbd/%s/phantom_vbd" \
3019 % (self.dompath, dev))
3020 if backend_phantom_vbd is not None:
3021 frontend_phantom_vbd = xstransact.Read("%s/frontend" \
3022 % backend_phantom_vbd)
3023 plist.append(backend_phantom_vbd)
3024 plist.append(frontend_phantom_vbd)
3025 finally:
3026 t.abort()
3027 return plist
3029 def _cleanup_phantom_devs(self, plist):
3030 # remove phantom devices
3031 if plist:
3032 time.sleep(2)
3033 for paths in plist:
3034 if paths.find('backend') != -1:
3035 # Modify online status /before/ updating state (latter is watched by
3036 # drivers, so this ordering avoids a race).
3037 xstransact.Write(paths, 'online', "0")
3038 xstransact.Write(paths, 'state', str(xenbusState['Closing']))
3039 # force
3040 xstransact.Remove(paths)
3042 def destroy(self):
3043 """Cleanup VM and destroy domain. Nothrow guarantee."""
3045 if self.domid is None:
3046 return
3047 from xen.xend import XendDomain
3048 log.debug("XendDomainInfo.destroy: domid=%s", str(self.domid))
3050 paths = self._prepare_phantom_paths()
3052 if self.dompath is not None:
3053 try:
3054 xc.domain_destroy_hook(self.domid)
3055 xc.domain_pause(self.domid)
3056 do_FLR(self.domid, self.info.is_hvm())
3057 xc.domain_destroy(self.domid)
3058 for state in DOM_STATES_OLD:
3059 self.info[state] = 0
3060 self._stateSet(DOM_STATE_HALTED)
3061 except:
3062 log.exception("XendDomainInfo.destroy: domain destruction failed.")
3064 XendDomain.instance().remove_domain(self)
3065 self.cleanupDomain()
3067 if self.info.is_hvm() or self.guest_bitsize != 32:
3068 if self.alloc_mem:
3069 import MemoryPool
3070 log.debug("%s KiB need to be added to the memory pool" % self.alloc_mem)
3071 MemoryPool.instance().increase_memory(self.alloc_mem)
3073 self._cleanup_phantom_devs(paths)
3074 self._cleanupVm()
3076 if ("transient" in self.info["other_config"] and \
3077 bool(self.info["other_config"]["transient"])) or \
3078 ("change_home_server" in self.info and \
3079 bool(self.info["change_home_server"])):
3080 XendDomain.instance().domain_delete_by_dominfo(self)
3083 def resetDomain(self):
3084 log.debug("XendDomainInfo.resetDomain(%s)", str(self.domid))
3086 old_domid = self.domid
3087 prev_vm_xend = self._listRecursiveVm('xend')
3088 new_dom_info = self.info
3089 try:
3090 self._unwatchVm()
3091 self.destroy()
3093 new_dom = None
3094 try:
3095 from xen.xend import XendDomain
3096 new_dom_info['domid'] = None
3097 new_dom = XendDomain.instance().domain_create_from_dict(
3098 new_dom_info)
3099 for x in prev_vm_xend[0][1]:
3100 new_dom._writeVm('xend/%s' % x[0], x[1])
3101 new_dom.waitForDevices()
3102 new_dom.unpause()
3103 except:
3104 if new_dom:
3105 new_dom.destroy()
3106 raise
3107 except:
3108 log.exception('Failed to reset domain %s.', str(old_domid))
3111 def resumeDomain(self):
3112 log.debug("XendDomainInfo.resumeDomain(%s)", str(self.domid))
3114 # resume a suspended domain (e.g. after live checkpoint, or after
3115 # a later error during save or migrate); checks that the domain
3116 # is currently suspended first, so it is safe to call from anywhere
3118 xeninfo = dom_get(self.domid)
3119 if xeninfo is None:
3120 return
3121 if not xeninfo['shutdown']:
3122 return
3123 reason = shutdown_reason(xeninfo['shutdown_reason'])
3124 if reason != 'suspend':
3125 return
3127 try:
3128 # could also fetch a parsed note from xenstore
3129 fast = self.info.get_notes().get('SUSPEND_CANCEL') and 1 or 0
3130 if not fast:
3131 self._releaseDevices()
3132 self.testDeviceComplete()
3133 self.testvifsComplete()
3134 log.debug("XendDomainInfo.resumeDomain: devices released")
3136 self._resetChannels()
3138 self._removeDom('control/shutdown')
3139 self._removeDom('device-misc/vif/nextDeviceID')
3141 self._createChannels()
3142 self._introduceDomain()
3143 self._storeDomDetails()
3145 self._createDevices()
3146 log.debug("XendDomainInfo.resumeDomain: devices created")
3148 xc.domain_resume(self.domid, fast)
3149 ResumeDomain(self.domid)
3150 except:
3151 log.exception("XendDomainInfo.resume: xc.domain_resume failed on domain %s." % (str(self.domid)))
3152 self.image.resumeDeviceModel()
3153 log.debug("XendDomainInfo.resumeDomain: completed")
3157 # Channels for xenstore and console
3160 def _createChannels(self):
3161 """Create the channels to the domain.
3162 """
3163 self.store_port = self._createChannel()
3164 self.console_port = self._createChannel()
3167 def _createChannel(self):
3168 """Create an event channel to the domain.
3169 """
3170 try:
3171 if self.domid != None:
3172 return xc.evtchn_alloc_unbound(domid = self.domid,
3173 remote_dom = 0)
3174 except:
3175 log.exception("Exception in alloc_unbound(%s)", str(self.domid))
3176 raise
3178 def _resetChannels(self):
3179 """Reset all event channels in the domain.
3180 """
3181 try:
3182 if self.domid != None:
3183 return xc.evtchn_reset(dom = self.domid)
3184 except:
3185 log.exception("Exception in evtchn_reset(%s)", str(self.domid))
3186 raise
3190 # Bootloader configuration
3193 def _configureBootloader(self):
3194 """Run the bootloader if we're configured to do so."""
3196 blexec = self.info['PV_bootloader']
3197 bootloader_args = self.info['PV_bootloader_args']
3198 kernel = self.info['PV_kernel']
3199 ramdisk = self.info['PV_ramdisk']
3200 args = self.info['PV_args']
3201 boot = self.info['HVM_boot_policy']
3203 if boot:
3204 # HVM booting.
3205 pass
3206 elif not blexec and kernel:
3207 # Boot from dom0. Nothing left to do -- the kernel and ramdisk
3208 # will be picked up by image.py.
3209 pass
3210 else:
3211 # Boot using bootloader
3212 if not blexec or blexec == 'pygrub':
3213 blexec = auxbin.pathTo('pygrub')
3215 blcfg = None
3216 disks = [x for x in self.info['vbd_refs']
3217 if self.info['devices'][x][1]['bootable']]
3219 if not disks:
3220 msg = "Had a bootloader specified, but no disks are bootable"
3221 log.error(msg)
3222 raise VmError(msg)
3224 devinfo = self.info['devices'][disks[0]]
3225 devtype = devinfo[0]
3226 disk = devinfo[1]['uname']
3228 fn = blkdev_uname_to_file(disk)
3230 # If this is a drbd volume, check if we need to activate it
3231 if disk.find(":") != -1:
3232 (disktype, diskname) = disk.split(':', 1)
3233 if disktype == 'drbd':
3234 (drbdadmstdin, drbdadmstdout) = os.popen2(["/sbin/drbdadm", "state", diskname])
3235 (state, junk) = drbdadmstdout.readline().split('/', 1)
3236 if state == 'Secondary':
3237 os.system('/sbin/drbdadm primary ' + diskname)
3239 taptype = blkdev_uname_to_taptype(disk)
3240 mounted = devtype in ['tap', 'tap2'] and taptype != 'aio' and taptype != 'sync' and not os.stat(fn).st_rdev
3241 if mounted:
3242 # This is a file, not a device. pygrub can cope with a
3243 # file if it's raw, but if it's QCOW or other such formats
3244 # used through blktap, then we need to mount it first.
3246 log.info("Mounting %s on %s." %
3247 (fn, BOOTLOADER_LOOPBACK_DEVICE))
3249 vbd = {
3250 'mode': 'RO',
3251 'device': BOOTLOADER_LOOPBACK_DEVICE,
3252 }
3254 from xen.xend import XendDomain
3255 dom0 = XendDomain.instance().privilegedDomain()
3256 dom0._waitForDeviceUUID(dom0.create_vbd(vbd, disk))
3257 fn = BOOTLOADER_LOOPBACK_DEVICE
3259 try:
3260 blcfg = bootloader(blexec, fn, self, False,
3261 bootloader_args, kernel, ramdisk, args)
3262 finally:
3263 if mounted:
3264 log.info("Unmounting %s from %s." %
3265 (fn, BOOTLOADER_LOOPBACK_DEVICE))
3267 dom0.destroyDevice('tap', BOOTLOADER_LOOPBACK_DEVICE)
3269 if blcfg is None:
3270 msg = "Had a bootloader specified, but can't find disk"
3271 log.error(msg)
3272 raise VmError(msg)
3274 self.info.update_with_image_sxp(blcfg, True)
3278 # VM Functions
3281 def _readVMDetails(self, params):
3282 """Read the specified parameters from the store.
3283 """
3284 try:
3285 return self._gatherVm(*params)
3286 except ValueError:
3287 # One of the int/float entries in params has a corresponding store
3288 # entry that is invalid. We recover, because older versions of
3289 # Xend may have put the entry there (memory/target, for example),
3290 # but this is in general a bad situation to have reached.
3291 log.exception(
3292 "Store corrupted at %s! Domain %d's configuration may be "
3293 "affected.", self.vmpath, self.domid)
3294 return []
3296 def _cleanupVm(self):
3297 """Cleanup VM resources. Idempotent. Nothrow guarantee."""
3299 self._unwatchVm()
3301 try:
3302 self._removeVm()
3303 except:
3304 log.exception("Removing VM path failed.")
3307 def checkLiveMigrateMemory(self):
3308 """ Make sure there's enough memory to migrate this domain """
3309 overhead_kb = 0
3310 if arch.type == "x86":
3311 # 1MB per vcpu plus 4KiB per MiB of RAM. This is higher than
3312 # the minimum that Xen would allocate if no value were given.
3313 overhead_kb = self.info['VCPUs_max'] * 1024 + \
3314 (self.info['memory_static_max'] / 1024 / 1024) * 4
3315 overhead_kb = ((overhead_kb + 1023) / 1024) * 1024
3316 # The domain might already have some shadow memory
3317 overhead_kb -= xc.shadow_mem_control(self.domid) * 1024
3318 if overhead_kb > 0:
3319 balloon.free(overhead_kb, self)
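# Editor's note (illustrative sketch, not part of the original source): for
# a hypothetical 2-VCPU guest with memory_static_max == 2 GiB, the x86
# overhead estimate above works out as:
#   >>> overhead_kb = 2 * 1024 + (2 * 1024 ** 3 / 1024 / 1024) * 4
#   >>> ((overhead_kb + 1023) / 1024) * 1024
#   10240
# i.e. 10 MiB, before subtracting any shadow memory already allocated.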
3321 def _unwatchVm(self):
3322 """Remove the watch on the VM path, if any. Idempotent. Nothrow
3323 guarantee."""
3324 try:
3325 try:
3326 if self.vmWatch:
3327 self.vmWatch.unwatch()
3328 finally:
3329 self.vmWatch = None
3330 except:
3331 log.exception("Unwatching VM path failed.")
3333 def testDeviceComplete(self):
3334 """ For Block IO migration safety we must ensure that
3335 the device has shut down correctly, i.e. all blocks are
3336 flushed to disk
3337 """
3338 start = time.time()
3339 while True:
3340 test = 0
3341 diff = time.time() - start
3342 vbds = self.getDeviceController('vbd').deviceIDs()
3343 taps = self.getDeviceController('tap').deviceIDs()
3344 tap2s = self.getDeviceController('tap2').deviceIDs()
3345 for i in vbds + taps + tap2s:
3346 test = 1
3347 log.info("Dev %s still active, looping...", i)
3348 time.sleep(0.1)
3350 if test == 0:
3351 break
3352 if diff >= MIGRATE_TIMEOUT:
3353 log.info("Dev still active but hit max loop timeout")
3354 break
3356 def testvifsComplete(self):
3357 """ In case vifs are released and then created for the same
3358 domain, we need to wait for the devices to shut down.
3359 """
3360 start = time.time()
3361 while True:
3362 test = 0
3363 diff = time.time() - start
3364 for i in self.getDeviceController('vif').deviceIDs():
3365 test = 1
3366 log.info("Dev %s still active, looping...", i)
3367 time.sleep(0.1)
3369 if test == 0:
3370 break
3371 if diff >= MIGRATE_TIMEOUT:
3372 log.info("Dev still active but hit max loop timeout")
3373 break
3375 def _storeVmDetails(self):
3376 to_store = {}
3378 for key in XendConfig.LEGACY_XENSTORE_VM_PARAMS:
3379 info_key = XendConfig.LEGACY_CFG_TO_XENAPI_CFG.get(key, key)
3380 if self._infoIsSet(info_key):
3381 to_store[key] = str(self.info[info_key])
3383 if self._infoIsSet("static_memory_min"):
3384 to_store["memory"] = str(self.info["static_memory_min"])
3385 if self._infoIsSet("static_memory_max"):
3386 to_store["maxmem"] = str(self.info["static_memory_max"])
3388 image_sxpr = self.info.image_sxpr()
3389 if image_sxpr:
3390 to_store['image'] = sxp.to_string(image_sxpr)
3392 if not self._readVm('xend/restart_count'):
3393 to_store['xend/restart_count'] = str(0)
3395 log.debug("Storing VM details: %s", scrub_password(to_store))
3397 self._writeVm(to_store)
3398 self._setVmPermissions()
3400 def _setVmPermissions(self):
3401 """Allow the guest domain to read its UUID. We don't allow it to
3402 access any other entry, for security."""
3403 xstransact.SetPermissions('%s/uuid' % self.vmpath,
3404 { 'dom' : self.domid,
3405 'read' : True,
3406 'write' : False })
3409 # Utility functions
3412 def __getattr__(self, name):
3413 if name == "state":
3414 log.warn("Somebody tried to read XendDomainInfo.state... should use _stateGet()!!!")
3415 log.warn("".join(traceback.format_stack()))
3416 return self._stateGet()
3417 else:
3418 raise AttributeError(name)
3420 def __setattr__(self, name, value):
3421 if name == "state":
3422 log.warn("Somebody tried to set XendDomainInfo.state... should use _stateSet()!!!")
3423 log.warn("".join(traceback.format_stack()))
3424 self._stateSet(value)
3425 else:
3426 self.__dict__[name] = value
3428 def _stateSet(self, state):
3429 self.state_updated.acquire()
3430 try:
3431 # TODO Not sure this is correct...
3432 # _stateGet is live now. Why not fire event
3433 # even when it hasn't changed?
3434 if self._stateGet() != state:
3435 self.state_updated.notifyAll()
3436 import XendAPI
3437 XendAPI.event_dispatch('mod', 'VM', self.info['uuid'],
3438 'power_state')
3439 finally:
3440 self.state_updated.release()
3442 def _stateGet(self):
3443 # Let's try to reconstitute the state from xc.
3444 # First, let's get the domain info
3445 # from xc - this will tell us if the domain
3446 # exists
3447 info = dom_get(self.getDomid())
3448 if info is None or info['shutdown']:
3449 # We are either HALTED or SUSPENDED
3450 # check saved image exists
3451 from xen.xend import XendDomain
3452 managed_config_path = \
3453 XendDomain.instance()._managed_check_point_path( \
3454 self.get_uuid())
3455 if os.path.exists(managed_config_path):
3456 return XEN_API_VM_POWER_STATE_SUSPENDED
3457 else:
3458 return XEN_API_VM_POWER_STATE_HALTED
3459 elif info['crashed']:
3460 # Crashed
3461 return XEN_API_VM_POWER_STATE_CRASHED
3462 else:
3463 # We are either RUNNING or PAUSED
3464 if info['paused']:
3465 return XEN_API_VM_POWER_STATE_PAUSED
3466 else:
3467 return XEN_API_VM_POWER_STATE_RUNNING
3469 def _infoIsSet(self, name):
3470 return name in self.info and self.info[name] is not None
3472 def _checkName(self, name):
3473 """Check if a vm name is valid. Valid names contain alphabetic
3474 characters, digits, or characters in '_-.:+'.
3475 The same name cannot be used for more than one vm at the same time.
3477 @param name: name
3478 @raise: VmError if invalid
3479 """
3480 from xen.xend import XendDomain
3482 if name is None or name == '':
3483 raise VmError('Missing VM Name')
3485 if not re.search(r'^[A-Za-z0-9_\-\.\:\+]+$', name):
3486 raise VmError('Invalid VM Name')
3488 dom = XendDomain.instance().domain_lookup_nr(name)
3489 if dom and dom.info['uuid'] != self.info['uuid']:
3490 raise VmError("VM name '%s' already exists%s" %
3491 (name,
3492 dom.domid is not None and
3493 (" as domain %s" % str(dom.domid)) or ""))
3496 def update(self, info = None, refresh = True, transaction = None):
3497 """Update with info from xc.domain_getinfo().
3498 """
3499 log.trace("XendDomainInfo.update(%s) on domain %s", info,
3500 str(self.domid))
3502 if not info:
3503 info = dom_get(self.domid)
3504 if not info:
3505 return
3507 if info["maxmem_kb"] < 0:
3508 info["maxmem_kb"] = XendNode.instance() \
3509 .physinfo_dict()['total_memory'] * 1024
3511 # make sure state is reset for info
3512 # TODO: we should eventually get rid of old_dom_states
3514 self.info.update_config(info)
3515 self._update_consoles(transaction)
3517 if refresh:
3518 self.refreshShutdown(info)
3520 log.trace("XendDomainInfo.update done on domain %s: %s",
3521 str(self.domid), self.info)
3523 def sxpr(self, ignore_store = False, legacy_only = True):
3524 result = self.info.to_sxp(domain = self,
3525 ignore_devices = ignore_store,
3526 legacy_only = legacy_only)
3528 return result
3530 # Xen API
3531 # ----------------------------------------------------------------
3533 def get_uuid(self):
3534 dom_uuid = self.info.get('uuid')
3535 if not dom_uuid: # if it doesn't exist, make one up
3536 dom_uuid = uuid.createString()
3537 self.info['uuid'] = dom_uuid
3538 return dom_uuid
3540 def get_memory_static_max(self):
3541 return self.info.get('memory_static_max', 0)
3542 def get_memory_static_min(self):
3543 return self.info.get('memory_static_min', 0)
3544 def get_memory_dynamic_max(self):
3545 return self.info.get('memory_dynamic_max', 0)
3546 def get_memory_dynamic_min(self):
3547 return self.info.get('memory_dynamic_min', 0)
3549 # only update memory-related config values if they maintain sanity
3550 def _safe_set_memory(self, key, newval):
3551 oldval = self.info.get(key, 0)
3552 try:
3553 self.info[key] = newval
3554 self.info._memory_sanity_check()
3555 except Exception, ex:
3556 self.info[key] = oldval
3557 raise
3559 def set_memory_static_max(self, val):
3560 self._safe_set_memory('memory_static_max', val)
3561 def set_memory_static_min(self, val):
3562 self._safe_set_memory('memory_static_min', val)
3563 def set_memory_dynamic_max(self, val):
3564 self._safe_set_memory('memory_dynamic_max', val)
3565 def set_memory_dynamic_min(self, val):
3566 self._safe_set_memory('memory_dynamic_min', val)
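# _safe_set_memory above is a write/validate/rollback pattern: apply the
# new value, run the cross-field sanity check, and restore the old value
# before re-raising if the check fails. The same pattern in isolation
# (validate is any callable that raises on an inconsistent dict):

def safe_set(cfg, key, newval, validate):
    oldval = cfg.get(key, 0)
    cfg[key] = newval
    try:
        validate(cfg)       # e.g. ensure dynamic_min <= dynamic_max
    except Exception:
        cfg[key] = oldval   # roll back, then propagate the error
        raise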
3568 def get_vcpus_params(self):
3569 if self.getDomid() is None:
3570 return self.info['vcpus_params']
3572 retval = xc.sched_credit_domain_get(self.getDomid())
3573 return retval
3574 def get_power_state(self):
3575 return XEN_API_VM_POWER_STATE[self._stateGet()]
3576 def get_platform(self):
3577 return self.info.get('platform', {})
3578 def get_pci_bus(self):
3579 return self.info.get('pci_bus', '')
3580 def get_tools_version(self):
3581 return self.info.get('tools_version', {})
3582 def get_metrics(self):
3583 return self.metrics.get_uuid()
3586 def get_security_label(self, xspol=None):
3587 import xen.util.xsm.xsm as security
3588 label = security.get_security_label(self, xspol)
3589 return label
3591 def set_security_label(self, seclab, old_seclab, xspol=None,
3592 xspol_old=None):
3593 """
3594 Set the security label of a domain from its old to
3595 a new value.
3596 @param seclab New security label formatted in the form
3597 <policy type>:<policy name>:<vm label>
3598 @param old_seclab The current security label that the
3599 VM must have.
3600 @param xspol An optional policy under which this
3601 update should be done. If not given,
3602 then the current active policy is used.
3603 @param xspol_old The old policy; only to be passed during
3604 the updating of a policy
3605 @return Tuple of (return code, string with errors from
3606 the hypervisor's operation, old label of the
3607 domain, new ssidref)
3608 """
3609 rc = 0
3610 errors = ""
3611 old_label = ""
3612 new_ssidref = 0
3613 domid = self.getDomid()
3614 res_labels = None
3615 is_policy_update = (xspol_old != None)
3617 from xen.xend.XendXSPolicyAdmin import XSPolicyAdminInstance
3619 state = self._stateGet()
3620 # Relabel only HALTED or RUNNING or PAUSED domains
3621 if domid != 0 and \
3622 state not in \
3623 [ DOM_STATE_HALTED, DOM_STATE_RUNNING, DOM_STATE_PAUSED, \
3624 DOM_STATE_SUSPENDED ]:
3625 log.warn("Relabeling domain not possible in state '%s'" %
3626 DOM_STATES[state])
3627 return (-xsconstants.XSERR_VM_WRONG_STATE, "", "", 0)
3629 # Remove security label. Works only for halted or suspended domains
3630 if not seclab or seclab == "":
3631 if state not in [ DOM_STATE_HALTED, DOM_STATE_SUSPENDED ]:
3632 return (-xsconstants.XSERR_VM_WRONG_STATE, "", "", 0)
3634 if self.info.has_key('security_label'):
3635 old_label = self.info['security_label']
3636 # Check label against expected one.
3637 if old_label != old_seclab:
3638 return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)
3639 del self.info['security_label']
3640 xen.xend.XendDomain.instance().managed_config_save(self)
3641 return (xsconstants.XSERR_SUCCESS, "", "", 0)
3643 tmp = seclab.split(":")
3644 if len(tmp) != 3:
3645 return (-xsconstants.XSERR_BAD_LABEL_FORMAT, "", "", 0)
3646 typ, policy, label = tmp
3648 poladmin = XSPolicyAdminInstance()
3649 if not xspol:
3650 xspol = poladmin.get_policy_by_name(policy)
3652 try:
3653 xen.xend.XendDomain.instance().policy_lock.acquire_writer()
3655 if state in [ DOM_STATE_RUNNING, DOM_STATE_PAUSED ]:
3656 # if the domain is running or paused, try to relabel it in the hypervisor
3657 if not xspol:
3658 return (-xsconstants.XSERR_POLICY_NOT_LOADED, "", "", 0)
3660 if typ != xspol.get_type_name() or \
3661 policy != xspol.get_name():
3662 return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)
3664 if typ == xsconstants.ACM_POLICY_ID:
3665 new_ssidref = xspol.vmlabel_to_ssidref(label)
3666 if new_ssidref == xsconstants.INVALID_SSIDREF:
3667 return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)
3669 # Check that all used resources are accessible under the
3670 # new label
3671 if not is_policy_update and \
3672 not security.resources_compatible_with_vmlabel(xspol,
3673 self, label):
3674 return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)
3676 # Check label against the expected one. Can only do this
3677 # if the policy hasn't changed underneath in the meantime
3678 if xspol_old == None:
3679 old_label = self.get_security_label()
3680 if old_label != old_seclab:
3681 log.info("old_label != old_seclab: %s != %s" %
3682 (old_label, old_seclab))
3683 return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)
3685 # relabel domain in the hypervisor
3686 rc, errors = security.relabel_domains([[domid, new_ssidref]])
3687 log.info("rc from relabeling in HV: %d" % rc)
3688 else:
3689 return (-xsconstants.XSERR_POLICY_TYPE_UNSUPPORTED, "", "", 0)
3691 if rc == 0:
3692 # HALTED, RUNNING or PAUSED
3693 if domid == 0:
3694 if xspol:
3695 self.info['security_label'] = seclab
3696 ssidref = poladmin.set_domain0_bootlabel(xspol, label)
3697 else:
3698 return (-xsconstants.XSERR_POLICY_NOT_LOADED, "", "", 0)
3699 else:
3700 if self.info.has_key('security_label'):
3701 old_label = self.info['security_label']
3702 # Check label against expected one, unless wildcard
3703 if old_label != old_seclab:
3704 return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)
3706 self.info['security_label'] = seclab
3708 try:
3709 xen.xend.XendDomain.instance().managed_config_save(self)
3710 except:
3711 pass # best effort: ignore errors saving the managed config
3712 return (rc, errors, old_label, new_ssidref)
3713 finally:
3714 xen.xend.XendDomain.instance().policy_lock.release()
3716 def get_on_shutdown(self):
3717 after_shutdown = self.info.get('actions_after_shutdown')
3718 if not after_shutdown or after_shutdown not in XEN_API_ON_NORMAL_EXIT:
3719 return XEN_API_ON_NORMAL_EXIT[-1]
3720 return after_shutdown
3722 def get_on_reboot(self):
3723 after_reboot = self.info.get('actions_after_reboot')
3724 if not after_reboot or after_reboot not in XEN_API_ON_NORMAL_EXIT:
3725 return XEN_API_ON_NORMAL_EXIT[-1]
3726 return after_reboot
3728 def get_on_suspend(self):
3729 # TODO: not supported
3730 after_suspend = self.info.get('actions_after_suspend')
3731 if not after_suspend or after_suspend not in XEN_API_ON_NORMAL_EXIT:
3732 return XEN_API_ON_NORMAL_EXIT[-1]
3733 return after_suspend
3735 def get_on_crash(self):
3736 after_crash = self.info.get('actions_after_crash')
3737 if not after_crash or after_crash not in \
3738 XEN_API_ON_CRASH_BEHAVIOUR + restart_modes:
3739 return XEN_API_ON_CRASH_BEHAVIOUR[0]
3740 return XEN_API_ON_CRASH_BEHAVIOUR_FILTER[after_crash]
3742 def get_dev_config_by_uuid(self, dev_class, dev_uuid):
3743 """ Get's a device configuration either from XendConfig or
3744 from the DevController.
3746 @param dev_class: device class, either, 'vbd' or 'vif'
3747 @param dev_uuid: device UUID
3749 @rtype: dictionary
3750 """
3751 dev_type, dev_config = self.info['devices'].get(dev_uuid, (None, None))
3753 # shortcut if the domain isn't started because
3754 # the devcontrollers will have no better information
3755 # than XendConfig.
3756 if self._stateGet() in (XEN_API_VM_POWER_STATE_HALTED,
3757 XEN_API_VM_POWER_STATE_SUSPENDED):
3758 if dev_config:
3759 return copy.deepcopy(dev_config)
3760 return None
3762 # use the dev_type recorded in XendConfig rather than
3763 # the caller-supplied dev_class
3764 controller = self.getDeviceController(dev_type)
3765 if not controller:
3766 return None
3768 all_configs = controller.getAllDeviceConfigurations()
3769 if not all_configs:
3770 return None
3772 updated_dev_config = copy.deepcopy(dev_config)
3773 for _devid, _devcfg in all_configs.items():
3774 if _devcfg.get('uuid') == dev_uuid:
3775 updated_dev_config.update(_devcfg)
3776 updated_dev_config['id'] = _devid
3777 return updated_dev_config
3779 return updated_dev_config
3781 def get_dev_xenapi_config(self, dev_class, dev_uuid):
3782 config = self.get_dev_config_by_uuid(dev_class, dev_uuid)
3783 if not config:
3784 return {}
3786 config['VM'] = self.get_uuid()
3788 if dev_class == 'vif':
3789 if not config.has_key('name'):
3790 config['name'] = config.get('vifname', '')
3791 if not config.has_key('MAC'):
3792 config['MAC'] = config.get('mac', '')
3793 if not config.has_key('type'):
3794 config['type'] = 'paravirtualised'
3795 devid = config.get('id') # also needed below for the utilisation lookup
3796 if not config.has_key('device'):
3797 if devid != None:
3798 config['device'] = 'eth%s' % devid
3799 else:
3800 config['device'] = ''
3802 if not config.has_key('network'):
3803 try:
3804 bridge = config.get('bridge', None)
3805 if bridge is None:
3806 from xen.util import Brctl
3807 if_to_br = dict([(i,b)
3808 for (b,ifs) in Brctl.get_state().items()
3809 for i in ifs])
3810 vifname = "vif%s.%s" % (self.getDomid(),
3811 config.get('id'))
3812 bridge = if_to_br.get(vifname, None)
3813 config['network'] = \
3814 XendNode.instance().bridge_to_network(
3815 bridge).get_uuid() # pass the resolved bridge, including the fallback
3816 except Exception:
3817 log.exception('bridge_to_network')
3818 # Ignore this for now -- it may happen if the device
3819 # has been specified using the legacy methods, but at
3820 # some point we're going to have to figure out how to
3821 # handle that properly.
3823 config['MTU'] = 1500 # TODO
3825 if self._stateGet() not in (XEN_API_VM_POWER_STATE_HALTED,):
3826 xennode = XendNode.instance()
3827 rx_bps, tx_bps = xennode.get_vif_util(self.domid, devid)
3828 config['io_read_kbs'] = rx_bps/1024
3829 config['io_write_kbs'] = tx_bps/1024
3830 rx, tx = xennode.get_vif_stat(self.domid, devid)
3831 config['io_total_read_kbs'] = rx/1024
3832 config['io_total_write_kbs'] = tx/1024
3833 else:
3834 config['io_read_kbs'] = 0.0
3835 config['io_write_kbs'] = 0.0
3836 config['io_total_read_kbs'] = 0.0
3837 config['io_total_write_kbs'] = 0.0
3839 config['security_label'] = config.get('security_label', '')
3841 if dev_class == 'vbd':
3843 if self._stateGet() not in (XEN_API_VM_POWER_STATE_HALTED,):
3844 controller = self.getDeviceController(dev_class)
3845 devid, _1, _2 = controller.getDeviceDetails(config)
3846 xennode = XendNode.instance()
3847 rd_blkps, wr_blkps = xennode.get_vbd_util(self.domid, devid)
3848 config['io_read_kbs'] = rd_blkps
3849 config['io_write_kbs'] = wr_blkps
3850 else:
3851 config['io_read_kbs'] = 0.0
3852 config['io_write_kbs'] = 0.0
3854 config['VDI'] = config.get('VDI', '')
3855 config['device'] = config.get('dev', '')
3856 if config['device'].startswith('ioemu:'):
3857 _, vbd_device = config['device'].split(':', 1)
3858 config['device'] = vbd_device
3859 if ':' in config['device']:
3860 vbd_name, vbd_type = config['device'].split(':', 1)
3861 config['device'] = vbd_name
3862 if vbd_type == 'cdrom':
3863 config['type'] = XEN_API_VBD_TYPE[0]
3864 else:
3865 config['type'] = XEN_API_VBD_TYPE[1]
3867 config['driver'] = 'paravirtualised' # TODO
3868 config['image'] = config.get('uname', '')
3870 if config.get('mode', 'r') == 'r':
3871 config['mode'] = 'RO'
3872 else:
3873 config['mode'] = 'RW'
3875 if dev_class == 'vtpm':
3876 if not config.has_key('type'):
3877 config['type'] = 'paravirtualised' # TODO
3878 if not config.has_key('backend'):
3879 config['backend'] = "00000000-0000-0000-0000-000000000000"
3881 return config
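# One detail worth unpacking from the vif branch above: Brctl.get_state()
# yields a {bridge: [interfaces]} map, which is inverted into
# {interface: bridge} by a nested list comprehension. Standalone, with
# made-up values:

state = {'xenbr0': ['vif1.0', 'vif2.0'], 'xenbr1': ['vif3.0']}
if_to_br = dict([(i, b) for (b, ifs) in state.items() for i in ifs])
assert if_to_br['vif2.0'] == 'xenbr0'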
3883 def get_dev_property(self, dev_class, dev_uuid, field):
3884 config = self.get_dev_xenapi_config(dev_class, dev_uuid)
3885 try:
3886 return config[field]
3887 except KeyError:
3888 raise XendError('Invalid property for device: %s' % field)
3890 def set_dev_property(self, dev_class, dev_uuid, field, value):
3891 self.info['devices'][dev_uuid][1][field] = value
3893 def get_vcpus_util(self):
3894 vcpu_util = {}
3895 xennode = XendNode.instance()
3896 if 'VCPUs_max' in self.info and self.domid != None:
3897 for i in range(0, self.info['VCPUs_max']):
3898 util = xennode.get_vcpu_util(self.domid, i)
3899 vcpu_util[str(i)] = util
3901 return vcpu_util
3903 def get_consoles(self):
3904 return self.info.get('console_refs', [])
3906 def get_vifs(self):
3907 return self.info.get('vif_refs', [])
3909 def get_vbds(self):
3910 return self.info.get('vbd_refs', [])
3912 def get_vtpms(self):
3913 return self.info.get('vtpm_refs', [])
3915 def get_dpcis(self):
3916 return XendDPCI.get_by_VM(self.info.get('uuid'))
3918 def get_dscsis(self):
3919 return XendDSCSI.get_by_VM(self.info.get('uuid'))
3921 def get_dscsi_HBAs(self):
3922 return XendDSCSI_HBA.get_by_VM(self.info.get('uuid'))
3924 def create_vbd(self, xenapi_vbd, vdi_image_path):
3925 """Create a VBD using a VDI from XendStorageRepository.
3927 @param xenapi_vbd: vbd struct from the Xen API
3928 @param vdi_image_path: VDI UUID
3929 @rtype: string
3930 @return: uuid of the device
3931 """
3932 xenapi_vbd['image'] = vdi_image_path
3933 if vdi_image_path.startswith('tap'):
3934 dev_uuid = self.info.device_add('tap2', cfg_xenapi = xenapi_vbd)
3935 else:
3936 dev_uuid = self.info.device_add('vbd', cfg_xenapi = xenapi_vbd)
3938 if not dev_uuid:
3939 raise XendError('Failed to create device')
3941 if self._stateGet() in (XEN_API_VM_POWER_STATE_RUNNING,
3942 XEN_API_VM_POWER_STATE_PAUSED):
3943 _, config = self.info['devices'][dev_uuid]
3945 if vdi_image_path.startswith('tap'):
3946 dev_control = self.getDeviceController('tap2')
3947 else:
3948 dev_control = self.getDeviceController('vbd')
3950 try:
3951 devid = dev_control.createDevice(config)
3952 dev_type = self.getBlockDeviceClass(devid)
3953 self._waitForDevice(dev_type, devid)
3954 self.info.device_update(dev_uuid,
3955 cfg_xenapi = {'devid': devid})
3956 except Exception, exn:
3957 log.exception(exn)
3958 del self.info['devices'][dev_uuid]
3959 self.info['vbd_refs'].remove(dev_uuid)
3960 raise
3962 return dev_uuid
3964 def create_phantom_vbd_with_vdi(self, xenapi_vbd, vdi_image_path):
3965 """Create a VBD using a VDI from XendStorageRepository.
3967 @param xenapi_vbd: vbd struct from the Xen API
3968 @param vdi_image_path: VDI UUID
3969 @rtype: integer
3970 @return: devid of the created device
3971 """
3972 xenapi_vbd['image'] = vdi_image_path
3973 dev_uuid = self.info.phantom_device_add('tap', cfg_xenapi = xenapi_vbd)
3974 if not dev_uuid:
3975 raise XendError('Failed to create device')
3977 if self._stateGet() == XEN_API_VM_POWER_STATE_RUNNING:
3978 _, config = self.info['devices'][dev_uuid]
3979 config['devid'] = self.getDeviceController('tap').createDevice(config)
3981 return config['devid'] # FIXME: only bound while the domain is RUNNING
3983 def create_vif(self, xenapi_vif):
3984 """Create VIF device from the passed struct in Xen API format.
3986 @param xenapi_vif: Xen API VIF Struct.
3987 @rtype: string
3988 @return: UUID
3989 """
3990 dev_uuid = self.info.device_add('vif', cfg_xenapi = xenapi_vif)
3991 if not dev_uuid:
3992 raise XendError('Failed to create device')
3994 if self._stateGet() in (XEN_API_VM_POWER_STATE_RUNNING,
3995 XEN_API_VM_POWER_STATE_PAUSED):
3997 _, config = self.info['devices'][dev_uuid]
3998 dev_control = self.getDeviceController('vif')
4000 try:
4001 devid = dev_control.createDevice(config)
4002 dev_control.waitForDevice(devid)
4003 self.info.device_update(dev_uuid,
4004 cfg_xenapi = {'devid': devid})
4005 except Exception, exn:
4006 log.exception(exn)
4007 del self.info['devices'][dev_uuid]
4008 self.info['vif_refs'].remove(dev_uuid)
4009 raise
4011 return dev_uuid
4013 def create_vtpm(self, xenapi_vtpm):
4014 """Create a VTPM device from the passed struct in Xen API format.
4016 @return: uuid of the device
4017 @rtype: string
4018 """
4020 if self._stateGet() not in (DOM_STATE_HALTED,):
4021 raise VmError("Can only add vTPM to a halted domain.")
4022 if self.get_vtpms() != []:
4023 raise VmError('Domain already has a vTPM.')
4024 dev_uuid = self.info.device_add('vtpm', cfg_xenapi = xenapi_vtpm)
4025 if not dev_uuid:
4026 raise XendError('Failed to create device')
4028 return dev_uuid
4030 def create_console(self, xenapi_console):
4031 """ Create a console device from a Xen API struct.
4033 @return: uuid of device
4034 @rtype: string
4035 """
4036 if self._stateGet() not in (DOM_STATE_HALTED,):
4037 raise VmError("Can only add console to a halted domain.")
4039 dev_uuid = self.info.device_add('console', cfg_xenapi = xenapi_console)
4040 if not dev_uuid:
4041 raise XendError('Failed to create device')
4043 return dev_uuid
4045 def set_console_other_config(self, console_uuid, other_config):
4046 self.info.console_update(console_uuid, 'other_config', other_config)
4048 def create_dpci(self, xenapi_pci):
4049 """Create pci device from the passed struct in Xen API format.
4051 @param xenapi_pci: DPCI struct from Xen API
4052 @rtype: string
4053 @return: UUID of the created DPCI
4056 """
4058 dpci_uuid = uuid.createString()
4060 dpci_opts = []
4061 opts_dict = xenapi_pci.get('options')
4062 for k in opts_dict.keys():
4063 dpci_opts.append([k, opts_dict[k]])
4064 opts_sxp = pci_opts_list_to_sxp(dpci_opts)
4066 # Convert xenapi to sxp
4067 ppci = XendAPIStore.get(xenapi_pci.get('PPCI'), 'PPCI')
4069 dev_sxp = ['dev',
4070 ['domain', '0x%02x' % ppci.get_domain()],
4071 ['bus', '0x%02x' % ppci.get_bus()],
4072 ['slot', '0x%02x' % ppci.get_slot()],
4073 ['func', '0x%1x' % ppci.get_func()],
4074 ['vdevfn', '0x%02x' % xenapi_pci.get('hotplug_slot')],
4075 ['key', xenapi_pci['key']],
4076 ['uuid', dpci_uuid]]
4077 dev_sxp = sxp.merge(dev_sxp, opts_sxp)
4079 target_pci_sxp = ['pci', dev_sxp, ['state', 'Initialising']]
4081 if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:
4083 old_pci_sxp = self._getDeviceInfo_pci(0)
4085 if old_pci_sxp is None:
4086 dev_uuid = self.info.device_add('pci', cfg_sxp = target_pci_sxp)
4087 if not dev_uuid:
4088 raise XendError('Failed to create device')
4090 else:
4091 new_pci_sxp = ['pci']
4092 for existing_dev in sxp.children(old_pci_sxp, 'dev'):
4093 new_pci_sxp.append(existing_dev)
4094 new_pci_sxp.append(sxp.child0(target_pci_sxp, 'dev'))
4096 dev_uuid = sxp.child_value(old_pci_sxp, 'uuid')
4097 self.info.device_update(dev_uuid, new_pci_sxp)
4099 xen.xend.XendDomain.instance().managed_config_save(self)
4101 else:
4102 try:
4103 self.device_configure(target_pci_sxp)
4105 except Exception, exn:
4106 raise XendError('Failed to create device: %s' % exn)
4108 return dpci_uuid
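# Example (values illustrative, not from the upstream file): for a PPCI at
# 0000:03:00.1 hot-plugged into virtual devfn 0x08, the sxp assembled above
# comes out roughly as
#
#   ['pci',
#       ['dev',
#           ['domain', '0x00'], ['bus', '0x03'],
#           ['slot', '0x00'], ['func', '0x1'],
#           ['vdevfn', '0x08'], ['key', '0000:03:00.1'],
#           ['uuid', dpci_uuid],
#           ... any merged (opts ...) entries ...],
#       ['state', 'Initialising']]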
4110 def create_dscsi(self, xenapi_dscsi):
4111 """Create scsi device from the passed struct in Xen API format.
4113 @param xenapi_dscsi: DSCSI struct from Xen API
4114 @rtype: string
4115 @return: UUID
4116 """
4118 dscsi_uuid = uuid.createString()
4120 # Convert xenapi to sxp
4121 pscsi = XendAPIStore.get(xenapi_dscsi.get('PSCSI'), 'PSCSI')
4122 devid = int(xenapi_dscsi.get('virtual_HCTL').split(':')[0])
4123 target_vscsi_sxp = \
4124 ['vscsi',
4125 ['dev',
4126 ['devid', devid],
4127 ['p-devname', pscsi.get_dev_name()],
4128 ['p-dev', pscsi.get_physical_HCTL()],
4129 ['v-dev', xenapi_dscsi.get('virtual_HCTL')],
4130 ['state', xenbusState['Initialising']],
4131 ['uuid', dscsi_uuid]
4132 ],
4133 ['feature-host', 0]
4134 ]
4136 if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:
4138 cur_vscsi_sxp = self._getDeviceInfo_vscsi(devid)
4140 if cur_vscsi_sxp is None:
4141 dev_uuid = self.info.device_add('vscsi', cfg_sxp = target_vscsi_sxp)
4142 if not dev_uuid:
4143 raise XendError('Failed to create device')
4145 else:
4146 new_vscsi_sxp = ['vscsi', ['feature-host', 0]]
4147 for existing_dev in sxp.children(cur_vscsi_sxp, 'dev'):
4148 new_vscsi_sxp.append(existing_dev)
4149 new_vscsi_sxp.append(sxp.child0(target_vscsi_sxp, 'dev'))
4151 dev_uuid = sxp.child_value(cur_vscsi_sxp, 'uuid')
4152 self.info.device_update(dev_uuid, new_vscsi_sxp)
4154 xen.xend.XendDomain.instance().managed_config_save(self)
4156 else:
4157 try:
4158 self.device_configure(target_vscsi_sxp)
4159 except Exception, exn:
4160 log.exception('create_dscsi: %s', exn)
4161 raise XendError('Failed to create device')
4163 return dscsi_uuid
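# The devid above is simply the host part of the colon-separated
# Host:Channel:Target:LUN string. Illustrative:

virtual_HCTL = '0:0:1:0'                 # H:C:T:L, made-up value
devid = int(virtual_HCTL.split(':')[0])  # -> 0, the virtual SCSI host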
4165 def create_dscsi_HBA(self, xenapi_dscsi):
4166 """Create scsi devices from the passed struct in Xen API format.
4168 @param xenapi_dscsi: DSCSI_HBA struct from Xen API
4169 @rtype: string
4170 @return: UUID
4171 """
4173 dscsi_HBA_uuid = uuid.createString()
4175 # Convert xenapi to sxp
4176 feature_host = xenapi_dscsi.get('assignment_mode', 'HOST') == 'HOST' and 1 or 0
4177 target_vscsi_sxp = \
4178 ['vscsi',
4179 ['feature-host', feature_host],
4180 ['uuid', dscsi_HBA_uuid],
4181 ]
4182 pscsi_HBA = XendAPIStore.get(xenapi_dscsi.get('PSCSI_HBA'), 'PSCSI_HBA')
4183 devid = pscsi_HBA.get_physical_host()
4184 for pscsi_uuid in pscsi_HBA.get_PSCSIs():
4185 pscsi = XendAPIStore.get(pscsi_uuid, 'PSCSI')
4186 pscsi_HCTL = pscsi.get_physical_HCTL()
4187 dscsi_uuid = uuid.createString()
4188 dev = \
4189 ['dev',
4190 ['devid', devid],
4191 ['p-devname', pscsi.get_dev_name()],
4192 ['p-dev', pscsi_HCTL],
4193 ['v-dev', pscsi_HCTL],
4194 ['state', xenbusState['Initialising']],
4195 ['uuid', dscsi_uuid]
4196 ]
4197 target_vscsi_sxp.append(dev)
4199 if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:
4200 if not self.info.device_add('vscsi', cfg_sxp = target_vscsi_sxp):
4201 raise XendError('Failed to create device')
4202 xen.xend.XendDomain.instance().managed_config_save(self)
4203 else:
4204 try:
4205 self.device_configure(target_vscsi_sxp)
4206 except Exception, exn:
4207 log.exception('create_dscsi_HBA: %s', exn)
4208 raise XendError('Failed to create device')
4210 return dscsi_HBA_uuid
4213 def change_vdi_of_vbd(self, xenapi_vbd, vdi_image_path):
4214 """Change current VDI with the new VDI.
4216 @param xenapi_vbd: vbd struct from the Xen API
4217 @param vdi_image_path: path of VDI
4218 """
4219 dev_uuid = xenapi_vbd['uuid']
4220 if dev_uuid not in self.info['devices']:
4221 raise XendError('Device does not exist')
4223 # Convert xenapi to sxp
4224 if vdi_image_path.startswith('tap'):
4225 dev_class = 'tap'
4226 else:
4227 dev_class = 'vbd'
4228 dev_sxp = [
4229 dev_class,
4230 ['uuid', dev_uuid],
4231 ['uname', vdi_image_path],
4232 ['dev', '%s:cdrom' % xenapi_vbd['device']],
4233 ['mode', 'r'],
4234 ['VDI', xenapi_vbd['VDI']]
4235 ]
4237 if self._stateGet() in (XEN_API_VM_POWER_STATE_RUNNING,
4238 XEN_API_VM_POWER_STATE_PAUSED):
4239 self.device_configure(dev_sxp)
4240 else:
4241 self.info.device_update(dev_uuid, dev_sxp)
4244 def destroy_device_by_uuid(self, dev_type, dev_uuid):
4245 if dev_uuid not in self.info['devices']:
4246 raise XendError('Device does not exist')
4248 try:
4249 if self._stateGet() in (XEN_API_VM_POWER_STATE_RUNNING,
4250 XEN_API_VM_POWER_STATE_PAUSED):
4251 _, config = self.info['devices'][dev_uuid]
4252 devid = config.get('devid')
4253 if devid != None:
4254 self.getDeviceController(dev_type).destroyDevice(devid, force = False)
4255 else:
4256 raise XendError('Unable to get devid for device: %s:%s' %
4257 (dev_type, dev_uuid))
4258 finally:
4259 del self.info['devices'][dev_uuid]
4260 self.info['%s_refs' % dev_type].remove(dev_uuid)
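# Note the try/finally above: the bookkeeping teardown runs even when
# destroyDevice() raises, so a failed hot-unplug still drops the device
# from info['devices'] and from the per-class refs list. The pattern
# reduced to its essentials (all names here are stand-ins):

def forget_device(registry, refs, key, teardown):
    try:
        teardown()          # may raise
    finally:
        del registry[key]   # removed on success *and* on failure
        refs.remove(key)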
4262 def destroy_vbd(self, dev_uuid):
4263 self.destroy_device_by_uuid('vbd', dev_uuid)
4265 def destroy_vif(self, dev_uuid):
4266 self.destroy_device_by_uuid('vif', dev_uuid)
4268 def destroy_vtpm(self, dev_uuid):
4269 self.destroy_device_by_uuid('vtpm', dev_uuid)
4271 def destroy_dpci(self, dev_uuid):
4273 dpci = XendAPIStore.get(dev_uuid, 'DPCI')
4274 ppci = XendAPIStore.get(dpci.get_PPCI(), 'PPCI')
4276 old_pci_sxp = self._getDeviceInfo_pci(0)
4277 dev_uuid = sxp.child_value(old_pci_sxp, 'uuid')
4278 target_dev = None
4279 new_pci_sxp = ['pci']
4280 for dev in sxp.children(old_pci_sxp, 'dev'):
4281 pci_dev = {}
4282 pci_dev['domain'] = sxp.child_value(dev, 'domain')
4283 pci_dev['bus'] = sxp.child_value(dev, 'bus')
4284 pci_dev['slot'] = sxp.child_value(dev, 'slot')
4285 pci_dev['func'] = sxp.child_value(dev, 'func')
4286 if ppci.get_name() == pci_dict_to_bdf_str(pci_dev):
4287 target_dev = dev
4288 else:
4289 new_pci_sxp.append(dev)
4291 if target_dev is None:
4292 raise XendError('Failed to destroy device')
4294 target_pci_sxp = ['pci', target_dev, ['state', 'Closing']]
4296 if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:
4298 self.info.device_update(dev_uuid, new_pci_sxp)
4299 if len(sxp.children(new_pci_sxp, 'dev')) == 0:
4300 del self.info['devices'][dev_uuid]
4301 xen.xend.XendDomain.instance().managed_config_save(self)
4303 else:
4304 try:
4305 self.device_configure(target_pci_sxp)
4307 except Exception, exn:
4308 raise XendError('Failed to destroy device: %s' % exn)
4310 def destroy_dscsi(self, dev_uuid):
4311 dscsi = XendAPIStore.get(dev_uuid, 'DSCSI')
4312 devid = dscsi.get_virtual_host()
4313 vHCTL = dscsi.get_virtual_HCTL()
4314 cur_vscsi_sxp = self._getDeviceInfo_vscsi(devid)
4315 dev_uuid = sxp.child_value(cur_vscsi_sxp, 'uuid')
4317 target_dev = None
4318 new_vscsi_sxp = ['vscsi', ['feature-host', 0]]
4319 for dev in sxp.children(cur_vscsi_sxp, 'dev'):
4320 if vHCTL == sxp.child_value(dev, 'v-dev'):
4321 target_dev = dev
4322 else:
4323 new_vscsi_sxp.append(dev)
4325 if target_dev is None:
4326 raise XendError('Failed to destroy device')
4328 target_dev.append(['state', xenbusState['Closing']])
4329 target_vscsi_sxp = ['vscsi', target_dev, ['feature-host', 0]]
4331 if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:
4333 self.info.device_update(dev_uuid, new_vscsi_sxp)
4334 if len(sxp.children(new_vscsi_sxp, 'dev')) == 0:
4335 del self.info['devices'][dev_uuid]
4336 xen.xend.XendDomain.instance().managed_config_save(self)
4338 else:
4339 try:
4340 self.device_configure(target_vscsi_sxp)
4341 except Exception, exn:
4342 log.exception('destroy_dscsi: %s', exn)
4343 raise XendError('Failed to destroy device')
4345 def destroy_dscsi_HBA(self, dev_uuid):
4346 dscsi_HBA = XendAPIStore.get(dev_uuid, 'DSCSI_HBA')
4347 devid = dscsi_HBA.get_virtual_host()
4348 cur_vscsi_sxp = self._getDeviceInfo_vscsi(devid)
4349 feature_host = sxp.child_value(cur_vscsi_sxp, 'feature-host')
4351 if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:
4352 new_vscsi_sxp = ['vscsi', ['feature-host', feature_host]]
4353 self.info.device_update(dev_uuid, new_vscsi_sxp)
4354 del self.info['devices'][dev_uuid]
4355 xen.xend.XendDomain.instance().managed_config_save(self)
4356 else:
4357 # If feature_host is 1, all devices are destroyed by just
4358 # one reconfiguration.
4359 # If feature_host is 0, each device must be reconfigured
4360 # one by one to destroy them all.
4361 # See reconfigureDevice@VSCSIController.
4362 for dev in sxp.children(cur_vscsi_sxp, 'dev'):
4363 target_vscsi_sxp = [
4364 'vscsi',
4365 dev + [['state', xenbusState['Closing']]],
4366 ['feature-host', feature_host]
4367 ]
4368 try:
4369 self.device_configure(target_vscsi_sxp)
4370 except Exception, exn:
4371 log.exception('destroy_dscsi_HBA: %s', exn)
4372 raise XendError('Failed to destroy device')
4373 if feature_host:
4374 break
4376 def destroy_xapi_instances(self):
4377 """Destroy Xen-API instances stored in XendAPIStore.
4378 """
4379 # Xen-API classes based on XendBase have their instances stored
4380 # in XendAPIStore. Clean up these instances here, if they are supposed
4381 # to be destroyed when the parent domain is dead.
4383 # Most of the virtual devices (vif, vbd, vfb, etc) are not based on
4384 # XendBase and there's no need to remove them from XendAPIStore.
4386 from xen.xend import XendDomain
4387 if XendDomain.instance().is_valid_vm(self.info.get('uuid')):
4388 # domain still exists.
4389 return
4391 # Destroy the VMMetrics instance.
4392 if XendAPIStore.get(self.metrics.get_uuid(), self.metrics.getClass()) \
4393 is not None:
4394 self.metrics.destroy()
4396 # Destroy DPCI instances.
4397 for dpci_uuid in XendDPCI.get_by_VM(self.info.get('uuid')):
4398 XendAPIStore.deregister(dpci_uuid, "DPCI")
4400 # Destroy DSCSI instances.
4401 for dscsi_uuid in XendDSCSI.get_by_VM(self.info.get('uuid')):
4402 XendAPIStore.deregister(dscsi_uuid, "DSCSI")
4404 # Destroy DSCSI_HBA instances.
4405 for dscsi_HBA_uuid in XendDSCSI_HBA.get_by_VM(self.info.get('uuid')):
4406 XendAPIStore.deregister(dscsi_HBA_uuid, "DSCSI_HBA")
4408 def has_device(self, dev_class, dev_uuid):
4409 return (dev_uuid in self.info['%s_refs' % dev_class.lower()])
4411 def __str__(self):
4412 return '<domain id=%s name=%s memory=%s state=%s>' % \
4413 (str(self.domid), self.info['name_label'],
4414 str(self.info['memory_dynamic_max']), DOM_STATES[self._stateGet()])
4416 __repr__ = __str__
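# Example (illustrative, not from the upstream file): a running domain
# with domid 7, name_label 'web' and memory_dynamic_max 524288 renders as
#
#   <domain id=7 name=web memory=524288 state=running>
#
# where the state field is whatever DOM_STATES maps the current
# _stateGet() value to.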