tools/python/xen/xend/XendDomainInfo.py @ 20932:2a775968c7a1

xend: Disallow "/" in domain names

Signed-off-by: Masaki Kanno <kanno.masaki@jp.fujitsu.com>
Author: Keir Fraser <keir.fraser@citrix.com>
Date: Wed Feb 03 09:45:02 2010 +0000
#============================================================================
# This library is free software; you can redistribute it and/or
# modify it under the terms of version 2.1 of the GNU Lesser General Public
# License as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#============================================================================
# Copyright (C) 2004, 2005 Mike Wray <mike.wray@hp.com>
# Copyright (C) 2005-2007 XenSource Ltd
#============================================================================
19 """Representation of a single domain.
20 Includes support for domain construction, using
21 open-ended configurations.
23 Author: Mike Wray <mike.wray@hp.com>
25 """
import logging
import time
import threading
import thread
import re
import copy
import os
import stat
import traceback
from types import StringTypes

import xen.lowlevel.xc
from xen.util import asserts, auxbin
from xen.util.blkif import blkdev_uname_to_file, blkdev_uname_to_taptype
import xen.util.xsm.xsm as security
from xen.util import xsconstants
from xen.util import mkdir
from xen.util.pci import serialise_pci_opts, pci_opts_list_to_sxp, \
     append_default_pci_opts, \
     pci_dict_to_bdf_str, pci_dict_to_xc_str, \
     pci_convert_sxp_to_dict, pci_convert_dict_to_sxp, \
     pci_dict_cmp, PCI_DEVFN, PCI_SLOT, PCI_FUNC, parse_hex
from xen.xend import balloon, sxp, uuid, image, arch
from xen.xend import XendOptions, XendNode, XendConfig

from xen.xend.XendConfig import scrub_password
from xen.xend.XendBootloader import bootloader, bootloader_tidy
from xen.xend.XendError import XendError, VmError
from xen.xend.XendDevices import XendDevices
from xen.xend.XendTask import XendTask
from xen.xend.xenstore.xstransact import xstransact, complete
from xen.xend.xenstore.xsutil import GetDomainPath, IntroduceDomain, SetTarget, ResumeDomain
from xen.xend.xenstore.xswatch import xswatch
from xen.xend.XendConstants import *
from xen.xend.XendAPIConstants import *
from xen.xend.server.DevConstants import xenbusState
from xen.xend.server.BlktapController import TAPDISK_DEVICE, parseDeviceString

from xen.xend.XendVMMetrics import XendVMMetrics

from xen.xend import XendAPIStore
from xen.xend.XendPPCI import XendPPCI
from xen.xend.XendDPCI import XendDPCI
from xen.xend.XendPSCSI import XendPSCSI
from xen.xend.XendDSCSI import XendDSCSI, XendDSCSI_HBA

MIGRATE_TIMEOUT = 30.0
BOOTLOADER_LOOPBACK_DEVICE = '/dev/xvdp'

xc = xen.lowlevel.xc.xc()
xoptions = XendOptions.instance()

log = logging.getLogger("xend.XendDomainInfo")
#log.setLevel(logging.TRACE)
def create(config):
    """Create and start a VM using the supplied configuration.

    @param config: A configuration object involving lists of tuples.
    @type  config: list of lists, eg ['vm', ['image', 'xen.gz']]

    @rtype:  XendDomainInfo
    @return: An up and running XendDomainInfo instance
    @raise VmError: Invalid configuration or failure to start.
    """
    from xen.xend import XendDomain
    domconfig = XendConfig.XendConfig(sxp_obj = config)
    othervm = XendDomain.instance().domain_lookup_nr(domconfig["name_label"])
    if othervm is None or othervm.domid is None:
        othervm = XendDomain.instance().domain_lookup_nr(domconfig["uuid"])
    if othervm is not None and othervm.domid is not None:
        raise VmError("Domain '%s' already exists with ID '%d'" % (domconfig["name_label"], othervm.domid))
    log.debug("XendDomainInfo.create(%s)", scrub_password(config))
    vm = XendDomainInfo(domconfig)
    try:
        vm.start()
    except:
        log.exception('Domain construction failed')
        vm.destroy()
        raise

    return vm
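
# Editorial example (not part of the original file): a minimal sketch of how
# xm-level code might drive create().  The exact SXP keys shown here are
# assumptions made for illustration only.
#
#     from xen.xend import sxp, XendDomainInfo
#     cfg = sxp.from_string("(vm (name_label guest1) (memory 256))")
#     vm = XendDomainInfo.create(cfg)   # constructs and starts the guest
#     print vm.getDomid()               # domain id once it is running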

def create_from_dict(config_dict):
    """Create and start a VM using the supplied configuration.

    @param config_dict: A configuration dictionary.

    @rtype:  XendDomainInfo
    @return: An up and running XendDomainInfo instance
    @raise VmError: Invalid configuration or failure to start.
    """

    log.debug("XendDomainInfo.create_from_dict(%s)",
              scrub_password(config_dict))
    vm = XendDomainInfo(XendConfig.XendConfig(xapi = config_dict))
    try:
        vm.start()
    except:
        log.exception('Domain construction failed')
        vm.destroy()
        raise
    return vm

def recreate(info, priv):
    """Create the VM object for an existing domain.  The domain must not
    be dying, as the paths in the store should already have been removed,
    and asking us to recreate them causes problems.

    @param xeninfo: Parsed configuration
    @type  xeninfo: Dictionary
    @param priv: Is a privileged domain (Dom 0)
    @type  priv: bool

    @rtype:  XendDomainInfo
    @return: An up and running XendDomainInfo instance
    @raise VmError:   Invalid configuration.
    @raise XendError: Errors with configuration.
    """

    log.debug("XendDomainInfo.recreate(%s)", scrub_password(info))

    assert not info['dying']

    xeninfo = XendConfig.XendConfig(dominfo = info)
    xeninfo['is_control_domain'] = priv
    xeninfo['is_a_template'] = False
    xeninfo['auto_power_on'] = False
    domid = xeninfo['domid']
    uuid1 = uuid.fromString(xeninfo['uuid'])
    needs_reinitialising = False

    dompath = GetDomainPath(domid)
    if not dompath:
        raise XendError('No domain path in store for existing '
                        'domain %d' % domid)

    log.info("Recreating domain %d, UUID %s, at %s" %
             (domid, xeninfo['uuid'], dompath))

    # need to verify the path and uuid if not Domain-0
    # if the required uuid and vm aren't set, then that means
    # we need to recreate the dom with our own values
    #
    # NOTE: this is probably not desirable, really we should just
    # abort or ignore, but there may be cases where xenstore's
    # entry disappears (eg. xenstore-rm /)
    #
    try:
        vmpath = xstransact.Read(dompath, "vm")
        if not vmpath:
            if not priv:
                log.warn('/local/domain/%d/vm is missing. recreate is '
                         'confused, trying our best to recover' % domid)
            needs_reinitialising = True
            raise XendError('reinit')

        uuid2_str = xstransact.Read(vmpath, "uuid")
        if not uuid2_str:
            log.warn('%s/uuid/ is missing. recreate is confused, '
                     'trying our best to recover' % vmpath)
            needs_reinitialising = True
            raise XendError('reinit')

        uuid2 = uuid.fromString(uuid2_str)
        if uuid1 != uuid2:
            log.warn('UUID in /vm does not match the UUID in /dom/%d. '
                     'Trying our best to recover' % domid)
            needs_reinitialising = True
    except XendError:
        pass # our best shot at 'goto' in python :)

    vm = XendDomainInfo(xeninfo, domid, dompath, augment = True, priv = priv,
                        vmpath = vmpath)

    if needs_reinitialising:
        vm._recreateDom()
        vm._removeVm()
        vm._storeVmDetails()
        vm._storeDomDetails()

    vm.image = image.create(vm, vm.info)
    vm.image.recreate()

    vm._registerWatches()
    vm.refreshShutdown(xeninfo)

    # register the domain in the list
    from xen.xend import XendDomain
    XendDomain.instance().add_domain(vm)

    return vm

def restore(config):
    """Create a domain and a VM object to do a restore.

    @param config: Domain SXP configuration
    @type  config: list of lists. (see C{create})

    @rtype:  XendDomainInfo
    @return: An up and running XendDomainInfo instance
    @raise VmError:   Invalid configuration or failure to start.
    @raise XendError: Errors with configuration.
    """

    log.debug("XendDomainInfo.restore(%s)", scrub_password(config))
    vm = XendDomainInfo(XendConfig.XendConfig(sxp_obj = config),
                        resume = True)
    try:
        vm.resume()
        return vm
    except:
        vm.destroy()
        raise

def createDormant(domconfig):
    """Create a dormant/inactive XendDomainInfo without creating the VM.
    This is for creating instances of persistent domains that are not
    yet started.

    @param domconfig: Parsed configuration
    @type  domconfig: XendConfig object

    @rtype:  XendDomainInfo
    @return: A dormant XendDomainInfo instance
    @raise XendError: Errors with configuration.
    """

    log.debug("XendDomainInfo.createDormant(%s)", scrub_password(domconfig))

    # domid does not make sense for non-running domains.
    domconfig.pop('domid', None)
    vm = XendDomainInfo(domconfig)
    return vm

def domain_by_name(name):
    """Get domain by name

    @param name: Name of the domain
    @type  name: string
    @return: XendDomainInfo or None
    """
    from xen.xend import XendDomain
    return XendDomain.instance().domain_lookup_by_name_nr(name)

def shutdown_reason(code):
    """Get a shutdown reason from a code.

    @param code: shutdown code
    @type  code: int
    @return: shutdown reason
    @rtype:  string
    """
    return DOMAIN_SHUTDOWN_REASONS.get(code, "?")
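
# Editorial example: shutdown_reason() is a plain lookup into
# DOMAIN_SHUTDOWN_REASONS with "?" as the fallback.  Assuming the usual
# constants (0 poweroff, 1 reboot, 2 suspend, 3 crash), then:
#
#     shutdown_reason(1)    # -> 'reboot'
#     shutdown_reason(99)   # -> '?'   (unknown code)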

def dom_get(dom):
    """Get info from xen for an existing domain.

    @param dom: domain id
    @type  dom: int
    @return: info or None
    @rtype:  dictionary
    """
    try:
        domlist = xc.domain_getinfo(dom, 1)
        if domlist and dom == domlist[0]['domid']:
            return domlist[0]
    except Exception, err:
        # ignore missing domain
        log.trace("domain_getinfo(%d) failed, ignoring: %s", dom, str(err))
    return None
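
# Editorial example (hypothetical caller, not in the original file):
# dom_get() returns the raw xc.domain_getinfo() dictionary, so callers
# typically probe fields such as 'dying' or 'shutdown':
#
#     info = dom_get(5)
#     if info is not None and not info['dying']:
#         pass  # domain 5 exists and is not being torn down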

from xen.xend.server.pciif import parse_pci_name, PciDevice,\
    get_assigned_pci_devices, get_all_assigned_pci_devices


def do_FLR(domid, is_hvm):
    dev_str_list = get_assigned_pci_devices(domid)

    for dev_str in dev_str_list:
        try:
            dev = PciDevice(parse_pci_name(dev_str))
        except Exception, e:
            raise VmError("pci: failed to locate device and "+
                          "parse its resources - "+str(e))
        dev.do_FLR(is_hvm, xoptions.get_pci_dev_assign_strict_check())
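
# Editorial note: do_FLR() performs a Function Level Reset of every PCI
# device still assigned to the given domain; the domain-destruction path is
# the expected caller.  A hypothetical invocation (names assumed):
#
#     do_FLR(vm.getDomid(), vm.info.is_hvm())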

class XendDomainInfo:
    """An object representing a domain.

    @TODO: try to unify dom and domid, they mean the same thing, but
           xc refers to it as dom, and everywhere else, including
           xenstore it is domid. The best way is to change xc's
           python interface.

    @ivar info: Parsed configuration
    @type info: dictionary
    @ivar domid: Domain ID (if VM has started)
    @type domid: int or None
    @ivar guest_bitsize: the bitsize of guest
    @type guest_bitsize: int or None
    @ivar alloc_mem: the memory domain allocated when booting
    @type alloc_mem: int or None
    @ivar vmpath: XenStore path to this VM.
    @type vmpath: string
    @ivar dompath: XenStore path to this Domain.
    @type dompath: string
    @ivar image:  Reference to the VM Image.
    @type image:  xen.xend.image.ImageHandler
    @ivar store_port: event channel to xenstored
    @type store_port: int
    @ivar console_port: event channel to xenconsoled
    @type console_port: int
    @ivar store_mfn: xenstored mfn
    @type store_mfn: int
    @ivar console_mfn: xenconsoled mfn
    @type console_mfn: int
    @ivar notes: OS image notes
    @type notes: dictionary
    @ivar vmWatch: reference to a watch on the xenstored vmpath
    @type vmWatch: xen.xend.xenstore.xswatch
    @ivar shutdownWatch: reference to watch on the xenstored domain shutdown
    @type shutdownWatch: xen.xend.xenstore.xswatch
    @ivar shutdownStartTime: UNIX Time when domain started shutting down.
    @type shutdownStartTime: float or None
    @ivar restart_in_progress: Is a domain restart thread running?
    @type restart_in_progress: bool
    # @ivar state: Domain state
    # @type state: enum(DOM_STATE_HALTED, DOM_STATE_RUNNING, ...)
    @ivar state_updated: lock for self.state
    @type state_updated: threading.Condition
    @ivar refresh_shutdown_lock: lock for polling shutdown state
    @type refresh_shutdown_lock: threading.Condition
    @ivar _deviceControllers: device controller cache for this domain
    @type _deviceControllers: dict 'string' to DevControllers
    """

    def __init__(self, info, domid = None, dompath = None, augment = False,
                 priv = False, resume = False, vmpath = None):
        """Constructor for a domain

        @param   info: parsed configuration
        @type    info: dictionary
        @keyword domid: Set initial domain id (if any)
        @type    domid: int
        @keyword dompath: Set initial dompath (if any)
        @type    dompath: string
        @keyword augment: Augment given info with xenstored VM info
        @type    augment: bool
        @keyword priv: Is a privileged domain (Dom 0)
        @type    priv: bool
        @keyword resume: Is this domain being resumed?
        @type    resume: bool
        """

        self.info = info
        if domid is None:
            self.domid = self.info.get('domid')
        else:
            self.domid = domid
        self.guest_bitsize = None
        self.alloc_mem = None

        maxmem = self.info.get('memory_static_max', 0)
        memory = self.info.get('memory_dynamic_max', 0)

        if maxmem > memory:
            self.pod_enabled = True
        else:
            self.pod_enabled = False

        #REMOVE: uuid is now generated in XendConfig
        #if not self._infoIsSet('uuid'):
        #    self.info['uuid'] = uuid.toString(uuid.create())

        # Find a unique /vm/<uuid>/<integer> path if not specified.
        # This avoids conflict between pre-/post-migrate domains when doing
        # localhost relocation.
        self.vmpath = vmpath
        i = 0
        while self.vmpath is None:
            self.vmpath = XS_VMROOT + self.info['uuid']
            if i != 0:
                self.vmpath = self.vmpath + '-' + str(i)
            try:
                if self._readVm("uuid"):
                    self.vmpath = None
                    i = i + 1
            except:
                pass

        self.dompath = dompath

        self.image = None
        self.store_port = None
        self.store_mfn = None
        self.console_port = None
        self.console_mfn = None

        self.native_protocol = None

        self.vmWatch = None
        self.shutdownWatch = None
        self.shutdownStartTime = None
        self._resume = resume
        self.restart_in_progress = False

        self.state_updated = threading.Condition()
        self.refresh_shutdown_lock = threading.Condition()
        self._stateSet(DOM_STATE_HALTED)

        self._deviceControllers = {}

        for state in DOM_STATES_OLD:
            self.info[state] = 0

        if augment:
            self._augmentInfo(priv)

        self._checkName(self.info['name_label'])

        self.metrics = XendVMMetrics(uuid.createString(), self)
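
    # Editorial note on pod_enabled above (a sketch, not original code):
    # populate-on-demand is inferred purely from the configured memory
    # values, e.g. memory_static_max = 1024 MiB with memory_dynamic_max =
    # 512 MiB boots the guest in PoD mode, and iommu_check_pod_mode() below
    # will then refuse PCI passthrough for this domain.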

    #
    # Public functions available through XMLRPC
    #


    def start(self, is_managed = False):
        """Attempts to start the VM by doing the appropriate
        initialisation if it has not been started.
        """
        from xen.xend import XendDomain

        if self._stateGet() in (XEN_API_VM_POWER_STATE_HALTED, XEN_API_VM_POWER_STATE_SUSPENDED, XEN_API_VM_POWER_STATE_CRASHED):
            try:
                XendTask.log_progress(0, 30, self._constructDomain)
                XendTask.log_progress(31, 60, self._initDomain)

                XendTask.log_progress(61, 70, self._storeVmDetails)
                XendTask.log_progress(71, 80, self._storeDomDetails)
                XendTask.log_progress(81, 90, self._registerWatches)
                XendTask.log_progress(91, 100, self.refreshShutdown)

                xendomains = XendDomain.instance()

                # save running configuration if XendDomains believe domain is
                # persistent
                if is_managed:
                    xendomains.managed_config_save(self)
            except:
                log.exception('VM start failed')
                self.destroy()
                raise
        else:
            raise XendError('VM already running')

    def resume(self):
        """Resumes a domain that has come back from suspension."""
        state = self._stateGet()
        if state in (DOM_STATE_SUSPENDED, DOM_STATE_HALTED):
            try:
                self._constructDomain()

                try:
                    self._setCPUAffinity()
                except:
                    # usually a CPU we want to set affinity to does not exist
                    # we just ignore it so that the domain can still be restored
                    log.warn("Cannot restore CPU affinity")

                self._setSchedParams()
                self._storeVmDetails()
                self._createChannels()
                self._createDevices()
                self._storeDomDetails()
                self._endRestore()
            except:
                log.exception('VM resume failed')
                self.destroy()
                raise
        else:
            raise XendError('VM is not suspended; it is %s'
                            % XEN_API_VM_POWER_STATE[state])

    def shutdown(self, reason):
        """Shutdown a domain by signalling this via xenstored."""
        log.debug('XendDomainInfo.shutdown(%s)', reason)
        if self._stateGet() in (DOM_STATE_SHUTDOWN, DOM_STATE_HALTED,):
            raise XendError('Domain cannot be shutdown')

        if self.domid == 0:
            raise XendError('Domain 0 cannot be shutdown')

        if reason not in DOMAIN_SHUTDOWN_REASONS.values():
            raise XendError('Invalid reason: %s' % reason)
        self.storeDom("control/shutdown", reason)

        # HVM domain shuts itself down only if it has PV drivers
        if self.info.is_hvm():
            hvm_pvdrv = xc.hvm_get_param(self.domid, HVM_PARAM_CALLBACK_IRQ)
            hvm_s_state = xc.hvm_get_param(self.domid, HVM_PARAM_ACPI_S_STATE)
            if not hvm_pvdrv or hvm_s_state != 0:
                code = REVERSE_DOMAIN_SHUTDOWN_REASONS[reason]
                log.info("HVM save:remote shutdown dom %d!", self.domid)
                xc.domain_shutdown(self.domid, code)
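
    # Editorial example (hypothetical): a reboot request travels the same
    # xenstore path as any other reason string:
    #
    #     vm.shutdown('reboot')   # writes 'reboot' to control/shutdown; a
    #                             # PV-aware guest sees the watch fire and
    #                             # shuts down with that reason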

    def pause(self):
        """Pause domain

        @raise XendError: Failed pausing a domain
        """
        try:
            if self.domid:
                # get all blktap2 devices
                dev = xstransact.List(self.vmpath + '/device/tap2')
                for x in dev:
                    path = self.getDeviceController('tap2').readBackend(x, 'params')
                    if path and path.startswith(TAPDISK_DEVICE):
                        try:
                            _minor, _dev, ctrl = parseDeviceString(path)
                            # pause the disk
                            f = open(ctrl + '/pause', 'w')
                            f.write('pause')
                            f.close()
                        except:
                            pass
        except Exception, ex:
            log.warn('Could not pause blktap disk.')

        try:
            xc.domain_pause(self.domid)
            self._stateSet(DOM_STATE_PAUSED)
        except Exception, ex:
            log.exception(ex)
            raise XendError("Domain unable to be paused: %s" % str(ex))

    def unpause(self):
        """Unpause domain

        @raise XendError: Failed unpausing a domain
        """
        try:
            if self.domid:
                dev = xstransact.List(self.vmpath + '/device/tap2')
                for x in dev:
                    path = self.getDeviceController('tap2').readBackend(x, 'params')
                    if path and path.startswith(TAPDISK_DEVICE):
                        try:
                            # Figure out the sysfs path.
                            _minor, _dev, ctrl = parseDeviceString(path)
                            # unpause the disk
                            if os.path.exists(ctrl + '/resume'):
                                f = open(ctrl + '/resume', 'w')
                                f.write('resume')
                                f.close()
                        except:
                            pass

        except Exception, ex:
            log.warn('Could not unpause blktap disk: %s' % str(ex))

        try:
            xc.domain_unpause(self.domid)
            self._stateSet(DOM_STATE_RUNNING)
        except Exception, ex:
            log.exception(ex)
            raise XendError("Domain unable to be unpaused: %s" % str(ex))

    def send_sysrq(self, key):
        """ Send a Sysrq equivalent key via xenstored."""
        if self._stateGet() not in (DOM_STATE_RUNNING, DOM_STATE_PAUSED):
            raise XendError("Domain '%s' is not started" % self.info['name_label'])

        asserts.isCharConvertible(key)
        self.storeDom("control/sysrq", '%c' % key)
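
    # Editorial example (hypothetical): ask the guest to sync its discs via
    # the magic SysRq key 's':
    #
    #     vm.send_sysrq(ord('s'))   # guest reads 's' from control/sysrq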

    def pci_device_configure_boot(self):

        if not self.info.is_hvm():
            return

        devid = '0'
        first = True
        dev_info = self._getDeviceInfo_pci(devid)
        if dev_info is None:
            return

        # get the virtual slot info from xenstore
        dev_uuid = sxp.child_value(dev_info, 'uuid')
        pci_conf = self.info['devices'][dev_uuid][1]
        pci_devs = pci_conf['devs']

        # Keep a set of keys that are done rather than
        # just iterating through set(map(..., pci_devs))
        # to preserve any order information present.
        done = set()
        for key in map(lambda x: x['key'], pci_devs):
            if key in done:
                continue
            done |= set([key])
            dev = filter(lambda x: x['key'] == key, pci_devs)

            head_dev = dev.pop()
            dev_sxp = pci_convert_dict_to_sxp(head_dev, 'Initialising',
                                              'Booting')
            self.pci_device_configure(dev_sxp, first_dev = first)
            first = False

            # That is all for single-function virtual devices
            if len(dev) == 0:
                continue

            if int(head_dev['vdevfn'], 16) & AUTO_PHP_SLOT:
                new_dev_info = self._getDeviceInfo_pci(devid)
                if new_dev_info is None:
                    continue
                new_dev_uuid = sxp.child_value(new_dev_info, 'uuid')
                new_pci_conf = self.info['devices'][new_dev_uuid][1]
                new_pci_devs = new_pci_conf['devs']

                new_head_dev = filter(lambda x: pci_dict_cmp(x, head_dev),
                                      new_pci_devs)[0]

                if int(new_head_dev['vdevfn'], 16) & AUTO_PHP_SLOT:
                    continue

                vdevfn = PCI_SLOT(int(new_head_dev['vdevfn'], 16))
                new_dev = []
                for i in dev:
                    i['vdevfn'] = '0x%02x' % \
                                  PCI_DEVFN(vdevfn,
                                            PCI_FUNC(int(i['vdevfn'], 16)))
                    new_dev.append(i)

                dev = new_dev

            for i in dev:
                dev_sxp = pci_convert_dict_to_sxp(i, 'Initialising', 'Booting')
                self.pci_device_configure(dev_sxp)

    def hvm_pci_device_create(self, dev_config):
        log.debug("XendDomainInfo.hvm_pci_device_create: %s"
                  % scrub_password(dev_config))

        if not self.info.is_hvm():
            raise VmError("hvm_pci_device_create called on non-HVM guest")

        # all the PCI devs share one conf node
        devid = '0'

        new_dev = dev_config['devs'][0]
        dev_info = self._getDeviceInfo_pci(devid)  # from self.info['devices']

        # check for conflicts before triggering the hotplug event
        if dev_info is not None:
            dev_uuid = sxp.child_value(dev_info, 'uuid')
            pci_conf = self.info['devices'][dev_uuid][1]
            pci_devs = pci_conf['devs']
            for x in pci_devs:
                if (int(x['vdevfn'], 16) == int(new_dev['vdevfn'], 16) and
                    not int(x['vdevfn'], 16) & AUTO_PHP_SLOT):
                    raise VmError("vdevfn %s already has a device." %
                                  (new_dev['vdevfn']))

                if (pci_dict_cmp(x, new_dev)):
                    raise VmError("device is already inserted")

        # Test whether the devices can be assigned.
        self.pci_dev_check_attachability_and_do_FLR(new_dev)

        return self.hvm_pci_device_insert_dev(new_dev)

    def iommu_check_pod_mode(self):
        """ Disallow PCI device assignment if pod is enabled. """
        if self.pod_enabled:
            raise VmError("failed to assign device since pod is enabled")

    def pci_dev_check_assignability_and_do_FLR(self, config):
        """ In the case of static device assignment (i.e., the 'pci' string
        in the guest config file), we check whether the device(s) specified
        in 'pci' can be assigned to the guest; if yes, we do_FLR the
        device(s).
        """

        self.iommu_check_pod_mode()
        pci_dev_ctrl = self.getDeviceController('pci')
        return pci_dev_ctrl.dev_check_assignability_and_do_FLR(config)

    def pci_dev_check_attachability_and_do_FLR(self, new_dev):
        """ In the case of dynamic device assignment (i.e., xm pci-attach),
        we check whether the device can be attached to the guest; if yes, we
        do_FLR the device.
        """

        self.iommu_check_pod_mode()

        # Test whether the devices can be assigned

        pci_name = pci_dict_to_bdf_str(new_dev)
        _all_assigned_pci_devices = get_all_assigned_pci_devices(self.domid)
        if pci_name in _all_assigned_pci_devices:
            raise VmError("failed to assign device %s that has"
                          " already been assigned to another domain." % pci_name)

        # Test whether the device is owned by pciback or pci-stub.
        try:
            pci_device = PciDevice(new_dev)
        except Exception, e:
            raise VmError("pci: failed to locate device and "+
                          "parse its resources - "+str(e))
        if pci_device.driver!='pciback' and pci_device.driver!='pci-stub':
            raise VmError(("pci: PCI Backend and pci-stub don't own device %s")\
                          %pci_device.name)

        strict_check = xoptions.get_pci_dev_assign_strict_check()
        # Check for a non-page-aligned MMIO BAR.
        if pci_device.has_non_page_aligned_bar and strict_check:
            raise VmError("pci: %s: non-page-aligned MMIO BAR found." % \
                          pci_device.name)

        # PV guests get fewer checks.
        if not self.info.is_hvm():
            # try to do FLR for PV guest
            pci_device.do_FLR(self.info.is_hvm(), strict_check)
            return

        if not strict_check:
            return

        # Check whether there is an intermediate PCIe switch between the
        # device and the Root Complex.
        if pci_device.is_behind_switch_lacking_acs():
            err_msg = 'pci: to avoid a potential security issue, %s is not'+\
                      ' allowed to be assigned to a guest since it is behind'+\
                      ' a PCIe switch that does not support or enable ACS.'
            raise VmError(err_msg % pci_device.name)

        # Check the co-assignment.
        # To pci-attach a device D to domN, we should ensure that each of
        # D's co-assigned devices either has not been assigned, or has been
        # assigned to domN.
        coassignment_list = pci_device.find_coassigned_devices()
        pci_device.devs_check_driver(coassignment_list)
        assigned_pci_device_str_list = self._get_assigned_pci_devices()
        for pci_str in coassignment_list:
            if not (pci_str in _all_assigned_pci_devices):
                continue
            if not pci_str in assigned_pci_device_str_list:
                raise VmError(("pci: failed to pci-attach %s to domain %s" + \
                    " because one of its co-assigned devices %s has been" + \
                    " assigned to another domain." \
                    )% (pci_device.name, self.info['name_label'], pci_str))

        # try to do FLR for HVM guest
        pci_device.do_FLR(self.info.is_hvm(), strict_check)

    def hvm_pci_device_insert(self, dev_config):
        log.debug("XendDomainInfo.hvm_pci_device_insert: %s"
                  % scrub_password(dev_config))

        if not self.info.is_hvm():
            raise VmError("hvm_pci_device_insert called on non-HVM guest")

        new_dev = dev_config['devs'][0]

        return self.hvm_pci_device_insert_dev(new_dev)

    def hvm_pci_device_insert_dev(self, new_dev):
        log.debug("XendDomainInfo.hvm_pci_device_insert_dev: %s"
                  % scrub_password(new_dev))

        if self.domid is not None:
            opts = ''
            optslist = []
            pci_defopts = []
            if 'pci_msitranslate' in self.info['platform']:
                pci_defopts.append(['msitranslate',
                                    str(self.info['platform']['pci_msitranslate'])])
            if 'pci_power_mgmt' in self.info['platform']:
                pci_defopts.append(['power_mgmt',
                                    str(self.info['platform']['pci_power_mgmt'])])
            if new_dev.has_key('opts'):
                optslist += new_dev['opts']

            if optslist or pci_defopts:
                opts = ',' + serialise_pci_opts(
                    append_default_pci_opts(optslist, pci_defopts))

            bdf_str = "%s@%02x%s" % (pci_dict_to_bdf_str(new_dev),
                                     int(new_dev['vdevfn'], 16), opts)
            log.debug("XendDomainInfo.hvm_pci_device_insert_dev: %s" % bdf_str)
            bdf = xc.assign_device(self.domid, pci_dict_to_xc_str(new_dev))
            if bdf > 0:
                raise VmError("Failed to assign device to IOMMU (%s)" % bdf_str)
            log.debug("pci: assign device %s" % bdf_str)
            self.image.signalDeviceModel('pci-ins', 'pci-inserted', bdf_str)

            vdevfn = xstransact.Read("/local/domain/0/device-model/%i/parameter"
                                     % self.getDomid())
            try:
                vdevfn_int = int(vdevfn, 16)
            except ValueError:
                raise VmError(("Cannot pass-through PCI function '%s'. " +
                               "Device model reported an error: %s") %
                              (bdf_str, vdevfn))
        else:
            vdevfn = new_dev['vdevfn']

        return vdevfn

    def device_create(self, dev_config):
        """Create a new device.

        @param dev_config: device configuration
        @type  dev_config: SXP object (parsed config)
        """
        log.debug("XendDomainInfo.device_create: %s" % scrub_password(dev_config))
        dev_type = sxp.name(dev_config)
        dev_uuid = self.info.device_add(dev_type, cfg_sxp = dev_config)
        dev_config_dict = self.info['devices'][dev_uuid][1]
        log.debug("XendDomainInfo.device_create: %s" % scrub_password(dev_config_dict))

        if dev_type == 'vif':
            for x in dev_config:
                if x != 'vif' and x[0] == 'mac':
                    if not re.match('^([0-9a-f]{2}:){5}[0-9a-f]{2}$', x[1], re.I):
                        log.error("Virtual network interface creation error - invalid MAC Address entered: %s", x[1])
                        raise VmError("Cannot create a new virtual network interface - MAC address is not valid!")

        if self.domid is not None:
            try:
                dev_config_dict['devid'] = devid = \
                    self._createDevice(dev_type, dev_config_dict)
                if dev_type == 'tap2':
                    # createDevice may create a blktap1 device if blktap2 is
                    # not installed or if the blktap driver is not supported
                    # in blktap1
                    dev_type = self.getBlockDeviceClass(devid)
                self._waitForDevice(dev_type, devid)
            except VmError, ex:
                del self.info['devices'][dev_uuid]
                if dev_type == 'pci':
                    for dev in dev_config_dict['devs']:
                        XendAPIStore.deregister(dev['uuid'], 'DPCI')
                elif dev_type == 'vscsi':
                    for dev in dev_config_dict['devs']:
                        XendAPIStore.deregister(dev['uuid'], 'DSCSI')
                elif dev_type == 'tap' or dev_type == 'tap2':
                    self.info['vbd_refs'].remove(dev_uuid)
                else:
                    self.info['%s_refs' % dev_type].remove(dev_uuid)
                raise ex
        else:
            devid = None

        xen.xend.XendDomain.instance().managed_config_save(self)
        return self.getDeviceController(dev_type).sxpr(devid)
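
    # Editorial example (hypothetical SXP, for illustration only): attaching
    # a writable physical volume as xvdb, roughly what "xm block-attach"
    # hands to device_create():
    #
    #     vm.device_create(['vbd',
    #                       ['uname', 'phy:/dev/vg0/guest1'],
    #                       ['dev',   'xvdb'],
    #                       ['mode',  'w']])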

    def pci_device_configure(self, dev_sxp, devid = 0, first_dev = False):
        """Configure an existing pci device.

        @param dev_sxp: device configuration
        @type  dev_sxp: SXP object (parsed config)
        @param devid:   device id
        @type  devid:   int
        @return: Returns True if successfully updated device
        @rtype: boolean
        """
        log.debug("XendDomainInfo.pci_device_configure: %s"
                  % scrub_password(dev_sxp))

        dev_class = sxp.name(dev_sxp)

        if dev_class != 'pci':
            return False

        pci_state = sxp.child_value(dev_sxp, 'state')
        pci_sub_state = sxp.child_value(dev_sxp, 'sub_state')
        existing_dev_info = self._getDeviceInfo_pci(devid)

        if existing_dev_info is None and pci_state != 'Initialising':
            raise XendError("Cannot detach when pci platform does not exist")

        pci_dev = sxp.children(dev_sxp, 'dev')[0]
        dev_config = pci_convert_sxp_to_dict(dev_sxp)
        dev = dev_config['devs'][0]

        stubdomid = self.getStubdomDomid()
        # Do HVM specific processing
        if self.info.is_hvm():
            from xen.xend import XendDomain
            if pci_state == 'Initialising':
                if stubdomid is not None:
                    XendDomain.instance().domain_lookup(stubdomid).pci_device_configure(dev_sxp[:])

                # HVM PCI device attachment
                if pci_sub_state == 'Booting':
                    vdevfn = self.hvm_pci_device_insert(dev_config)
                else:
                    vdevfn = self.hvm_pci_device_create(dev_config)
                # Update vdevfn
                dev['vdevfn'] = vdevfn
                for n in sxp.children(pci_dev):
                    if n[0] == 'vdevfn':
                        n[1] = vdevfn
            else:
                # HVM PCI device detachment
                existing_dev_uuid = sxp.child_value(existing_dev_info, 'uuid')
                existing_pci_conf = self.info['devices'][existing_dev_uuid][1]
                existing_pci_devs = existing_pci_conf['devs']
                new_devs = filter(lambda x: pci_dict_cmp(x, dev),
                                  existing_pci_devs)
                if len(new_devs) == 0:
                    raise VmError("Device %s is not connected" %
                                  pci_dict_to_bdf_str(dev))
                new_dev = new_devs[0]
                # Only tell qemu-dm to unplug function 0.
                # When unplugging a function, all functions in the
                # same vslot must be unplugged, and function 0 must
                # be one of the functions present when a vslot is
                # hot-plugged.  Telling qemu-dm to unplug function 0
                # also tells it to unplug all other functions in the
                # same vslot.
                if (PCI_FUNC(int(new_dev['vdevfn'], 16)) == 0):
                    self.hvm_destroyPCIDevice(new_dev)
                if stubdomid is not None:
                    XendDomain.instance().domain_lookup(stubdomid).pci_device_configure(dev_sxp[:])
                # Update vdevfn
                dev['vdevfn'] = new_dev['vdevfn']
                for n in sxp.children(pci_dev):
                    if n[0] == 'vdevfn':
                        n[1] = new_dev['vdevfn']
        else:
            # Do PV specific checking
            if pci_state == 'Initialising':
                # PV PCI device attachment
                self.pci_dev_check_attachability_and_do_FLR(dev)

        # If pci platform does not exist, create and exit.
        if existing_dev_info is None:
            self.device_create(dev_sxp)
            return True

        if first_dev is True:
            existing_dev_uuid = sxp.child_value(existing_dev_info, 'uuid')
            existing_pci_conf = self.info['devices'][existing_dev_uuid][1]
            devid = self._createDevice('pci', existing_pci_conf)
            self.info['devices'][existing_dev_uuid][1]['devid'] = devid

        if self.domid is not None:
            # use DevController.reconfigureDevice to change device config
            dev_control = self.getDeviceController(dev_class)
            dev_uuid = dev_control.reconfigureDevice(devid, dev_config)
            if not self.info.is_hvm() and not self.info.is_stubdom():
                # in PV case, wait until backend state becomes connected.
                dev_control.waitForDevice_reconfigure(devid)
            num_devs = dev_control.cleanupDevice(devid)

            # update XendConfig with new device info
            if dev_uuid:
                new_dev_sxp = dev_control.configuration(devid)
                self.info.device_update(dev_uuid, new_dev_sxp)

            # If there is no device left, destroy pci and remove config.
            if num_devs == 0:
                if self.info.is_hvm():
                    self.destroyDevice('pci', devid, True)
                else:
                    self.destroyDevice('pci', devid)
                del self.info['devices'][dev_uuid]
        else:
            new_dev_sxp = ['pci']
            for cur_dev in sxp.children(existing_dev_info, 'dev'):
                if pci_state == 'Closing':
                    if int(dev['domain'], 16) == int(sxp.child_value(cur_dev, 'domain'), 16) and \
                       int(dev['bus'], 16) == int(sxp.child_value(cur_dev, 'bus'), 16) and \
                       int(dev['slot'], 16) == int(sxp.child_value(cur_dev, 'slot'), 16) and \
                       int(dev['func'], 16) == int(sxp.child_value(cur_dev, 'func'), 16):
                        continue
                new_dev_sxp.append(cur_dev)

            if pci_state == 'Initialising' and pci_sub_state != 'Booting':
                for new_dev in sxp.children(dev_sxp, 'dev'):
                    new_dev_sxp.append(new_dev)

            dev_uuid = sxp.child_value(existing_dev_info, 'uuid')
            self.info.device_update(dev_uuid, new_dev_sxp)

            # If there is no device left, remove config.
            if len(sxp.children(new_dev_sxp, 'dev')) == 0:
                del self.info['devices'][dev_uuid]

        xen.xend.XendDomain.instance().managed_config_save(self)

        return True

    def vscsi_device_configure(self, dev_sxp):
        """Configure an existing vscsi device.
            quoted pci function
        """
        def _is_vscsi_defined(dev_info, p_devs = None, v_devs = None):
            if not dev_info:
                return False
            for dev in sxp.children(dev_info, 'dev'):
                if p_devs is not None:
                    if sxp.child_value(dev, 'p-dev') in p_devs:
                        return True
                if v_devs is not None:
                    if sxp.child_value(dev, 'v-dev') in v_devs:
                        return True
            return False

        def _vscsi_be(be):
            be_xdi = xen.xend.XendDomain.instance().domain_lookup_nr(be)
            if be_xdi is not None:
                be_domid = be_xdi.getDomid()
                if be_domid is not None:
                    return str(be_domid)
            return str(be)

        dev_class = sxp.name(dev_sxp)
        if dev_class != 'vscsi':
            return False

        dev_config = self.info.vscsi_convert_sxp_to_dict(dev_sxp)
        devs = dev_config['devs']
        v_devs = [d['v-dev'] for d in devs]
        state = devs[0]['state']
        req_devid = int(devs[0]['devid'])
        cur_dev_sxp = self._getDeviceInfo_vscsi(req_devid)

        if state == xenbusState['Initialising']:
            # new create
            # If request devid does not exist, create and exit.
            p_devs = [d['p-dev'] for d in devs]
            for dev_type, dev_info in self.info.all_devices_sxpr():
                if dev_type != 'vscsi':
                    continue
                if _is_vscsi_defined(dev_info, p_devs = p_devs):
                    raise XendError('The physical device "%s" is already defined' % \
                                    p_devs[0])
            if cur_dev_sxp is None:
                self.device_create(dev_sxp)
                return True

            if _is_vscsi_defined(cur_dev_sxp, v_devs = v_devs):
                raise XendError('The virtual device "%s" is already defined' % \
                                v_devs[0])

            if int(dev_config['feature-host']) != \
               int(sxp.child_value(cur_dev_sxp, 'feature-host')):
                raise XendError('The physical device "%s" cannot be defined '
                                'because the mode is different' % devs[0]['p-dev'])

            new_be = dev_config.get('backend', None)
            if new_be is not None:
                cur_be = sxp.child_value(cur_dev_sxp, 'backend', None)
                if cur_be is None:
                    cur_be = xen.xend.XendDomain.DOM0_ID
                new_be_dom = _vscsi_be(new_be)
                cur_be_dom = _vscsi_be(cur_be)
                if new_be_dom != cur_be_dom:
                    raise XendError('The physical device "%s" cannot be defined '
                                    'because the backend is different' % devs[0]['p-dev'])

        elif state == xenbusState['Closing']:
            if not _is_vscsi_defined(cur_dev_sxp, v_devs = v_devs):
                raise XendError("Cannot detach a vscsi device that does not exist")

        if self.domid is not None:
            # use DevController.reconfigureDevice to change device config
            dev_control = self.getDeviceController(dev_class)
            dev_uuid = dev_control.reconfigureDevice(req_devid, dev_config)
            dev_control.waitForDevice_reconfigure(req_devid)
            num_devs = dev_control.cleanupDevice(req_devid)

            # update XendConfig with new device info
            if dev_uuid:
                new_dev_sxp = dev_control.configuration(req_devid)
                self.info.device_update(dev_uuid, new_dev_sxp)

            # If there is no device left, destroy vscsi and remove config.
            if num_devs == 0:
                self.destroyDevice('vscsi', req_devid)
                del self.info['devices'][dev_uuid]

        else:
            new_dev_sxp = ['vscsi']
            cur_mode = sxp.children(cur_dev_sxp, 'feature-host')[0]
            new_dev_sxp.append(cur_mode)
            try:
                cur_be = sxp.children(cur_dev_sxp, 'backend')[0]
                new_dev_sxp.append(cur_be)
            except IndexError:
                pass

            for cur_dev in sxp.children(cur_dev_sxp, 'dev'):
                if state == xenbusState['Closing']:
                    if int(cur_mode[1]) == 1:
                        continue
                    if sxp.child_value(cur_dev, 'v-dev') in v_devs:
                        continue
                new_dev_sxp.append(cur_dev)

            if state == xenbusState['Initialising']:
                for new_dev in sxp.children(dev_sxp, 'dev'):
                    new_dev_sxp.append(new_dev)

            dev_uuid = sxp.child_value(cur_dev_sxp, 'uuid')
            self.info.device_update(dev_uuid, new_dev_sxp)

            # If there is only 'vscsi' in new_dev_sxp, remove the config.
            if len(sxp.children(new_dev_sxp, 'dev')) == 0:
                del self.info['devices'][dev_uuid]

        xen.xend.XendDomain.instance().managed_config_save(self)

        return True

    def vusb_device_configure(self, dev_sxp, devid):
        """Configure a virtual root port.
        """
        dev_class = sxp.name(dev_sxp)
        if dev_class != 'vusb':
            return False

        dev_config = {}
        ports = sxp.child(dev_sxp, 'port')
        for port in ports[1:]:
            try:
                num, bus = port
                dev_config['port-%i' % int(num)] = str(bus)
            except TypeError:
                pass

        dev_control = self.getDeviceController(dev_class)
        dev_control.reconfigureDevice(devid, dev_config)

        return True

    def device_configure(self, dev_sxp, devid = None):
        """Configure an existing device.

        @param dev_config: device configuration
        @type  dev_config: SXP object (parsed config)
        @param devid:      device id
        @type  devid:      int
        @return: Returns True if successfully updated device
        @rtype: boolean
        """

        # convert device sxp to a dict
        dev_class = sxp.name(dev_sxp)
        dev_config = {}

        if dev_class == 'pci':
            return self.pci_device_configure(dev_sxp)

        if dev_class == 'vscsi':
            return self.vscsi_device_configure(dev_sxp)

        if dev_class == 'vusb':
            return self.vusb_device_configure(dev_sxp, devid)

        for opt_val in dev_sxp[1:]:
            try:
                dev_config[opt_val[0]] = opt_val[1]
            except IndexError:
                pass

        dev_control = self.getDeviceController(dev_class)
        if devid is None:
            dev = dev_config.get('dev', '')
            if not dev:
                raise VmError('Block device must have virtual details specified')
            if 'ioemu:' in dev:
                (_, dev) = dev.split(':', 1)
            try:
                (dev, _) = dev.split(':', 1)  # Remove ":disk" or ":cdrom"
            except ValueError:
                pass
            devid = dev_control.convertToDeviceNumber(dev)
        dev_info = self._getDeviceInfo_vbd(devid)
        if dev_info is None:
            raise VmError("Device %s not connected" % devid)
        dev_uuid = sxp.child_value(dev_info, 'uuid')

        if self.domid is not None:
            # use DevController.reconfigureDevice to change device config
            dev_control.reconfigureDevice(devid, dev_config)
        else:
            (_, new_b, new_f) = dev_control.getDeviceDetails(dev_config)
            if (new_f['device-type'] == 'cdrom' and
                sxp.child_value(dev_info, 'dev').endswith(':cdrom') and
                new_b['mode'] == 'r' and
                sxp.child_value(dev_info, 'mode') == 'r'):
                pass
            else:
                raise VmError('Refusing to reconfigure device %s:%d to %s' %
                              (dev_class, devid, dev_config))

        # update XendConfig with new device info
        self.info.device_update(dev_uuid, dev_sxp)
        xen.xend.XendDomain.instance().managed_config_save(self)

        return True

    def waitForDevices(self):
        """Wait for this domain's configured devices to connect.

        @raise VmError: if any device fails to initialise.
        """
        for devclass in XendDevices.valid_devices():
            self.getDeviceController(devclass).waitForDevices()

    def hvm_destroyPCIDevice(self, pci_dev):
        log.debug("hvm_destroyPCIDevice: %s", pci_dev)

        if not self.info.is_hvm():
            raise VmError("hvm_destroyPCIDevice called on non-HVM guest")

        # Check the co-assignment.
        # To pci-detach a device D from domN, we should ensure: for each DD
        # in the list of D's co-assigned devices, DD is not assigned (to
        # domN).
        from xen.xend.server.pciif import PciDevice
        try:
            pci_device = PciDevice(pci_dev)
        except Exception, e:
            raise VmError("pci: failed to locate device and "+
                          "parse its resources - "+str(e))
        coassignment_list = pci_device.find_coassigned_devices()
        coassignment_list.remove(pci_device.name)
        assigned_pci_device_str_list = self._get_assigned_pci_devices()
        for pci_str in coassignment_list:
            if xoptions.get_pci_dev_assign_strict_check() and \
               pci_str in assigned_pci_device_str_list:
                raise VmError(("pci: failed to pci-detach %s from domain %s" + \
                    " because one of its co-assigned devices %s is still" + \
                    " assigned to the domain." \
                    )% (pci_device.name, self.info['name_label'], pci_str))

        bdf_str = pci_dict_to_bdf_str(pci_dev)
        log.info("hvm_destroyPCIDevice:%s:%s!", pci_dev, bdf_str)
        if self.domid is not None:
            self.image.signalDeviceModel('pci-rem', 'pci-removed', bdf_str)

        return 0

    def destroyDevice(self, deviceClass, devid, force = False, rm_cfg = False):
        log.debug("XendDomainInfo.destroyDevice: deviceClass = %s, device = %s",
                  deviceClass, devid)

        if rm_cfg:
            # Convert devid to device number.  A device number is
            # needed to remove its configuration.
            dev = self.getDeviceController(deviceClass).convertToDeviceNumber(devid)

            # Save current sxprs.  A device number and a backend
            # path are needed to remove its configuration but sxprs
            # do not have those after calling destroyDevice.
            sxprs = self.getDeviceSxprs(deviceClass)

        rc = None
        if self.domid is not None:

            # The new blktap implementation may need a sysfs write after
            # everything is torn down.
            if deviceClass == 'tap2':
                dev = self.getDeviceController(deviceClass).convertToDeviceNumber(devid)
                path = self.getDeviceController(deviceClass).readBackend(dev, 'params')
                frontpath = self.getDeviceController(deviceClass).frontendPath(dev)
                backpath = xstransact.Read(frontpath, "backend")
                thread.start_new_thread(self.getDeviceController(deviceClass).finishDeviceCleanup, (backpath, path))

            rc = self.getDeviceController(deviceClass).destroyDevice(devid, force)
            if not force and rm_cfg:
                # The backend path, other than the device itself,
                # has to be passed because its accompanying frontend
                # path may be void until its removal is actually
                # issued.  It is probable because destroyDevice is
                # issued first.
                for dev_num, dev_info in sxprs:
                    dev_num = int(dev_num)
                    if dev_num == dev:
                        for x in dev_info:
                            if x[0] == 'backend':
                                backend = x[1]
                                break
                        break
                self._waitForDevice_destroy(deviceClass, devid, backend)

        if rm_cfg and deviceClass != "vif2":
            if deviceClass == 'vif':
                if self.domid is not None:
                    mac = ''
                    for dev_num, dev_info in sxprs:
                        dev_num = int(dev_num)
                        if dev_num == dev:
                            for x in dev_info:
                                if x[0] == 'mac':
                                    mac = x[1]
                                    break
                            break
                    dev_info = self._getDeviceInfo_vif(mac)
                else:
                    _, dev_info = sxprs[dev]
            else:  # 'vbd' or 'tap' or 'tap2'
                dev_info = self._getDeviceInfo_vbd(dev)
                # To remove the UUID of the device from refs,
                # deviceClass must always be 'vbd'.
                deviceClass = 'vbd'
            if dev_info is None:
                raise XendError("Device %s is not defined" % devid)

            dev_uuid = sxp.child_value(dev_info, 'uuid')
            del self.info['devices'][dev_uuid]
            self.info['%s_refs' % deviceClass].remove(dev_uuid)
            xen.xend.XendDomain.instance().managed_config_save(self)

        return rc

    def getDeviceSxprs(self, deviceClass):
        if deviceClass == 'pci':
            dev_info = self._getDeviceInfo_pci('0')  # from self.info['devices']
            if dev_info is None:
                return []
            dev_uuid = sxp.child_value(dev_info, 'uuid')
            pci_devs = self.info['devices'][dev_uuid][1]['devs']
            return pci_devs
        if self._stateGet() in (DOM_STATE_RUNNING, DOM_STATE_PAUSED, DOM_STATE_CRASHED):
            return self.getDeviceController(deviceClass).sxprs()
        else:
            sxprs = []
            dev_num = 0
            for dev_type, dev_info in self.info.all_devices_sxpr():
                if (deviceClass == 'vbd' and dev_type not in ['vbd', 'tap', 'tap2']) or \
                   (deviceClass != 'vbd' and dev_type != deviceClass):
                    continue

                if deviceClass == 'vscsi':
                    vscsi_devs = ['devs', []]
                    for vscsi_dev in sxp.children(dev_info, 'dev'):
                        vscsi_dev.append(['frontstate', None])
                        vscsi_devs[1].append(vscsi_dev)
                        dev_num = int(sxp.child_value(vscsi_dev, 'devid'))
                    vscsi_mode = sxp.children(dev_info, 'feature-host')[0]
                    sxprs.append([dev_num, [vscsi_devs, vscsi_mode]])
                elif deviceClass == 'vbd':
                    dev = sxp.child_value(dev_info, 'dev')
                    if 'ioemu:' in dev:
                        (_, dev) = dev.split(':', 1)
                    try:
                        (dev_name, _) = dev.split(':', 1)  # Remove ":disk" or ":cdrom"
                    except ValueError:
                        dev_name = dev
                    dev_num = self.getDeviceController('vbd').convertToDeviceNumber(dev_name)
                    sxprs.append([dev_num, dev_info])
                else:
                    sxprs.append([dev_num, dev_info])
                    dev_num += 1
            return sxprs

    def getBlockDeviceClass(self, devid):
        # If the domain is running we can get the device class from xenstore.
        # This is more accurate, as blktap1 devices show up as blktap2
        # devices in the config.
        if self._stateGet() in (DOM_STATE_RUNNING, DOM_STATE_PAUSED, DOM_STATE_CRASHED):
            # All block devices have a vbd frontend, so we know the frontend path
            dev = self.getDeviceController('vbd').convertToDeviceNumber(devid)
            frontendPath = "%s/device/vbd/%s" % (self.dompath, dev)
            for devclass in XendDevices.valid_devices():
                for dev in xstransact.List("%s/device/%s" % (self.vmpath, devclass)):
                    devFrontendPath = xstransact.Read("%s/device/%s/%s/frontend" % (self.vmpath, devclass, dev))
                    if frontendPath == devFrontendPath:
                        return devclass

        else:
            # The domain is not active so we must get the device class from
            # the config.  To get a device number from the devid, we
            # temporarily use the device controller of VBD.
            dev = self.getDeviceController('vbd').convertToDeviceNumber(devid)
            dev_info = self._getDeviceInfo_vbd(dev)
            if dev_info:
                return dev_info[0]

    def _getDeviceInfo_vif(self, mac):
        for dev_type, dev_info in self.info.all_devices_sxpr():
            if dev_type != 'vif':
                continue
            if mac == sxp.child_value(dev_info, 'mac'):
                return dev_info

    def _getDeviceInfo_vbd(self, devid):
        for dev_type, dev_info in self.info.all_devices_sxpr():
            if dev_type != 'vbd' and dev_type != 'tap' and dev_type != 'tap2':
                continue
            dev = sxp.child_value(dev_info, 'dev')
            dev = dev.split(':')[0]
            dev = self.getDeviceController(dev_type).convertToDeviceNumber(dev)
            if devid == dev:
                return dev_info

    def _getDeviceInfo_pci(self, devid):
        for dev_type, dev_info in self.info.all_devices_sxpr():
            if dev_type != 'pci':
                continue
            return dev_info
        return None

    def _getDeviceInfo_vscsi(self, devid):
        devid = int(devid)
        for dev_type, dev_info in self.info.all_devices_sxpr():
            if dev_type != 'vscsi':
                continue
            devs = sxp.children(dev_info, 'dev')
            if devid == int(sxp.child_value(devs[0], 'devid')):
                return dev_info
        return None

    def _getDeviceInfo_vusb(self, devid):
        for dev_type, dev_info in self.info.all_devices_sxpr():
            if dev_type != 'vusb':
                continue
            return dev_info
        return None

    def _get_assigned_pci_devices(self, devid = 0):
        if self.domid is not None:
            return get_assigned_pci_devices(self.domid)

        dev_info = self._getDeviceInfo_pci(devid)
        if dev_info is None:
            return []
        dev_uuid = sxp.child_value(dev_info, 'uuid')
        pci_conf = self.info['devices'][dev_uuid][1]
        return map(pci_dict_to_bdf_str, pci_conf['devs'])

    def setMemoryTarget(self, target):
        """Set the memory target of this domain.
        @param target: In MiB.
        """
        log.debug("Setting memory target of domain %s (%s) to %d MiB.",
                  self.info['name_label'], str(self.domid), target)

        MiB = 1024 * 1024
        memory_cur = self.get_memory_dynamic_max() / MiB

        if self.domid == 0:
            dom0_min_mem = xoptions.get_dom0_min_mem()
            if target < memory_cur and dom0_min_mem > target:
                raise XendError("memory_dynamic_max too small")

        self._safe_set_memory('memory_dynamic_min', target * MiB)
        self._safe_set_memory('memory_dynamic_max', target * MiB)

        if self.domid >= 0:
            if target > memory_cur:
                balloon.free((target - memory_cur) * 1024, self)
            self.storeVm("memory", target)
            self.storeDom("memory/target", target << 10)
            xc.domain_set_target_mem(self.domid,
                                     (target * 1024))
        xen.xend.XendDomain.instance().managed_config_save(self)
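
    # Editorial note on units in setMemoryTarget(): target arrives in MiB;
    # _safe_set_memory() takes bytes (hence "target * MiB"), while
    # xenstore's memory/target and xc.domain_set_target_mem() take KiB
    # (hence "target << 10" and "target * 1024").  E.g. a target of 512 MiB
    # is stored as 524288 KiB.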

    def setMemoryMaximum(self, limit):
        """Set the maximum memory limit of this domain
        @param limit: In MiB.
        """
        log.debug("Setting memory maximum of domain %s (%s) to %d MiB.",
                  self.info['name_label'], str(self.domid), limit)

        maxmem_cur = self.get_memory_static_max()
        MiB = 1024 * 1024
        self._safe_set_memory('memory_static_max', limit * MiB)

        if self.domid >= 0:
            maxmem = int(limit) * 1024
            try:
                return xc.domain_setmaxmem(self.domid, maxmem)
            except Exception, ex:
                self._safe_set_memory('memory_static_max', maxmem_cur)
                raise XendError(str(ex))
        xen.xend.XendDomain.instance().managed_config_save(self)

    def getVCPUInfo(self):
        try:
            # We include the domain name and ID, to help xm.
            sxpr = ['domain',
                    ['domid',      self.domid],
                    ['name',       self.info['name_label']],
                    ['vcpu_count', self.info['VCPUs_max']]]

            for i in range(0, self.info['VCPUs_max']):
                if self.domid is not None:
                    info = xc.vcpu_getinfo(self.domid, i)

                    sxpr.append(['vcpu',
                                 ['number',   i],
                                 ['online',   info['online']],
                                 ['blocked',  info['blocked']],
                                 ['running',  info['running']],
                                 ['cpu_time', info['cpu_time'] / 1e9],
                                 ['cpu',      info['cpu']],
                                 ['cpumap',   info['cpumap']]])
                else:
                    sxpr.append(['vcpu',
                                 ['number',   i],
                                 ['online',   0],
                                 ['blocked',  0],
                                 ['running',  0],
                                 ['cpu_time', 0.0],
                                 ['cpu',      -1],
                                 ['cpumap',   self.info['cpus'][i] and \
                                              self.info['cpus'][i] or range(64)]])

            return sxpr

        except RuntimeError, exn:
            raise XendError(str(exn))

    def getDomInfo(self):
        return dom_get(self.domid)

    #
    # internal functions ... TODO: re-categorise
    #

    def _augmentInfo(self, priv):
        """Augment self.info, as given to us through L{recreate}, with
        values taken from the store.  This recovers those values known
        to xend but not to the hypervisor.
        """
        augment_entries = XendConfig.LEGACY_XENSTORE_VM_PARAMS[:]
        if priv:
            augment_entries.remove('memory')
            augment_entries.remove('maxmem')
            augment_entries.remove('vcpus')
            augment_entries.remove('vcpu_avail')

        vm_config = self._readVMDetails([(k, XendConfig.LEGACY_CFG_TYPES[k])
                                         for k in augment_entries])

        # make returned lists into a dictionary
        vm_config = dict(zip(augment_entries, vm_config))

        for arg in augment_entries:
            val = vm_config[arg]
            if val != None:
                if arg in XendConfig.LEGACY_CFG_TO_XENAPI_CFG:
                    xapiarg = XendConfig.LEGACY_CFG_TO_XENAPI_CFG[arg]
                    self.info[xapiarg] = val
                elif arg == "memory":
                    self.info["static_memory_min"] = val
                elif arg == "maxmem":
                    self.info["static_memory_max"] = val
                else:
                    self.info[arg] = val

        # read CPU Affinity
        self.info['cpus'] = []
        vcpus_info = self.getVCPUInfo()
        for vcpu_info in sxp.children(vcpus_info, 'vcpu'):
            self.info['cpus'].append(sxp.child_value(vcpu_info, 'cpumap'))

        # For dom0, we ignore any stored value for the vcpus fields, and
        # read the current value from Xen instead.  This allows boot-time
        # settings to take precedence over any entries in the store.
        if priv:
            xeninfo = dom_get(self.domid)
            self.info['VCPUs_max'] = xeninfo['online_vcpus']
            self.info['vcpu_avail'] = (1 << xeninfo['online_vcpus']) - 1

        # read image value
        image_sxp = self._readVm('image')
        if image_sxp:
            self.info.update_with_image_sxp(sxp.from_string(image_sxp))

        # read devices
        devices = []
        for devclass in XendDevices.valid_devices():
            devconfig = self.getDeviceController(devclass).configurations()
            if devconfig:
                devices.extend(devconfig)

        if not self.info['devices'] and devices is not None:
            for device in devices:
                self.info.device_add(device[0], cfg_sxp = device)

        self._update_consoles()

    def _update_consoles(self, transaction = None):
        if self.domid is None or self.domid == 0:
            return

        # Update VT100 port if it exists
        if transaction is None:
            self.console_port = self.readDom('console/port')
        else:
            self.console_port = self.readDomTxn(transaction, 'console/port')
        if self.console_port is not None:
            serial_consoles = self.info.console_get_all('vt100')
            if not serial_consoles:
                cfg = self.info.console_add('vt100', self.console_port)
                self._createDevice('console', cfg)
            else:
                console_uuid = serial_consoles[0].get('uuid')
                self.info.console_update(console_uuid, 'location',
                                         self.console_port)

        # Update VNC port if it exists and write to xenstore
        if transaction is None:
            vnc_port = self.readDom('console/vnc-port')
        else:
            vnc_port = self.readDomTxn(transaction, 'console/vnc-port')
        if vnc_port is not None:
            for dev_uuid, (dev_type, dev_info) in self.info['devices'].items():
                if dev_type == 'vfb':
                    old_location = dev_info.get('location')
                    listen_host = dev_info.get('vnclisten', \
                                XendOptions.instance().get_vnclisten_address())
                    new_location = '%s:%s' % (listen_host, str(vnc_port))
                    if old_location == new_location:
                        break

                    dev_info['location'] = new_location
                    self.info.device_update(dev_uuid, cfg_xenapi = dev_info)
                    vfb_ctrl = self.getDeviceController('vfb')
                    vfb_ctrl.reconfigureDevice(0, dev_info)
                    break

    #
    # Functions to update xenstore /vm/*
    #

    def _readVm(self, *args):
        return xstransact.Read(self.vmpath, *args)

    def _writeVm(self, *args):
        return xstransact.Write(self.vmpath, *args)

    def _removeVm(self, *args):
        return xstransact.Remove(self.vmpath, *args)

    def _gatherVm(self, *args):
        return xstransact.Gather(self.vmpath, *args)

    def _listRecursiveVm(self, *args):
        return xstransact.ListRecursive(self.vmpath, *args)

    def storeVm(self, *args):
        return xstransact.Store(self.vmpath, *args)

    def permissionsVm(self, *args):
        return xstransact.SetPermissions(self.vmpath, *args)
1694 # Function to update xenstore /dom/*
1697 def readDom(self, *args):
1698 return xstransact.Read(self.dompath, *args)
1700 def gatherDom(self, *args):
1701 return xstransact.Gather(self.dompath, *args)
1703 def _writeDom(self, *args):
1704 return xstransact.Write(self.dompath, *args)
1706 def _removeDom(self, *args):
1707 return xstransact.Remove(self.dompath, *args)
1709 def storeDom(self, *args):
1710 return xstransact.Store(self.dompath, *args)
1713 def readDomTxn(self, transaction, *args):
1714 paths = map(lambda x: self.dompath + "/" + x, args)
1715 return transaction.read(*paths)
1717 def gatherDomTxn(self, transaction, *args):
1718 paths = map(lambda x: self.dompath + "/" + x, args)
1719 return transaction.gather(*paths)
1721 def _writeDomTxn(self, transaction, *args):
1722 paths = map(lambda x: self.dompath + "/" + x, args)
1723 return transaction.write(*paths)
1725 def _removeDomTxn(self, transaction, *args):
1726 paths = map(lambda x: self.dompath + "/" + x, args)
1727 return transaction.remove(*paths)
1729 def storeDomTxn(self, transaction, *args):
1730 paths = map(lambda x: self.dompath + "/" + x, args)
1731 return transaction.store(*paths)
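# Illustrative sketch (editorial, not in the original source): each *DomTxn
# helper rebases its relative keys onto this domain's xenstore path before
# delegating to the transaction. With dompath == '/local/domain/7':
#
# >>> map(lambda x: '/local/domain/7' + "/" + x, ('console/port', 'name'))
# ['/local/domain/7/console/port', '/local/domain/7/name']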
1734 def _recreateDom(self):
1735 complete(self.dompath, lambda t: self._recreateDomFunc(t))
1737 def _recreateDomFunc(self, t):
1738 t.remove()
1739 t.mkdir()
1740 t.set_permissions({'dom' : self.domid, 'read' : True})
1741 t.write('vm', self.vmpath)
1742 # NB. Solaris guests use guest/ and hvmpv/ xenstore directories
1743 # XCP Windows paravirtualized guests use data/
1744 for i in [ 'device', 'control', 'error', 'memory', 'guest', \
1745 'hvmpv', 'data' ]:
1746 t.mkdir(i)
1747 t.set_permissions(i, {'dom' : self.domid})
1749 def _storeDomDetails(self):
1750 to_store = {
1751 'domid': str(self.domid),
1752 'vm': self.vmpath,
1753 'name': self.info['name_label'],
1754 'console/limit': str(xoptions.get_console_limit() * 1024),
1755 'memory/target': str(self.info['memory_dynamic_max'] / 1024),
1756 'description': str(self.info['description']),
1757 }
1759 def f(n, v):
1760 if v is not None:
1761 if type(v) == bool:
1762 to_store[n] = v and "1" or "0"
1763 else:
1764 to_store[n] = str(v)
1766 # Figure out if we need to tell xenconsoled to ignore this guest's
1767 # console - device model will handle console if it is running
1768 constype = "ioemu"
1769 if 'device_model' not in self.info['platform']:
1770 constype = "xenconsoled"
1772 f('console/port', self.console_port)
1773 f('console/ring-ref', self.console_mfn)
1774 f('console/type', constype)
1775 f('store/port', self.store_port)
1776 f('store/ring-ref', self.store_mfn)
1778 if arch.type == "x86":
1779 f('control/platform-feature-multiprocessor-suspend', True)
1781 # elfnotes
1782 for n, v in self.info.get_notes().iteritems():
1783 n = n.lower().replace('_', '-')
1784 if n == 'features':
1785 for v in v.split('|'):
1786 v = v.replace('_', '-')
1787 if v.startswith('!'):
1788 f('image/%s/%s' % (n, v[1:]), False)
1789 else:
1790 f('image/%s/%s' % (n, v), True)
1791 else:
1792 f('image/%s' % n, v)
1794 if self.info.has_key('security_label'):
1795 f('security_label', self.info['security_label'])
1797 to_store.update(self._vcpuDomDetails())
1799 log.debug("Storing domain details: %s", scrub_password(to_store))
1801 self._writeDom(to_store)
1803 def _vcpuDomDetails(self):
1804 def availability(n):
1805 if self.info['vcpu_avail'] & (1 << n):
1806 return 'online'
1807 else:
1808 return 'offline'
1810 result = {}
1811 for v in range(0, self.info['VCPUs_max']):
1812 result["cpu/%d/availability" % v] = availability(v)
1813 return result
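# Worked example (editorial): with VCPUs_max == 2 and vcpu_avail == 0b01,
# _vcpuDomDetails returns:
#
# {'cpu/0/availability': 'online', 'cpu/1/availability': 'offline'}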
1816 # xenstore watches
1819 def _registerWatches(self):
1820 """Register a watch on this VM's entries in the store, and the
1821 domain's control/shutdown node, so that when they are changed
1822 externally, we keep up to date. This should only be called by {@link
1823 #create}, {@link #recreate}, or {@link #restore}, once the domain's
1824 details have been written, but before the new instance is returned."""
1825 self.vmWatch = xswatch(self.vmpath, self._storeChanged)
1826 self.shutdownWatch = xswatch(self.dompath + '/control/shutdown',
1827 self._handleShutdownWatch)
1829 def _storeChanged(self, _):
1830 log.trace("XendDomainInfo.storeChanged")
1832 changed = False
1834 # Check whether values in the configuration have
1835 # changed in Xenstore.
1837 cfg_vm = ['name', 'on_poweroff', 'on_reboot', 'on_crash',
1838 'rtc/timeoffset']
1840 vm_details = self._readVMDetails([(k,XendConfig.LEGACY_CFG_TYPES[k])
1841 for k in cfg_vm])
1843 # convert two lists into a python dictionary
1844 vm_details = dict(zip(cfg_vm, vm_details))
1846 for arg, val in vm_details.items():
1847 if arg in XendConfig.LEGACY_CFG_TO_XENAPI_CFG:
1848 xapiarg = XendConfig.LEGACY_CFG_TO_XENAPI_CFG[arg]
1849 if val is not None and val != self.info[xapiarg]:
1850 self.info[xapiarg] = val
1851 changed = True
1852 elif arg == "memory":
1853 if val is not None and val != self.info["static_memory_min"]:
1854 self.info["static_memory_min"] = val
1855 changed = True
1856 elif arg == "maxmem":
1857 if val is not None and val != self.info["static_memory_max"]:
1858 self.info["static_memory_max"] = val
1859 changed = True
1861 # Check whether image definition has been updated
1862 image_sxp = self._readVm('image')
1863 if image_sxp and image_sxp != sxp.to_string(self.info.image_sxpr()):
1864 self.info.update_with_image_sxp(sxp.from_string(image_sxp))
1865 changed = True
1867 # Update the rtc_timeoffset to be preserved across reboot.
1868 # NB. No need to update xenstore domain section.
1869 val = int(vm_details.get("rtc/timeoffset", 0))
1870 self.info["platform"]["rtc_timeoffset"] = val
1872 if changed:
1873 # Update the domain section of the store, as this contains some
1874 # parameters derived from the VM configuration.
1875 self.refresh_shutdown_lock.acquire()
1876 try:
1877 state = self._stateGet()
1878 if state not in (DOM_STATE_SHUTDOWN, DOM_STATE_HALTED,):
1879 self._storeDomDetails()
1880 finally:
1881 self.refresh_shutdown_lock.release()
1883 return 1
1885 def _handleShutdownWatch(self, _):
1886 log.debug('XendDomainInfo.handleShutdownWatch')
1888 reason = self.readDom('control/shutdown')
1890 if reason and reason != 'suspend':
1891 sst = self.readDom('xend/shutdown_start_time')
1892 now = time.time()
1893 if sst:
1894 self.shutdownStartTime = float(sst)
1895 timeout = float(sst) + SHUTDOWN_TIMEOUT - now
1896 else:
1897 self.shutdownStartTime = now
1898 self.storeDom('xend/shutdown_start_time', now)
1899 timeout = SHUTDOWN_TIMEOUT
1901 log.trace(
1902 "Scheduling refreshShutdown on domain %d in %ds.",
1903 self.domid, timeout)
1904 threading.Timer(timeout, self.refreshShutdown).start()
1906 return True
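# Worked example (editorial, timeout value assumed for illustration): with
# SHUTDOWN_TIMEOUT of 60 seconds and a stored shutdown_start_time 40 seconds
# in the past, the timer fires in float(sst) + 60.0 - now == 20.0 seconds,
# so a guest that ignores the request is still noticed once the full
# timeout has elapsed.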
1910 # Public Attributes for the VM
1914 def getDomid(self):
1915 return self.domid
1917 def getStubdomDomid(self):
1918 dom_list = xstransact.List('/local/domain')
1919 for d in dom_list:
1920 target = xstransact.Read('/local/domain/' + d + '/target')
1921 if target is not None and int(target) == self.domid:
1922 return int(d)
1923 return None
1925 def setName(self, name, to_store = True):
1926 self._checkName(name)
1927 self.info['name_label'] = name
1928 if to_store:
1929 self.storeVm("name", name)
1931 def getName(self):
1932 return self.info['name_label']
1934 def getDomainPath(self):
1935 return self.dompath
1937 def getShutdownReason(self):
1938 return self.readDom('control/shutdown')
1940 def getStorePort(self):
1941 """For use only by image.py and XendCheckpoint.py."""
1942 return self.store_port
1944 def getConsolePort(self):
1945 """For use only by image.py and XendCheckpoint.py"""
1946 return self.console_port
1948 def getFeatures(self):
1949 """For use only by image.py."""
1950 return self.info['features']
1952 def getVCpuCount(self):
1953 return self.info['VCPUs_max']
1955 def getVCpuAvail(self):
1956 return self.info['vcpu_avail']
1958 def setVCpuCount(self, vcpus):
1959 def vcpus_valid(n):
1960 if vcpus <= 0:
1961 raise XendError('VCPU count must be greater than zero')
1962 if self.domid >= 0 and vcpus > self.info['VCPUs_max']:
1963 raise XendError('Cannot set vcpus greater than max vcpus on running domain')
1964 vcpus_valid(vcpus)
1966 self.info['vcpu_avail'] = (1 << vcpus) - 1
1967 if self.domid >= 0:
1968 self.storeVm('vcpu_avail', self.info['vcpu_avail'])
1969 self._writeDom(self._vcpuDomDetails())
1970 self.info['VCPUs_live'] = vcpus
1971 else:
1972 if self.info['VCPUs_max'] > vcpus:
1973 # decreasing
1974 del self.info['cpus'][vcpus:]
1975 elif self.info['VCPUs_max'] < vcpus:
1976 # increasing
1977 for c in range(self.info['VCPUs_max'], vcpus):
1978 self.info['cpus'].append(list())
1979 self.info['VCPUs_max'] = vcpus
1980 xen.xend.XendDomain.instance().managed_config_save(self)
1981 log.info("Set VCPU count on domain %s to %d", self.info['name_label'],
1982 vcpus)
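# Illustrative sketch (editorial): on a stopped domain the cpus list is
# resized to match the new count, mirroring the loop above:
#
# >>> cpus = [[0], [1]]
# >>> for c in range(2, 4): cpus.append(list())
# >>> cpus
# [[0], [1], [], []]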
1984 def getMemoryTarget(self):
1985 """Get this domain's target memory size, in KB."""
1986 return self.info['memory_dynamic_max'] / 1024
1988 def getMemoryMaximum(self):
1989 """Get this domain's maximum memory size, in KB."""
1990 # remember, info now stores memory in bytes
1991 return self.info['memory_static_max'] / 1024
1993 def getResume(self):
1994 return str(self._resume)
1996 def setResume(self, isresume):
1997 self._resume = isresume
1999 def getCpus(self):
2000 return self.info['cpus']
2002 def setCpus(self, cpumap):
2003 self.info['cpus'] = cpumap
2005 def getCap(self):
2006 return self.info['vcpus_params']['cap']
2008 def setCap(self, cpu_cap):
2009 self.info['vcpus_params']['cap'] = cpu_cap
2011 def getWeight(self):
2012 return self.info['vcpus_params']['weight']
2014 def setWeight(self, cpu_weight):
2015 self.info['vcpus_params']['weight'] = cpu_weight
2017 def getRestartCount(self):
2018 return self._readVm('xend/restart_count')
2020 def refreshShutdown(self, xeninfo = None):
2021 """ Checks the domain for whether a shutdown is required.
2023 Called from XendDomainInfo and also image.py for HVM images.
2024 """
2026 # If set at the end of this method, a restart is required, with the
2027 # given reason. This restart has to be done out of the scope of
2028 # refresh_shutdown_lock.
2029 restart_reason = None
2031 self.refresh_shutdown_lock.acquire()
2032 try:
2033 if xeninfo is None:
2034 xeninfo = dom_get(self.domid)
2035 if xeninfo is None:
2036 # The domain no longer exists. This will occur if we have
2037 # scheduled a timer to check for shutdown timeouts and the
2038 # shutdown succeeded. It will also occur if someone
2039 # destroys a domain beneath us. We clean up the domain,
2040 # just in case, but we can't clean up the VM, because that
2041 # VM may have migrated to a different domain on this
2042 # machine.
2043 self.cleanupDomain()
2044 self._stateSet(DOM_STATE_HALTED)
2045 return
2047 if xeninfo['dying']:
2048 # Dying means that a domain has been destroyed, but has not
2049 # yet been cleaned up by Xen. This state could persist
2050 # indefinitely if, for example, another domain has some of its
2051 # pages mapped. We might like to diagnose this problem in the
2052 # future, but for now all we do is make sure that it's not us
2053 # holding the pages, by calling cleanupDomain. We can't
2054 # clean up the VM, as above.
2055 self.cleanupDomain()
2056 self._stateSet(DOM_STATE_SHUTDOWN)
2057 return
2059 elif xeninfo['crashed']:
2060 if self.readDom('xend/shutdown_completed'):
2061 # We've seen this shutdown already, but we are preserving
2062 # the domain for debugging. Leave it alone.
2063 return
2065 log.warn('Domain has crashed: name=%s id=%d.',
2066 self.info['name_label'], self.domid)
2067 self._writeVm(LAST_SHUTDOWN_REASON, 'crash')
2069 restart_reason = 'crash'
2070 self._stateSet(DOM_STATE_HALTED)
2072 elif xeninfo['shutdown']:
2073 self._stateSet(DOM_STATE_SHUTDOWN)
2074 if self.readDom('xend/shutdown_completed'):
2075 # We've seen this shutdown already, but we are preserving
2076 # the domain for debugging. Leave it alone.
2077 return
2079 else:
2080 reason = shutdown_reason(xeninfo['shutdown_reason'])
2082 log.info('Domain has shutdown: name=%s id=%d reason=%s.',
2083 self.info['name_label'], self.domid, reason)
2084 self._writeVm(LAST_SHUTDOWN_REASON, reason)
2086 self._clearRestart()
2088 if reason == 'suspend':
2089 self._stateSet(DOM_STATE_SUSPENDED)
2090 # Don't destroy the domain. XendCheckpoint will do
2091 # this once it has finished. However, stop watching
2092 # the VM path now, otherwise we will end up with one
2093 # watch for the old domain, and one for the new.
2094 self._unwatchVm()
2095 elif reason in ('poweroff', 'reboot'):
2096 restart_reason = reason
2097 else:
2098 self.destroy()
2100 elif self.dompath is None:
2101 # We have yet to manage to call introduceDomain on this
2102 # domain. This can happen if a restore is in progress, or has
2103 # failed. Ignore this domain.
2104 pass
2105 else:
2106 # Domain is alive. If we are shutting it down, log a message
2107 # if it seems unresponsive.
2108 if xeninfo['paused']:
2109 self._stateSet(DOM_STATE_PAUSED)
2110 else:
2111 self._stateSet(DOM_STATE_RUNNING)
2113 if self.shutdownStartTime:
2114 timeout = (SHUTDOWN_TIMEOUT - time.time() +
2115 self.shutdownStartTime)
2116 if (timeout < 0 and not self.readDom('xend/unresponsive')):
2117 log.info(
2118 "Domain shutdown timeout expired: name=%s id=%s",
2119 self.info['name_label'], self.domid)
2120 self.storeDom('xend/unresponsive', 'True')
2121 finally:
2122 self.refresh_shutdown_lock.release()
2124 if restart_reason and not self.restart_in_progress:
2125 self.restart_in_progress = True
2126 threading.Thread(target = self._maybeRestart,
2127 args = (restart_reason,)).start()
2131 # Restart functions - handling whether we come back up on shutdown.
2134 def _clearRestart(self):
2135 self._removeDom("xend/shutdown_start_time")
2137 def _maybeDumpCore(self, reason):
2138 if reason == 'crash':
2139 if xoptions.get_enable_dump() or self.get_on_crash() \
2140 in ['coredump_and_destroy', 'coredump_and_restart']:
2141 try:
2142 self.dumpCore()
2143 except XendError:
2144 # This error has been logged -- there's nothing more
2145 # we can do in this context.
2146 pass
2148 def _maybeRestart(self, reason):
2149 # Before taking configured action, dump core if configured to do so.
2151 self._maybeDumpCore(reason)
2153 # Dispatch to the correct method based upon the configured on_{reason}
2154 # behaviour.
2155 actions = {"destroy" : self.destroy,
2156 "restart" : self._restart,
2157 "preserve" : self._preserve,
2158 "rename-restart" : self._renameRestart,
2159 "coredump-destroy" : self.destroy,
2160 "coredump-restart" : self._restart}
2162 action_conf = {
2163 'poweroff': 'actions_after_shutdown',
2164 'reboot': 'actions_after_reboot',
2165 'crash': 'actions_after_crash',
2166 }
2168 action_target = self.info.get(action_conf.get(reason))
2169 func = actions.get(action_target, None)
2170 if func and callable(func):
2171 func()
2172 else:
2173 self.destroy() # default to destroy
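# Worked example (editorial): after a guest reboot, reason == 'reboot' maps
# via action_conf to self.info['actions_after_reboot']; a value of 'restart'
# resolves to self._restart, while any unrecognised value falls through to
# the default self.destroy().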
2175 def _renameRestart(self):
2176 self._restart(True)
2178 def _restart(self, rename = False):
2179 """Restart the domain after it has exited.
2181 @param rename True if the old domain is to be renamed and preserved,
2182 False if it is to be destroyed.
2183 """
2184 from xen.xend import XendDomain
2186 if self._readVm(RESTART_IN_PROGRESS):
2187 log.error('Xend failed during restart of domain %s. '
2188 'Refusing to restart to avoid loops.',
2189 str(self.domid))
2190 self.destroy()
2191 return
2193 old_domid = self.domid
2194 self._writeVm(RESTART_IN_PROGRESS, 'True')
2196 elapse = time.time() - self.info['start_time']
2197 if elapse < MINIMUM_RESTART_TIME:
2198 log.error('VM %s restarting too fast (Elapsed time: %f seconds). '
2199 'Refusing to restart to avoid loops.',
2200 self.info['name_label'], elapse)
2201 self.destroy()
2202 return
2204 prev_vm_xend = self._listRecursiveVm('xend')
2205 new_dom_info = self.info
2206 try:
2207 if rename:
2208 new_dom_info = self._preserveForRestart()
2209 else:
2210 self._unwatchVm()
2211 self.destroy()
2213 # new_dom's VM will be the same as this domain's VM, except where
2214 # the rename flag has instructed us to call preserveForRestart.
2215 # In that case, it is important that we remove the
2216 # RESTART_IN_PROGRESS node from the new domain, not the old one,
2217 # once the new one is available.
2219 new_dom = None
2220 try:
2221 new_dom = XendDomain.instance().domain_create_from_dict(
2222 new_dom_info)
2223 for x in prev_vm_xend[0][1]:
2224 new_dom._writeVm('xend/%s' % x[0], x[1])
2225 new_dom.waitForDevices()
2226 new_dom.unpause()
2227 rst_cnt = new_dom._readVm('xend/restart_count')
2228 rst_cnt = int(rst_cnt) + 1
2229 new_dom._writeVm('xend/restart_count', str(rst_cnt))
2230 new_dom._removeVm(RESTART_IN_PROGRESS)
2231 except:
2232 if new_dom:
2233 new_dom._removeVm(RESTART_IN_PROGRESS)
2234 new_dom.destroy()
2235 else:
2236 self._removeVm(RESTART_IN_PROGRESS)
2237 raise
2238 except:
2239 log.exception('Failed to restart domain %s.', str(old_domid))
2241 def _preserveForRestart(self):
2242 """Preserve a domain that has been shut down, by giving it a new UUID,
2243 cloning the VM details, and giving it a new name. This allows us to
2244 keep this domain for debugging, but restart a new one in its place
2245 preserving the restart semantics (name and UUID preserved).
2246 """
2248 new_uuid = uuid.createString()
2249 new_name = 'Domain-%s' % new_uuid
2250 log.info("Renaming dead domain %s (%d, %s) to %s (%s).",
2251 self.info['name_label'], self.domid, self.info['uuid'],
2252 new_name, new_uuid)
2253 self._unwatchVm()
2254 self._releaseDevices()
2255 # Remove existing vm node in xenstore
2256 self._removeVm()
2257 new_dom_info = self.info.copy()
2258 new_dom_info['name_label'] = self.info['name_label']
2259 new_dom_info['uuid'] = self.info['uuid']
2260 self.info['name_label'] = new_name
2261 self.info['uuid'] = new_uuid
2262 self.vmpath = XS_VMROOT + new_uuid
2263 # Write out new vm node to xenstore
2264 self._storeVmDetails()
2265 self._preserve()
2266 return new_dom_info
2269 def _preserve(self):
2270 log.info("Preserving dead domain %s (%d).", self.info['name_label'],
2271 self.domid)
2272 self._unwatchVm()
2273 self.storeDom('xend/shutdown_completed', 'True')
2274 self._stateSet(DOM_STATE_HALTED)
2277 # Debugging ..
2280 def dumpCore(self, corefile = None):
2281 """Create a core dump for this domain.
2283 @raise: XendError if core dumping failed.
2284 """
2286 if not corefile:
2287 # To prohibit directory traversal
2288 based_name = os.path.basename(self.info['name_label'])
2290 coredir = "/var/xen/dump/%s" % (based_name)
2291 if not os.path.exists(coredir):
2292 try:
2293 mkdir.parents(coredir, stat.S_IRWXU)
2294 except Exception, ex:
2295 log.error("Cannot create directory: %s" % str(ex))
2297 if not os.path.isdir(coredir):
2298 # Fall back to the base dump directory
2299 coredir = '/var/xen/dump'
2301 this_time = time.strftime("%Y-%m%d-%H%M.%S", time.localtime())
2302 corefile = "%s/%s-%s.%s.core" % (coredir, this_time,
2303 self.info['name_label'], self.domid)
2305 if os.path.isdir(corefile):
2306 raise XendError("Cannot dump core in a directory: %s" %
2307 corefile)
2309 try:
2310 try:
2311 self._writeVm(DUMPCORE_IN_PROGRESS, 'True')
2312 xc.domain_dumpcore(self.domid, corefile)
2313 except RuntimeError, ex:
2314 corefile_incomp = corefile+'-incomplete'
2315 try:
2316 os.rename(corefile, corefile_incomp)
2317 except:
2318 pass
2320 log.error("core dump failed: id = %s name = %s: %s",
2321 self.domid, self.info['name_label'], str(ex))
2322 raise XendError("Failed to dump core: %s" % str(ex))
2323 finally:
2324 self._removeVm(DUMPCORE_IN_PROGRESS)
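# Illustrative sketch (editorial): for a domain named 'demo' with domid 3,
# dumped on 2010-02-03 at 09:45.02, the generated path would be
# /var/xen/dump/demo/2010-0203-0945.02-demo.3.core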
2327 # Device creation/deletion functions
2330 def _createDevice(self, deviceClass, devConfig):
2331 return self.getDeviceController(deviceClass).createDevice(devConfig)
2333 def _waitForDevice(self, deviceClass, devid):
2334 return self.getDeviceController(deviceClass).waitForDevice(devid)
2336 def _waitForDeviceUUID(self, dev_uuid):
2337 deviceClass, config = self.info['devices'].get(dev_uuid)
2338 self._waitForDevice(deviceClass, config['devid'])
2340 def _waitForDevice_destroy(self, deviceClass, devid, backpath):
2341 return self.getDeviceController(deviceClass).waitForDevice_destroy(
2342 devid, backpath)
2344 def _reconfigureDevice(self, deviceClass, devid, devconfig):
2345 return self.getDeviceController(deviceClass).reconfigureDevice(
2346 devid, devconfig)
2348 def _createDevices(self):
2349 """Create the devices for a vm.
2351 @raise: VmError for invalid devices
2352 """
2353 if self.image:
2354 self.image.prepareEnvironment()
2356 vscsi_uuidlist = {}
2357 vscsi_devidlist = []
2358 ordered_refs = self.info.ordered_device_refs()
2359 for dev_uuid in ordered_refs:
2360 devclass, config = self.info['devices'][dev_uuid]
2361 if devclass in XendDevices.valid_devices() and devclass != 'vscsi':
2362 log.info("createDevice: %s : %s" % (devclass, scrub_password(config)))
2363 dev_uuid = config.get('uuid')
2365 if devclass == 'pci':
2366 self.pci_dev_check_assignability_and_do_FLR(config)
2368 if devclass != 'pci' or not self.info.is_hvm() :
2369 devid = self._createDevice(devclass, config)
2371 # store devid in XendConfig for caching reasons
2372 if dev_uuid in self.info['devices']:
2373 self.info['devices'][dev_uuid][1]['devid'] = devid
2375 elif devclass == 'vscsi':
2376 vscsi_config = config.get('devs', [])[0]
2377 devid = vscsi_config.get('devid', '')
2378 dev_uuid = config.get('uuid')
2379 vscsi_uuidlist[devid] = dev_uuid
2380 vscsi_devidlist.append(devid)
2382 # Sort by devid so the devices appear in order as /dev/sdXX in the guest.
2383 if len(vscsi_uuidlist) > 0:
2384 vscsi_devidlist.sort()
2385 for vscsiid in vscsi_devidlist:
2386 dev_uuid = vscsi_uuidlist[vscsiid]
2387 devclass, config = self.info['devices'][dev_uuid]
2388 log.info("createDevice: %s : %s" % (devclass, scrub_password(config)))
2389 dev_uuid = config.get('uuid')
2390 devid = self._createDevice(devclass, config)
2391 # store devid in XendConfig for caching reasons
2392 if dev_uuid in self.info['devices']:
2393 self.info['devices'][dev_uuid][1]['devid'] = devid
2396 if self.image:
2397 self.image.createDeviceModel()
2399 # If there are PCI pass-through devices, get the virtual PCI slot info from qemu.
2400 self.pci_device_configure_boot()
2402 def _releaseDevices(self, suspend = False):
2403 """Release all domain's devices. Nothrow guarantee."""
2404 if self.image:
2405 try:
2406 log.debug("Destroying device model")
2407 self.image.destroyDeviceModel()
2408 except Exception, e:
2409 log.exception("Device model destroy failed %s" % str(e))
2410 else:
2411 log.debug("No device model")
2413 log.debug("Releasing devices")
2414 t = xstransact("%s/device" % self.vmpath)
2415 try:
2416 for devclass in XendDevices.valid_devices():
2417 for dev in t.list(devclass):
2418 try:
2419 log.debug("Removing %s", dev);
2420 self.destroyDevice(devclass, dev, False);
2421 except:
2422 # Log and swallow any exceptions in removal --
2423 # there's nothing more we can do.
2424 log.exception("Device release failed: %s; %s; %s",
2425 self.info['name_label'],
2426 devclass, dev)
2427 finally:
2428 t.abort()
2430 def getDeviceController(self, name):
2431 """Get the device controller for this domain, and if it
2432 doesn't exist, create it.
2434 @param name: device class name
2435 @type name: string
2436 @rtype: subclass of DevController
2437 """
2438 if name not in self._deviceControllers:
2439 devController = XendDevices.make_controller(name, self)
2440 if not devController:
2441 raise XendError("Unknown device type: %s" % name)
2442 self._deviceControllers[name] = devController
2444 return self._deviceControllers[name]
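# Usage sketch (editorial): controllers are created lazily and cached, so
# repeated lookups return the same instance, e.g.:
#
# vbd_ctrl = self.getDeviceController('vbd')
# assert vbd_ctrl is self.getDeviceController('vbd')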
2447 # Migration functions (public)
2450 def testMigrateDevices(self, network, dst):
2451 """ Notify all device about intention of migration
2452 @raise: XendError for a device that cannot be migrated
2453 """
2454 for (n, c) in self.info.all_devices_sxpr():
2455 rc = self.migrateDevice(n, c, network, dst, DEV_MIGRATE_TEST, self.getName())
2456 if rc != 0:
2457 raise XendError("Device of type '%s' refuses migration." % n)
2459 def migrateDevices(self, network, dst, step, domName=''):
2460 """Notify the devices about migration
2461 """
2462 ctr = 0
2463 try:
2464 for (dev_type, dev_conf) in self.info.all_devices_sxpr():
2465 self.migrateDevice(dev_type, dev_conf, network, dst,
2466 step, domName)
2467 ctr = ctr + 1
2468 except:
2469 for dev_type, dev_conf in self.info.all_devices_sxpr():
2470 if ctr == 0:
2471 step = step - 1
2472 ctr = ctr - 1
2473 self._recoverMigrateDevice(dev_type, dev_conf, network,
2474 dst, step, domName)
2475 raise
2477 def migrateDevice(self, deviceClass, deviceConfig, network, dst,
2478 step, domName=''):
2479 return self.getDeviceController(deviceClass).migrate(deviceConfig,
2480 network, dst, step, domName)
2482 def _recoverMigrateDevice(self, deviceClass, deviceConfig, network,
2483 dst, step, domName=''):
2484 return self.getDeviceController(deviceClass).recover_migrate(
2485 deviceConfig, network, dst, step, domName)
2487 def setChangeHomeServer(self, chs):
2488 if chs is not None:
2489 self.info['change_home_server'] = bool(chs)
2490 else:
2491 if self.info.has_key('change_home_server'):
2492 del self.info['change_home_server']
2495 ## private:
2497 def _constructDomain(self):
2498 """Construct the domain.
2500 @raise: VmError on error
2501 """
2503 log.debug('XendDomainInfo.constructDomain')
2505 self.shutdownStartTime = None
2506 self.restart_in_progress = False
2508 hap = 0
2509 hvm = self.info.is_hvm()
2510 if hvm:
2511 hap = self.info.is_hap()
2512 info = xc.xeninfo()
2513 if 'hvm' not in info['xen_caps']:
2514 raise VmError("HVM guest support is unavailable: is VT/AMD-V "
2515 "supported by your CPU and enabled in your "
2516 "BIOS?")
2518 # Hack to pre-reserve some memory for initial domain creation.
2519 # There is an implicit memory overhead for any domain creation. This
2520 # overhead is greater for some types of domain than others. For
2521 # example, an x86 HVM domain will have a default shadow-pagetable
2522 # allocation of 1MB. We free up 4MB here to be on the safe side.
2523 # 2MB memory allocation was not enough in some cases, so it's 4MB now
2524 balloon.free(4*1024, self) # 4MB should be plenty
2526 ssidref = 0
2527 if security.on() == xsconstants.XS_POLICY_USE:
2528 ssidref = security.calc_dom_ssidref_from_info(self.info)
2529 if security.has_authorization(ssidref) == False:
2530 raise VmError("VM is not authorized to run.")
2532 s3_integrity = 0
2533 if self.info.has_key('s3_integrity'):
2534 s3_integrity = self.info['s3_integrity']
2536 oos = self.info['platform'].get('oos', 1)
2537 oos_off = 1 - int(oos)
2539 flags = (int(hvm) << 0) | (int(hap) << 1) | (int(s3_integrity) << 2) | (int(oos_off) << 3)
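# Worked example (editorial): an HVM guest with HAP enabled and both
# s3_integrity and oos_off clear packs into:
#
# >>> (1 << 0) | (1 << 1) | (0 << 2) | (0 << 3)
# 3 # bit 0 = HVM, bit 1 = HAP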
2541 try:
2542 self.domid = xc.domain_create(
2543 domid = 0,
2544 ssidref = ssidref,
2545 handle = uuid.fromString(self.info['uuid']),
2546 flags = flags,
2547 target = self.info.target())
2548 except Exception, e:
2549 # We may get here if ACM does not permit the operation.
2550 if security.on() == xsconstants.XS_POLICY_ACM:
2551 raise VmError('Domain in conflict set with running domain?')
2552 log.exception(e)
2554 if not self.domid or self.domid < 0:
2555 failmsg = 'Creating domain failed: name=%s' % self.info['name_label']
2556 if self.domid:
2557 failmsg += ', error=%i' % int(self.domid)
2558 raise VmError(failmsg)
2560 self.dompath = GetDomainPath(self.domid)
2562 self._recreateDom()
2564 # Set TSC mode of domain
2565 tsc_mode = self.info["platform"].get("tsc_mode")
2566 if arch.type == "x86" and tsc_mode is not None:
2567 xc.domain_set_tsc_info(self.domid, int(tsc_mode))
2569 # Set timer configuration of domain
2570 timer_mode = self.info["platform"].get("timer_mode")
2571 if hvm and timer_mode is not None:
2572 xc.hvm_set_param(self.domid, HVM_PARAM_TIMER_MODE,
2573 long(timer_mode))
2575 # Set Viridian interface configuration of domain
2576 viridian = self.info["platform"].get("viridian")
2577 if arch.type == "x86" and hvm and viridian is not None:
2578 xc.hvm_set_param(self.domid, HVM_PARAM_VIRIDIAN, long(viridian))
2580 # If nomigrate is set, disable migration
2581 nomigrate = self.info["platform"].get("nomigrate")
2582 if nomigrate is not None and long(nomigrate) != 0:
2583 xc.domain_disable_migrate(self.domid)
2585 # Optionally enable virtual HPET
2586 hpet = self.info["platform"].get("hpet")
2587 if hvm and hpet is not None:
2588 xc.hvm_set_param(self.domid, HVM_PARAM_HPET_ENABLED,
2589 long(hpet))
2591 # Optionally enable periodic vpt aligning
2592 vpt_align = self.info["platform"].get("vpt_align")
2593 if hvm and vpt_align is not None:
2594 xc.hvm_set_param(self.domid, HVM_PARAM_VPT_ALIGN,
2595 long(vpt_align))
2597 # Set maximum number of vcpus in domain
2598 xc.domain_max_vcpus(self.domid, int(self.info['VCPUs_max']))
2600 # Check for cpu_{cap|weight} validity for credit scheduler
2601 if XendNode.instance().xenschedinfo() == 'credit':
2602 cap = self.getCap()
2603 weight = self.getWeight()
2605 assert type(weight) == int
2606 assert type(cap) == int
2608 if weight < 1 or weight > 65535:
2609 raise VmError("Cpu weight out of range, valid values are within range from 1 to 65535")
2611 if cap < 0 or cap > self.getVCpuCount() * 100:
2612 raise VmError("Cpu cap out of range, valid range is from 0 to %s for specified number of vcpus" %
2613 (self.getVCpuCount() * 100))
2615 # Test whether the devices can be assigned with VT-d
2616 self.info.update_platform_pci()
2617 pci = self.info["platform"].get("pci")
2618 pci_str = ''
2619 if pci and len(pci) > 0:
2620 pci = map(lambda x: x[0:4], pci) # strip options
2621 pci_str = str(pci)
2623 # This test is done for both pv and hvm guests.
2624 for p in pci:
2625 pci_name = '%04x:%02x:%02x.%x' % \
2626 (parse_hex(p[0]), parse_hex(p[1]), parse_hex(p[2]), parse_hex(p[3]))
2627 try:
2628 pci_device = PciDevice(parse_pci_name(pci_name))
2629 except Exception, e:
2630 raise VmError("pci: failed to locate device and "+
2631 "parse its resources - "+str(e))
2632 if pci_device.driver != 'pciback' and pci_device.driver != 'pci-stub':
2633 raise VmError("pci: neither pciback nor pci-stub owns device %s"
2634 % pci_device.name)
2635 if pci_name in get_all_assigned_pci_devices():
2636 raise VmError("failed to assign device %s that has"
2637 " already been assigned to other domain." % pci_name)
2639 if hvm and pci_str != '':
2640 bdf = xc.test_assign_device(0, pci_str)
2641 if bdf != 0:
2642 if bdf == -1:
2643 raise VmError("failed to assign device: maybe the platform"
2644 " doesn't support VT-d, or VT-d isn't enabled"
2645 " properly?")
2646 bus = (bdf >> 16) & 0xff
2647 devfn = (bdf >> 8) & 0xff
2648 dev = (devfn >> 3) & 0x1f
2649 func = devfn & 0x7
2650 raise VmError("failed to assign device %02x:%02x.%x: maybe it has"
2651 " already been assigned to other domain, or maybe"
2652 " it doesn't exist." % (bus, dev, func))
2654 # register the domain in the list
2655 from xen.xend import XendDomain
2656 XendDomain.instance().add_domain(self)
2658 def _introduceDomain(self):
2659 assert self.domid is not None
2660 assert self.store_mfn is not None
2661 assert self.store_port is not None
2663 try:
2664 IntroduceDomain(self.domid, self.store_mfn, self.store_port)
2665 except RuntimeError, exn:
2666 raise XendError(str(exn))
2668 def _setTarget(self, target):
2669 assert self.domid is not None
2671 try:
2672 SetTarget(self.domid, target)
2673 self.storeDom('target', target)
2674 except RuntimeError, exn:
2675 raise XendError(str(exn))
2678 def _setCPUAffinity(self):
2679 """ Repin domain vcpus if a restricted cpus list is provided.
2680 Returns the chosen node number.
2681 """
2683 def has_cpus():
2684 if self.info['cpus'] is not None:
2685 for c in self.info['cpus']:
2686 if c:
2687 return True
2688 return False
2690 def has_cpumap():
2691 if self.info.has_key('vcpus_params'):
2692 for k, v in self.info['vcpus_params'].items():
2693 if k.startswith('cpumap'):
2694 return True
2695 return False
2697 index = 0
2698 if has_cpumap():
2699 for v in range(0, self.info['VCPUs_max']):
2700 if self.info['vcpus_params'].has_key('cpumap%i' % v):
2701 cpumask = map(int, self.info['vcpus_params']['cpumap%i' % v].split(','))
2702 xc.vcpu_setaffinity(self.domid, v, cpumask)
2703 elif has_cpus():
2704 for v in range(0, self.info['VCPUs_max']):
2705 if self.info['cpus'][v]:
2706 xc.vcpu_setaffinity(self.domid, v, self.info['cpus'][v])
2707 else:
2708 def find_relaxed_node(node_list):
2709 import sys
2710 nr_nodes = info['max_node_id']+1
2711 if node_list is None:
2712 node_list = range(0, nr_nodes)
2713 nodeload = [0]
2714 nodeload = nodeload * nr_nodes
2715 from xen.xend import XendDomain
2716 doms = XendDomain.instance().list('all')
2717 for dom in filter (lambda d: d.domid != self.domid, doms):
2718 cpuinfo = dom.getVCPUInfo()
2719 for vcpu in sxp.children(cpuinfo, 'vcpu'):
2720 if sxp.child_value(vcpu, 'online') == 0: continue
2721 cpumap = list(sxp.child_value(vcpu,'cpumap'))
2722 for i in range(0, nr_nodes):
2723 node_cpumask = info['node_to_cpu'][i]
2724 for j in node_cpumask:
2725 if j in cpumap:
2726 nodeload[i] += 1
2727 break
2728 for i in range(0, nr_nodes):
2729 if len(info['node_to_cpu'][i]) == 0:
2730 nodeload[i] += 8
2731 else:
2732 nodeload[i] = int(nodeload[i] * 16 / len(info['node_to_cpu'][i]))
2733 if i not in node_list:
2734 nodeload[i] += 8
2735 return map(lambda x: x[0], sorted(enumerate(nodeload), key=lambda x:x[1]))
2737 info = xc.physinfo()
2738 if info['nr_nodes'] > 1:
2739 node_memory_list = info['node_to_memory']
2740 needmem = self.image.getRequiredAvailableMemory(self.info['memory_dynamic_max']) / 1024
2741 candidate_node_list = []
2742 for i in range(0, info['max_node_id']+1):
2743 if node_memory_list[i] >= needmem and len(info['node_to_cpu'][i]) > 0:
2744 candidate_node_list.append(i)
2745 best_node = find_relaxed_node(candidate_node_list)[0]
2746 cpumask = info['node_to_cpu'][best_node]
2747 best_nodes = find_relaxed_node(filter(lambda x: x != best_node, range(0,info['max_node_id']+1)))
2748 for node_idx in best_nodes:
2749 if len(cpumask) >= self.info['VCPUs_max']:
2750 break
2751 cpumask = cpumask + info['node_to_cpu'][node_idx]
2752 log.debug("allocating additional NUMA node %d", node_idx)
2753 for v in range(0, self.info['VCPUs_max']):
2754 xc.vcpu_setaffinity(self.domid, v, cpumask)
2755 return index
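# Illustrative sketch (editorial, simplified numbers): find_relaxed_node
# scores each node by the VCPUs of other domains that may run there, scaled
# by the node's CPU count, then sorts node indices by ascending load:
#
# >>> nodeload = [32, 8, 16]
# >>> map(lambda x: x[0], sorted(enumerate(nodeload), key=lambda x: x[1]))
# [1, 2, 0] # node 1 is the least loaded candidate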
2757 def _freeDMAmemory(self, node):
2759 # If we are PV and have PCI devices, the guest will
2760 # turn on a SWIOTLB. The SWIOTLB _MUST_ be located in the DMA32
2761 # zone (under 4GB). To do so, we need to balloon down Dom0 until
2762 # there is enough (64MB) memory under the 4GB mark. This ballooning
2763 # might take out more memory than just 64MB though :-(
2764 if not self.info.is_pv_and_has_pci():
2765 return
2767 retries = 2000
2768 ask_for_mem = 0
2769 need_mem = 0
2770 try:
2771 while (retries > 0):
2772 physinfo = xc.physinfo()
2773 free_mem = physinfo['free_memory']
2774 max_node_id = physinfo['max_node_id']
2775 node_to_dma32_mem = physinfo['node_to_dma32_mem']
2776 if (node > max_node_id):
2777 return
2778 # An extra 2MB above the 64MB seems to do the trick.
2779 need_mem = 64 * 1024 + 2048 - node_to_dma32_mem[node]
2780 # our starting point. We ask just for the difference so as to
2781 # have an extra 64MB under 4GB.
2782 ask_for_mem = max(need_mem, ask_for_mem)
2783 if (need_mem > 0):
2784 log.debug('_freeDMAmemory (%d) Need %dKiB DMA memory. '
2785 'Asking for %dKiB', retries, need_mem,
2786 ask_for_mem)
2788 balloon.free(ask_for_mem, self)
2789 ask_for_mem = ask_for_mem + 2048
2790 else:
2791 # OK. We got enough DMA memory.
2792 break
2793 retries = retries - 1
2794 except:
2795 # This is best-effort after all.
2796 need_mem = max(1, need_mem)
2797 pass
2799 if (need_mem > 0):
2800 log.warn('We tried our best to balloon down DMA memory to '
2801 'accommodate your PV guest. We need %dKiB extra memory.',
2802 need_mem)
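# Worked example (editorial): if the node currently has 50000 KiB below
# 4GB, the first pass asks balloon.free for 64*1024 + 2048 - 50000 ==
# 17584 KiB, and each retry asks for 2048 KiB more until the DMA32 zone
# holds roughly 66MB.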
2804 def _setSchedParams(self):
2805 if XendNode.instance().xenschedinfo() == 'credit':
2806 from xen.xend import XendDomain
2807 XendDomain.instance().domain_sched_credit_set(self.getDomid(),
2808 self.getWeight(),
2809 self.getCap())
2811 def _initDomain(self):
2812 log.debug('XendDomainInfo.initDomain: %s %s',
2813 self.domid,
2814 self.info['vcpus_params']['weight'])
2816 self._configureBootloader()
2818 try:
2819 self.image = image.create(self, self.info)
2821 # repin domain vcpus if a restricted cpus list is provided
2822 # this is done prior to memory allocation to aid in memory
2823 # distribution for NUMA systems.
2824 node = self._setCPUAffinity()
2826 # Set scheduling parameters.
2827 self._setSchedParams()
2829 # Use architecture- and image-specific calculations to determine
2830 # the various headrooms necessary, given the raw configured
2831 # values. maxmem, memory, and shadow are all in KiB.
2832 # but memory_static_max etc are all stored in bytes now.
2833 memory = self.image.getRequiredAvailableMemory(
2834 self.info['memory_dynamic_max'] / 1024)
2835 maxmem = self.image.getRequiredAvailableMemory(
2836 self.info['memory_static_max'] / 1024)
2837 shadow = self.image.getRequiredShadowMemory(
2838 self.info['shadow_memory'] * 1024,
2839 self.info['memory_static_max'] / 1024)
2841 log.debug("_initDomain:shadow_memory=0x%x, memory_static_max=0x%x, memory_static_min=0x%x.", self.info['shadow_memory'], self.info['memory_static_max'], self.info['memory_static_min'],)
2842 # Round shadow up to a multiple of a MiB, as shadow_mem_control
2843 # takes MiB and we must not round down and end up under-providing.
2844 shadow = ((shadow + 1023) / 1024) * 1024
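# Worked example (editorial): shadow == 1500 KiB rounds up to
# ((1500 + 1023) / 1024) * 1024 == 2048 KiB, i.e. the next whole MiB.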
2846 # set memory limit
2847 xc.domain_setmaxmem(self.domid, maxmem)
2849 vtd_mem = 0
2850 info = xc.physinfo()
2851 if 'hvm_directio' in info['virt_caps']:
2852 # Reserve 1 page per MiB of RAM for separate VT-d page table.
2853 vtd_mem = 4 * (self.info['memory_static_max'] / 1024 / 1024)
2854 # Round vtd_mem up to a multiple of a MiB.
2855 vtd_mem = ((vtd_mem + 1023) / 1024) * 1024
2857 self.guest_bitsize = self.image.getBitSize()
2858 # Make sure there's enough RAM available for the domain
2859 balloon.free(memory + shadow + vtd_mem, self)
2861 # Set up the shadow memory
2862 shadow_cur = xc.shadow_mem_control(self.domid, shadow / 1024)
2863 self.info['shadow_memory'] = shadow_cur
2865 # machine address size
2866 if self.info.has_key('machine_address_size'):
2867 log.debug("_initDomain: setting maximum machine address size %d" % self.info['machine_address_size'])
2868 xc.domain_set_machine_address_size(self.domid, self.info['machine_address_size'])
2870 if self.info.has_key('suppress_spurious_page_faults') and self.info['suppress_spurious_page_faults']:
2871 log.debug("_initDomain: suppressing spurious page faults")
2872 xc.domain_suppress_spurious_page_faults(self.domid)
2874 self._createChannels()
2876 channel_details = self.image.createImage()
2878 self.store_mfn = channel_details['store_mfn']
2879 if 'console_mfn' in channel_details:
2880 self.console_mfn = channel_details['console_mfn']
2881 if 'notes' in channel_details:
2882 self.info.set_notes(channel_details['notes'])
2883 if 'native_protocol' in channel_details:
2884 self.native_protocol = channel_details['native_protocol']
2886 self._introduceDomain()
2887 if self.info.target():
2888 self._setTarget(self.info.target())
2890 self._freeDMAmemory(node)
2892 self._createDevices()
2894 self.image.cleanupTmpImages()
2896 self.info['start_time'] = time.time()
2898 self._stateSet(DOM_STATE_RUNNING)
2899 except VmError, exn:
2900 log.exception("XendDomainInfo.initDomain: exception occurred")
2901 if self.image:
2902 self.image.cleanupTmpImages()
2903 raise exn
2904 except RuntimeError, exn:
2905 log.exception("XendDomainInfo.initDomain: exception occurred")
2906 if self.image:
2907 self.image.cleanupTmpImages()
2908 raise VmError(str(exn))
2911 def cleanupDomain(self):
2912 """Cleanup domain resources; release devices. Idempotent. Nothrow
2913 guarantee."""
2915 self.refresh_shutdown_lock.acquire()
2916 try:
2917 self.unwatchShutdown()
2918 self._releaseDevices()
2919 bootloader_tidy(self)
2921 if self.image:
2922 self.image = None
2924 try:
2925 self._removeDom()
2926 except:
2927 log.exception("Removing domain path failed.")
2929 self._stateSet(DOM_STATE_HALTED)
2930 self.domid = None # Do not push into _stateSet()!
2931 finally:
2932 self.refresh_shutdown_lock.release()
2935 def unwatchShutdown(self):
2936 """Remove the watch on the domain's control/shutdown node, if any.
2937 Idempotent. Nothrow guarantee. Expects to be protected by the
2938 refresh_shutdown_lock."""
2940 try:
2941 try:
2942 if self.shutdownWatch:
2943 self.shutdownWatch.unwatch()
2944 finally:
2945 self.shutdownWatch = None
2946 except:
2947 log.exception("Unwatching control/shutdown failed.")
2949 def waitForShutdown(self):
2950 self.state_updated.acquire()
2951 try:
2952 while self._stateGet() in (DOM_STATE_RUNNING,DOM_STATE_PAUSED):
2953 self.state_updated.wait(timeout=1.0)
2954 finally:
2955 self.state_updated.release()
2957 def waitForSuspend(self):
2958 """Wait for the guest to respond to a suspend request by
2959 shutting down. If the guest hasn't re-written control/shutdown
2960 after a certain amount of time, it's obviously not listening and
2961 won't suspend, so we give up. HVM guests with no PV drivers
2962 should already be shut down.
2963 """
2964 state = "suspend"
2965 nr_tries = 60
2967 self.state_updated.acquire()
2968 try:
2969 while self._stateGet() in (DOM_STATE_RUNNING,DOM_STATE_PAUSED):
2970 self.state_updated.wait(1.0)
2971 if state == "suspend":
2972 if nr_tries == 0:
2973 msg = ('Timeout waiting for domain %s to suspend'
2974 % self.domid)
2975 self._writeDom('control/shutdown', '')
2976 raise XendError(msg)
2977 state = self.readDom('control/shutdown')
2978 nr_tries -= 1
2979 finally:
2980 self.state_updated.release()
2983 # TODO: recategorise - called from XendCheckpoint
2986 def completeRestore(self, store_mfn, console_mfn):
2988 log.debug("XendDomainInfo.completeRestore")
2990 self.store_mfn = store_mfn
2991 self.console_mfn = console_mfn
2993 self._introduceDomain()
2994 self.image = image.create(self, self.info)
2995 if self.image:
2996 self.image.createDeviceModel(True)
2997 self._storeDomDetails()
2998 self._registerWatches()
2999 self.refreshShutdown()
3001 log.debug("XendDomainInfo.completeRestore done")
3004 def _endRestore(self):
3005 self.setResume(False)
3008 # VM Destroy
3011 def _prepare_phantom_paths(self):
3012 # get associated devices to destroy
3013 # build list of phantom devices to be removed after normal devices
3014 plist = []
3015 if self.domid is not None:
3016 t = xstransact("%s/device/vbd" % GetDomainPath(self.domid))
3017 try:
3018 for dev in t.list():
3019 backend_phantom_vbd = xstransact.Read("%s/device/vbd/%s/phantom_vbd" \
3020 % (self.dompath, dev))
3021 if backend_phantom_vbd is not None:
3022 frontend_phantom_vbd = xstransact.Read("%s/frontend" \
3023 % backend_phantom_vbd)
3024 plist.append(backend_phantom_vbd)
3025 plist.append(frontend_phantom_vbd)
3026 finally:
3027 t.abort()
3028 return plist
3030 def _cleanup_phantom_devs(self, plist):
3031 # remove phantom devices
3032 if plist:
3033 time.sleep(2)
3034 for paths in plist:
3035 if paths.find('backend') != -1:
3036 # Modify online status /before/ updating state (latter is watched by
3037 # drivers, so this ordering avoids a race).
3038 xstransact.Write(paths, 'online', "0")
3039 xstransact.Write(paths, 'state', str(xenbusState['Closing']))
3040 # force
3041 xstransact.Remove(paths)
3043 def destroy(self):
3044 """Cleanup VM and destroy domain. Nothrow guarantee."""
3046 if self.domid is None:
3047 return
3048 from xen.xend import XendDomain
3049 log.debug("XendDomainInfo.destroy: domid=%s", str(self.domid))
3051 paths = self._prepare_phantom_paths()
3053 if self.dompath is not None:
3054 try:
3055 xc.domain_destroy_hook(self.domid)
3056 xc.domain_pause(self.domid)
3057 do_FLR(self.domid, self.info.is_hvm())
3058 xc.domain_destroy(self.domid)
3059 for state in DOM_STATES_OLD:
3060 self.info[state] = 0
3061 self._stateSet(DOM_STATE_HALTED)
3062 except:
3063 log.exception("XendDomainInfo.destroy: domain destruction failed.")
3065 XendDomain.instance().remove_domain(self)
3066 self.cleanupDomain()
3068 if self.info.is_hvm() or self.guest_bitsize != 32:
3069 if self.alloc_mem:
3070 import MemoryPool
3071 log.debug("%s KiB need to add to Memory pool" %self.alloc_mem)
3072 MemoryPool.instance().increase_memory(self.alloc_mem)
3074 self._cleanup_phantom_devs(paths)
3075 self._cleanupVm()
3077 if ("transient" in self.info["other_config"] and \
3078 bool(self.info["other_config"]["transient"])) or \
3079 ("change_home_server" in self.info and \
3080 bool(self.info["change_home_server"])):
3081 XendDomain.instance().domain_delete_by_dominfo(self)
3084 def resetDomain(self):
3085 log.debug("XendDomainInfo.resetDomain(%s)", str(self.domid))
3087 old_domid = self.domid
3088 prev_vm_xend = self._listRecursiveVm('xend')
3089 new_dom_info = self.info
3090 try:
3091 self._unwatchVm()
3092 self.destroy()
3094 new_dom = None
3095 try:
3096 from xen.xend import XendDomain
3097 new_dom_info['domid'] = None
3098 new_dom = XendDomain.instance().domain_create_from_dict(
3099 new_dom_info)
3100 for x in prev_vm_xend[0][1]:
3101 new_dom._writeVm('xend/%s' % x[0], x[1])
3102 new_dom.waitForDevices()
3103 new_dom.unpause()
3104 except:
3105 if new_dom:
3106 new_dom.destroy()
3107 raise
3108 except:
3109 log.exception('Failed to reset domain %s.', str(old_domid))
3112 def resumeDomain(self):
3113 log.debug("XendDomainInfo.resumeDomain(%s)", str(self.domid))
3115 # Resume a suspended domain (e.g. after a live checkpoint, or after
3116 # a later error during save or migrate); checks that the domain
3117 # is currently suspended first, so it is safe to call from anywhere.
3119 xeninfo = dom_get(self.domid)
3120 if xeninfo is None:
3121 return
3122 if not xeninfo['shutdown']:
3123 return
3124 reason = shutdown_reason(xeninfo['shutdown_reason'])
3125 if reason != 'suspend':
3126 return
3128 try:
3129 # could also fetch a parsed note from xenstore
3130 fast = self.info.get_notes().get('SUSPEND_CANCEL') and 1 or 0
3131 if not fast:
3132 self._releaseDevices()
3133 self.testDeviceComplete()
3134 self.testvifsComplete()
3135 log.debug("XendDomainInfo.resumeDomain: devices released")
3137 self._resetChannels()
3139 self._removeDom('control/shutdown')
3140 self._removeDom('device-misc/vif/nextDeviceID')
3142 self._createChannels()
3143 self._introduceDomain()
3144 self._storeDomDetails()
3146 self._createDevices()
3147 log.debug("XendDomainInfo.resumeDomain: devices created")
3149 xc.domain_resume(self.domid, fast)
3150 ResumeDomain(self.domid)
3151 except:
3152 log.exception("XendDomainInfo.resume: xc.domain_resume failed on domain %s." % (str(self.domid)))
3153 self.image.resumeDeviceModel()
3154 log.debug("XendDomainInfo.resumeDomain: completed")
3158 # Channels for xenstore and console
3161 def _createChannels(self):
3162 """Create the channels to the domain.
3163 """
3164 self.store_port = self._createChannel()
3165 self.console_port = self._createChannel()
3168 def _createChannel(self):
3169 """Create an event channel to the domain.
3170 """
3171 try:
3172 if self.domid is not None:
3173 return xc.evtchn_alloc_unbound(domid = self.domid,
3174 remote_dom = 0)
3175 except:
3176 log.exception("Exception in alloc_unbound(%s)", str(self.domid))
3177 raise
3179 def _resetChannels(self):
3180 """Reset all event channels in the domain.
3181 """
3182 try:
3184 if self.domid is not None:
3184 return xc.evtchn_reset(dom = self.domid)
3185 except:
3186 log.exception("Exception in evtcnh_reset(%s)", str(self.domid))
3187 raise
3191 # Bootloader configuration
3194 def _configureBootloader(self):
3195 """Run the bootloader if we're configured to do so."""
3197 blexec = self.info['PV_bootloader']
3198 bootloader_args = self.info['PV_bootloader_args']
3199 kernel = self.info['PV_kernel']
3200 ramdisk = self.info['PV_ramdisk']
3201 args = self.info['PV_args']
3202 boot = self.info['HVM_boot_policy']
3204 if boot:
3205 # HVM booting.
3206 pass
3207 elif not blexec and kernel:
3208 # Boot from dom0. Nothing left to do -- the kernel and ramdisk
3209 # will be picked up by image.py.
3210 pass
3211 else:
3212 # Boot using bootloader
3213 if not blexec or blexec == 'pygrub':
3214 blexec = auxbin.pathTo('pygrub')
3216 blcfg = None
3217 disks = [x for x in self.info['vbd_refs']
3218 if self.info['devices'][x][1]['bootable']]
3220 if not disks:
3221 msg = "Had a bootloader specified, but no disks are bootable"
3222 log.error(msg)
3223 raise VmError(msg)
3225 devinfo = self.info['devices'][disks[0]]
3226 devtype = devinfo[0]
3227 disk = devinfo[1]['uname']
3229 fn = blkdev_uname_to_file(disk)
3231 # If this is a drbd volume, check if we need to activate it
3232 if disk.find(":") != -1:
3233 (disktype, diskname) = disk.split(':', 1)
3234 if disktype == 'drbd':
3235 (drbdadmstdin, drbdadmstdout) = os.popen2(["/sbin/drbdadm", "state", diskname])
3236 (state, junk) = drbdadmstdout.readline().split('/', 1)
3237 if state == 'Secondary':
3238 os.system('/sbin/drbdadm primary ' + diskname)
3240 taptype = blkdev_uname_to_taptype(disk)
3241 mounted = devtype in ['tap', 'tap2'] and taptype != 'aio' and taptype != 'sync' and not os.stat(fn).st_rdev
3242 if mounted:
3243 # This is a file, not a device. pygrub can cope with a
3244 # file if it's raw, but if it's QCOW or other such formats
3245 # used through blktap, then we need to mount it first.
3247 log.info("Mounting %s on %s." %
3248 (fn, BOOTLOADER_LOOPBACK_DEVICE))
3250 vbd = {
3251 'mode': 'RO',
3252 'device': BOOTLOADER_LOOPBACK_DEVICE,
3253 }
3255 from xen.xend import XendDomain
3256 dom0 = XendDomain.instance().privilegedDomain()
3257 dom0._waitForDeviceUUID(dom0.create_vbd(vbd, disk))
3258 fn = BOOTLOADER_LOOPBACK_DEVICE
3260 try:
3261 blcfg = bootloader(blexec, fn, self, False,
3262 bootloader_args, kernel, ramdisk, args)
3263 finally:
3264 if mounted:
3265 log.info("Unmounting %s from %s." %
3266 (fn, BOOTLOADER_LOOPBACK_DEVICE))
3268 dom0.destroyDevice('tap', BOOTLOADER_LOOPBACK_DEVICE)
3270 if blcfg is None:
3271 msg = "Had a bootloader specified, but can't find disk"
3272 log.error(msg)
3273 raise VmError(msg)
3275 self.info.update_with_image_sxp(blcfg, True)
3279 # VM Functions
3282 def _readVMDetails(self, params):
3283 """Read the specified parameters from the store.
3284 """
3285 try:
3286 return self._gatherVm(*params)
3287 except ValueError:
3288 # One of the int/float entries in params has a corresponding store
3289 # entry that is invalid. We recover, because older versions of
3290 # Xend may have put the entry there (memory/target, for example),
3291 # but this is in general a bad situation to have reached.
3292 log.exception(
3293 "Store corrupted at %s! Domain %d's configuration may be "
3294 "affected.", self.vmpath, self.domid)
3295 return []
3297 def _cleanupVm(self):
3298 """Cleanup VM resources. Idempotent. Nothrow guarantee."""
3300 self._unwatchVm()
3302 try:
3303 self._removeVm()
3304 except:
3305 log.exception("Removing VM path failed.")
3308 def checkLiveMigrateMemory(self):
3309 """ Make sure there's enough memory to migrate this domain """
3310 overhead_kb = 0
3311 if arch.type == "x86":
3312 # 1MiB per vcpu plus 4KiB per MiB of RAM. This is higher than
3313 # the minimum that Xen would allocate if no value were given.
3314 overhead_kb = self.info['VCPUs_max'] * 1024 + \
3315 (self.info['memory_static_max'] / 1024 / 1024) * 4
3316 overhead_kb = ((overhead_kb + 1023) / 1024) * 1024
3317 # The domain might already have some shadow memory
3318 overhead_kb -= xc.shadow_mem_control(self.domid) * 1024
3319 if overhead_kb > 0:
3320 balloon.free(overhead_kb, self)
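# Worked example (editorial): a 4-VCPU guest with 2048 MiB static-max needs
# 4*1024 + 2048*4 == 12288 KiB of headroom (already a MiB multiple) before
# subtracting any shadow memory the domain already holds.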
3322 def _unwatchVm(self):
3323 """Remove the watch on the VM path, if any. Idempotent. Nothrow
3324 guarantee."""
3325 try:
3326 try:
3327 if self.vmWatch:
3328 self.vmWatch.unwatch()
3329 finally:
3330 self.vmWatch = None
3331 except:
3332 log.exception("Unwatching VM path failed.")
3334 def testDeviceComplete(self):
3335 """ For Block IO migration safety we must ensure that
3336 the device has shutdown correctly, i.e. all blocks are
3337 flushed to disk
3338 """
3339 start = time.time()
3340 while True:
3341 test = 0
3342 diff = time.time() - start
3343 vbds = self.getDeviceController('vbd').deviceIDs()
3344 taps = self.getDeviceController('tap').deviceIDs()
3345 tap2s = self.getDeviceController('tap2').deviceIDs()
3346 for i in vbds + taps + tap2s:
3347 test = 1
3348 log.info("Dev %s still active, looping...", i)
3349 time.sleep(0.1)
3351 if test == 0:
3352 break
3353 if diff >= MIGRATE_TIMEOUT:
3354 log.info("Dev still active but hit max loop timeout")
3355 break
3357 def testvifsComplete(self):
3358 """ In case vifs are released and then created for the same
3359 domain, we need to wait the device shut down.
3360 """
3361 start = time.time()
3362 while True:
3363 test = 0
3364 diff = time.time() - start
3365 for i in self.getDeviceController('vif').deviceIDs():
3366 test = 1
3367 log.info("Dev %s still active, looping...", i)
3368 time.sleep(0.1)
3370 if test == 0:
3371 break
3372 if diff >= MIGRATE_TIMEOUT:
3373 log.info("Dev still active but hit max loop timeout")
3374 break
3376 def _storeVmDetails(self):
3377 to_store = {}
3379 for key in XendConfig.LEGACY_XENSTORE_VM_PARAMS:
3380 info_key = XendConfig.LEGACY_CFG_TO_XENAPI_CFG.get(key, key)
3381 if self._infoIsSet(info_key):
3382 to_store[key] = str(self.info[info_key])
3384 if self._infoIsSet("static_memory_min"):
3385 to_store["memory"] = str(self.info["static_memory_min"])
3386 if self._infoIsSet("static_memory_max"):
3387 to_store["maxmem"] = str(self.info["static_memory_max"])
3389 image_sxpr = self.info.image_sxpr()
3390 if image_sxpr:
3391 to_store['image'] = sxp.to_string(image_sxpr)
3393 if not self._readVm('xend/restart_count'):
3394 to_store['xend/restart_count'] = str(0)
3396 log.debug("Storing VM details: %s", scrub_password(to_store))
3398 self._writeVm(to_store)
3399 self._setVmPermissions()
3401 def _setVmPermissions(self):
3402 """Allow the guest domain to read its UUID. We don't allow it to
3403 access any other entry, for security."""
3404 xstransact.SetPermissions('%s/uuid' % self.vmpath,
3405 { 'dom' : self.domid,
3406 'read' : True,
3407 'write' : False })
3410 # Utility functions
3413 def __getattr__(self, name):
3414 if name == "state":
3415 log.warn("Somebody tried to read XendDomainInfo.state... should us _stateGet()!!!")
3416 log.warn("".join(traceback.format_stack()))
3417 return self._stateGet()
3418 else:
3419 raise AttributeError(name)
3421 def __setattr__(self, name, value):
3422 if name == "state":
3423 log.warn("Somebody tried to set XendDomainInfo.state... should us _stateGet()!!!")
3424 log.warn("".join(traceback.format_stack()))
3425 self._stateSet(value)
3426 else:
3427 self.__dict__[name] = value
3429 def _stateSet(self, state):
3430 self.state_updated.acquire()
3431 try:
3432 # TODO Not sure this is correct...
3433 # _stateGet is live now. Why not fire event
3434 # even when it hasn't changed?
3435 if self._stateGet() != state:
3436 self.state_updated.notifyAll()
3437 import XendAPI
3438 XendAPI.event_dispatch('mod', 'VM', self.info['uuid'],
3439 'power_state')
3440 finally:
3441 self.state_updated.release()
3443 def _stateGet(self):
3444 # Let's try to reconstitute the state from xc.
3445 # First get the domain info
3446 # from xc - this will tell us if the domain
3447 # exists.
3448 info = dom_get(self.getDomid())
3449 if info is None or info['shutdown']:
3450 # We are either HALTED or SUSPENDED
3451 # check saved image exists
3452 from xen.xend import XendDomain
3453 managed_config_path = \
3454 XendDomain.instance()._managed_check_point_path( \
3455 self.get_uuid())
3456 if os.path.exists(managed_config_path):
3457 return XEN_API_VM_POWER_STATE_SUSPENDED
3458 else:
3459 return XEN_API_VM_POWER_STATE_HALTED
3460 elif info['crashed']:
3461 # Crashed
3462 return XEN_API_VM_POWER_STATE_CRASHED
3463 else:
3464 # We are either RUNNING or PAUSED
3465 if info['paused']:
3466 return XEN_API_VM_POWER_STATE_PAUSED
3467 else:
3468 return XEN_API_VM_POWER_STATE_RUNNING
3470 def _infoIsSet(self, name):
3471 return name in self.info and self.info[name] is not None
3473 def _checkName(self, name):
3474 """Check if a vm name is valid. Valid names contain alphabetic
3475 characters, digits, or characters in '_-.:+'.
3476 The same name cannot be used for more than one vm at the same time.
3478 @param name: name
3479 @raise: VmError if invalid
3480 """
3481 from xen.xend import XendDomain
3483 if name is None or name == '':
3484 raise VmError('Missing VM Name')
3486 if not re.search(r'^[A-Za-z0-9_\-\.\:\+]+$', name):
3487 raise VmError('Invalid VM Name')
3489 dom = XendDomain.instance().domain_lookup_nr(name)
3490 if dom and dom.info['uuid'] != self.info['uuid']:
3491 raise VmError("VM name '%s' already exists%s" %
3492 (name,
3493 dom.domid is not None and
3494 (" as domain %s" % str(dom.domid)) or ""))
3497 def update(self, info = None, refresh = True, transaction = None):
3498 """Update with info from xc.domain_getinfo().
3499 """
3500 log.trace("XendDomainInfo.update(%s) on domain %s", info,
3501 str(self.domid))
3503 if not info:
3504 info = dom_get(self.domid)
3505 if not info:
3506 return
3508 if info["maxmem_kb"] < 0:
3509 info["maxmem_kb"] = XendNode.instance() \
3510 .physinfo_dict()['total_memory'] * 1024
3512 # make sure state is reset for info
3513 # TODO: we should eventually get rid of old_dom_states
3515 self.info.update_config(info)
3516 self._update_consoles(transaction)
3518 if refresh:
3519 self.refreshShutdown(info)
3521 log.trace("XendDomainInfo.update done on domain %s: %s",
3522 str(self.domid), self.info)
3524 def sxpr(self, ignore_store = False, legacy_only = True):
3525 result = self.info.to_sxp(domain = self,
3526 ignore_devices = ignore_store,
3527 legacy_only = legacy_only)
3529 return result
3531 # Xen API
3532 # ----------------------------------------------------------------
3534 def get_uuid(self):
3535 dom_uuid = self.info.get('uuid')
3536 if not dom_uuid: # if it doesn't exist, make one up
3537 dom_uuid = uuid.createString()
3538 self.info['uuid'] = dom_uuid
3539 return dom_uuid
3541 def get_memory_static_max(self):
3542 return self.info.get('memory_static_max', 0)
3543 def get_memory_static_min(self):
3544 return self.info.get('memory_static_min', 0)
3545 def get_memory_dynamic_max(self):
3546 return self.info.get('memory_dynamic_max', 0)
3547 def get_memory_dynamic_min(self):
3548 return self.info.get('memory_dynamic_min', 0)
3550 # only update memory-related config values if they maintain sanity
3551 def _safe_set_memory(self, key, newval):
3552 oldval = self.info.get(key, 0)
3553 try:
3554 self.info[key] = newval
3555 self.info._memory_sanity_check()
3556 except Exception, ex:
3557 self.info[key] = oldval
3558 raise
3560 def set_memory_static_max(self, val):
3561 self._safe_set_memory('memory_static_max', val)
3562 def set_memory_static_min(self, val):
3563 self._safe_set_memory('memory_static_min', val)
3564 def set_memory_dynamic_max(self, val):
3565 self._safe_set_memory('memory_dynamic_max', val)
3566 def set_memory_dynamic_min(self, val):
3567 self._safe_set_memory('memory_dynamic_min', val)
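# Rollback sketch for _safe_set_memory above, assuming
# XendConfig._memory_sanity_check rejects a dynamic maximum above the
# static maximum (values hypothetical):
#
#   try:
#       dom.set_memory_dynamic_max(dom.get_memory_static_max() + 1)
#   except Exception:
#       pass    # the exception re-raised above; old value restored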
3569 def get_vcpus_params(self):
3570 if self.getDomid() is None:
3571 return self.info['vcpus_params']
3573 retval = xc.sched_credit_domain_get(self.getDomid())
3574 return retval
3575 def get_power_state(self):
3576 return XEN_API_VM_POWER_STATE[self._stateGet()]
3577 def get_platform(self):
3578 return self.info.get('platform', {})
3579 def get_pci_bus(self):
3580 return self.info.get('pci_bus', '')
3581 def get_tools_version(self):
3582 return self.info.get('tools_version', {})
3583 def get_metrics(self):
3584 return self.metrics.get_uuid()
3587 def get_security_label(self, xspol=None):
3588 import xen.util.xsm.xsm as security
3589 label = security.get_security_label(self, xspol)
3590 return label
3592 def set_security_label(self, seclab, old_seclab, xspol=None,
3593 xspol_old=None):
3594 """
3595 Set the security label of a domain from its old to
3596 a new value.
3597 @param seclab New security label formatted in the form
3598 <policy type>:<policy name>:<vm label>
3599 @param old_seclab The current security label that the
3600 VM must have.
3601 @param xspol An optional policy under which this
3602 update should be done. If not given,
3603 then the current active policy is used.
3604 @param xspol_old The old policy; only to be passed during
3605 the updating of a policy
3606 @return Returns return code, a string with errors from
3607 the hypervisor's operation, old label of the
3608 domain
3609 """
3610 rc = 0
3611 errors = ""
3612 old_label = ""
3613 new_ssidref = 0
3614 domid = self.getDomid()
3615 res_labels = None
3616 is_policy_update = (xspol_old != None)
3618 from xen.xend.XendXSPolicyAdmin import XSPolicyAdminInstance
3620 state = self._stateGet()
3621 # Relabel only HALTED, RUNNING, PAUSED or SUSPENDED domains
3622 if domid != 0 and \
3623 state not in \
3624 [ DOM_STATE_HALTED, DOM_STATE_RUNNING, DOM_STATE_PAUSED, \
3625 DOM_STATE_SUSPENDED ]:
3626 log.warn("Relabeling domain not possible in state '%s'" %
3627 DOM_STATES[state])
3628 return (-xsconstants.XSERR_VM_WRONG_STATE, "", "", 0)
3630 # Remove security label. Works only for halted or suspended domains
3631 if not seclab or seclab == "":
3632 if state not in [ DOM_STATE_HALTED, DOM_STATE_SUSPENDED ]:
3633 return (-xsconstants.XSERR_VM_WRONG_STATE, "", "", 0)
3635 if self.info.has_key('security_label'):
3636 old_label = self.info['security_label']
3637 # Check label against expected one.
3638 if old_label != old_seclab:
3639 return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)
3640 del self.info['security_label']
3641 xen.xend.XendDomain.instance().managed_config_save(self)
3642 return (xsconstants.XSERR_SUCCESS, "", "", 0)
3644 tmp = seclab.split(":")
3645 if len(tmp) != 3:
3646 return (-xsconstants.XSERR_BAD_LABEL_FORMAT, "", "", 0)
3647 typ, policy, label = tmp
3649 poladmin = XSPolicyAdminInstance()
3650 if not xspol:
3651 xspol = poladmin.get_policy_by_name(policy)
3653 try:
3654 xen.xend.XendDomain.instance().policy_lock.acquire_writer()
3656 if state in [ DOM_STATE_RUNNING, DOM_STATE_PAUSED ]:
3657 # If the domain is running or paused, try to relabel it in the hypervisor
3658 if not xspol:
3659 return (-xsconstants.XSERR_POLICY_NOT_LOADED, "", "", 0)
3661 if typ != xspol.get_type_name() or \
3662 policy != xspol.get_name():
3663 return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)
3665 if typ == xsconstants.ACM_POLICY_ID:
3666 new_ssidref = xspol.vmlabel_to_ssidref(label)
3667 if new_ssidref == xsconstants.INVALID_SSIDREF:
3668 return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)
3670 # Check that all used resources are accessible under the
3671 # new label
3672 if not is_policy_update and \
3673 not security.resources_compatible_with_vmlabel(xspol,
3674 self, label):
3675 return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)
3677 # Check label against the expected one. Can only do this
3678 # if the policy hasn't changed underneath us in the meantime.
3679 if xspol_old == None:
3680 old_label = self.get_security_label()
3681 if old_label != old_seclab:
3682 log.info("old_label != old_seclab: %s != %s" %
3683 (old_label, old_seclab))
3684 return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)
3686 # relabel domain in the hypervisor
3687 rc, errors = security.relabel_domains([[domid, new_ssidref]])
3688 log.info("rc from relabeling in HV: %d" % rc)
3689 else:
3690 return (-xsconstants.XSERR_POLICY_TYPE_UNSUPPORTED, "", "", 0)
3692 if rc == 0:
3693 # HALTED, RUNNING or PAUSED
3694 if domid == 0:
3695 if xspol:
3696 self.info['security_label'] = seclab
3697 ssidref = poladmin.set_domain0_bootlabel(xspol, label)
3698 else:
3699 return (-xsconstants.XSERR_POLICY_NOT_LOADED, "", "", 0)
3700 else:
3701 if self.info.has_key('security_label'):
3702 old_label = self.info['security_label']
3703 # Check label against expected one, unless wildcard
3704 if old_label != old_seclab:
3705 return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)
3707 self.info['security_label'] = seclab
3709 try:
3710 xen.xend.XendDomain.instance().managed_config_save(self)
3711 except:
3712 pass # best-effort save of the managed config; errors ignored
3713 return (rc, errors, old_label, new_ssidref)
3714 finally:
3715 xen.xend.XendDomain.instance().policy_lock.release()
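# Return tuple sketch for set_security_label above (values
# hypothetical): on success something like
#
#   (xsconstants.XSERR_SUCCESS, '', 'ACM:example_policy:green', 0x10001)
#
# i.e. (rc, hypervisor errors, previous label, new ssidref).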
3717 def get_on_shutdown(self):
3718 after_shutdown = self.info.get('actions_after_shutdown')
3719 if not after_shutdown or after_shutdown not in XEN_API_ON_NORMAL_EXIT:
3720 return XEN_API_ON_NORMAL_EXIT[-1]
3721 return after_shutdown
3723 def get_on_reboot(self):
3724 after_reboot = self.info.get('actions_after_reboot')
3725 if not after_reboot or after_reboot not in XEN_API_ON_NORMAL_EXIT:
3726 return XEN_API_ON_NORMAL_EXIT[-1]
3727 return after_reboot
3729 def get_on_suspend(self):
3730 # TODO: not supported
3731 after_suspend = self.info.get('actions_after_suspend')
3732 if not after_suspend or after_suspend not in XEN_API_ON_NORMAL_EXIT:
3733 return XEN_API_ON_NORMAL_EXIT[-1]
3734 return after_suspend
3736 def get_on_crash(self):
3737 after_crash = self.info.get('actions_after_crash')
3738 if not after_crash or after_crash not in \
3739 XEN_API_ON_CRASH_BEHAVIOUR + restart_modes:
3740 return XEN_API_ON_CRASH_BEHAVIOUR[0]
3741 return XEN_API_ON_CRASH_BEHAVIOUR_FILTER[after_crash]
3743 def get_dev_config_by_uuid(self, dev_class, dev_uuid):
3744 """ Get's a device configuration either from XendConfig or
3745 from the DevController.
3747 @param dev_class: device class, either, 'vbd' or 'vif'
3748 @param dev_uuid: device UUID
3750 @rtype: dictionary
3751 """
3752 dev_type, dev_config = self.info['devices'].get(dev_uuid, (None, None))
3754 # shortcut if the domain isn't started because
3755 # the devcontrollers will have no better information
3756 # than XendConfig.
3757 if self._stateGet() in (XEN_API_VM_POWER_STATE_HALTED,
3758 XEN_API_VM_POWER_STATE_SUSPENDED):
3759 if dev_config:
3760 return copy.deepcopy(dev_config)
3761 return None
3763 # instead of dev_class, use the dev_type
3764 # recorded in XendConfig.
3765 controller = self.getDeviceController(dev_type)
3766 if not controller:
3767 return None
3769 all_configs = controller.getAllDeviceConfigurations()
3770 if not all_configs:
3771 return None
3773 updated_dev_config = copy.deepcopy(dev_config)
3774 for _devid, _devcfg in all_configs.items():
3775 if _devcfg.get('uuid') == dev_uuid:
3776 updated_dev_config.update(_devcfg)
3777 updated_dev_config['id'] = _devid
3778 return updated_dev_config
3780 return updated_dev_config
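# Lookup sketch: for a halted or suspended domain the stored
# XendConfig entry is returned verbatim (deep-copied); for a live
# domain it is overlaid with the controller's current view, e.g.
# (hypothetical):
#
#   cfg = dom.get_dev_config_by_uuid('vbd', vbd_uuid)
#   # running: stored config updated with the controller's
#   # configuration, plus cfg['id'] = devid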
3782 def get_dev_xenapi_config(self, dev_class, dev_uuid):
3783 config = self.get_dev_config_by_uuid(dev_class, dev_uuid)
3784 if not config:
3785 return {}
3787 config['VM'] = self.get_uuid()
3789 if dev_class == 'vif':
3790 if not config.has_key('name'):
3791 config['name'] = config.get('vifname', '')
3792 if not config.has_key('MAC'):
3793 config['MAC'] = config.get('mac', '')
3794 if not config.has_key('type'):
3795 config['type'] = 'paravirtualised'
3796 devid = config.get('id') # fetched unconditionally; also needed for the stats below
3797 if not config.has_key('device'):
3798 if devid != None:
3799 config['device'] = 'eth%s' % devid
3800 else:
3801 config['device'] = ''
3803 if not config.has_key('network'):
3804 try:
3805 bridge = config.get('bridge', None)
3806 if bridge is None:
3807 from xen.util import Brctl
3808 if_to_br = dict([(i,b)
3809 for (b,ifs) in Brctl.get_state().items()
3810 for i in ifs])
3811 vifname = "vif%s.%s" % (self.getDomid(),
3812 config.get('id'))
3813 bridge = if_to_br.get(vifname, None)
3814 config['network'] = \
3815 XendNode.instance().bridge_to_network(
3816 bridge).get_uuid() # use the fallback computed above, not the possibly absent config entry
3817 except Exception:
3818 log.exception('bridge_to_network')
3819 # Ignore this for now -- it may happen if the device
3820 # has been specified using the legacy methods, but at
3821 # some point we're going to have to figure out how to
3822 # handle that properly.
3824 config['MTU'] = 1500 # TODO
3826 if self._stateGet() not in (XEN_API_VM_POWER_STATE_HALTED,):
3827 xennode = XendNode.instance()
3828 rx_bps, tx_bps = xennode.get_vif_util(self.domid, devid)
3829 config['io_read_kbs'] = rx_bps/1024
3830 config['io_write_kbs'] = tx_bps/1024
3831 rx, tx = xennode.get_vif_stat(self.domid, devid)
3832 config['io_total_read_kbs'] = rx/1024
3833 config['io_total_write_kbs'] = tx/1024
3834 else:
3835 config['io_read_kbs'] = 0.0
3836 config['io_write_kbs'] = 0.0
3837 config['io_total_read_kbs'] = 0.0
3838 config['io_total_write_kbs'] = 0.0
3840 config['security_label'] = config.get('security_label', '')
3842 if dev_class == 'vbd':
3844 if self._stateGet() not in (XEN_API_VM_POWER_STATE_HALTED,):
3845 controller = self.getDeviceController(dev_class)
3846 devid, _1, _2 = controller.getDeviceDetails(config)
3847 xennode = XendNode.instance()
3848 rd_blkps, wr_blkps = xennode.get_vbd_util(self.domid, devid)
3849 config['io_read_kbs'] = rd_blkps
3850 config['io_write_kbs'] = wr_blkps
3851 else:
3852 config['io_read_kbs'] = 0.0
3853 config['io_write_kbs'] = 0.0
3855 config['VDI'] = config.get('VDI', '')
3856 config['device'] = config.get('dev', '')
3857 if config['device'].startswith('ioemu:'):
3858 _, vbd_device = config['device'].split(':', 1)
3859 config['device'] = vbd_device
3860 if ':' in config['device']:
3861 vbd_name, vbd_type = config['device'].split(':', 1)
3862 config['device'] = vbd_name
3863 if vbd_type == 'cdrom':
3864 config['type'] = XEN_API_VBD_TYPE[0]
3865 else:
3866 config['type'] = XEN_API_VBD_TYPE[1]
3868 config['driver'] = 'paravirtualised' # TODO
3869 config['image'] = config.get('uname', '')
3871 if config.get('mode', 'r') == 'r':
3872 config['mode'] = 'RO'
3873 else:
3874 config['mode'] = 'RW'
3876 if dev_class == 'vtpm':
3877 if not config.has_key('type'):
3878 config['type'] = 'paravirtualised' # TODO
3879 if not config.has_key('backend'):
3880 config['backend'] = "00000000-0000-0000-0000-000000000000"
3882 return config
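# Field-normalisation sketch for the VBD branch above (hypothetical
# stored config):
#
#   {'dev': 'xvda:cdrom', 'mode': 'r', 'uname': 'tap:aio:/img/d.img'}
# becomes
#   {'device': 'xvda', 'type': XEN_API_VBD_TYPE[0], 'mode': 'RO',
#    'image': 'tap:aio:/img/d.img', 'driver': 'paravirtualised'}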
3884 def get_dev_property(self, dev_class, dev_uuid, field):
3885 config = self.get_dev_xenapi_config(dev_class, dev_uuid)
3886 try:
3887 return config[field]
3888 except KeyError:
3889 raise XendError('Invalid property for device: %s' % field)
3891 def set_dev_property(self, dev_class, dev_uuid, field, value):
3892 self.info['devices'][dev_uuid][1][field] = value
3894 def get_vcpus_util(self):
3895 vcpu_util = {}
3896 xennode = XendNode.instance()
3897 if 'VCPUs_max' in self.info and self.domid != None:
3898 for i in range(0, self.info['VCPUs_max']):
3899 util = xennode.get_vcpu_util(self.domid, i)
3900 vcpu_util[str(i)] = util
3902 return vcpu_util
3904 def get_consoles(self):
3905 return self.info.get('console_refs', [])
3907 def get_vifs(self):
3908 return self.info.get('vif_refs', [])
3910 def get_vbds(self):
3911 return self.info.get('vbd_refs', [])
3913 def get_vtpms(self):
3914 return self.info.get('vtpm_refs', [])
3916 def get_dpcis(self):
3917 return XendDPCI.get_by_VM(self.info.get('uuid'))
3919 def get_dscsis(self):
3920 return XendDSCSI.get_by_VM(self.info.get('uuid'))
3922 def get_dscsi_HBAs(self):
3923 return XendDSCSI_HBA.get_by_VM(self.info.get('uuid'))
3925 def create_vbd(self, xenapi_vbd, vdi_image_path):
3926 """Create a VBD using a VDI from XendStorageRepository.
3928 @param xenapi_vbd: vbd struct from the Xen API
3929 @param vdi_image_path: VDI UUID
3930 @rtype: string
3931 @return: uuid of the device
3932 """
3933 xenapi_vbd['image'] = vdi_image_path
3934 if vdi_image_path.startswith('tap'):
3935 dev_uuid = self.info.device_add('tap2', cfg_xenapi = xenapi_vbd)
3936 else:
3937 dev_uuid = self.info.device_add('vbd', cfg_xenapi = xenapi_vbd)
3939 if not dev_uuid:
3940 raise XendError('Failed to create device')
3942 if self._stateGet() in (XEN_API_VM_POWER_STATE_RUNNING,
3943 XEN_API_VM_POWER_STATE_PAUSED):
3944 _, config = self.info['devices'][dev_uuid]
3946 if vdi_image_path.startswith('tap'):
3947 dev_control = self.getDeviceController('tap2')
3948 else:
3949 dev_control = self.getDeviceController('vbd')
3951 try:
3952 devid = dev_control.createDevice(config)
3953 dev_type = self.getBlockDeviceClass(devid)
3954 self._waitForDevice(dev_type, devid)
3955 self.info.device_update(dev_uuid,
3956 cfg_xenapi = {'devid': devid})
3957 except Exception, exn:
3958 log.exception(exn)
3959 del self.info['devices'][dev_uuid]
3960 self.info['vbd_refs'].remove(dev_uuid)
3961 raise
3963 return dev_uuid
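# Usage sketch (hypothetical Xen-API vbd struct; an image path
# starting with 'tap' selects the tap2 controller above, anything
# else the plain 'vbd' controller):
#
#   vbd_struct = {'device': 'xvdb', 'bootable': False, 'mode': 'RW'}
#   dev_uuid = dom.create_vbd(vbd_struct, 'tap:aio:/var/images/d.img')
#   # on a RUNNING/PAUSED domain the device is also hotplugged and
#   # its devid written back via device_update()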
3965 def create_phantom_vbd_with_vdi(self, xenapi_vbd, vdi_image_path):
3966 """Create a VBD using a VDI from XendStorageRepository.
3968 @param xenapi_vbd: vbd struct from the Xen API
3969 @param vdi_image_path: VDI UUID
3970 @rtype: string
3971 @return: uuid of the device
3972 """
3973 xenapi_vbd['image'] = vdi_image_path
3974 dev_uuid = self.info.phantom_device_add('tap', cfg_xenapi = xenapi_vbd)
3975 if not dev_uuid:
3976 raise XendError('Failed to create device')
3978 if self._stateGet() == XEN_API_VM_POWER_STATE_RUNNING:
3979 _, config = self.info['devices'][dev_uuid]
3980 config['devid'] = self.getDeviceController('tap').createDevice(config)
3982 return dev_uuid # 'config' is unset unless the domain was running
3984 def create_vif(self, xenapi_vif):
3985 """Create VIF device from the passed struct in Xen API format.
3987 @param xenapi_vif: Xen API VIF Struct.
3988 @rtype: string
3989 @return: UUID
3990 """
3991 dev_uuid = self.info.device_add('vif', cfg_xenapi = xenapi_vif)
3992 if not dev_uuid:
3993 raise XendError('Failed to create device')
3995 if self._stateGet() in (XEN_API_VM_POWER_STATE_RUNNING,
3996 XEN_API_VM_POWER_STATE_PAUSED):
3998 _, config = self.info['devices'][dev_uuid]
3999 dev_control = self.getDeviceController('vif')
4001 try:
4002 devid = dev_control.createDevice(config)
4003 dev_control.waitForDevice(devid)
4004 self.info.device_update(dev_uuid,
4005 cfg_xenapi = {'devid': devid})
4006 except Exception, exn:
4007 log.exception(exn)
4008 del self.info['devices'][dev_uuid]
4009 self.info['vif_refs'].remove(dev_uuid)
4010 raise
4012 return dev_uuid
4014 def create_vtpm(self, xenapi_vtpm):
4015 """Create a VTPM device from the passed struct in Xen API format.
4017 @return: uuid of the device
4018 @rtype: string
4019 """
4021 if self._stateGet() not in (DOM_STATE_HALTED,):
4022 raise VmError("Can only add vTPM to a halted domain.")
4023 if self.get_vtpms() != []:
4024 raise VmError('Domain already has a vTPM.')
4025 dev_uuid = self.info.device_add('vtpm', cfg_xenapi = xenapi_vtpm)
4026 if not dev_uuid:
4027 raise XendError('Failed to create device')
4029 return dev_uuid
4031 def create_console(self, xenapi_console):
4032 """ Create a console device from a Xen API struct.
4034 @return: uuid of device
4035 @rtype: string
4036 """
4037 if self._stateGet() not in (DOM_STATE_HALTED,):
4038 raise VmError("Can only add console to a halted domain.")
4040 dev_uuid = self.info.device_add('console', cfg_xenapi = xenapi_console)
4041 if not dev_uuid:
4042 raise XendError('Failed to create device')
4044 return dev_uuid
4046 def set_console_other_config(self, console_uuid, other_config):
4047 self.info.console_update(console_uuid, 'other_config', other_config)
4049 def create_dpci(self, xenapi_pci):
4050 """Create pci device from the passed struct in Xen API format.
4052 @param xenapi_pci: DPCI struct from Xen API
4053 @rtype: string
4054 @return: UUID of the created DPCI
4057 """
4059 dpci_uuid = uuid.createString()
4061 dpci_opts = []
4062 opts_dict = xenapi_pci.get('options')
4063 for k in opts_dict.keys():
4064 dpci_opts.append([k, opts_dict[k]])
4065 opts_sxp = pci_opts_list_to_sxp(dpci_opts)
4067 # Convert xenapi to sxp
4068 ppci = XendAPIStore.get(xenapi_pci.get('PPCI'), 'PPCI')
4070 dev_sxp = ['dev',
4071 ['domain', '0x%02x' % ppci.get_domain()],
4072 ['bus', '0x%02x' % ppci.get_bus()],
4073 ['slot', '0x%02x' % ppci.get_slot()],
4074 ['func', '0x%1x' % ppci.get_func()],
4075 ['vdevfn', '0x%02x' % xenapi_pci.get('hotplug_slot')],
4076 ['key', xenapi_pci['key']],
4077 ['uuid', dpci_uuid]]
4078 dev_sxp = sxp.merge(dev_sxp, opts_sxp)
4080 target_pci_sxp = ['pci', dev_sxp, ['state', 'Initialising'] ]
4082 if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:
4084 old_pci_sxp = self._getDeviceInfo_pci(0)
4086 if old_pci_sxp is None:
4087 dev_uuid = self.info.device_add('pci', cfg_sxp = target_pci_sxp)
4088 if not dev_uuid:
4089 raise XendError('Failed to create device')
4091 else:
4092 new_pci_sxp = ['pci']
4093 for existing_dev in sxp.children(old_pci_sxp, 'dev'):
4094 new_pci_sxp.append(existing_dev)
4095 new_pci_sxp.append(sxp.child0(target_pci_sxp, 'dev'))
4097 dev_uuid = sxp.child_value(old_pci_sxp, 'uuid')
4098 self.info.device_update(dev_uuid, new_pci_sxp)
4100 xen.xend.XendDomain.instance().managed_config_save(self)
4102 else:
4103 try:
4104 self.device_configure(target_pci_sxp)
4106 except Exception, exn:
4107 log.exception('create_dpci: %s', exn)
4108 raise XendError('Failed to create device')
4109 return dpci_uuid
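# Resulting sxp sketch for a hypothetical PPCI at 0000:03:00.1 with
# hotplug_slot 0x08:
#
#   ['pci',
#       ['dev', ['domain', '0x00'], ['bus', '0x03'], ['slot', '0x00'],
#               ['func', '0x1'], ['vdevfn', '0x08'],
#               ['key', '0000:03:00.1'], ['uuid', dpci_uuid]],
#       ['state', 'Initialising']]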
4111 def create_dscsi(self, xenapi_dscsi):
4112 """Create scsi device from the passed struct in Xen API format.
4114 @param xenapi_dscsi: DSCSI struct from Xen API
4115 @rtype: string
4116 @return: UUID
4117 """
4119 dscsi_uuid = uuid.createString()
4121 # Convert xenapi to sxp
4122 pscsi = XendAPIStore.get(xenapi_dscsi.get('PSCSI'), 'PSCSI')
4123 devid = int(xenapi_dscsi.get('virtual_HCTL').split(':')[0])
4124 target_vscsi_sxp = \
4125 ['vscsi',
4126 ['dev',
4127 ['devid', devid],
4128 ['p-devname', pscsi.get_dev_name()],
4129 ['p-dev', pscsi.get_physical_HCTL()],
4130 ['v-dev', xenapi_dscsi.get('virtual_HCTL')],
4131 ['state', xenbusState['Initialising']],
4132 ['uuid', dscsi_uuid]
4133 ],
4134 ['feature-host', 0]
4135 ]
4137 if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:
4139 cur_vscsi_sxp = self._getDeviceInfo_vscsi(devid)
4141 if cur_vscsi_sxp is None:
4142 dev_uuid = self.info.device_add('vscsi', cfg_sxp = target_vscsi_sxp)
4143 if not dev_uuid:
4144 raise XendError('Failed to create device')
4146 else:
4147 new_vscsi_sxp = ['vscsi', ['feature-host', 0]]
4148 for existing_dev in sxp.children(cur_vscsi_sxp, 'dev'):
4149 new_vscsi_sxp.append(existing_dev)
4150 new_vscsi_sxp.append(sxp.child0(target_vscsi_sxp, 'dev'))
4152 dev_uuid = sxp.child_value(cur_vscsi_sxp, 'uuid')
4153 self.info.device_update(dev_uuid, new_vscsi_sxp)
4155 xen.xend.XendDomain.instance().managed_config_save(self)
4157 else:
4158 try:
4159 self.device_configure(target_vscsi_sxp)
4160 except Exception, exn:
4161 log.exception('create_dscsi: %s', exn)
4162 raise XendError('Failed to create device')
4164 return dscsi_uuid
4166 def create_dscsi_HBA(self, xenapi_dscsi):
4167 """Create scsi devices from the passed struct in Xen API format.
4169 @param xenapi_dscsi: DSCSI_HBA struct from Xen API
4170 @rtype: string
4171 @return: UUID
4172 """
4174 dscsi_HBA_uuid = uuid.createString()
4176 # Convert xenapi to sxp
4177 feature_host = xenapi_dscsi.get('assignment_mode', 'HOST') == 'HOST' and 1 or 0
4178 target_vscsi_sxp = \
4179 ['vscsi',
4180 ['feature-host', feature_host],
4181 ['uuid', dscsi_HBA_uuid],
4182 ]
4183 pscsi_HBA = XendAPIStore.get(xenapi_dscsi.get('PSCSI_HBA'), 'PSCSI_HBA')
4184 devid = pscsi_HBA.get_physical_host()
4185 for pscsi_uuid in pscsi_HBA.get_PSCSIs():
4186 pscsi = XendAPIStore.get(pscsi_uuid, 'PSCSI')
4187 pscsi_HCTL = pscsi.get_physical_HCTL()
4188 dscsi_uuid = uuid.createString()
4189 dev = \
4190 ['dev',
4191 ['devid', devid],
4192 ['p-devname', pscsi.get_dev_name()],
4193 ['p-dev', pscsi_HCTL],
4194 ['v-dev', pscsi_HCTL],
4195 ['state', xenbusState['Initialising']],
4196 ['uuid', dscsi_uuid]
4197 ]
4198 target_vscsi_sxp.append(dev)
4200 if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:
4201 if not self.info.device_add('vscsi', cfg_sxp = target_vscsi_sxp):
4202 raise XendError('Failed to create device')
4203 xen.xend.XendDomain.instance().managed_config_save(self)
4204 else:
4205 try:
4206 self.device_configure(target_vscsi_sxp)
4207 except Exception, exn:
4208 log.exception('create_dscsi_HBA: %s', exn)
4209 raise XendError('Failed to create device')
4211 return dscsi_HBA_uuid
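# Assignment-mode sketch: 'HOST' yields feature-host 1, so the whole
# HBA is handed over and a single reconfiguration covers every LUN;
# any other mode yields feature-host 0, producing one 'dev' entry per
# PSCSI behind the HBA (see the loop above).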
4214 def change_vdi_of_vbd(self, xenapi_vbd, vdi_image_path):
4215 """Change current VDI with the new VDI.
4217 @param xenapi_vbd: vbd struct from the Xen API
4218 @param vdi_image_path: path of VDI
4219 """
4220 dev_uuid = xenapi_vbd['uuid']
4221 if dev_uuid not in self.info['devices']:
4222 raise XendError('Device does not exist')
4224 # Convert xenapi to sxp
4225 if vdi_image_path.startswith('tap'):
4226 dev_class = 'tap'
4227 else:
4228 dev_class = 'vbd'
4229 dev_sxp = [
4230 dev_class,
4231 ['uuid', dev_uuid],
4232 ['uname', vdi_image_path],
4233 ['dev', '%s:cdrom' % xenapi_vbd['device']],
4234 ['mode', 'r'],
4235 ['VDI', xenapi_vbd['VDI']]
4236 ]
4238 if self._stateGet() in (XEN_API_VM_POWER_STATE_RUNNING,
4239 XEN_API_VM_POWER_STATE_PAUSED):
4240 self.device_configure(dev_sxp)
4241 else:
4242 self.info.device_update(dev_uuid, dev_sxp)
4245 def destroy_device_by_uuid(self, dev_type, dev_uuid):
4246 if dev_uuid not in self.info['devices']:
4247 raise XendError('Device does not exist')
4249 try:
4250 if self._stateGet() in (XEN_API_VM_POWER_STATE_RUNNING,
4251 XEN_API_VM_POWER_STATE_PAUSED):
4252 _, config = self.info['devices'][dev_uuid]
4253 devid = config.get('devid')
4254 if devid != None:
4255 self.getDeviceController(dev_type).destroyDevice(devid, force = False)
4256 else:
4257 raise XendError('Unable to get devid for device: %s:%s' %
4258 (dev_type, dev_uuid))
4259 finally:
4260 del self.info['devices'][dev_uuid]
4261 self.info['%s_refs' % dev_type].remove(dev_uuid)
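# Teardown sketch: on a RUNNING/PAUSED domain the controller detaches
# the device by devid first; in every case the finally block drops the
# uuid from self.info['devices'] and from the matching
# '<dev_type>_refs' list, e.g. 'vbd_refs' for destroy_vbd below.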
4263 def destroy_vbd(self, dev_uuid):
4264 self.destroy_device_by_uuid('vbd', dev_uuid)
4266 def destroy_vif(self, dev_uuid):
4267 self.destroy_device_by_uuid('vif', dev_uuid)
4269 def destroy_vtpm(self, dev_uuid):
4270 self.destroy_device_by_uuid('vtpm', dev_uuid)
4272 def destroy_dpci(self, dev_uuid):
4274 dpci = XendAPIStore.get(dev_uuid, 'DPCI')
4275 ppci = XendAPIStore.get(dpci.get_PPCI(), 'PPCI')
4277 old_pci_sxp = self._getDeviceInfo_pci(0)
4278 dev_uuid = sxp.child_value(old_pci_sxp, 'uuid')
4279 target_dev = None
4280 new_pci_sxp = ['pci']
4281 for dev in sxp.children(old_pci_sxp, 'dev'):
4282 pci_dev = {}
4283 pci_dev['domain'] = sxp.child_value(dev, 'domain')
4284 pci_dev['bus'] = sxp.child_value(dev, 'bus')
4285 pci_dev['slot'] = sxp.child_value(dev, 'slot')
4286 pci_dev['func'] = sxp.child_value(dev, 'func')
4287 if ppci.get_name() == pci_dict_to_bdf_str(pci_dev):
4288 target_dev = dev
4289 else:
4290 new_pci_sxp.append(dev)
4292 if target_dev is None:
4293 raise XendError('Failed to destroy device')
4295 target_pci_sxp = ['pci', target_dev, ['state', 'Closing']]
4297 if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:
4299 self.info.device_update(dev_uuid, new_pci_sxp)
4300 if len(sxp.children(new_pci_sxp, 'dev')) == 0:
4301 del self.info['devices'][dev_uuid]
4302 xen.xend.XendDomain.instance().managed_config_save(self)
4304 else:
4305 try:
4306 self.device_configure(target_pci_sxp)
4308 except Exception, exn:
4309 log.exception('destroy_dpci: %s', exn)
4310 raise XendError('Failed to destroy device')
4311 def destroy_dscsi(self, dev_uuid):
4312 dscsi = XendAPIStore.get(dev_uuid, 'DSCSI')
4313 devid = dscsi.get_virtual_host()
4314 vHCTL = dscsi.get_virtual_HCTL()
4315 cur_vscsi_sxp = self._getDeviceInfo_vscsi(devid)
4316 dev_uuid = sxp.child_value(cur_vscsi_sxp, 'uuid')
4318 target_dev = None
4319 new_vscsi_sxp = ['vscsi', ['feature-host', 0]]
4320 for dev in sxp.children(cur_vscsi_sxp, 'dev'):
4321 if vHCTL == sxp.child_value(dev, 'v-dev'):
4322 target_dev = dev
4323 else:
4324 new_vscsi_sxp.append(dev)
4326 if target_dev is None:
4327 raise XendError('Failed to destroy device')
4329 target_dev.append(['state', xenbusState['Closing']])
4330 target_vscsi_sxp = ['vscsi', target_dev, ['feature-host', 0]]
4332 if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:
4334 self.info.device_update(dev_uuid, new_vscsi_sxp)
4335 if len(sxp.children(new_vscsi_sxp, 'dev')) == 0:
4336 del self.info['devices'][dev_uuid]
4337 xen.xend.XendDomain.instance().managed_config_save(self)
4339 else:
4340 try:
4341 self.device_configure(target_vscsi_sxp)
4342 except Exception, exn:
4343 log.exception('destroy_dscsi: %s', exn)
4344 raise XendError('Failed to destroy device')
4346 def destroy_dscsi_HBA(self, dev_uuid):
4347 dscsi_HBA = XendAPIStore.get(dev_uuid, 'DSCSI_HBA')
4348 devid = dscsi_HBA.get_virtual_host()
4349 cur_vscsi_sxp = self._getDeviceInfo_vscsi(devid)
4350 feature_host = sxp.child_value(cur_vscsi_sxp, 'feature-host')
4352 if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:
4353 new_vscsi_sxp = ['vscsi', ['feature-host', feature_host]]
4354 self.info.device_update(dev_uuid, new_vscsi_sxp)
4355 del self.info['devices'][dev_uuid]
4356 xen.xend.XendDomain.instance().managed_config_save(self)
4357 else:
4358 # If feature_host is 1, all devices are destroyed by just
4359 # one reconfiguration.
4360 # If feature_host is 0, we should reconfigure all devices
4361 # one-by-one to destroy all devices.
4362 # See reconfigureDevice@VSCSIController.
4363 for dev in sxp.children(cur_vscsi_sxp, 'dev'):
4364 target_vscsi_sxp = [
4365 'vscsi',
4366 dev + [['state', xenbusState['Closing']]],
4367 ['feature-host', feature_host]
4368 ]
4369 try:
4370 self.device_configure(target_vscsi_sxp)
4371 except Exception, exn:
4372 log.exception('destroy_dscsi_HBA: %s', exn)
4373 raise XendError('Failed to destroy device')
4374 if feature_host:
4375 break
4377 def destroy_xapi_instances(self):
4378 """Destroy Xen-API instances stored in XendAPIStore.
4379 """
4380 # Xen-API classes based on XendBase have their instances stored
4381 # in XendAPIStore. Cleanup these instances here, if they are supposed
4382 # to be destroyed when the parent domain is dead.
4384 # Most of the virtual devices (vif, vbd, vfb, etc) are not based on
4385 # XendBase and there's no need to remove them from XendAPIStore.
4387 from xen.xend import XendDomain
4388 if XendDomain.instance().is_valid_vm(self.info.get('uuid')):
4389 # domain still exists.
4390 return
4392 # Destroy the VMMetrics instance.
4393 if XendAPIStore.get(self.metrics.get_uuid(), self.metrics.getClass()) \
4394 is not None:
4395 self.metrics.destroy()
4397 # Destroy DPCI instances.
4398 for dpci_uuid in XendDPCI.get_by_VM(self.info.get('uuid')):
4399 XendAPIStore.deregister(dpci_uuid, "DPCI")
4401 # Destroy DSCSI instances.
4402 for dscsi_uuid in XendDSCSI.get_by_VM(self.info.get('uuid')):
4403 XendAPIStore.deregister(dscsi_uuid, "DSCSI")
4405 # Destroy DSCSI_HBA instances.
4406 for dscsi_HBA_uuid in XendDSCSI_HBA.get_by_VM(self.info.get('uuid')):
4407 XendAPIStore.deregister(dscsi_HBA_uuid, "DSCSI_HBA")
4409 def has_device(self, dev_class, dev_uuid):
4410 return (dev_uuid in self.info['%s_refs' % dev_class.lower()])
4412 def __str__(self):
4413 return '<domain id=%s name=%s memory=%s state=%s>' % \
4414 (str(self.domid), self.info['name_label'],
4415 str(self.info['memory_dynamic_max']), DOM_STATES[self._stateGet()])
4417 __repr__ = __str__
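# Example of the representation above, assuming DOM_STATES renders the
# running state as 'running' (values hypothetical):
#
#   <domain id=3 name=web-01 memory=524288 state=running>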