debuggers.hg

view tools/python/xen/xend/XendDomainInfo.py @ 19680:dc7de36c94e3

xm, xend: passthrough: Add assigned_or_requested_vslot()

Add an accessor to simplify accessing vslot if available,
otherwise requested_vslot.

Signed-off-by: Simon Horman <horms@verge.net.au>
author Keir Fraser <keir.fraser@citrix.com>
date Tue May 26 10:04:10 2009 +0100 (2009-05-26)
parents caa8c0e2d6f6
children 1c627434605e
line source
1 #===========================================================================
2 # This library is free software; you can redistribute it and/or
3 # modify it under the terms of version 2.1 of the GNU Lesser General Public
4 # License as published by the Free Software Foundation.
5 #
6 # This library is distributed in the hope that it will be useful,
7 # but WITHOUT ANY WARRANTY; without even the implied warranty of
8 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
9 # Lesser General Public License for more details.
10 #
11 # You should have received a copy of the GNU Lesser General Public
12 # License along with this library; if not, write to the Free Software
13 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
14 #============================================================================
15 # Copyright (C) 2004, 2005 Mike Wray <mike.wray@hp.com>
16 # Copyright (C) 2005-2007 XenSource Ltd
17 #============================================================================
19 """Representation of a single domain.
20 Includes support for domain construction, using
21 open-ended configurations.
23 Author: Mike Wray <mike.wray@hp.com>
25 """
27 import logging
28 import time
29 import threading
30 import re
31 import copy
32 import os
33 import traceback
34 from types import StringTypes
36 import xen.lowlevel.xc
37 from xen.util import asserts, auxbin
38 from xen.util.blkif import blkdev_uname_to_file, blkdev_uname_to_taptype
39 import xen.util.xsm.xsm as security
40 from xen.util import xsconstants
41 from xen.util.pci import assigned_or_requested_vslot
43 from xen.xend import balloon, sxp, uuid, image, arch
44 from xen.xend import XendOptions, XendNode, XendConfig
46 from xen.xend.XendConfig import scrub_password
47 from xen.xend.XendBootloader import bootloader, bootloader_tidy
48 from xen.xend.XendError import XendError, VmError
49 from xen.xend.XendDevices import XendDevices
50 from xen.xend.XendTask import XendTask
51 from xen.xend.xenstore.xstransact import xstransact, complete
52 from xen.xend.xenstore.xsutil import GetDomainPath, IntroduceDomain, SetTarget, ResumeDomain
53 from xen.xend.xenstore.xswatch import xswatch
54 from xen.xend.XendConstants import *
55 from xen.xend.XendAPIConstants import *
56 from xen.xend.server.DevConstants import xenbusState
58 from xen.xend.XendVMMetrics import XendVMMetrics
60 from xen.xend import XendAPIStore
61 from xen.xend.XendPPCI import XendPPCI
62 from xen.xend.XendDPCI import XendDPCI
63 from xen.xend.XendPSCSI import XendPSCSI
64 from xen.xend.XendDSCSI import XendDSCSI
66 MIGRATE_TIMEOUT = 30.0
67 BOOTLOADER_LOOPBACK_DEVICE = '/dev/xvdp'
69 xc = xen.lowlevel.xc.xc()
70 xoptions = XendOptions.instance()
72 log = logging.getLogger("xend.XendDomainInfo")
73 #log.setLevel(logging.TRACE)
def create(config):
    """Create and start a VM from the supplied SXP configuration.

    @param config: A configuration object involving lists of tuples.
    @type  config: list of lists, eg ['vm', ['image', 'xen.gz']]
    @rtype:  XendDomainInfo
    @return: An up and running XendDomainInfo instance
    @raise VmError: Invalid configuration or failure to start.
    """
    from xen.xend import XendDomain

    dom_cfg = XendConfig.XendConfig(sxp_obj = config)

    # Refuse to create a second *running* domain with the same name or uuid.
    xd = XendDomain.instance()
    existing = xd.domain_lookup_nr(dom_cfg["name_label"])
    if existing is None or existing.domid is None:
        existing = xd.domain_lookup_nr(dom_cfg["uuid"])
    if existing is not None and existing.domid is not None:
        raise VmError("Domain '%s' already exists with ID '%d'" % (dom_cfg["name_label"], existing.domid))

    log.debug("XendDomainInfo.create(%s)", scrub_password(config))

    dominfo = XendDomainInfo(dom_cfg)
    try:
        dominfo.start()
    except:
        # Tear down the partially-constructed domain before propagating.
        log.exception('Domain construction failed')
        dominfo.destroy()
        raise
    return dominfo
def create_from_dict(config_dict):
    """Create and start a VM from a configuration dictionary.

    @param config_dict: An configuration dictionary.
    @rtype:  XendDomainInfo
    @return: An up and running XendDomainInfo instance
    @raise VmError: Invalid configuration or failure to start.
    """
    log.debug("XendDomainInfo.create_from_dict(%s)",
              scrub_password(config_dict))

    dominfo = XendDomainInfo(XendConfig.XendConfig(xapi = config_dict))
    try:
        dominfo.start()
    except:
        # Clean up the half-built domain before re-raising.
        log.exception('Domain construction failed')
        dominfo.destroy()
        raise
    return dominfo
def recreate(info, priv):
    """Create the VM object for an existing domain. The domain must not
    be dying, as the paths in the store should already have been removed,
    and asking us to recreate them causes problems.

    @param info: Parsed configuration (dominfo from the hypervisor)
    @type info: Dictionary
    @param priv: Is a privileged domain (Dom 0)
    @type priv: bool

    @rtype: XendDomainInfo
    @return: A up and running XendDomainInfo instance
    @raise VmError: Invalid configuration.
    @raise XendError: Errors with configuration.
    """

    log.debug("XendDomainInfo.recreate(%s)", scrub_password(info))

    assert not info['dying']

    xeninfo = XendConfig.XendConfig(dominfo = info)
    xeninfo['is_control_domain'] = priv
    xeninfo['is_a_template'] = False
    xeninfo['auto_power_on'] = False
    domid = xeninfo['domid']
    uuid1 = uuid.fromString(xeninfo['uuid'])
    needs_reinitialising = False

    dompath = GetDomainPath(domid)
    if not dompath:
        raise XendError('No domain path in store for existing '
                        'domain %d' % domid)

    log.info("Recreating domain %d, UUID %s. at %s" %
             (domid, xeninfo['uuid'], dompath))

    # need to verify the path and uuid if not Domain-0
    # if the required uuid and vm aren't set, then that means
    # we need to recreate the dom with our own values
    #
    # NOTE: this is probably not desirable, really we should just
    #       abort or ignore, but there may be cases where xenstore's
    #       entry disappears (eg. xenstore-rm /)
    #
    try:
        vmpath = xstransact.Read(dompath, "vm")
        if not vmpath:
            # Dom0 legitimately has no /vm entry; for other domains this
            # means the store state was lost and must be rebuilt.
            if not priv:
                log.warn('/local/domain/%d/vm is missing. recreate is '
                         'confused, trying our best to recover' % domid)
            needs_reinitialising = True
            raise XendError('reinit')

        uuid2_str = xstransact.Read(vmpath, "uuid")
        if not uuid2_str:
            log.warn('%s/uuid/ is missing. recreate is confused, '
                     'trying our best to recover' % vmpath)
            needs_reinitialising = True
            raise XendError('reinit')

        uuid2 = uuid.fromString(uuid2_str)
        if uuid1 != uuid2:
            log.warn('UUID in /vm does not match the UUID in /dom/%d.'
                     'Trying out best to recover' % domid)
            needs_reinitialising = True
    except XendError:
        pass # our best shot at 'goto' in python :)

    vm = XendDomainInfo(xeninfo, domid, dompath, augment = True, priv = priv,
                        vmpath = vmpath)

    if needs_reinitialising:
        # Rebuild the xenstore entries from the parsed configuration.
        vm._recreateDom()
        vm._removeVm()
        vm._storeVmDetails()
        vm._storeDomDetails()

    vm.image = image.create(vm, vm.info)
    vm.image.recreate()

    vm._registerWatches()
    vm.refreshShutdown(xeninfo)

    # register the domain in the list
    from xen.xend import XendDomain
    XendDomain.instance().add_domain(vm)

    return vm
def restore(config):
    """Create a domain and a VM object to do a restore.

    @param config: Domain SXP configuration
    @type  config: list of lists. (see C{create})
    @rtype:  XendDomainInfo
    @return: An up and running XendDomainInfo instance
    @raise VmError: Invalid configuration or failure to start.
    @raise XendError: Errors with configuration.
    """
    log.debug("XendDomainInfo.restore(%s)", scrub_password(config))

    dominfo = XendDomainInfo(XendConfig.XendConfig(sxp_obj = config),
                             resume = True)
    try:
        dominfo.resume()
    except:
        # Resume failed: destroy the half-restored domain, then re-raise.
        dominfo.destroy()
        raise
    return dominfo
def createDormant(domconfig):
    """Create a dormant/inactive XenDomainInfo without creating VM.
    This is for creating instances of persistent domains that are not
    yet started.

    @param domconfig: Parsed configuration
    @type  domconfig: XendConfig object
    @rtype:  XendDomainInfo
    @return: A dormant XendDomainInfo instance
    @raise XendError: Errors with configuration.
    """
    log.debug("XendDomainInfo.createDormant(%s)", scrub_password(domconfig))

    # A domain id makes no sense for a domain that is not running.
    domconfig.pop('domid', None)
    return XendDomainInfo(domconfig)
def domain_by_name(name):
    """Look up a domain by name.

    @params name: Name of the domain
    @type name: string
    @return: XendDomainInfo or None
    """
    from xen.xend import XendDomain
    xd = XendDomain.instance()
    return xd.domain_lookup_by_name_nr(name)
def shutdown_reason(code):
    """Map a shutdown code to a human-readable reason string.

    @param code: shutdown code
    @type code: int
    @return: shutdown reason ("?" for an unknown code)
    @rtype: string
    """
    reason = DOMAIN_SHUTDOWN_REASONS.get(code, "?")
    return reason
278 def dom_get(dom):
279 """Get info from xen for an existing domain.
281 @param dom: domain id
282 @type dom: int
283 @return: info or None
284 @rtype: dictionary
285 """
286 try:
287 domlist = xc.domain_getinfo(dom, 1)
288 if domlist and dom == domlist[0]['domid']:
289 return domlist[0]
290 except Exception, err:
291 # ignore missing domain
292 log.trace("domain_getinfo(%d) failed, ignoring: %s", dom, str(err))
293 return None
def get_assigned_pci_devices(domid):
    """Return the PCI device strings assigned to a domain.

    Reads the pci backend's xenstore tree for the given domain and
    collects every 'dev-N' entry, in order.

    @param domid: domain id
    @type  domid: int
    @return: list of PCI device strings; entries may be None when a
             'dev-N' key is unexpectedly missing from the store
    @rtype: list
    """
    path = '/local/domain/0/backend/pci/%u/0/' % domid
    num_devs = xstransact.Read(path + 'num_devs')
    if num_devs is None or num_devs == "":
        return []
    # Build the list in a single pass rather than the quadratic
    # "list = list + [x]" pattern.
    return [xstransact.Read(path + 'dev-%i' % i)
            for i in range(int(num_devs))]
307 def do_FLR(domid):
308 from xen.xend.server.pciif import parse_pci_name, PciDevice
309 dev_str_list = get_assigned_pci_devices(domid)
311 for dev_str in dev_str_list:
312 (dom, b, d, f) = parse_pci_name(dev_str)
313 try:
314 dev = PciDevice(dom, b, d, f)
315 except Exception, e:
316 raise VmError("pci: failed to locate device and "+
317 "parse it's resources - "+str(e))
318 dev.do_FLR()
320 class XendDomainInfo:
321 """An object represents a domain.
323 @TODO: try to unify dom and domid, they mean the same thing, but
324 xc refers to it as dom, and everywhere else, including
325 xenstore it is domid. The best way is to change xc's
326 python interface.
328 @ivar info: Parsed configuration
329 @type info: dictionary
330 @ivar domid: Domain ID (if VM has started)
331 @type domid: int or None
332 @ivar vmpath: XenStore path to this VM.
333 @type vmpath: string
334 @ivar dompath: XenStore path to this Domain.
335 @type dompath: string
336 @ivar image: Reference to the VM Image.
337 @type image: xen.xend.image.ImageHandler
338 @ivar store_port: event channel to xenstored
339 @type store_port: int
340 @ivar console_port: event channel to xenconsoled
341 @type console_port: int
342 @ivar store_mfn: xenstored mfn
343 @type store_mfn: int
344 @ivar console_mfn: xenconsoled mfn
345 @type console_mfn: int
346 @ivar notes: OS image notes
347 @type notes: dictionary
348 @ivar vmWatch: reference to a watch on the xenstored vmpath
349 @type vmWatch: xen.xend.xenstore.xswatch
350 @ivar shutdownWatch: reference to watch on the xenstored domain shutdown
351 @type shutdownWatch: xen.xend.xenstore.xswatch
352 @ivar shutdownStartTime: UNIX Time when domain started shutting down.
353 @type shutdownStartTime: float or None
354 @ivar restart_in_progress: Is a domain restart thread running?
355 @type restart_in_progress: bool
356 # @ivar state: Domain state
357 # @type state: enum(DOM_STATE_HALTED, DOM_STATE_RUNNING, ...)
358 @ivar state_updated: lock for self.state
359 @type state_updated: threading.Condition
360 @ivar refresh_shutdown_lock: lock for polling shutdown state
361 @type refresh_shutdown_lock: threading.Condition
362 @ivar _deviceControllers: device controller cache for this domain
363 @type _deviceControllers: dict 'string' to DevControllers
364 """
def __init__(self, info, domid = None, dompath = None, augment = False,
             priv = False, resume = False, vmpath = None):
    """Constructor for a domain

    @param info: parsed configuration
    @type info: dictionary
    @keyword domid: Set initial domain id (if any)
    @type domid: int
    @keyword dompath: Set initial dompath (if any)
    @type dompath: string
    @keyword augment: Augment given info with xenstored VM info
    @type augment: bool
    @keyword priv: Is a privileged domain (Dom 0)
    @type priv: bool
    @keyword resume: Is this domain being resumed?
    @type resume: bool
    @keyword vmpath: XenStore path to this VM (chosen automatically if None)
    @type vmpath: string or None
    """

    self.info = info
    # Fall back to the domid recorded in the configuration, if any.
    if domid == None:
        self.domid = self.info.get('domid')
    else:
        self.domid = domid

    #REMOVE: uuid is now generated in XendConfig
    #if not self._infoIsSet('uuid'):
    #    self.info['uuid'] = uuid.toString(uuid.create())

    # Find a unique /vm/<uuid>/<integer> path if not specified.
    # This avoids conflict between pre-/post-migrate domains when doing
    # localhost relocation.
    self.vmpath = vmpath
    i = 0
    while self.vmpath == None:
        self.vmpath = XS_VMROOT + self.info['uuid']
        if i != 0:
            self.vmpath = self.vmpath + '-' + str(i)
        try:
            # If a VM already lives at this path, retry with a '-<i>' suffix.
            if self._readVm("uuid"):
                self.vmpath = None
                i = i + 1
        except:
            # A failed read means the path is free; keep the candidate.
            pass

    self.dompath = dompath

    # Runtime state, filled in as the domain is constructed/started.
    self.image = None
    self.store_port = None
    self.store_mfn = None
    self.console_port = None
    self.console_mfn = None

    self.native_protocol = None

    # xenstore watches and shutdown-monitoring state.
    self.vmWatch = None
    self.shutdownWatch = None
    self.shutdownStartTime = None
    self._resume = resume
    self.restart_in_progress = False

    self.state_updated = threading.Condition()
    self.refresh_shutdown_lock = threading.Condition()
    self._stateSet(DOM_STATE_HALTED)

    self._deviceControllers = {}

    # Legacy state flags, all cleared initially.
    for state in DOM_STATES_OLD:
        self.info[state] = 0

    if augment:
        self._augmentInfo(priv)

    self._checkName(self.info['name_label'])

    self.metrics = XendVMMetrics(uuid.createString(), self)
443 #
444 # Public functions available through XMLRPC
445 #
def start(self, is_managed = False):
    """Start the VM if it is not already running.

    Constructs the domain, stores its details to xenstore, registers
    watches and begins shutdown monitoring.  On any failure the
    partially-built domain is destroyed and the exception re-raised.

    @keyword is_managed: save the running configuration if XendDomain
        considers this domain persistent
    @type is_managed: bool
    @raise XendError: the VM is already running
    """
    from xen.xend import XendDomain

    if self._stateGet() in (XEN_API_VM_POWER_STATE_HALTED, XEN_API_VM_POWER_STATE_SUSPENDED, XEN_API_VM_POWER_STATE_CRASHED):
        try:
            XendTask.log_progress(0, 30, self._constructDomain)
            XendTask.log_progress(31, 60, self._initDomain)

            XendTask.log_progress(61, 70, self._storeVmDetails)
            XendTask.log_progress(71, 80, self._storeDomDetails)
            XendTask.log_progress(81, 90, self._registerWatches)
            XendTask.log_progress(91, 100, self.refreshShutdown)

            xendomains = XendDomain.instance()
            xennode = XendNode.instance()

            # save running configuration if XendDomains believe domain is
            # persistent
            if is_managed:
                xendomains.managed_config_save(self)

            # Apply weight/cap only when the credit scheduler is active.
            if xennode.xenschedinfo() == 'credit':
                xendomains.domain_sched_credit_set(self.getDomid(),
                                                   self.getWeight(),
                                                   self.getCap())
        except:
            log.exception('VM start failed')
            self.destroy()
            raise
    else:
        raise XendError('VM already running')
def resume(self):
    """Resumes a domain that has come back from suspension.

    Reconstructs the domain, its channels and devices, and stores its
    details back to xenstore.  On failure the domain is destroyed and
    the exception re-raised.

    @raise XendError: the domain is not suspended or halted
    """
    state = self._stateGet()
    if state in (DOM_STATE_SUSPENDED, DOM_STATE_HALTED):
        try:
            self._constructDomain()

            try:
                self._setCPUAffinity()
            except:
                # usually a CPU we want to set affinity to does not exist
                # we just ignore it so that the domain can still be restored
                log.warn("Cannot restore CPU affinity")

            self._storeVmDetails()
            self._createChannels()
            self._createDevices()
            self._storeDomDetails()
            self._endRestore()
        except:
            log.exception('VM resume failed')
            self.destroy()
            raise
    else:
        raise XendError('VM is not suspended; it is %s'
                        % XEN_API_VM_POWER_STATE[state])
def shutdown(self, reason):
    """Shutdown a domain by signalling this via xenstored.

    @param reason: one of the values of DOMAIN_SHUTDOWN_REASONS
    @type reason: string
    @raise XendError: domain already shut down / halted, is Domain 0,
        or the reason is not a known shutdown reason
    """
    log.debug('XendDomainInfo.shutdown(%s)', reason)
    if self._stateGet() in (DOM_STATE_SHUTDOWN, DOM_STATE_HALTED,):
        raise XendError('Domain cannot be shutdown')

    if self.domid == 0:
        raise XendError('Domain 0 cannot be shutdown')

    if reason not in DOMAIN_SHUTDOWN_REASONS.values():
        raise XendError('Invalid reason: %s' % reason)
    # Ask the guest to shut itself down via the xenstore control node.
    self.storeDom("control/shutdown", reason)

    # HVM domain shuts itself down only if it has PV drivers
    if self.info.is_hvm():
        hvm_pvdrv = xc.hvm_get_param(self.domid, HVM_PARAM_CALLBACK_IRQ)
        hvm_s_state = xc.hvm_get_param(self.domid, HVM_PARAM_ACPI_S_STATE)
        # No PV drivers (or in an ACPI sleep state): force the shutdown
        # from outside via the hypervisor.
        if not hvm_pvdrv or hvm_s_state != 0:
            code = REVERSE_DOMAIN_SHUTDOWN_REASONS[reason]
            log.info("HVM save:remote shutdown dom %d!", self.domid)
            xc.domain_shutdown(self.domid, code)
532 def pause(self):
533 """Pause domain
535 @raise XendError: Failed pausing a domain
536 """
537 try:
538 xc.domain_pause(self.domid)
539 self._stateSet(DOM_STATE_PAUSED)
540 except Exception, ex:
541 log.exception(ex)
542 raise XendError("Domain unable to be paused: %s" % str(ex))
544 def unpause(self):
545 """Unpause domain
547 @raise XendError: Failed unpausing a domain
548 """
549 try:
550 xc.domain_unpause(self.domid)
551 self._stateSet(DOM_STATE_RUNNING)
552 except Exception, ex:
553 log.exception(ex)
554 raise XendError("Domain unable to be unpaused: %s" % str(ex))
def send_sysrq(self, key):
    """Send a Sysrq equivalent key to the guest via xenstored.

    @param key: sysrq key, must be convertible to a single character
    @raise XendError: the domain is neither running nor paused
    """
    state = self._stateGet()
    if state != DOM_STATE_RUNNING and state != DOM_STATE_PAUSED:
        raise XendError("Domain '%s' is not started" % self.info['name_label'])

    asserts.isCharConvertible(key)
    self.storeDom("control/sysrq", '%c' % key)
def sync_pcidev_info(self):
    """Synchronise PCI vslot assignments from the device model.

    Polls the pci backend's 'vslots' xenstore key (written by the
    device model) for up to ~2 seconds and copies each slot into the
    corresponding entry of this domain's PCI device configuration.
    No-op for non-HVM guests or when no PCI platform device exists.
    """
    if not self.info.is_hvm():
        return

    # All passthrough PCI devices share the single conf node '0'.
    devid = '0'
    dev_info = self._getDeviceInfo_pci(devid)
    if dev_info is None:
        return

    # get the virtual slot info from xenstore
    dev_uuid = sxp.child_value(dev_info, 'uuid')
    pci_conf = self.info['devices'][dev_uuid][1]
    pci_devs = pci_conf['devs']

    # Poll for the device model to publish the vslots; stop as soon as
    # the read succeeds instead of sleeping one extra interval.
    vslots = None
    for _ in range(20):
        vslots = xstransact.Read("/local/domain/0/backend/pci/%u/%s/vslots"
                                 % (self.getDomid(), devid))
        if vslots is not None:
            break
        time.sleep(0.1)
    if vslots is None:
        log.error("Device model didn't tell the vslots for PCI device")
        return

    #delete last delim
    if vslots[-1] == ";":
        vslots = vslots[:-1]

    slot_list = vslots.split(';')
    if len(slot_list) != len(pci_devs):
        log.error("Device model's pci dev num dismatch")
        return

    #update the vslot info
    for dev, slot in zip(pci_devs, slot_list):
        dev['vslot'] = slot
def hvm_pci_device_create(self, dev_config):
    """Hot-plug a PCI passthrough device into a running HVM guest.

    Validates the requested device (virtual-slot conflict, VT-d
    assignability, pciback ownership, MMIO BAR alignment, co-assignment
    constraints) and then asks the device model to insert it.

    @param dev_config: parsed pci device configuration; the single entry
        of dev_config['devs'] describes the device to insert
    @type dev_config: dictionary
    @return: the virtual slot the device was placed in
    @rtype: string
    @raise VmError: the guest is not HVM, or any validation step fails
    """
    log.debug("XendDomainInfo.hvm_pci_device_create: %s"
              % scrub_password(dev_config))

    if not self.info.is_hvm():
        raise VmError("hvm_pci_device_create called on non-HVM guest")

    #all the PCI devs share one conf node
    devid = '0'

    new_dev = dev_config['devs'][0]
    dev_info = self._getDeviceInfo_pci(devid)#from self.info['devices']

    #check conflict before trigger hotplug event
    if dev_info is not None:
        dev_uuid = sxp.child_value(dev_info, 'uuid')
        pci_conf = self.info['devices'][dev_uuid][1]
        pci_devs = pci_conf['devs']
        for x in pci_devs:
            # Slot actually assigned if known, otherwise the one requested.
            x_vslot = assigned_or_requested_vslot(x)
            if (int(x_vslot, 16) == int(new_dev['requested_vslot'], 16) and
                int(x_vslot, 16) != AUTO_PHP_SLOT):
                raise VmError("vslot %s already have a device." % (new_dev['requested_vslot']))

            if (int(x['domain'], 16) == int(new_dev['domain'], 16) and
                int(x['bus'], 16) == int(new_dev['bus'], 16) and
                int(x['slot'], 16) == int(new_dev['slot'], 16) and
                int(x['func'], 16) == int(new_dev['func'], 16) ):
                raise VmError("device is already inserted")

    # Test whether the devices can be assigned with VT-d
    pci_str = "%s, %s, %s, %s" % (new_dev['domain'],
                                  new_dev['bus'],
                                  new_dev['slot'],
                                  new_dev['func'])
    bdf = xc.test_assign_device(0, pci_str)
    if bdf != 0:
        if bdf == -1:
            raise VmError("failed to assign device: maybe the platform"
                          " doesn't support VT-d, or VT-d isn't enabled"
                          " properly?")
        # Decode the failing bus/dev/func packed into the return value.
        bus = (bdf >> 16) & 0xff
        devfn = (bdf >> 8) & 0xff
        dev = (devfn >> 3) & 0x1f
        func = devfn & 0x7
        raise VmError("fail to assign device(%x:%x.%x): maybe it has"
                      " already been assigned to other domain, or maybe"
                      " it doesn't exist." % (bus, dev, func))

    # Here, we duplicate some checkings (in some cases, we mustn't allow
    # a device to be hot-plugged into an HVM guest) that are also done in
    # pci_device_configure()'s self.device_create(dev_sxp) or
    # dev_control.reconfigureDevice(devid, dev_config).
    # We must make the checkings before sending the command 'pci-ins' to
    # ioemu.

    # Test whether the device is owned by pciback. For instance, we can't
    # hotplug a device being used by Dom0 itself to an HVM guest.
    from xen.xend.server.pciif import PciDevice, parse_pci_name
    domain = int(new_dev['domain'],16)
    bus = int(new_dev['bus'],16)
    dev = int(new_dev['slot'],16)
    func = int(new_dev['func'],16)
    try:
        pci_device = PciDevice(domain, bus, dev, func)
    except Exception, e:
        raise VmError("pci: failed to locate device and "+
                      "parse it's resources - "+str(e))
    if pci_device.driver!='pciback':
        raise VmError(("pci: PCI Backend does not own device "+ \
                       "%s\n"+ \
                       "See the pciback.hide kernel "+ \
                       "command-line parameter or\n"+ \
                       "bind your slot/device to the PCI backend using sysfs" \
                       )%(pci_device.name))

    # Check non-page-aligned MMIO BAR.
    if pci_device.has_non_page_aligned_bar and arch.type != "ia64":
        raise VmError("pci: %s: non-page-aligned MMIO BAR found." % \
                      pci_device.name)

    # Check the co-assignment.
    # To pci-attach a device D to domN, we should ensure each of D's
    # co-assignment devices hasn't been assigned, or has been assigned to
    # domN.
    coassignment_list = pci_device.find_coassigned_devices()
    assigned_pci_device_str_list = self._get_assigned_pci_devices()
    for pci_str in coassignment_list:
        (domain, bus, dev, func) = parse_pci_name(pci_str)
        dev_str = '0x%x,0x%x,0x%x,0x%x' % (domain, bus, dev, func)
        # Still assignable means not yet given to any domain: fine.
        if xc.test_assign_device(0, dev_str) == 0:
            continue
        if not pci_str in assigned_pci_device_str_list:
            raise VmError(("pci: failed to pci-attach %s to domain %s" + \
                           " because one of its co-assignment device %s has been" + \
                           " assigned to other domain." \
                           )% (pci_device.name, self.info['name_label'], pci_str))

    if self.domid is not None:
        # Running guest: build the option suffix and signal the device
        # model to perform the actual insertion.
        opts = ''
        if 'opts' in new_dev and len(new_dev['opts']) > 0:
            config_opts = new_dev['opts']
            config_opts = map(lambda (x, y): x+'='+y, config_opts)
            opts = ',' + reduce(lambda x, y: x+','+y, config_opts)

        bdf_str = "%s:%s:%s.%s@%s%s" % (new_dev['domain'],
                                        new_dev['bus'],
                                        new_dev['slot'],
                                        new_dev['func'],
                                        new_dev['requested_vslot'],
                                        opts)
        self.image.signalDeviceModel('pci-ins', 'pci-inserted', bdf_str)

        # The device model reports back the slot it actually used.
        vslot = xstransact.Read("/local/domain/0/device-model/%i/parameter"
                                % self.getDomid())
    else:
        vslot = new_dev['requested_vslot']

    return vslot
def device_create(self, dev_config):
    """Create a new device.

    Adds the device to this domain's configuration, and, if the domain
    is running, creates the backend device and waits for it to connect.
    On failure the device is rolled back out of the configuration.

    @param dev_config: device configuration
    @type  dev_config: SXP object (parsed config)
    @return: SXP description of the device from its controller
    @raise VmError: invalid configuration or device creation failure
    """
    log.debug("XendDomainInfo.device_create: %s" % scrub_password(dev_config))
    dev_type = sxp.name(dev_config)
    dev_uuid = self.info.device_add(dev_type, cfg_sxp = dev_config)
    dev_config_dict = self.info['devices'][dev_uuid][1]
    log.debug("XendDomainInfo.device_create: %s" % scrub_password(dev_config_dict))

    # Validate the MAC address for virtual network interfaces up front.
    if dev_type == 'vif':
        for x in dev_config:
            if x != 'vif' and x[0] == 'mac':
                if not re.match('^([0-9a-f]{2}:){5}[0-9a-f]{2}$', x[1], re.I):
                    log.error("Virtual network interface creation error - invalid MAC Address entered: %s", x[1])
                    raise VmError("Cannot create a new virtual network interface - MAC address is not valid!");

    if self.domid is not None:
        try:
            dev_config_dict['devid'] = devid = \
                self._createDevice(dev_type, dev_config_dict)
            self._waitForDevice(dev_type, devid)
        except VmError, ex:
            # Roll the device back out of the configuration; each device
            # type records its references in a different place.
            del self.info['devices'][dev_uuid]
            if dev_type == 'pci':
                for dev in dev_config_dict['devs']:
                    XendAPIStore.deregister(dev['uuid'], 'DPCI')
            elif dev_type == 'vscsi':
                for dev in dev_config_dict['devs']:
                    XendAPIStore.deregister(dev['uuid'], 'DSCSI')
            elif dev_type == 'tap':
                self.info['vbd_refs'].remove(dev_uuid)
            else:
                self.info['%s_refs' % dev_type].remove(dev_uuid)
            raise ex
    else:
        # Domain not running: just record the device in the config.
        devid = None

    xen.xend.XendDomain.instance().managed_config_save(self)
    return self.getDeviceController(dev_type).sxpr(devid)
def pci_device_configure(self, dev_sxp, devid = 0):
    """Configure an existing pci device.

    Handles both attachment ('Initialising') and detachment ('Closing')
    of a single PCI function, for HVM and PV guests, updating both the
    live device controller and the stored configuration.

    @param dev_sxp: device configuration
    @type  dev_sxp: SXP object (parsed config)
    @param devid: device id
    @type  devid: int
    @return: Returns True if successfully updated device
    @rtype: boolean
    """
    log.debug("XendDomainInfo.pci_device_configure: %s"
              % scrub_password(dev_sxp))

    dev_class = sxp.name(dev_sxp)

    if dev_class != 'pci':
        return False

    pci_state = sxp.child_value(dev_sxp, 'state')
    existing_dev_info = self._getDeviceInfo_pci(devid)

    if existing_dev_info is None and pci_state != 'Initialising':
        raise XendError("Cannot detach when pci platform does not exist")

    pci_dev = sxp.children(dev_sxp, 'dev')[0]
    dev_config = self.info.pci_convert_sxp_to_dict(dev_sxp)
    dev = dev_config['devs'][0]

    # Do HVM specific processing
    if self.info.is_hvm():
        if pci_state == 'Initialising':
            # HVM PCI device attachment
            vslot = self.hvm_pci_device_create(dev_config)
            # Update vslot
            dev['vslot'] = vslot
            for n in sxp.children(pci_dev):
                if(n[0] == 'vslot'):
                    n[1] = vslot
        else:
            # HVM PCI device detachment: find the vslot of the device
            # matching the given domain/bus/slot/func.
            existing_dev_uuid = sxp.child_value(existing_dev_info, 'uuid')
            existing_pci_conf = self.info['devices'][existing_dev_uuid][1]
            existing_pci_devs = existing_pci_conf['devs']
            vslot = AUTO_PHP_SLOT_STR
            for x in existing_pci_devs:
                if ( int(x['domain'], 16) == int(dev['domain'], 16) and
                     int(x['bus'], 16) == int(dev['bus'], 16) and
                     int(x['slot'], 16) == int(dev['slot'], 16) and
                     int(x['func'], 16) == int(dev['func'], 16) ):
                    vslot = assigned_or_requested_vslot(x)
                    break
            if vslot == AUTO_PHP_SLOT_STR:
                raise VmError("Device %04x:%02x:%02x.%01x is not connected"
                              % (int(dev['domain'],16), int(dev['bus'],16),
                                 int(dev['slot'],16), int(dev['func'],16)))
            self.hvm_destroyPCIDevice(int(vslot, 16))
            # Update vslot
            dev['vslot'] = vslot
            for n in sxp.children(pci_dev):
                if(n[0] == 'vslot'):
                    n[1] = vslot

    # If pci platform does not exist, create and exit.
    if existing_dev_info is None:
        self.device_create(dev_sxp)
        return True

    if self.domid is not None:
        # use DevController.reconfigureDevice to change device config
        dev_control = self.getDeviceController(dev_class)
        dev_uuid = dev_control.reconfigureDevice(devid, dev_config)
        if not self.info.is_hvm():
            # in PV case, wait until backend state becomes connected.
            dev_control.waitForDevice_reconfigure(devid)
        num_devs = dev_control.cleanupDevice(devid)

        # update XendConfig with new device info
        if dev_uuid:
            new_dev_sxp = dev_control.configuration(devid)
            self.info.device_update(dev_uuid, new_dev_sxp)

        # If there is no device left, destroy pci and remove config.
        if num_devs == 0:
            if self.info.is_hvm():
                self.destroyDevice('pci', devid, True)
                del self.info['devices'][dev_uuid]
                platform = self.info['platform']
                orig_dev_num = len(platform['pci'])
                # TODO: can use this to keep some info to ask high level
                # management tools to hot insert a new passthrough dev
                # after migration
                if orig_dev_num != 0:
                    #platform['pci'] = ["%dDEVs" % orig_dev_num]
                    platform['pci'] = []
            else:
                self.destroyDevice('pci', devid)
                del self.info['devices'][dev_uuid]
    else:
        # Domain not running: edit the stored SXP configuration only.
        new_dev_sxp = ['pci']
        for cur_dev in sxp.children(existing_dev_info, 'dev'):
            if pci_state == 'Closing':
                # Drop the entry matching the device being detached.
                if int(dev['domain'], 16) == int(sxp.child_value(cur_dev, 'domain'), 16) and \
                   int(dev['bus'], 16) == int(sxp.child_value(cur_dev, 'bus'), 16) and \
                   int(dev['slot'], 16) == int(sxp.child_value(cur_dev, 'slot'), 16) and \
                   int(dev['func'], 16) == int(sxp.child_value(cur_dev, 'func'), 16):
                    continue
            new_dev_sxp.append(cur_dev)

        if pci_state == 'Initialising':
            for new_dev in sxp.children(dev_sxp, 'dev'):
                new_dev_sxp.append(new_dev)

        dev_uuid = sxp.child_value(existing_dev_info, 'uuid')
        self.info.device_update(dev_uuid, new_dev_sxp)

        # If there is only 'pci' left in new_dev_sxp (no 'dev' entries),
        # remove the config.
        if len(sxp.children(new_dev_sxp, 'dev')) == 0:
            del self.info['devices'][dev_uuid]
            if self.info.is_hvm():
                platform = self.info['platform']
                orig_dev_num = len(platform['pci'])
                # TODO: can use this to keep some info to ask high level
                # management tools to hot insert a new passthrough dev
                # after migration
                if orig_dev_num != 0:
                    #platform['pci'] = ["%dDEVs" % orig_dev_num]
                    platform['pci'] = []

    xen.xend.XendDomain.instance().managed_config_save(self)

    return True
    def vscsi_device_configure(self, dev_sxp):
        """Configure an existing vscsi device: attach further LUNs to, or
        detach LUNs from, the vscsi host identified by the request's devid.

        @param dev_sxp: vscsi device configuration
        @type dev_sxp: SXP object (parsed config)
        @return: True on success, False if dev_sxp is not a vscsi config
        @raise XendError: on conflicting or missing device definitions
        """
        # True if any 'dev' entry of dev_info matches one of the given
        # physical (p_devs) or virtual (v_devs) device names.
        def _is_vscsi_defined(dev_info, p_devs = None, v_devs = None):
            if not dev_info:
                return False
            for dev in sxp.children(dev_info, 'dev'):
                if p_devs is not None:
                    if sxp.child_value(dev, 'p-dev') in p_devs:
                        return True
                if v_devs is not None:
                    if sxp.child_value(dev, 'v-dev') in v_devs:
                        return True
            return False

        # Normalise a backend reference (name or domid) to a domid string.
        def _vscsi_be(be):
            be_xdi = xen.xend.XendDomain.instance().domain_lookup_nr(be)
            if be_xdi is not None:
                be_domid = be_xdi.getDomid()
                if be_domid is not None:
                    return str(be_domid)
            return str(be)

        dev_class = sxp.name(dev_sxp)
        if dev_class != 'vscsi':
            return False

        dev_config = self.info.vscsi_convert_sxp_to_dict(dev_sxp)
        devs = dev_config['devs']
        v_devs = [d['v-dev'] for d in devs]
        state = devs[0]['state']
        req_devid = int(devs[0]['devid'])
        cur_dev_sxp = self._getDeviceInfo_vscsi(req_devid)

        if state == xenbusState['Initialising']:
            # new create
            # If request devid does not exist, create and exit.
            p_devs = [d['p-dev'] for d in devs]
            for dev_type, dev_info in self.info.all_devices_sxpr():
                if dev_type != 'vscsi':
                    continue
                if _is_vscsi_defined(dev_info, p_devs = p_devs):
                    raise XendError('The physical device "%s" is already defined' % \
                                    p_devs[0])
            if cur_dev_sxp is None:
                self.device_create(dev_sxp)
                return True

            if _is_vscsi_defined(cur_dev_sxp, v_devs = v_devs):
                raise XendError('The virtual device "%s" is already defined' % \
                                v_devs[0])

            # The new devices must use the same feature-host mode as the
            # existing vscsi host they are being added to.
            if int(dev_config['feature-host']) != \
               int(sxp.child_value(cur_dev_sxp, 'feature-host')):
                raise XendError('The physical device "%s" cannot define '
                                'because mode is different' % devs[0]['p-dev'])

            new_be = dev_config.get('backend', None)
            if new_be is not None:
                cur_be = sxp.child_value(cur_dev_sxp, 'backend', None)
                if cur_be is None:
                    cur_be = xen.xend.XendDomain.DOM0_ID
                new_be_dom = _vscsi_be(new_be)
                cur_be_dom = _vscsi_be(cur_be)
                if new_be_dom != cur_be_dom:
                    raise XendError('The physical device "%s" cannot define '
                                    'because backend is different' % devs[0]['p-dev'])

        elif state == xenbusState['Closing']:
            if not _is_vscsi_defined(cur_dev_sxp, v_devs = v_devs):
                raise XendError("Cannot detach vscsi device does not exist")

        if self.domid is not None:
            # use DevController.reconfigureDevice to change device config
            dev_control = self.getDeviceController(dev_class)
            dev_uuid = dev_control.reconfigureDevice(req_devid, dev_config)
            dev_control.waitForDevice_reconfigure(req_devid)
            num_devs = dev_control.cleanupDevice(req_devid)

            # update XendConfig with new device info
            if dev_uuid:
                new_dev_sxp = dev_control.configuration(req_devid)
                self.info.device_update(dev_uuid, new_dev_sxp)

            # If there is no device left, destroy vscsi and remove config.
            if num_devs == 0:
                self.destroyDevice('vscsi', req_devid)
                del self.info['devices'][dev_uuid]

        else:
            # Inactive (managed) domain: rebuild the stored sxpr by hand.
            new_dev_sxp = ['vscsi']
            cur_mode = sxp.children(cur_dev_sxp, 'feature-host')[0]
            new_dev_sxp.append(cur_mode)
            try:
                cur_be = sxp.children(cur_dev_sxp, 'backend')[0]
                new_dev_sxp.append(cur_be)
            except IndexError:
                # No explicit backend recorded; keep the default.
                pass

            for cur_dev in sxp.children(cur_dev_sxp, 'dev'):
                if state == xenbusState['Closing']:
                    # In feature-host mode the whole host detaches at once.
                    if int(cur_mode[1]) == 1:
                        continue
                    if sxp.child_value(cur_dev, 'v-dev') in v_devs:
                        continue
                new_dev_sxp.append(cur_dev)

            if state == xenbusState['Initialising']:
                for new_dev in sxp.children(dev_sxp, 'dev'):
                    new_dev_sxp.append(new_dev)

            dev_uuid = sxp.child_value(cur_dev_sxp, 'uuid')
            self.info.device_update(dev_uuid, new_dev_sxp)

            # If there is only 'vscsi' in new_dev_sxp, remove the config.
            if len(sxp.children(new_dev_sxp, 'dev')) == 0:
                del self.info['devices'][dev_uuid]

        xen.xend.XendDomain.instance().managed_config_save(self)

        return True
    def device_configure(self, dev_sxp, devid = None):
        """Configure an existing device.

        @param dev_sxp: device configuration
        @type dev_sxp: SXP object (parsed config)
        @param devid: device id
        @type devid: int
        @return: Returns True if successfully updated device
        @rtype: boolean
        """

        # convert device sxp to a dict
        dev_class = sxp.name(dev_sxp)
        dev_config = {}

        # PCI and vscsi devices have dedicated configure paths.
        if dev_class == 'pci':
            return self.pci_device_configure(dev_sxp)

        if dev_class == 'vscsi':
            return self.vscsi_device_configure(dev_sxp)

        for opt_val in dev_sxp[1:]:
            try:
                dev_config[opt_val[0]] = opt_val[1]
            except IndexError:
                # Entries without a value are skipped.
                pass

        dev_control = self.getDeviceController(dev_class)
        if devid is None:
            # Derive the device number from the virtual device name.
            dev = dev_config.get('dev', '')
            if not dev:
                raise VmError('Block device must have virtual details specified')
            if 'ioemu:' in dev:
                (_, dev) = dev.split(':', 1)
            try:
                (dev, _) = dev.split(':', 1)  # Remove ":disk" or ":cdrom"
            except ValueError:
                pass
            devid = dev_control.convertToDeviceNumber(dev)
        dev_info = self._getDeviceInfo_vbd(devid)
        if dev_info is None:
            raise VmError("Device %s not connected" % devid)
        dev_uuid = sxp.child_value(dev_info, 'uuid')

        if self.domid is not None:
            # use DevController.reconfigureDevice to change device config
            dev_control.reconfigureDevice(devid, dev_config)
        else:
            # Inactive domain: only swapping the media of a read-only
            # cdrom is permitted; everything else is refused.
            (_, new_b, new_f) = dev_control.getDeviceDetails(dev_config)
            if (new_f['device-type'] == 'cdrom' and
                sxp.child_value(dev_info, 'dev').endswith(':cdrom') and
                new_b['mode'] == 'r' and
                sxp.child_value(dev_info, 'mode') == 'r'):
                pass
            else:
                raise VmError('Refusing to reconfigure device %s:%d to %s' %
                              (dev_class, devid, dev_config))

        # update XendConfig with new device info
        self.info.device_update(dev_uuid, dev_sxp)
        xen.xend.XendDomain.instance().managed_config_save(self)

        return True
1090 def waitForDevices(self):
1091 """Wait for this domain's configured devices to connect.
1093 @raise VmError: if any device fails to initialise.
1094 """
1095 for devclass in XendDevices.valid_devices():
1096 self.getDeviceController(devclass).waitForDevices()
    def hvm_destroyPCIDevice(self, vslot):
        """Hot-unplug the passthrough PCI device in virtual slot ``vslot``
        from an HVM guest by signalling the device model.

        @param vslot: virtual PCI slot (int, or string parseable as int)
        @return: 0 on success
        @raise VmError: if the guest is not HVM, no device occupies the
            slot, or a co-assigned device is still assigned to this domain.
        """
        log.debug("hvm_destroyPCIDevice called %s", vslot)

        if not self.info.is_hvm():
            raise VmError("hvm_destroyPCIDevice called on non-HVM guest")

        #all the PCI devs share one conf node
        devid = '0'
        vslot = int(vslot)
        dev_info = self._getDeviceInfo_pci('0')#from self.info['devices']
        dev_uuid = sxp.child_value(dev_info, 'uuid')

        #delete the pci bdf config under the pci device
        pci_conf = self.info['devices'][dev_uuid][1]
        pci_len = len(pci_conf['devs'])

        #find the pass-through device with the virtual slot
        devnum = 0
        for x in pci_conf['devs']:
            # vslot values are stored as hex strings.
            x_vslot = assigned_or_requested_vslot(x)
            if int(x_vslot, 16) == vslot:
                break
            devnum += 1

        # NB. 'x' deliberately leaks out of the loop above; the rest of
        # the method operates on the matched device entry.
        if devnum >= pci_len:
            raise VmError("Device @ vslot 0x%x doesn't exist." % (vslot))

        # Check the co-assignment.
        # To pci-detach a device D from domN, we should ensure: for each DD in the
        # list of D's co-assignment devices, DD is not assigned (to domN).
        from xen.xend.server.pciif import PciDevice
        domain = int(x['domain'],16)
        bus = int(x['bus'],16)
        dev = int(x['slot'],16)
        func = int(x['func'],16)
        try:
            pci_device = PciDevice(domain, bus, dev, func)
        except Exception, e:
            raise VmError("pci: failed to locate device and "+
                    "parse it's resources - "+str(e))
        coassignment_list = pci_device.find_coassigned_devices()
        coassignment_list.remove(pci_device.name)
        assigned_pci_device_str_list = self._get_assigned_pci_devices()
        for pci_str in coassignment_list:
            if pci_str in assigned_pci_device_str_list:
                raise VmError(("pci: failed to pci-detach %s from domain %s" + \
                    " because one of its co-assignment device %s is still " + \
                    " assigned to the domain." \
                    )% (pci_device.name, self.info['name_label'], pci_str))

        bdf_str = "%s:%s:%s.%s" % (x['domain'], x['bus'], x['slot'], x['func'])
        log.info("hvm_destroyPCIDevice:%s:%s!", x, bdf_str)

        if self.domid is not None:
            # Ask the device model to remove the device; it acknowledges
            # via the 'pci-removed' node.
            self.image.signalDeviceModel('pci-rem', 'pci-removed', bdf_str)

        return 0
    def destroyDevice(self, deviceClass, devid, force = False, rm_cfg = False):
        """Destroy a device of this domain.

        @param deviceClass: device class, e.g. 'vif', 'vbd', 'tap'
        @param devid: device id or virtual device name
        @param force: if True, skip waiting for graceful backend teardown
        @param rm_cfg: if True, also delete the device from the stored config
        @return: the controller's destroyDevice result, or None for an
            inactive domain
        @raise XendError: if rm_cfg is set and the device is not defined
        """
        log.debug("XendDomainInfo.destroyDevice: deviceClass = %s, device = %s",
                  deviceClass, devid)

        if rm_cfg:
            # Convert devid to device number. A device number is
            # needed to remove its configuration.
            dev = self.getDeviceController(deviceClass).convertToDeviceNumber(devid)

            # Save current sxprs. A device number and a backend
            # path are needed to remove its configuration but sxprs
            # do not have those after calling destroyDevice.
            sxprs = self.getDeviceSxprs(deviceClass)

        rc = None
        if self.domid is not None:
            rc = self.getDeviceController(deviceClass).destroyDevice(devid, force)
            if not force and rm_cfg:
                # The backend path, other than the device itself,
                # has to be passed because its accompanied frontend
                # path may be void until its removal is actually
                # issued. It is probable because destroyDevice is
                # issued first.
                for dev_num, dev_info in sxprs:
                    dev_num = int(dev_num)
                    if dev_num == dev:
                        for x in dev_info:
                            if x[0] == 'backend':
                                backend = x[1]
                                break
                        break
                # NOTE(review): if no entry matches or it lacks a
                # 'backend' element, 'backend' is unbound here and this
                # raises NameError -- confirm that cannot happen.
                self._waitForDevice_destroy(deviceClass, devid, backend)

        if rm_cfg:
            if deviceClass == 'vif':
                if self.domid is not None:
                    # Recover the vif's MAC to look up its stored config.
                    for dev_num, dev_info in sxprs:
                        dev_num = int(dev_num)
                        if dev_num == dev:
                            for x in dev_info:
                                if x[0] == 'mac':
                                    mac = x[1]
                                    break
                            break
                    dev_info = self._getDeviceInfo_vif(mac)
                else:
                    _, dev_info = sxprs[dev]
            else: # 'vbd' or 'tap'
                dev_info = self._getDeviceInfo_vbd(dev)
                # To remove the UUID of the device from refs,
                # deviceClass must be always 'vbd'.
                deviceClass = 'vbd'
            if dev_info is None:
                raise XendError("Device %s is not defined" % devid)

            dev_uuid = sxp.child_value(dev_info, 'uuid')
            del self.info['devices'][dev_uuid]
            self.info['%s_refs' % deviceClass].remove(dev_uuid)
            xen.xend.XendDomain.instance().managed_config_save(self)

        return rc
    def getDeviceSxprs(self, deviceClass):
        """Return device sxprs for the given device class.

        For PCI the single shared config node's 'devs' list is returned.
        For a live domain the controller is queried; for an inactive one
        the result is built from the stored config as
        [device-number, sxpr] pairs.
        """
        if deviceClass == 'pci':
            dev_info = self._getDeviceInfo_pci('0')#from self.info['devices']
            if dev_info is None:
                return []
            dev_uuid = sxp.child_value(dev_info, 'uuid')
            pci_devs = self.info['devices'][dev_uuid][1]['devs']
            return pci_devs
        if self._stateGet() in (DOM_STATE_RUNNING, DOM_STATE_PAUSED, DOM_STATE_CRASHED):
            return self.getDeviceController(deviceClass).sxprs()
        else:
            sxprs = []
            dev_num = 0
            for dev_type, dev_info in self.info.all_devices_sxpr():
                # 'vbd' matches both vbd and tap devices.
                if (deviceClass == 'vbd' and dev_type not in ['vbd', 'tap']) or \
                   (deviceClass != 'vbd' and dev_type != deviceClass):
                    continue

                if deviceClass == 'vscsi':
                    vscsi_devs = ['devs', []]
                    for vscsi_dev in sxp.children(dev_info, 'dev'):
                        vscsi_dev.append(['frontstate', None])
                        vscsi_devs[1].append(vscsi_dev)
                        dev_num = int(sxp.child_value(vscsi_dev, 'devid'))
                    vscsi_mode = sxp.children(dev_info, 'feature-host')[0]
                    sxprs.append([dev_num, [vscsi_devs, vscsi_mode]])
                elif deviceClass == 'vbd':
                    # Derive the device number from the virtual name.
                    dev = sxp.child_value(dev_info, 'dev')
                    if 'ioemu:' in dev:
                        (_, dev) = dev.split(':', 1)
                    try:
                        (dev_name, _) = dev.split(':', 1)  # Remove ":disk" or ":cdrom"
                    except ValueError:
                        dev_name = dev
                    dev_num = self.getDeviceController('vbd').convertToDeviceNumber(dev_name)
                    sxprs.append([dev_num, dev_info])
                else:
                    sxprs.append([dev_num, dev_info])
                    dev_num += 1
            return sxprs
1261 def getBlockDeviceClass(self, devid):
1262 # To get a device number from the devid,
1263 # we temporarily use the device controller of VBD.
1264 dev = self.getDeviceController('vbd').convertToDeviceNumber(devid)
1265 dev_info = self._getDeviceInfo_vbd(dev)
1266 if dev_info:
1267 return dev_info[0]
1269 def _getDeviceInfo_vif(self, mac):
1270 for dev_type, dev_info in self.info.all_devices_sxpr():
1271 if dev_type != 'vif':
1272 continue
1273 if mac == sxp.child_value(dev_info, 'mac'):
1274 return dev_info
1276 def _getDeviceInfo_vbd(self, devid):
1277 for dev_type, dev_info in self.info.all_devices_sxpr():
1278 if dev_type != 'vbd' and dev_type != 'tap':
1279 continue
1280 dev = sxp.child_value(dev_info, 'dev')
1281 dev = dev.split(':')[0]
1282 dev = self.getDeviceController(dev_type).convertToDeviceNumber(dev)
1283 if devid == dev:
1284 return dev_info
1286 def _getDeviceInfo_pci(self, devid):
1287 for dev_type, dev_info in self.info.all_devices_sxpr():
1288 if dev_type != 'pci':
1289 continue
1290 return dev_info
1291 return None
1293 def _getDeviceInfo_vscsi(self, devid):
1294 devid = int(devid)
1295 for dev_type, dev_info in self.info.all_devices_sxpr():
1296 if dev_type != 'vscsi':
1297 continue
1298 devs = sxp.children(dev_info, 'dev')
1299 if devid == int(sxp.child_value(devs[0], 'devid')):
1300 return dev_info
1301 return None
1303 def _get_assigned_pci_devices(self, devid = 0):
1304 if self.domid is not None:
1305 return get_assigned_pci_devices(self.domid)
1307 dev_str_list = []
1308 dev_info = self._getDeviceInfo_pci(devid)
1309 if dev_info is None:
1310 return dev_str_list
1311 dev_uuid = sxp.child_value(dev_info, 'uuid')
1312 pci_conf = self.info['devices'][dev_uuid][1]
1313 pci_devs = pci_conf['devs']
1314 for pci_dev in pci_devs:
1315 domain = int(pci_dev['domain'], 16)
1316 bus = int(pci_dev['bus'], 16)
1317 slot = int(pci_dev['slot'], 16)
1318 func = int(pci_dev['func'], 16)
1319 dev_str = "%04x:%02x:%02x.%01x" % (domain, bus, slot, func)
1320 dev_str_list = dev_str_list + [dev_str]
1321 return dev_str_list
    def setMemoryTarget(self, target):
        """Set the memory target of this domain.
        @param target: In MiB.
        @raise XendError: if shrinking dom0 below its configured minimum.
        """
        log.debug("Setting memory target of domain %s (%s) to %d MiB.",
                  self.info['name_label'], str(self.domid), target)

        MiB = 1024 * 1024
        memory_cur = self.get_memory_dynamic_max() / MiB

        if self.domid == 0:
            # Never balloon dom0 below its configured minimum.
            dom0_min_mem = xoptions.get_dom0_min_mem()
            if target < memory_cur and dom0_min_mem > target:
                raise XendError("memory_dynamic_max too small")

        self._safe_set_memory('memory_dynamic_min', target * MiB)
        self._safe_set_memory('memory_dynamic_max', target * MiB)

        if self.domid >= 0:
            if target > memory_cur:
                # Growing: ensure the host can supply the extra memory (KiB).
                balloon.free((target - memory_cur) * 1024, self)
            self.storeVm("memory", target)
            # The xenstore target is recorded in KiB (MiB << 10).
            self.storeDom("memory/target", target << 10)
            xc.domain_set_target_mem(self.domid,
                                     (target * 1024))
        xen.xend.XendDomain.instance().managed_config_save(self)
    def setMemoryMaximum(self, limit):
        """Set the maximum memory limit of this domain
        @param limit: In MiB.
        @raise XendError: if the hypervisor rejects the new maximum; the
            previous value is restored before raising.
        """
        log.debug("Setting memory maximum of domain %s (%s) to %d MiB.",
                  self.info['name_label'], str(self.domid), limit)

        maxmem_cur = self.get_memory_static_max()
        MiB = 1024 * 1024
        self._safe_set_memory('memory_static_max', limit * MiB)

        if self.domid >= 0:
            # Hypervisor takes the limit in KiB.
            maxmem = int(limit) * 1024
            try:
                # NOTE(review): returning here means managed_config_save()
                # below only runs for inactive domains -- confirm intended.
                return xc.domain_setmaxmem(self.domid, maxmem)
            except Exception, ex:
                # Roll back the stored value on hypervisor failure.
                self._safe_set_memory('memory_static_max', maxmem_cur)
                raise XendError(str(ex))
        xen.xend.XendDomain.instance().managed_config_save(self)
    def getVCPUInfo(self):
        """Return an sxpr describing each VCPU of this domain.

        For a live domain the data comes from the hypervisor; for an
        inactive domain placeholder values are reported.
        @raise XendError: if the hypervisor query fails.
        """
        try:
            # We include the domain name and ID, to help xm.
            sxpr = ['domain',
                    ['domid', self.domid],
                    ['name', self.info['name_label']],
                    ['vcpu_count', self.info['VCPUs_max']]]

            for i in range(0, self.info['VCPUs_max']):
                if self.domid is not None:
                    info = xc.vcpu_getinfo(self.domid, i)

                    sxpr.append(['vcpu',
                                 ['number', i],
                                 ['online', info['online']],
                                 ['blocked', info['blocked']],
                                 ['running', info['running']],
                                 ['cpu_time', info['cpu_time'] / 1e9],
                                 ['cpu', info['cpu']],
                                 ['cpumap', info['cpumap']]])
                else:
                    # Inactive domain: report offline placeholders; the
                    # cpumap falls back to all 64 CPUs when no affinity
                    # is stored.
                    sxpr.append(['vcpu',
                                 ['number', i],
                                 ['online', 0],
                                 ['blocked', 0],
                                 ['running', 0],
                                 ['cpu_time', 0.0],
                                 ['cpu', -1],
                                 ['cpumap', self.info['cpus'][i] and \
                                  self.info['cpus'][i] or range(64)]])

            return sxpr

        except RuntimeError, exn:
            raise XendError(str(exn))
1408 def getDomInfo(self):
1409 return dom_get(self.domid)
1412 # internal functions ... TODO: re-categorised
    def _augmentInfo(self, priv):
        """Augment self.info, as given to us through L{recreate}, with
        values taken from the store. This recovers those values known
        to xend but not to the hypervisor.

        @param priv: True for the privileged domain (dom0).
        """
        augment_entries = XendConfig.LEGACY_XENSTORE_VM_PARAMS[:]
        if priv:
            # dom0's memory/vcpu values come from Xen, not the store.
            augment_entries.remove('memory')
            augment_entries.remove('maxmem')
            augment_entries.remove('vcpus')
            augment_entries.remove('vcpu_avail')

        vm_config = self._readVMDetails([(k, XendConfig.LEGACY_CFG_TYPES[k])
                                         for k in augment_entries])

        # make returned lists into a dictionary
        vm_config = dict(zip(augment_entries, vm_config))

        for arg in augment_entries:
            val = vm_config[arg]
            if val != None:
                # Map legacy names onto XenAPI field names where defined.
                if arg in XendConfig.LEGACY_CFG_TO_XENAPI_CFG:
                    xapiarg = XendConfig.LEGACY_CFG_TO_XENAPI_CFG[arg]
                    self.info[xapiarg] = val
                elif arg == "memory":
                    self.info["static_memory_min"] = val
                elif arg == "maxmem":
                    self.info["static_memory_max"] = val
                else:
                    self.info[arg] = val

        # read CPU Affinity
        self.info['cpus'] = []
        vcpus_info = self.getVCPUInfo()
        for vcpu_info in sxp.children(vcpus_info, 'vcpu'):
            self.info['cpus'].append(sxp.child_value(vcpu_info, 'cpumap'))

        # For dom0, we ignore any stored value for the vcpus fields, and
        # read the current value from Xen instead. This allows boot-time
        # settings to take precedence over any entries in the store.
        if priv:
            xeninfo = dom_get(self.domid)
            self.info['VCPUs_max'] = xeninfo['online_vcpus']
            self.info['vcpu_avail'] = (1 << xeninfo['online_vcpus']) - 1

        # read image value
        image_sxp = self._readVm('image')
        if image_sxp:
            self.info.update_with_image_sxp(sxp.from_string(image_sxp))

        # read devices
        devices = []
        for devclass in XendDevices.valid_devices():
            devconfig = self.getDeviceController(devclass).configurations()
            if devconfig:
                devices.extend(devconfig)

        if not self.info['devices'] and devices is not None:
            for device in devices:
                self.info.device_add(device[0], cfg_sxp = device)

        self._update_consoles()
    def _update_consoles(self, transaction = None):
        """Refresh serial-console and VNC locations from xenstore.

        Skipped for dom0 and for domains without a domid.
        @param transaction: optional open xenstore transaction to read
            through instead of one-off reads.
        """
        if self.domid == None or self.domid == 0:
            return

        # Update VT100 port if it exists
        if transaction is None:
            self.console_port = self.readDom('console/port')
        else:
            self.console_port = self.readDomTxn(transaction, 'console/port')
        if self.console_port is not None:
            serial_consoles = self.info.console_get_all('vt100')
            if not serial_consoles:
                cfg = self.info.console_add('vt100', self.console_port)
                self._createDevice('console', cfg)
            else:
                console_uuid = serial_consoles[0].get('uuid')
                self.info.console_update(console_uuid, 'location',
                                         self.console_port)

        # Update VNC port if it exists and write to xenstore
        if transaction is None:
            vnc_port = self.readDom('console/vnc-port')
        else:
            vnc_port = self.readDomTxn(transaction, 'console/vnc-port')
        if vnc_port is not None:
            for dev_uuid, (dev_type, dev_info) in self.info['devices'].items():
                if dev_type == 'vfb':
                    old_location = dev_info.get('location')
                    listen_host = dev_info.get('vnclisten', \
                                XendOptions.instance().get_vnclisten_address())
                    new_location = '%s:%s' % (listen_host, str(vnc_port))
                    # Nothing to do if the location is unchanged.
                    if old_location == new_location:
                        break

                    dev_info['location'] = new_location
                    self.info.device_update(dev_uuid, cfg_xenapi = dev_info)
                    vfb_ctrl = self.getDeviceController('vfb')
                    vfb_ctrl.reconfigureDevice(0, dev_info)
                    break
1520 # Function to update xenstore /vm/*
1523 def _readVm(self, *args):
1524 return xstransact.Read(self.vmpath, *args)
1526 def _writeVm(self, *args):
1527 return xstransact.Write(self.vmpath, *args)
1529 def _removeVm(self, *args):
1530 return xstransact.Remove(self.vmpath, *args)
1532 def _gatherVm(self, *args):
1533 return xstransact.Gather(self.vmpath, *args)
1535 def _listRecursiveVm(self, *args):
1536 return xstransact.ListRecursive(self.vmpath, *args)
1538 def storeVm(self, *args):
1539 return xstransact.Store(self.vmpath, *args)
1541 def permissionsVm(self, *args):
1542 return xstransact.SetPermissions(self.vmpath, *args)
1545 # Function to update xenstore /dom/*
1548 def readDom(self, *args):
1549 return xstransact.Read(self.dompath, *args)
1551 def gatherDom(self, *args):
1552 return xstransact.Gather(self.dompath, *args)
1554 def _writeDom(self, *args):
1555 return xstransact.Write(self.dompath, *args)
1557 def _removeDom(self, *args):
1558 return xstransact.Remove(self.dompath, *args)
1560 def storeDom(self, *args):
1561 return xstransact.Store(self.dompath, *args)
1564 def readDomTxn(self, transaction, *args):
1565 paths = map(lambda x: self.dompath + "/" + x, args)
1566 return transaction.read(*paths)
1568 def gatherDomTxn(self, transaction, *args):
1569 paths = map(lambda x: self.dompath + "/" + x, args)
1570 return transaction.gather(*paths)
1572 def _writeDomTxn(self, transaction, *args):
1573 paths = map(lambda x: self.dompath + "/" + x, args)
1574 return transaction.write(*paths)
1576 def _removeDomTxn(self, transaction, *args):
1577 paths = map(lambda x: self.dompath + "/" + x, args)
1578 return transaction.remove(*paths)
1580 def storeDomTxn(self, transaction, *args):
1581 paths = map(lambda x: self.dompath + "/" + x, args)
1582 return transaction.store(*paths)
1585 def _recreateDom(self):
1586 complete(self.dompath, lambda t: self._recreateDomFunc(t))
1588 def _recreateDomFunc(self, t):
1589 t.remove()
1590 t.mkdir()
1591 t.set_permissions({'dom' : self.domid, 'read' : True})
1592 t.write('vm', self.vmpath)
1593 # NB. Solaris guests use guest/ and hvmpv/ xenstore directories
1594 for i in [ 'device', 'control', 'error', 'memory', 'guest', 'hvmpv' ]:
1595 t.mkdir(i)
1596 t.set_permissions(i, {'dom' : self.domid})
    def _storeDomDetails(self):
        """Write this domain's details into its /local/domain/<id>
        xenstore subtree (via self.dompath)."""
        to_store = {
            'domid':          str(self.domid),
            'vm':             self.vmpath,
            'name':           self.info['name_label'],
            'console/limit':  str(xoptions.get_console_limit() * 1024),
            'memory/target':  str(self.info['memory_dynamic_max'] / 1024),
            }

        # Store v under key n; booleans become "1"/"0", None is skipped.
        def f(n, v):
            if v is not None:
                if type(v) == bool:
                    to_store[n] = v and "1" or "0"
                else:
                    to_store[n] = str(v)

        # Figure out if we need to tell xenconsoled to ignore this guest's
        # console - device model will handle console if it is running
        constype = "ioemu"
        if 'device_model' not in self.info['platform']:
            constype = "xenconsoled"

        f('console/port',     self.console_port)
        f('console/ring-ref', self.console_mfn)
        f('console/type',     constype)
        f('store/port',       self.store_port)
        f('store/ring-ref',   self.store_mfn)

        if arch.type == "x86":
            f('control/platform-feature-multiprocessor-suspend', True)

        # elfnotes
        for n, v in self.info.get_notes().iteritems():
            n = n.lower().replace('_', '-')
            if n == 'features':
                # Features are '|'-separated; a '!' prefix negates one.
                for v in v.split('|'):
                    v = v.replace('_', '-')
                    if v.startswith('!'):
                        f('image/%s/%s' % (n, v[1:]), False)
                    else:
                        f('image/%s/%s' % (n, v), True)
            else:
                f('image/%s' % n, v)

        if self.info.has_key('security_label'):
            f('security_label', self.info['security_label'])

        to_store.update(self._vcpuDomDetails())

        log.debug("Storing domain details: %s", scrub_password(to_store))

        self._writeDom(to_store)
1651 def _vcpuDomDetails(self):
1652 def availability(n):
1653 if self.info['vcpu_avail'] & (1 << n):
1654 return 'online'
1655 else:
1656 return 'offline'
1658 result = {}
1659 for v in range(0, self.info['VCPUs_max']):
1660 result["cpu/%d/availability" % v] = availability(v)
1661 return result
1664 # xenstore watches
1667 def _registerWatches(self):
1668 """Register a watch on this VM's entries in the store, and the
1669 domain's control/shutdown node, so that when they are changed
1670 externally, we keep up to date. This should only be called by {@link
1671 #create}, {@link #recreate}, or {@link #restore}, once the domain's
1672 details have been written, but before the new instance is returned."""
1673 self.vmWatch = xswatch(self.vmpath, self._storeChanged)
1674 self.shutdownWatch = xswatch(self.dompath + '/control/shutdown',
1675 self._handleShutdownWatch)
    def _storeChanged(self, _):
        """xswatch callback: pull externally-changed /vm/* values back
        into self.info, re-storing derived domain details if needed.

        Returns 1 to keep the watch registered.
        """
        log.trace("XendDomainInfo.storeChanged");

        changed = False

        # Check whether values in the configuration have
        # changed in Xenstore.

        cfg_vm = ['name', 'on_poweroff', 'on_reboot', 'on_crash',
                  'rtc/timeoffset']

        vm_details = self._readVMDetails([(k,XendConfig.LEGACY_CFG_TYPES[k])
                                           for k in cfg_vm])

        # convert two lists into a python dictionary
        vm_details = dict(zip(cfg_vm, vm_details))

        for arg, val in vm_details.items():
            if arg in XendConfig.LEGACY_CFG_TO_XENAPI_CFG:
                xapiarg = XendConfig.LEGACY_CFG_TO_XENAPI_CFG[arg]
                if val != None and val != self.info[xapiarg]:
                    self.info[xapiarg] = val
                    changed = True
            elif arg == "memory":
                if val != None and val != self.info["static_memory_min"]:
                    self.info["static_memory_min"] = val
                    changed = True
            elif arg == "maxmem":
                if val != None and val != self.info["static_memory_max"]:
                    self.info["static_memory_max"] = val
                    changed = True

        # Check whether image definition has been updated
        image_sxp = self._readVm('image')
        if image_sxp and image_sxp != sxp.to_string(self.info.image_sxpr()):
            self.info.update_with_image_sxp(sxp.from_string(image_sxp))
            changed = True

        # Update the rtc_timeoffset to be preserved across reboot.
        # NB. No need to update xenstore domain section.
        val = int(vm_details.get("rtc/timeoffset", 0))
        self.info["platform"]["rtc_timeoffset"] = val

        if changed:
            # Update the domain section of the store, as this contains some
            # parameters derived from the VM configuration.
            self.refresh_shutdown_lock.acquire()
            try:
                state = self._stateGet()
                if state not in (DOM_STATE_SHUTDOWN, DOM_STATE_HALTED,):
                    self._storeDomDetails()
            finally:
                self.refresh_shutdown_lock.release()

        return 1
    def _handleShutdownWatch(self, _):
        """xswatch callback on control/shutdown: schedule a
        refreshShutdown check once the shutdown grace period elapses.

        Returns True to keep the watch registered.
        """
        log.debug('XendDomainInfo.handleShutdownWatch')

        reason = self.readDom('control/shutdown')

        # 'suspend' is handled by XendCheckpoint, not here.
        if reason and reason != 'suspend':
            sst = self.readDom('xend/shutdown_start_time')
            now = time.time()
            if sst:
                # A shutdown is already in flight; time out relative to
                # its recorded start.
                self.shutdownStartTime = float(sst)
                timeout = float(sst) + SHUTDOWN_TIMEOUT - now
            else:
                self.shutdownStartTime = now
                self.storeDom('xend/shutdown_start_time', now)
                timeout = SHUTDOWN_TIMEOUT

            log.trace(
                "Scheduling refreshShutdown on domain %d in %ds.",
                self.domid, timeout)
            threading.Timer(timeout, self.refreshShutdown).start()

        return True
1758 # Public Attributes for the VM
1762 def getDomid(self):
1763 return self.domid
1765 def setName(self, name, to_store = True):
1766 self._checkName(name)
1767 self.info['name_label'] = name
1768 if to_store:
1769 self.storeVm("name", name)
1771 def getName(self):
1772 return self.info['name_label']
1774 def getDomainPath(self):
1775 return self.dompath
1777 def getShutdownReason(self):
1778 return self.readDom('control/shutdown')
1780 def getStorePort(self):
1781 """For use only by image.py and XendCheckpoint.py."""
1782 return self.store_port
1784 def getConsolePort(self):
1785 """For use only by image.py and XendCheckpoint.py"""
1786 return self.console_port
1788 def getFeatures(self):
1789 """For use only by image.py."""
1790 return self.info['features']
1792 def getVCpuCount(self):
1793 return self.info['VCPUs_max']
1795 def setVCpuCount(self, vcpus):
1796 def vcpus_valid(n):
1797 if vcpus <= 0:
1798 raise XendError('Zero or less VCPUs is invalid')
1799 if self.domid >= 0 and vcpus > self.info['VCPUs_max']:
1800 raise XendError('Cannot set vcpus greater than max vcpus on running domain')
1801 vcpus_valid(vcpus)
1803 self.info['vcpu_avail'] = (1 << vcpus) - 1
1804 if self.domid >= 0:
1805 self.storeVm('vcpu_avail', self.info['vcpu_avail'])
1806 self._writeDom(self._vcpuDomDetails())
1807 self.info['VCPUs_live'] = vcpus
1808 else:
1809 if self.info['VCPUs_max'] > vcpus:
1810 # decreasing
1811 del self.info['cpus'][vcpus:]
1812 elif self.info['VCPUs_max'] < vcpus:
1813 # increasing
1814 for c in range(self.info['VCPUs_max'], vcpus):
1815 self.info['cpus'].append(list())
1816 self.info['VCPUs_max'] = vcpus
1817 xen.xend.XendDomain.instance().managed_config_save(self)
1818 log.info("Set VCPU count on domain %s to %d", self.info['name_label'],
1819 vcpus)
1821 def getMemoryTarget(self):
1822 """Get this domain's target memory size, in KB."""
1823 return self.info['memory_dynamic_max'] / 1024
1825 def getMemoryMaximum(self):
1826 """Get this domain's maximum memory size, in KB."""
1827 # remember, info now stores memory in bytes
1828 return self.info['memory_static_max'] / 1024
1830 def getResume(self):
1831 return str(self._resume)
1833 def setResume(self, isresume):
1834 self._resume = isresume
1836 def getCpus(self):
1837 return self.info['cpus']
1839 def setCpus(self, cpumap):
1840 self.info['cpus'] = cpumap
1842 def getCap(self):
1843 return self.info['vcpus_params']['cap']
1845 def setCap(self, cpu_cap):
1846 self.info['vcpus_params']['cap'] = cpu_cap
1848 def getWeight(self):
1849 return self.info['vcpus_params']['weight']
1851 def setWeight(self, cpu_weight):
1852 self.info['vcpus_params']['weight'] = cpu_weight
1854 def getRestartCount(self):
1855 return self._readVm('xend/restart_count')
    def refreshShutdown(self, xeninfo = None):
        """ Checks the domain for whether a shutdown is required.

        Called from XendDomainInfo and also image.py for HVM images.

        @param xeninfo: optional pre-fetched hypervisor record; obtained
            via dom_get() when None.
        """

        # If set at the end of this method, a restart is required, with the
        # given reason.  This restart has to be done out of the scope of
        # refresh_shutdown_lock.
        restart_reason = None

        self.refresh_shutdown_lock.acquire()
        try:
            if xeninfo is None:
                xeninfo = dom_get(self.domid)
                if xeninfo is None:
                    # The domain no longer exists.  This will occur if we have
                    # scheduled a timer to check for shutdown timeouts and the
                    # shutdown succeeded.  It will also occur if someone
                    # destroys a domain beneath us.  We clean up the domain,
                    # just in case, but we can't clean up the VM, because that
                    # VM may have migrated to a different domain on this
                    # machine.
                    self.cleanupDomain()
                    self._stateSet(DOM_STATE_HALTED)
                    return

            if xeninfo['dying']:
                # Dying means that a domain has been destroyed, but has not
                # yet been cleaned up by Xen.  This state could persist
                # indefinitely if, for example, another domain has some of its
                # pages mapped.  We might like to diagnose this problem in the
                # future, but for now all we do is make sure that it's not us
                # holding the pages, by calling cleanupDomain.  We can't
                # clean up the VM, as above.
                self.cleanupDomain()
                self._stateSet(DOM_STATE_SHUTDOWN)
                return

            elif xeninfo['crashed']:
                if self.readDom('xend/shutdown_completed'):
                    # We've seen this shutdown already, but we are preserving
                    # the domain for debugging.  Leave it alone.
                    return

                log.warn('Domain has crashed: name=%s id=%d.',
                         self.info['name_label'], self.domid)
                self._writeVm(LAST_SHUTDOWN_REASON, 'crash')

                restart_reason = 'crash'
                self._stateSet(DOM_STATE_HALTED)

            elif xeninfo['shutdown']:
                self._stateSet(DOM_STATE_SHUTDOWN)
                if self.readDom('xend/shutdown_completed'):
                    # We've seen this shutdown already, but we are preserving
                    # the domain for debugging.  Leave it alone.
                    return

                else:
                    reason = shutdown_reason(xeninfo['shutdown_reason'])

                    log.info('Domain has shutdown: name=%s id=%d reason=%s.',
                             self.info['name_label'], self.domid, reason)
                    self._writeVm(LAST_SHUTDOWN_REASON, reason)

                    self._clearRestart()

                    if reason == 'suspend':
                        self._stateSet(DOM_STATE_SUSPENDED)
                        # Don't destroy the domain.  XendCheckpoint will do
                        # this once it has finished.  However, stop watching
                        # the VM path now, otherwise we will end up with one
                        # watch for the old domain, and one for the new.
                        self._unwatchVm()
                    elif reason in ('poweroff', 'reboot'):
                        restart_reason = reason
                    else:
                        self.destroy()

            elif self.dompath is None:
                # We have yet to manage to call introduceDomain on this
                # domain.  This can happen if a restore is in progress, or has
                # failed.  Ignore this domain.
                pass
            else:
                # Domain is alive.  If we are shutting it down, log a message
                # if it seems unresponsive.
                if xeninfo['paused']:
                    self._stateSet(DOM_STATE_PAUSED)
                else:
                    self._stateSet(DOM_STATE_RUNNING)

                if self.shutdownStartTime:
                    timeout = (SHUTDOWN_TIMEOUT - time.time() +
                               self.shutdownStartTime)
                    if (timeout < 0 and not self.readDom('xend/unresponsive')):
                        log.info(
                            "Domain shutdown timeout expired: name=%s id=%s",
                            self.info['name_label'], self.domid)
                        self.storeDom('xend/unresponsive', 'True')
        finally:
            self.refresh_shutdown_lock.release()

        # Restart outside the lock; _maybeRestart may re-enter this object.
        if restart_reason and not self.restart_in_progress:
            self.restart_in_progress = True
            threading.Thread(target = self._maybeRestart,
                             args = (restart_reason,)).start()
1968 # Restart functions - handling whether we come back up on shutdown.
1971 def _clearRestart(self):
1972 self._removeDom("xend/shutdown_start_time")
1974 def _maybeDumpCore(self, reason):
1975 if reason == 'crash':
1976 if xoptions.get_enable_dump() or self.get_on_crash() \
1977 in ['coredump_and_destroy', 'coredump_and_restart']:
1978 try:
1979 self.dumpCore()
1980 except XendError:
1981 # This error has been logged -- there's nothing more
1982 # we can do in this context.
1983 pass
1985 def _maybeRestart(self, reason):
1986 # Before taking configured action, dump core if configured to do so.
1988 self._maybeDumpCore(reason)
1990 # Dispatch to the correct method based upon the configured on_{reason}
1991 # behaviour.
1992 actions = {"destroy" : self.destroy,
1993 "restart" : self._restart,
1994 "preserve" : self._preserve,
1995 "rename-restart" : self._renameRestart,
1996 "coredump-destroy" : self.destroy,
1997 "coredump-restart" : self._restart}
1999 action_conf = {
2000 'poweroff': 'actions_after_shutdown',
2001 'reboot': 'actions_after_reboot',
2002 'crash': 'actions_after_crash',
2005 action_target = self.info.get(action_conf.get(reason))
2006 func = actions.get(action_target, None)
2007 if func and callable(func):
2008 func()
2009 else:
2010 self.destroy() # default to destroy
2012 def _renameRestart(self):
2013 self._restart(True)
    def _restart(self, rename = False):
        """Restart the domain after it has exited.

        @param rename True if the old domain is to be renamed and preserved,
        False if it is to be destroyed.
        """
        from xen.xend import XendDomain

        # A still-present RESTART_IN_PROGRESS marker means a previous
        # restart attempt died part-way through; refuse to try again.
        if self._readVm(RESTART_IN_PROGRESS):
            log.error('Xend failed during restart of domain %s. '
                      'Refusing to restart to avoid loops.',
                      str(self.domid))
            self.destroy()
            return

        old_domid = self.domid
        self._writeVm(RESTART_IN_PROGRESS, 'True')

        # Throttle restart loops: a domain that exits almost immediately
        # after starting is destroyed instead of restarted.
        elapse = time.time() - self.info['start_time']
        if elapse < MINIMUM_RESTART_TIME:
            log.error('VM %s restarting too fast (Elapsed time: %f seconds). '
                      'Refusing to restart to avoid loops.',
                      self.info['name_label'], elapse)
            self.destroy()
            return

        prev_vm_xend = self._listRecursiveVm('xend')
        new_dom_info = self.info
        try:
            if rename:
                # Keep the old (dead) domain around under a new name/UUID
                # and build the replacement from the preserved config.
                new_dom_info = self._preserveForRestart()
            else:
                self._unwatchVm()
                self.destroy()

            # new_dom's VM will be the same as this domain's VM, except where
            # the rename flag has instructed us to call preserveForRestart.
            # In that case, it is important that we remove the
            # RESTART_IN_PROGRESS node from the new domain, not the old one,
            # once the new one is available.

            new_dom = None
            try:
                new_dom = XendDomain.instance().domain_create_from_dict(
                    new_dom_info)
                # Carry the old domain's xend/* bookkeeping nodes over to
                # the new VM record.
                for x in prev_vm_xend[0][1]:
                    new_dom._writeVm('xend/%s' % x[0], x[1])
                new_dom.waitForDevices()
                new_dom.unpause()
                # NOTE(review): _readVm may return None if
                # xend/restart_count was never written; int(None) would
                # raise and destroy the new domain below -- confirm the
                # node is always initialised at creation time.
                rst_cnt = new_dom._readVm('xend/restart_count')
                rst_cnt = int(rst_cnt) + 1
                new_dom._writeVm('xend/restart_count', str(rst_cnt))
                new_dom._removeVm(RESTART_IN_PROGRESS)
            except:
                # Clean the marker out of whichever VM record still owns it,
                # then tear down the half-created replacement.
                if new_dom:
                    new_dom._removeVm(RESTART_IN_PROGRESS)
                    new_dom.destroy()
                else:
                    self._removeVm(RESTART_IN_PROGRESS)
                raise
        except:
            log.exception('Failed to restart domain %s.', str(old_domid))
    def _preserveForRestart(self):
        """Preserve a domain that has been shut down, by giving it a new UUID,
        cloning the VM details, and giving it a new name. This allows us to
        keep this domain for debugging, but restart a new one in its place
        preserving the restart semantics (name and UUID preserved).

        @return: a copy of self.info carrying the ORIGINAL name and UUID,
            suitable for creating the replacement domain.
        """
        new_uuid = uuid.createString()
        new_name = 'Domain-%s' % new_uuid
        log.info("Renaming dead domain %s (%d, %s) to %s (%s).",
                 self.info['name_label'], self.domid, self.info['uuid'],
                 new_name, new_uuid)
        self._unwatchVm()
        self._releaseDevices()
        # Remove existing vm node in xenstore
        self._removeVm()
        # The copy keeps the original identity for the replacement; this
        # preserved instance then takes the freshly generated name/UUID.
        new_dom_info = self.info.copy()
        new_dom_info['name_label'] = self.info['name_label']
        new_dom_info['uuid'] = self.info['uuid']
        self.info['name_label'] = new_name
        self.info['uuid'] = new_uuid
        self.vmpath = XS_VMROOT + new_uuid
        # Write out new vm node to xenstore
        self._storeVmDetails()
        self._preserve()
        return new_dom_info
2106 def _preserve(self):
2107 log.info("Preserving dead domain %s (%d).", self.info['name_label'],
2108 self.domid)
2109 self._unwatchVm()
2110 self.storeDom('xend/shutdown_completed', 'True')
2111 self._stateSet(DOM_STATE_HALTED)
2114 # Debugging ..
2117 def dumpCore(self, corefile = None):
2118 """Create a core dump for this domain.
2120 @raise: XendError if core dumping failed.
2121 """
2123 if not corefile:
2124 this_time = time.strftime("%Y-%m%d-%H%M.%S", time.localtime())
2125 corefile = "/var/xen/dump/%s-%s.%s.core" % (this_time,
2126 self.info['name_label'], self.domid)
2128 if os.path.isdir(corefile):
2129 raise XendError("Cannot dump core in a directory: %s" %
2130 corefile)
2132 try:
2133 try:
2134 self._writeVm(DUMPCORE_IN_PROGRESS, 'True')
2135 xc.domain_dumpcore(self.domid, corefile)
2136 except RuntimeError, ex:
2137 corefile_incomp = corefile+'-incomplete'
2138 try:
2139 os.rename(corefile, corefile_incomp)
2140 except:
2141 pass
2143 log.error("core dump failed: id = %s name = %s: %s",
2144 self.domid, self.info['name_label'], str(ex))
2145 raise XendError("Failed to dump core: %s" % str(ex))
2146 finally:
2147 self._removeVm(DUMPCORE_IN_PROGRESS)
2150 # Device creation/deletion functions
2153 def _createDevice(self, deviceClass, devConfig):
2154 return self.getDeviceController(deviceClass).createDevice(devConfig)
2156 def _waitForDevice(self, deviceClass, devid):
2157 return self.getDeviceController(deviceClass).waitForDevice(devid)
2159 def _waitForDeviceUUID(self, dev_uuid):
2160 deviceClass, config = self.info['devices'].get(dev_uuid)
2161 self._waitForDevice(deviceClass, config['devid'])
2163 def _waitForDevice_destroy(self, deviceClass, devid, backpath):
2164 return self.getDeviceController(deviceClass).waitForDevice_destroy(
2165 devid, backpath)
2167 def _reconfigureDevice(self, deviceClass, devid, devconfig):
2168 return self.getDeviceController(deviceClass).reconfigureDevice(
2169 devid, devconfig)
    def _createDevices(self):
        """Create the devices for a vm.

        Non-vscsi devices are created in the configured order; vscsi
        devices are collected first and created sorted by devid.  Finally
        the device model is started and PCI passthrough slot info synced.

        @raise: VmError for invalid devices
        """
        if self.image:
            self.image.prepareEnvironment()

        vscsi_uuidlist = {}
        vscsi_devidlist = []
        ordered_refs = self.info.ordered_device_refs()
        for dev_uuid in ordered_refs:
            devclass, config = self.info['devices'][dev_uuid]
            if devclass in XendDevices.valid_devices() and devclass != 'vscsi':
                log.info("createDevice: %s : %s" % (devclass, scrub_password(config)))
                dev_uuid = config.get('uuid')
                devid = self._createDevice(devclass, config)

                # store devid in XendConfig for caching reasons
                if dev_uuid in self.info['devices']:
                    self.info['devices'][dev_uuid][1]['devid'] = devid

            elif devclass == 'vscsi':
                # Defer vscsi creation; remember devid -> uuid for sorting.
                vscsi_config = config.get('devs', [])[0]
                devid = vscsi_config.get('devid', '')
                dev_uuid = config.get('uuid')
                vscsi_uuidlist[devid] = dev_uuid
                vscsi_devidlist.append(devid)

        # Create vscsi devices sorted by devid so the guest sees a stable
        # /dev/sdxx ordering.
        if len(vscsi_uuidlist) > 0:
            vscsi_devidlist.sort()
            for vscsiid in vscsi_devidlist:
                dev_uuid = vscsi_uuidlist[vscsiid]
                devclass, config = self.info['devices'][dev_uuid]
                log.info("createDevice: %s : %s" % (devclass, scrub_password(config)))
                dev_uuid = config.get('uuid')
                devid = self._createDevice(devclass, config)
                # store devid in XendConfig for caching reasons
                if dev_uuid in self.info['devices']:
                    self.info['devices'][dev_uuid][1]['devid'] = devid

        if self.image:
            self.image.createDeviceModel()

        #if have pass-through devs, need the virtual pci slots info from qemu
        self.sync_pcidev_info()
    def _releaseDevices(self, suspend = False):
        """Release all domain's devices. Nothrow guarantee.

        Tears down the device model (if any), then destroys every device
        listed under this domain's xenstore device/ subtree, swallowing
        and logging any per-device failure.

        @param suspend: accepted for the caller's benefit but unused in
            this body -- NOTE(review): confirm whether it should alter
            teardown behaviour.
        """
        if self.image:
            try:
                log.debug("Destroying device model")
                self.image.destroyDeviceModel()
            except Exception, e:
                log.exception("Device model destroy failed %s" % str(e))
        else:
            log.debug("No device model")

        log.debug("Releasing devices")
        # Transaction is only used to enumerate the device tree; it is
        # always aborted, never committed.
        t = xstransact("%s/device" % self.dompath)
        try:
            for devclass in XendDevices.valid_devices():
                for dev in t.list(devclass):
                    try:
                        true_devclass = devclass
                        if devclass == 'vbd':
                            # In the case of "vbd", the true device class
                            # may possibly be "tap". Just in case, verify
                            # device class.
                            devid = dev.split('/')[-1]
                            true_devclass = self.getBlockDeviceClass(devid)
                        log.debug("Removing %s", dev);
                        self.destroyDevice(true_devclass, dev, False);
                    except:
                        # Log and swallow any exceptions in removal --
                        # there's nothing more we can do.
                        log.exception("Device release failed: %s; %s; %s",
                                      self.info['name_label'],
                                      true_devclass, dev)
        finally:
            t.abort()
2255 def getDeviceController(self, name):
2256 """Get the device controller for this domain, and if it
2257 doesn't exist, create it.
2259 @param name: device class name
2260 @type name: string
2261 @rtype: subclass of DevController
2262 """
2263 if name not in self._deviceControllers:
2264 devController = XendDevices.make_controller(name, self)
2265 if not devController:
2266 raise XendError("Unknown device type: %s" % name)
2267 self._deviceControllers[name] = devController
2269 return self._deviceControllers[name]
2272 # Migration functions (public)
2275 def testMigrateDevices(self, network, dst):
2276 """ Notify all device about intention of migration
2277 @raise: XendError for a device that cannot be migrated
2278 """
2279 for (n, c) in self.info.all_devices_sxpr():
2280 rc = self.migrateDevice(n, c, network, dst, DEV_MIGRATE_TEST, self.getName())
2281 if rc != 0:
2282 raise XendError("Device of type '%s' refuses migration." % n)
    def migrateDevices(self, network, dst, step, domName=''):
        """Notify the devices about migration

        On failure, re-walks the device list calling
        _recoverMigrateDevice: devices that had already completed this
        step (counted by ctr) are recovered at the current step, the
        failing and subsequent devices at the previous step; the original
        exception is then re-raised.
        """
        ctr = 0
        try:
            for (dev_type, dev_conf) in self.info.all_devices_sxpr():
                self.migrateDevice(dev_type, dev_conf, network, dst,
                                   step, domName)
                ctr = ctr + 1
        except:
            for dev_type, dev_conf in self.info.all_devices_sxpr():
                # ctr hits zero exactly at the device that failed; from
                # there on, recover at the previous step.
                if ctr == 0:
                    step = step - 1
                ctr = ctr - 1
                self._recoverMigrateDevice(dev_type, dev_conf, network,
                                           dst, step, domName)
            raise
2302 def migrateDevice(self, deviceClass, deviceConfig, network, dst,
2303 step, domName=''):
2304 return self.getDeviceController(deviceClass).migrate(deviceConfig,
2305 network, dst, step, domName)
2307 def _recoverMigrateDevice(self, deviceClass, deviceConfig, network,
2308 dst, step, domName=''):
2309 return self.getDeviceController(deviceClass).recover_migrate(
2310 deviceConfig, network, dst, step, domName)
2313 ## private:
    def _constructDomain(self):
        """Construct the domain.

        Creates the domain at the hypervisor level (xc.domain_create),
        validates HVM capability / security policy / scheduler parameters /
        VT-d assignability, applies HVM platform parameters, and registers
        the domain with XendDomain.

        @raise: VmError on error
        """

        log.debug('XendDomainInfo.constructDomain')

        self.shutdownStartTime = None
        self.restart_in_progress = False

        hap = 0
        hvm = self.info.is_hvm()
        if hvm:
            hap = self.info.is_hap()
            info = xc.xeninfo()
            if 'hvm' not in info['xen_caps']:
                raise VmError("HVM guest support is unavailable: is VT/AMD-V "
                              "supported by your CPU and enabled in your "
                              "BIOS?")

        # Hack to pre-reserve some memory for initial domain creation.
        # There is an implicit memory overhead for any domain creation. This
        # overhead is greater for some types of domain than others. For
        # example, an x86 HVM domain will have a default shadow-pagetable
        # allocation of 1MB. We free up 4MB here to be on the safe side.
        # 2MB memory allocation was not enough in some cases, so it's 4MB now
        balloon.free(4*1024, self) # 4MB should be plenty

        ssidref = 0
        if security.on() == xsconstants.XS_POLICY_USE:
            ssidref = security.calc_dom_ssidref_from_info(self.info)
            if security.has_authorization(ssidref) == False:
                raise VmError("VM is not authorized to run.")

        s3_integrity = 0
        if self.info.has_key('s3_integrity'):
            s3_integrity = self.info['s3_integrity']
        # Pack the creation flags: bit 0 = HVM, bit 1 = HAP, bit 2 = S3.
        flags = (int(hvm) << 0) | (int(hap) << 1) | (int(s3_integrity) << 2)

        try:
            self.domid = xc.domain_create(
                domid = 0,
                ssidref = ssidref,
                handle = uuid.fromString(self.info['uuid']),
                flags = flags,
                target = self.info.target())
        except Exception, e:
            # may get here if due to ACM the operation is not permitted
            if security.on() == xsconstants.XS_POLICY_ACM:
                raise VmError('Domain in conflict set with running domain?')
            # NOTE(review): non-ACM exceptions are swallowed here; the
            # domid check below then reports only a generic failure --
            # confirm this is intentional.

        if self.domid < 0:
            raise VmError('Creating domain failed: name=%s' %
                          self.info['name_label'])

        self.dompath = GetDomainPath(self.domid)

        self._recreateDom()

        # Set timer configuration of domain
        timer_mode = self.info["platform"].get("timer_mode")
        if hvm and timer_mode is not None:
            xc.hvm_set_param(self.domid, HVM_PARAM_TIMER_MODE,
                             long(timer_mode))

        # Set Viridian interface configuration of domain
        viridian = self.info["platform"].get("viridian")
        if arch.type == "x86" and hvm and viridian is not None:
            xc.hvm_set_param(self.domid, HVM_PARAM_VIRIDIAN, long(viridian))

        # Optionally enable virtual HPET
        hpet = self.info["platform"].get("hpet")
        if hvm and hpet is not None:
            xc.hvm_set_param(self.domid, HVM_PARAM_HPET_ENABLED,
                             long(hpet))

        # Optionally enable periodic vpt aligning
        vpt_align = self.info["platform"].get("vpt_align")
        if hvm and vpt_align is not None:
            xc.hvm_set_param(self.domid, HVM_PARAM_VPT_ALIGN,
                             long(vpt_align))

        # Set maximum number of vcpus in domain
        xc.domain_max_vcpus(self.domid, int(self.info['VCPUs_max']))

        # Check for cpu_{cap|weight} validity for credit scheduler
        if XendNode.instance().xenschedinfo() == 'credit':
            cap = self.getCap()
            weight = self.getWeight()

            assert type(weight) == int
            assert type(cap) == int

            if weight < 1 or weight > 65535:
                raise VmError("Cpu weight out of range, valid values are within range from 1 to 65535")

            if cap < 0 or cap > self.getVCpuCount() * 100:
                raise VmError("Cpu cap out of range, valid range is from 0 to %s for specified number of vcpus" %
                              (self.getVCpuCount() * 100))

        # Test whether the devices can be assigned with VT-d
        pci = self.info["platform"].get("pci")
        pci_str = ''
        if pci and len(pci) > 0:
            pci = map(lambda x: x[0:4], pci)  # strip options
            pci_str = str(pci)
        if hvm and pci_str:
            bdf = xc.test_assign_device(0, pci_str)
            if bdf != 0:
                if bdf == -1:
                    raise VmError("failed to assign device: maybe the platform"
                                  " doesn't support VT-d, or VT-d isn't enabled"
                                  " properly?")
                # Decode the failing device's bus/dev/func from the packed
                # BDF value returned by the hypervisor.
                bus = (bdf >> 16) & 0xff
                devfn = (bdf >> 8) & 0xff
                dev = (devfn >> 3) & 0x1f
                func = devfn & 0x7
                raise VmError("fail to assign device(%x:%x.%x): maybe it has"
                              " already been assigned to other domain, or maybe"
                              " it doesn't exist." % (bus, dev, func))

        # register the domain in the list
        from xen.xend import XendDomain
        XendDomain.instance().add_domain(self)
2441 def _introduceDomain(self):
2442 assert self.domid is not None
2443 assert self.store_mfn is not None
2444 assert self.store_port is not None
2446 try:
2447 IntroduceDomain(self.domid, self.store_mfn, self.store_port)
2448 except RuntimeError, exn:
2449 raise XendError(str(exn))
2451 def _setTarget(self, target):
2452 assert self.domid is not None
2454 try:
2455 SetTarget(self.domid, target)
2456 self.storeDom('target', target)
2457 except RuntimeError, exn:
2458 raise XendError(str(exn))
    def _setCPUAffinity(self):
        """ Repin domain vcpus if a restricted cpus list is provided.

        With an explicit cpus list, each vcpu is pinned to its configured
        mask.  Otherwise, on a multi-node (NUMA) host, the least-loaded
        node with enough free memory is chosen and every vcpu is pinned
        to that node's cpus.
        """

        def has_cpus():
            # True if any vcpu has a non-empty configured cpu mask.
            if self.info['cpus'] is not None:
                for c in self.info['cpus']:
                    if c:
                        return True
            return False

        if has_cpus():
            for v in range(0, self.info['VCPUs_max']):
                if self.info['cpus'][v]:
                    xc.vcpu_setaffinity(self.domid, v, self.info['cpus'][v])
        else:
            def find_relaxed_node(node_list):
                # Pick the candidate node with the lowest vcpu load,
                # weighting each node's load by 16/(cpus on node).
                # Closes over `info` (xc.physinfo() result) below.
                import sys
                nr_nodes = info['nr_nodes']
                if node_list is None:
                    node_list = range(0, nr_nodes)
                nodeload = [0]
                nodeload = nodeload * nr_nodes
                from xen.xend import XendDomain
                doms = XendDomain.instance().list('all')
                # Count how many online vcpus of OTHER domains can run on
                # each node's cpus.
                for dom in filter (lambda d: d.domid != self.domid, doms):
                    cpuinfo = dom.getVCPUInfo()
                    for vcpu in sxp.children(cpuinfo, 'vcpu'):
                        if sxp.child_value(vcpu, 'online') == 0: continue
                        cpumap = list(sxp.child_value(vcpu,'cpumap'))
                        for i in range(0, nr_nodes):
                            node_cpumask = info['node_to_cpu'][i]
                            for j in node_cpumask:
                                if j in cpumap:
                                    nodeload[i] += 1
                                    break
                for i in range(0, nr_nodes):
                    if len(info['node_to_cpu'][i]) > 0 and i in node_list:
                        nodeload[i] = int(nodeload[i] * 16 / len(info['node_to_cpu'][i]))
                    else:
                        # Non-candidate or cpu-less nodes are never chosen.
                        nodeload[i] = sys.maxint
                index = nodeload.index( min(nodeload) )
                return index

            info = xc.physinfo()
            if info['nr_nodes'] > 1:
                node_memory_list = info['node_to_memory']
                # Memory the domain needs, in the same units as
                # node_to_memory (KiB -- memory_dynamic_max is in bytes).
                needmem = self.image.getRequiredAvailableMemory(self.info['memory_dynamic_max']) / 1024
                candidate_node_list = []
                for i in range(0, info['nr_nodes']):
                    if node_memory_list[i] >= needmem and len(info['node_to_cpu'][i]) > 0:
                        candidate_node_list.append(i)
                index = find_relaxed_node(candidate_node_list)
                cpumask = info['node_to_cpu'][index]
                for v in range(0, self.info['VCPUs_max']):
                    xc.vcpu_setaffinity(self.domid, v, cpumask)
    def _initDomain(self):
        """Build the domain after construction: run the bootloader, create
        the image, size memory/shadow, create channels and devices, and
        mark the domain running.

        @raise VmError: on configuration errors or hypervisor failures.
        """
        log.debug('XendDomainInfo.initDomain: %s %s',
                  self.domid,
                  self.info['vcpus_params']['weight'])

        self._configureBootloader()

        try:
            self.image = image.create(self, self.info)

            # repin domain vcpus if a restricted cpus list is provided
            # this is done prior to memory allocation to aide in memory
            # distribution for NUMA systems.
            self._setCPUAffinity()

            # Use architecture- and image-specific calculations to determine
            # the various headrooms necessary, given the raw configured
            # values. maxmem, memory, and shadow are all in KiB.
            # but memory_static_max etc are all stored in bytes now.
            memory = self.image.getRequiredAvailableMemory(
                self.info['memory_dynamic_max'] / 1024)
            maxmem = self.image.getRequiredAvailableMemory(
                self.info['memory_static_max'] / 1024)
            shadow = self.image.getRequiredShadowMemory(
                self.info['shadow_memory'] * 1024,
                self.info['memory_static_max'] / 1024)

            log.debug("_initDomain:shadow_memory=0x%x, memory_static_max=0x%x, memory_static_min=0x%x.", self.info['shadow_memory'], self.info['memory_static_max'], self.info['memory_static_min'],)
            # Round shadow up to a multiple of a MiB, as shadow_mem_control
            # takes MiB and we must not round down and end up under-providing.
            shadow = ((shadow + 1023) / 1024) * 1024

            # set memory limit
            xc.domain_setmaxmem(self.domid, maxmem)

            # Reserve 1 page per MiB of RAM for separate VT-d page table.
            vtd_mem = 4 * (self.info['memory_static_max'] / 1024 / 1024)
            # Round vtd_mem up to a multiple of a MiB.
            vtd_mem = ((vtd_mem + 1023) / 1024) * 1024

            # Make sure there's enough RAM available for the domain
            balloon.free(memory + shadow + vtd_mem, self)

            # Set up the shadow memory
            shadow_cur = xc.shadow_mem_control(self.domid, shadow / 1024)
            self.info['shadow_memory'] = shadow_cur

            # machine address size
            if self.info.has_key('machine_address_size'):
                log.debug("_initDomain: setting maximum machine address size %d" % self.info['machine_address_size'])
                xc.domain_set_machine_address_size(self.domid, self.info['machine_address_size'])

            if self.info.has_key('suppress_spurious_page_faults') and self.info['suppress_spurious_page_faults']:
                log.debug("_initDomain: suppressing spurious page faults")
                xc.domain_suppress_spurious_page_faults(self.domid)

            self._createChannels()

            # Build the actual memory image; returns the channel pages and
            # optional metadata produced by the image builder.
            channel_details = self.image.createImage()

            self.store_mfn = channel_details['store_mfn']
            if 'console_mfn' in channel_details:
                self.console_mfn = channel_details['console_mfn']
            if 'notes' in channel_details:
                self.info.set_notes(channel_details['notes'])
            if 'native_protocol' in channel_details:
                self.native_protocol = channel_details['native_protocol'];

            self._introduceDomain()
            if self.info.target():
                self._setTarget(self.info.target())

            self._createDevices()

            self.image.cleanupBootloading()

            self.info['start_time'] = time.time()

            self._stateSet(DOM_STATE_RUNNING)
        except VmError, exn:
            log.exception("XendDomainInfo.initDomain: exception occurred")
            if self.image:
                self.image.cleanupBootloading()
            raise exn
        except RuntimeError, exn:
            # Wrap low-level failures in VmError for callers.
            log.exception("XendDomainInfo.initDomain: exception occurred")
            if self.image:
                self.image.cleanupBootloading()
            raise VmError(str(exn))
    def cleanupDomain(self):
        """Cleanup domain resources; release devices. Idempotent. Nothrow
        guarantee."""

        # Serialise against refreshShutdown() running from the watch thread.
        self.refresh_shutdown_lock.acquire()
        try:
            self.unwatchShutdown()
            self._releaseDevices()
            bootloader_tidy(self)

            if self.image:
                self.image = None

            try:
                self._removeDom()
            except:
                log.exception("Removing domain path failed.")

            self._stateSet(DOM_STATE_HALTED)
            self.domid = None  # Do not push into _stateSet()!
        finally:
            self.refresh_shutdown_lock.release()
2634 def unwatchShutdown(self):
2635 """Remove the watch on the domain's control/shutdown node, if any.
2636 Idempotent. Nothrow guarantee. Expects to be protected by the
2637 refresh_shutdown_lock."""
2639 try:
2640 try:
2641 if self.shutdownWatch:
2642 self.shutdownWatch.unwatch()
2643 finally:
2644 self.shutdownWatch = None
2645 except:
2646 log.exception("Unwatching control/shutdown failed.")
2648 def waitForShutdown(self):
2649 self.state_updated.acquire()
2650 try:
2651 while self._stateGet() in (DOM_STATE_RUNNING,DOM_STATE_PAUSED):
2652 self.state_updated.wait(timeout=1.0)
2653 finally:
2654 self.state_updated.release()
    def waitForSuspend(self):
        """Wait for the guest to respond to a suspend request by
        shutting down. If the guest hasn't re-written control/shutdown
        after a certain amount of time, it's obviously not listening and
        won't suspend, so we give up. HVM guests with no PV drivers
        should already be shutdown.

        @raise XendError: if the guest has not acknowledged the request
            after ~60 polls of one second each.
        """
        state = "suspend"
        # Roughly 60 seconds at one wait() per try.
        nr_tries = 60

        self.state_updated.acquire()
        try:
            while self._stateGet() in (DOM_STATE_RUNNING,DOM_STATE_PAUSED):
                self.state_updated.wait(1.0)
                # A listening guest clears control/shutdown; as long as it
                # still reads "suspend" the request is unacknowledged.
                if state == "suspend":
                    if nr_tries == 0:
                        msg = ('Timeout waiting for domain %s to suspend'
                            % self.domid)
                        # Withdraw the request before giving up.
                        self._writeDom('control/shutdown', '')
                        raise XendError(msg)
                    state = self.readDom('control/shutdown')
                    nr_tries -= 1
        finally:
            self.state_updated.release()
2682 # TODO: recategorise - called from XendCheckpoint
2685 def completeRestore(self, store_mfn, console_mfn):
2687 log.debug("XendDomainInfo.completeRestore")
2689 self.store_mfn = store_mfn
2690 self.console_mfn = console_mfn
2692 self._introduceDomain()
2693 self.image = image.create(self, self.info)
2694 if self.image:
2695 self.image.createDeviceModel(True)
2696 self._storeDomDetails()
2697 self._registerWatches()
2698 self.refreshShutdown()
2700 log.debug("XendDomainInfo.completeRestore done")
2703 def _endRestore(self):
2704 self.setResume(False)
2707 # VM Destroy
    def _prepare_phantom_paths(self):
        """Collect the xenstore paths of phantom vbd devices.

        Phantom devices must be removed only after the normal devices, so
        their backend/frontend paths are gathered here and handed to
        _cleanup_phantom_devs() later.

        @return: list of backend and frontend phantom-vbd paths (possibly
            empty).
        """
        # get associated devices to destroy
        # build list of phantom devices to be removed after normal devices
        plist = []
        if self.domid is not None:
            # Transaction is only used for enumeration and always aborted.
            t = xstransact("%s/device/vbd" % GetDomainPath(self.domid))
            try:
                for dev in t.list():
                    backend_phantom_vbd = xstransact.Read("%s/device/vbd/%s/phantom_vbd" \
                                          % (self.dompath, dev))
                    if backend_phantom_vbd is not None:
                        frontend_phantom_vbd = xstransact.Read("%s/frontend" \
                                          % backend_phantom_vbd)
                        plist.append(backend_phantom_vbd)
                        plist.append(frontend_phantom_vbd)
            finally:
                t.abort()
        return plist
2729 def _cleanup_phantom_devs(self, plist):
2730 # remove phantom devices
2731 if not plist == []:
2732 time.sleep(2)
2733 for paths in plist:
2734 if paths.find('backend') != -1:
2735 # Modify online status /before/ updating state (latter is watched by
2736 # drivers, so this ordering avoids a race).
2737 xstransact.Write(paths, 'online', "0")
2738 xstransact.Write(paths, 'state', str(xenbusState['Closing']))
2739 # force
2740 xstransact.Remove(paths)
    def destroy(self):
        """Cleanup VM and destroy domain. Nothrow guarantee."""

        # Already destroyed (or never created): nothing to do.
        if self.domid is None:
            return

        from xen.xend import XendDomain
        log.debug("XendDomainInfo.destroy: domid=%s", str(self.domid))

        # Gather phantom-vbd paths first; they are removed after the
        # normal teardown below.
        paths = self._prepare_phantom_paths()

        if self.dompath is not None:
            try:
                # Pause first, reset passed-through PCI devices (FLR),
                # then destroy at the hypervisor level.
                xc.domain_destroy_hook(self.domid)
                xc.domain_pause(self.domid)
                do_FLR(self.domid)
                xc.domain_destroy(self.domid)
                for state in DOM_STATES_OLD:
                    self.info[state] = 0
                self._stateSet(DOM_STATE_HALTED)
            except:
                log.exception("XendDomainInfo.destroy: domain destruction failed.")

            XendDomain.instance().remove_domain(self)
            self.cleanupDomain()

        self._cleanup_phantom_devs(paths)
        self._cleanupVm()

        # Transient domains are deleted entirely once destroyed.
        if "transient" in self.info["other_config"] \
           and bool(self.info["other_config"]["transient"]):
            XendDomain.instance().domain_delete_by_dominfo(self)
    def resetDomain(self):
        """Hard-reset the domain: destroy it and recreate a new instance
        from the same configuration, preserving the VM's xend/* nodes.
        Failures are logged, not raised."""
        log.debug("XendDomainInfo.resetDomain(%s)", str(self.domid))

        old_domid = self.domid
        # Snapshot the VM's xend/* bookkeeping before destruction.
        prev_vm_xend = self._listRecursiveVm('xend')
        new_dom_info = self.info
        try:
            self._unwatchVm()
            self.destroy()

            new_dom = None
            try:
                from xen.xend import XendDomain
                # Clear the stale domid so creation allocates a fresh one.
                new_dom_info['domid'] = None
                new_dom = XendDomain.instance().domain_create_from_dict(
                    new_dom_info)
                for x in prev_vm_xend[0][1]:
                    new_dom._writeVm('xend/%s' % x[0], x[1])
                new_dom.waitForDevices()
                new_dom.unpause()
            except:
                # Tear down a half-created replacement before re-raising.
                if new_dom:
                    new_dom.destroy()
                raise
        except:
            log.exception('Failed to reset domain %s.', str(old_domid))
    def resumeDomain(self):
        log.debug("XendDomainInfo.resumeDomain(%s)", str(self.domid))

        # resume a suspended domain (e.g. after live checkpoint, or after
        # a later error during save or migrate); checks that the domain
        # is currently suspended first so safe to call from anywhere

        xeninfo = dom_get(self.domid)
        if xeninfo is None:
            return
        if not xeninfo['shutdown']:
            return
        reason = shutdown_reason(xeninfo['shutdown_reason'])
        if reason != 'suspend':
            return

        try:
            # could also fetch a parsed note from xenstore
            # Guests advertising SUSPEND_CANCEL resume in place ("fast");
            # otherwise devices and channels must be torn down and rebuilt.
            fast = self.info.get_notes().get('SUSPEND_CANCEL') and 1 or 0
            if not fast:
                self._releaseDevices()
                self.testDeviceComplete()
                self.testvifsComplete()
                log.debug("XendDomainInfo.resumeDomain: devices released")

                self._resetChannels()

                self._removeDom('control/shutdown')
                self._removeDom('device-misc/vif/nextDeviceID')

                self._createChannels()
                self._introduceDomain()
                self._storeDomDetails()

                self._createDevices()
                log.debug("XendDomainInfo.resumeDomain: devices created")

            xc.domain_resume(self.domid, fast)
            ResumeDomain(self.domid)
        except:
            log.exception("XendDomainInfo.resume: xc.domain_resume failed on domain %s." % (str(self.domid)))
        # Restart the device model regardless of how the resume went.
        self.image.resumeDeviceModel()
        log.debug("XendDomainInfo.resumeDomain: completed")
2850 # Channels for xenstore and console
2853 def _createChannels(self):
2854 """Create the channels to the domain.
2855 """
2856 self.store_port = self._createChannel()
2857 self.console_port = self._createChannel()
2860 def _createChannel(self):
2861 """Create an event channel to the domain.
2862 """
2863 try:
2864 if self.domid != None:
2865 return xc.evtchn_alloc_unbound(domid = self.domid,
2866 remote_dom = 0)
2867 except:
2868 log.exception("Exception in alloc_unbound(%s)", str(self.domid))
2869 raise
2871 def _resetChannels(self):
2872 """Reset all event channels in the domain.
2873 """
2874 try:
2875 if self.domid != None:
2876 return xc.evtchn_reset(dom = self.domid)
2877 except:
2878 log.exception("Exception in evtcnh_reset(%s)", str(self.domid))
2879 raise
2883 # Bootloader configuration
    def _configureBootloader(self):
        """Run the bootloader if we're configured to do so.

        For HVM boot or a dom0-supplied kernel, nothing is done.
        Otherwise the configured bootloader (default pygrub) is run
        against the first bootable disk, mounting QCOW-style blktap
        images on a loopback device first, and the resulting kernel
        config is merged into self.info.

        @raise VmError: if no disk is bootable or the bootloader yields
            no configuration.
        """

        blexec = self.info['PV_bootloader']
        bootloader_args = self.info['PV_bootloader_args']
        kernel = self.info['PV_kernel']
        ramdisk = self.info['PV_ramdisk']
        args = self.info['PV_args']
        boot = self.info['HVM_boot_policy']

        if boot:
            # HVM booting.
            pass
        elif not blexec and kernel:
            # Boot from dom0. Nothing left to do -- the kernel and ramdisk
            # will be picked up by image.py.
            pass
        else:
            # Boot using bootloader
            if not blexec or blexec == 'pygrub':
                blexec = auxbin.pathTo('pygrub')

            blcfg = None
            disks = [x for x in self.info['vbd_refs']
                     if self.info['devices'][x][1]['bootable']]

            if not disks:
                msg = "Had a bootloader specified, but no disks are bootable"
                log.error(msg)
                raise VmError(msg)

            devinfo = self.info['devices'][disks[0]]
            devtype = devinfo[0]
            disk = devinfo[1]['uname']

            fn = blkdev_uname_to_file(disk)
            taptype = blkdev_uname_to_taptype(disk)
            # A tap device backed by a plain file (st_rdev == 0) in a
            # non-raw format needs mounting before pygrub can read it.
            mounted = devtype == 'tap' and taptype != 'aio' and taptype != 'sync' and not os.stat(fn).st_rdev
            if mounted:
                # This is a file, not a device. pygrub can cope with a
                # file if it's raw, but if it's QCOW or other such formats
                # used through blktap, then we need to mount it first.

                log.info("Mounting %s on %s." %
                         (fn, BOOTLOADER_LOOPBACK_DEVICE))

                vbd = {
                    'mode': 'RO',
                    'device': BOOTLOADER_LOOPBACK_DEVICE,
                    }

                from xen.xend import XendDomain
                dom0 = XendDomain.instance().privilegedDomain()
                dom0._waitForDeviceUUID(dom0.create_vbd(vbd, disk))
                fn = BOOTLOADER_LOOPBACK_DEVICE

            try:
                blcfg = bootloader(blexec, fn, self, False,
                                   bootloader_args, kernel, ramdisk, args)
            finally:
                # Always unmount the loopback device if we mounted it.
                if mounted:
                    log.info("Unmounting %s from %s." %
                             (fn, BOOTLOADER_LOOPBACK_DEVICE))

                    dom0.destroyDevice('tap', BOOTLOADER_LOOPBACK_DEVICE)

            if blcfg is None:
                msg = "Had a bootloader specified, but can't find disk"
                log.error(msg)
                raise VmError(msg)

            # Merge the bootloader's chosen kernel/ramdisk/args into the
            # domain configuration.
            self.info.update_with_image_sxp(blcfg, True)
2961 # VM Functions
2964 def _readVMDetails(self, params):
2965 """Read the specified parameters from the store.
2966 """
2967 try:
2968 return self._gatherVm(*params)
2969 except ValueError:
2970 # One of the int/float entries in params has a corresponding store
2971 # entry that is invalid. We recover, because older versions of
2972 # Xend may have put the entry there (memory/target, for example),
2973 # but this is in general a bad situation to have reached.
2974 log.exception(
2975 "Store corrupted at %s! Domain %d's configuration may be "
2976 "affected.", self.vmpath, self.domid)
2977 return []
2979 def _cleanupVm(self):
2980 """Cleanup VM resources. Idempotent. Nothrow guarantee."""
2982 self._unwatchVm()
2984 try:
2985 self._removeVm()
2986 except:
2987 log.exception("Removing VM path failed.")
    def checkLiveMigrateMemory(self):
        """ Make sure there's enough memory to migrate this domain """
        # No overhead is charged on non-x86 architectures.
        overhead_kb = 0
        if arch.type == "x86":
            # 1MB per vcpu plus 4Kib/Mib of RAM.  This is higher than
            # the minimum that Xen would allocate if no value were given.
            overhead_kb = self.info['VCPUs_max'] * 1024 + \
                          (self.info['memory_static_max'] / 1024 / 1024) * 4
            # Round up to a whole number of MiB (relies on Python 2
            # truncating integer division).
            overhead_kb = ((overhead_kb + 1023) / 1024) * 1024
            # The domain might already have some shadow memory
            overhead_kb -= xc.shadow_mem_control(self.domid) * 1024
        # Balloon free memory out of dom0 for whatever is still needed.
        if overhead_kb > 0:
            balloon.free(overhead_kb, self)
3004 def _unwatchVm(self):
3005 """Remove the watch on the VM path, if any. Idempotent. Nothrow
3006 guarantee."""
3007 try:
3008 try:
3009 if self.vmWatch:
3010 self.vmWatch.unwatch()
3011 finally:
3012 self.vmWatch = None
3013 except:
3014 log.exception("Unwatching VM path failed.")
3016 def testDeviceComplete(self):
3017 """ For Block IO migration safety we must ensure that
3018 the device has shutdown correctly, i.e. all blocks are
3019 flushed to disk
3020 """
3021 start = time.time()
3022 while True:
3023 test = 0
3024 diff = time.time() - start
3025 vbds = self.getDeviceController('vbd').deviceIDs()
3026 taps = self.getDeviceController('tap').deviceIDs()
3027 for i in vbds + taps:
3028 test = 1
3029 log.info("Dev %s still active, looping...", i)
3030 time.sleep(0.1)
3032 if test == 0:
3033 break
3034 if diff >= MIGRATE_TIMEOUT:
3035 log.info("Dev still active but hit max loop timeout")
3036 break
3038 def testvifsComplete(self):
3039 """ In case vifs are released and then created for the same
3040 domain, we need to wait the device shut down.
3041 """
3042 start = time.time()
3043 while True:
3044 test = 0
3045 diff = time.time() - start
3046 for i in self.getDeviceController('vif').deviceIDs():
3047 test = 1
3048 log.info("Dev %s still active, looping...", i)
3049 time.sleep(0.1)
3051 if test == 0:
3052 break
3053 if diff >= MIGRATE_TIMEOUT:
3054 log.info("Dev still active but hit max loop timeout")
3055 break
3057 def _storeVmDetails(self):
3058 to_store = {}
3060 for key in XendConfig.LEGACY_XENSTORE_VM_PARAMS:
3061 info_key = XendConfig.LEGACY_CFG_TO_XENAPI_CFG.get(key, key)
3062 if self._infoIsSet(info_key):
3063 to_store[key] = str(self.info[info_key])
3065 if self._infoIsSet("static_memory_min"):
3066 to_store["memory"] = str(self.info["static_memory_min"])
3067 if self._infoIsSet("static_memory_max"):
3068 to_store["maxmem"] = str(self.info["static_memory_max"])
3070 image_sxpr = self.info.image_sxpr()
3071 if image_sxpr:
3072 to_store['image'] = sxp.to_string(image_sxpr)
3074 if not self._readVm('xend/restart_count'):
3075 to_store['xend/restart_count'] = str(0)
3077 log.debug("Storing VM details: %s", scrub_password(to_store))
3079 self._writeVm(to_store)
3080 self._setVmPermissions()
3082 def _setVmPermissions(self):
3083 """Allow the guest domain to read its UUID. We don't allow it to
3084 access any other entry, for security."""
3085 xstransact.SetPermissions('%s/uuid' % self.vmpath,
3086 { 'dom' : self.domid,
3087 'read' : True,
3088 'write' : False })
3091 # Utility functions
3094 def __getattr__(self, name):
3095 if name == "state":
3096 log.warn("Somebody tried to read XendDomainInfo.state... should us _stateGet()!!!")
3097 log.warn("".join(traceback.format_stack()))
3098 return self._stateGet()
3099 else:
3100 raise AttributeError(name)
3102 def __setattr__(self, name, value):
3103 if name == "state":
3104 log.warn("Somebody tried to set XendDomainInfo.state... should us _stateGet()!!!")
3105 log.warn("".join(traceback.format_stack()))
3106 self._stateSet(value)
3107 else:
3108 self.__dict__[name] = value
    def _stateSet(self, state):
        """Record a new power state: wake anyone waiting on state_updated
        and fire a Xen-API 'power_state' event, but only if the state
        actually changed."""
        self.state_updated.acquire()
        try:
            # TODO Not sure this is correct...
            # _stateGet is live now. Why not fire event
            # even when it hasn't changed?
            if self._stateGet() != state:
                self.state_updated.notifyAll()
                # Local import -- presumably to avoid an import cycle at
                # module load time; TODO confirm.
                import XendAPI
                XendAPI.event_dispatch('mod', 'VM', self.info['uuid'],
                                       'power_state')
        finally:
            self.state_updated.release()
3124 def _stateGet(self):
3125 # Lets try and reconsitute the state from xc
3126 # first lets try and get the domain info
3127 # from xc - this will tell us if the domain
3128 # exists
3129 info = dom_get(self.getDomid())
3130 if info is None or info['shutdown']:
3131 # We are either HALTED or SUSPENDED
3132 # check saved image exists
3133 from xen.xend import XendDomain
3134 managed_config_path = \
3135 XendDomain.instance()._managed_check_point_path( \
3136 self.get_uuid())
3137 if os.path.exists(managed_config_path):
3138 return XEN_API_VM_POWER_STATE_SUSPENDED
3139 else:
3140 return XEN_API_VM_POWER_STATE_HALTED
3141 elif info['crashed']:
3142 # Crashed
3143 return XEN_API_VM_POWER_STATE_CRASHED
3144 else:
3145 # We are either RUNNING or PAUSED
3146 if info['paused']:
3147 return XEN_API_VM_POWER_STATE_PAUSED
3148 else:
3149 return XEN_API_VM_POWER_STATE_RUNNING
3151 def _infoIsSet(self, name):
3152 return name in self.info and self.info[name] is not None
3154 def _checkName(self, name):
3155 """Check if a vm name is valid. Valid names contain alphabetic
3156 characters, digits, or characters in '_-.:/+'.
3157 The same name cannot be used for more than one vm at the same time.
3159 @param name: name
3160 @raise: VmError if invalid
3161 """
3162 from xen.xend import XendDomain
3164 if name is None or name == '':
3165 raise VmError('Missing VM Name')
3167 if not re.search(r'^[A-Za-z0-9_\-\.\:\/\+]+$', name):
3168 raise VmError('Invalid VM Name')
3170 dom = XendDomain.instance().domain_lookup_nr(name)
3171 if dom and dom.info['uuid'] != self.info['uuid']:
3172 raise VmError("VM name '%s' already exists%s" %
3173 (name,
3174 dom.domid is not None and
3175 (" as domain %s" % str(dom.domid)) or ""))
3178 def update(self, info = None, refresh = True, transaction = None):
3179 """Update with info from xc.domain_getinfo().
3180 """
3181 log.trace("XendDomainInfo.update(%s) on domain %s", info,
3182 str(self.domid))
3184 if not info:
3185 info = dom_get(self.domid)
3186 if not info:
3187 return
3189 if info["maxmem_kb"] < 0:
3190 info["maxmem_kb"] = XendNode.instance() \
3191 .physinfo_dict()['total_memory'] * 1024
3193 # make sure state is reset for info
3194 # TODO: we should eventually get rid of old_dom_states
3196 self.info.update_config(info)
3197 self._update_consoles(transaction)
3199 if refresh:
3200 self.refreshShutdown(info)
3202 log.trace("XendDomainInfo.update done on domain %s: %s",
3203 str(self.domid), self.info)
3205 def sxpr(self, ignore_store = False, legacy_only = True):
3206 result = self.info.to_sxp(domain = self,
3207 ignore_devices = ignore_store,
3208 legacy_only = legacy_only)
3210 return result
3212 # Xen API
3213 # ----------------------------------------------------------------
3215 def get_uuid(self):
3216 dom_uuid = self.info.get('uuid')
3217 if not dom_uuid: # if it doesn't exist, make one up
3218 dom_uuid = uuid.createString()
3219 self.info['uuid'] = dom_uuid
3220 return dom_uuid
    # Memory limit accessors; each returns 0 when the value is unset.
    def get_memory_static_max(self):
        return self.info.get('memory_static_max', 0)
    def get_memory_static_min(self):
        return self.info.get('memory_static_min', 0)
    def get_memory_dynamic_max(self):
        return self.info.get('memory_dynamic_max', 0)
    def get_memory_dynamic_min(self):
        return self.info.get('memory_dynamic_min', 0)
3231 # only update memory-related config values if they maintain sanity
3232 def _safe_set_memory(self, key, newval):
3233 oldval = self.info.get(key, 0)
3234 try:
3235 self.info[key] = newval
3236 self.info._memory_sanity_check()
3237 except Exception, ex:
3238 self.info[key] = oldval
3239 raise
    # Memory limit setters, guarded by _safe_set_memory: an invalid
    # combination is rejected and the previous value restored.
    def set_memory_static_max(self, val):
        self._safe_set_memory('memory_static_max', val)
    def set_memory_static_min(self, val):
        self._safe_set_memory('memory_static_min', val)
    def set_memory_dynamic_max(self, val):
        self._safe_set_memory('memory_dynamic_max', val)
    def set_memory_dynamic_min(self, val):
        self._safe_set_memory('memory_dynamic_min', val)
3250 def get_vcpus_params(self):
3251 if self.getDomid() is None:
3252 return self.info['vcpus_params']
3254 retval = xc.sched_credit_domain_get(self.getDomid())
3255 return retval
    def get_power_state(self):
        # Map the live state index onto the Xen-API power-state name.
        return XEN_API_VM_POWER_STATE[self._stateGet()]
    def get_platform(self):
        # Platform configuration dict; empty when unset.
        return self.info.get('platform', {})
    def get_pci_bus(self):
        return self.info.get('pci_bus', '')
    def get_tools_version(self):
        return self.info.get('tools_version', {})
3264 def get_metrics(self):
3265 return self.metrics.get_uuid();
3268 def get_security_label(self, xspol=None):
3269 import xen.util.xsm.xsm as security
3270 label = security.get_security_label(self, xspol)
3271 return label
    def set_security_label(self, seclab, old_seclab, xspol=None,
                           xspol_old=None):
        """Change the security label of this domain.

        @param seclab: new security label, formatted as
                       <policy type>:<policy name>:<vm label>; empty or
                       None requests removal of the current label
        @param old_seclab: the label the VM is expected to have now
        @param xspol: optional policy under which to perform the update;
                      defaults to the current active policy
        @param xspol_old: the old policy; only passed while a policy
                          itself is being updated
        @return: tuple (rc, hypervisor error string, old label,
                 new ssidref)
        """
        rc = 0
        errors = ""
        old_label = ""
        new_ssidref = 0
        domid = self.getDomid()
        res_labels = None
        # A non-None old policy means we are relabelling as part of a
        # policy update, which skips the old-label consistency check.
        is_policy_update = (xspol_old != None)

        from xen.xend.XendXSPolicyAdmin import XSPolicyAdminInstance

        state = self._stateGet()
        # Relabel only HALTED or RUNNING or PAUSED domains
        if domid != 0 and \
           state not in \
              [ DOM_STATE_HALTED, DOM_STATE_RUNNING, DOM_STATE_PAUSED, \
                DOM_STATE_SUSPENDED ]:
            log.warn("Relabeling domain not possible in state '%s'" %
                     DOM_STATES[state])
            return (-xsconstants.XSERR_VM_WRONG_STATE, "", "", 0)

        # Remove security label. Works only for halted or suspended domains
        if not seclab or seclab == "":
            if state not in [ DOM_STATE_HALTED, DOM_STATE_SUSPENDED ]:
                return (-xsconstants.XSERR_VM_WRONG_STATE, "", "", 0)

            if self.info.has_key('security_label'):
                old_label = self.info['security_label']
                # Check label against expected one.
                if old_label != old_seclab:
                    return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)
                del self.info['security_label']
                xen.xend.XendDomain.instance().managed_config_save(self)
            return (xsconstants.XSERR_SUCCESS, "", "", 0)

        # The new label must have exactly three ':'-separated components.
        tmp = seclab.split(":")
        if len(tmp) != 3:
            return (-xsconstants.XSERR_BAD_LABEL_FORMAT, "", "", 0)
        typ, policy, label = tmp

        poladmin = XSPolicyAdminInstance()
        if not xspol:
            xspol = poladmin.get_policy_by_name(policy)

        try:
            # Hold the policy write lock for the whole relabel operation.
            xen.xend.XendDomain.instance().policy_lock.acquire_writer()

            if state in [ DOM_STATE_RUNNING, DOM_STATE_PAUSED ]:
                #if domain is running or paused try to relabel in hypervisor
                if not xspol:
                    return (-xsconstants.XSERR_POLICY_NOT_LOADED, "", "", 0)

                if typ != xspol.get_type_name() or \
                   policy != xspol.get_name():
                    return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)

                if typ == xsconstants.ACM_POLICY_ID:
                    new_ssidref = xspol.vmlabel_to_ssidref(label)
                    if new_ssidref == xsconstants.INVALID_SSIDREF:
                        return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)

                    # Check that all used resources are accessible under the
                    # new label
                    if not is_policy_update and \
                       not security.resources_compatible_with_vmlabel(xspol,
                              self, label):
                        return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)

                    #Check label against expected one. Can only do this
                    # if the policy hasn't changed underneath in the meantime
                    if xspol_old == None:
                        old_label = self.get_security_label()
                        if old_label != old_seclab:
                            log.info("old_label != old_seclab: %s != %s" %
                                     (old_label, old_seclab))
                            return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)

                    # relabel domain in the hypervisor
                    rc, errors = security.relabel_domains([[domid, new_ssidref]])
                    log.info("rc from relabeling in HV: %d" % rc)
                else:
                    return (-xsconstants.XSERR_POLICY_TYPE_UNSUPPORTED, "", "", 0)

            if rc == 0:
                # HALTED, RUNNING or PAUSED
                if domid == 0:
                    if xspol:
                        self.info['security_label'] = seclab
                        ssidref = poladmin.set_domain0_bootlabel(xspol, label)
                    else:
                        return (-xsconstants.XSERR_POLICY_NOT_LOADED, "", "", 0)
                else:
                    if self.info.has_key('security_label'):
                        old_label = self.info['security_label']
                        # Check label against expected one, unless wildcard
                        if old_label != old_seclab:
                            return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)

                    self.info['security_label'] = seclab

                    try:
                        xen.xend.XendDomain.instance().managed_config_save(self)
                    except:
                        # Best-effort persistence; the in-memory label is
                        # already updated.
                        pass
            return (rc, errors, old_label, new_ssidref)
        finally:
            xen.xend.XendDomain.instance().policy_lock.release()
3398 def get_on_shutdown(self):
3399 after_shutdown = self.info.get('actions_after_shutdown')
3400 if not after_shutdown or after_shutdown not in XEN_API_ON_NORMAL_EXIT:
3401 return XEN_API_ON_NORMAL_EXIT[-1]
3402 return after_shutdown
3404 def get_on_reboot(self):
3405 after_reboot = self.info.get('actions_after_reboot')
3406 if not after_reboot or after_reboot not in XEN_API_ON_NORMAL_EXIT:
3407 return XEN_API_ON_NORMAL_EXIT[-1]
3408 return after_reboot
3410 def get_on_suspend(self):
3411 # TODO: not supported
3412 after_suspend = self.info.get('actions_after_suspend')
3413 if not after_suspend or after_suspend not in XEN_API_ON_NORMAL_EXIT:
3414 return XEN_API_ON_NORMAL_EXIT[-1]
3415 return after_suspend
3417 def get_on_crash(self):
3418 after_crash = self.info.get('actions_after_crash')
3419 if not after_crash or after_crash not in \
3420 XEN_API_ON_CRASH_BEHAVIOUR + restart_modes:
3421 return XEN_API_ON_CRASH_BEHAVIOUR[0]
3422 return XEN_API_ON_CRASH_BEHAVIOUR_FILTER[after_crash]
3424 def get_dev_config_by_uuid(self, dev_class, dev_uuid):
3425 """ Get's a device configuration either from XendConfig or
3426 from the DevController.
3428 @param dev_class: device class, either, 'vbd' or 'vif'
3429 @param dev_uuid: device UUID
3431 @rtype: dictionary
3432 """
3433 dev_type, dev_config = self.info['devices'].get(dev_uuid, (None, None))
3435 # shortcut if the domain isn't started because
3436 # the devcontrollers will have no better information
3437 # than XendConfig.
3438 if self._stateGet() in (XEN_API_VM_POWER_STATE_HALTED,
3439 XEN_API_VM_POWER_STATE_SUSPENDED):
3440 if dev_config:
3441 return copy.deepcopy(dev_config)
3442 return None
3444 # instead of using dev_class, we use the dev_type
3445 # that is from XendConfig.
3446 controller = self.getDeviceController(dev_type)
3447 if not controller:
3448 return None
3450 all_configs = controller.getAllDeviceConfigurations()
3451 if not all_configs:
3452 return None
3454 updated_dev_config = copy.deepcopy(dev_config)
3455 for _devid, _devcfg in all_configs.items():
3456 if _devcfg.get('uuid') == dev_uuid:
3457 updated_dev_config.update(_devcfg)
3458 updated_dev_config['id'] = _devid
3459 return updated_dev_config
3461 return updated_dev_config
3463 def get_dev_xenapi_config(self, dev_class, dev_uuid):
3464 config = self.get_dev_config_by_uuid(dev_class, dev_uuid)
3465 if not config:
3466 return {}
3468 config['VM'] = self.get_uuid()
3470 if dev_class == 'vif':
3471 if not config.has_key('name'):
3472 config['name'] = config.get('vifname', '')
3473 if not config.has_key('MAC'):
3474 config['MAC'] = config.get('mac', '')
3475 if not config.has_key('type'):
3476 config['type'] = 'paravirtualised'
3477 if not config.has_key('device'):
3478 devid = config.get('id')
3479 if devid != None:
3480 config['device'] = 'eth%s' % devid
3481 else:
3482 config['device'] = ''
3484 if not config.has_key('network'):
3485 try:
3486 bridge = config.get('bridge', None)
3487 if bridge is None:
3488 from xen.util import Brctl
3489 if_to_br = dict([(i,b)
3490 for (b,ifs) in Brctl.get_state().items()
3491 for i in ifs])
3492 vifname = "vif%s.%s" % (self.getDomid(),
3493 config.get('id'))
3494 bridge = if_to_br.get(vifname, None)
3495 config['network'] = \
3496 XendNode.instance().bridge_to_network(
3497 config.get('bridge')).get_uuid()
3498 except Exception:
3499 log.exception('bridge_to_network')
3500 # Ignore this for now -- it may happen if the device
3501 # has been specified using the legacy methods, but at
3502 # some point we're going to have to figure out how to
3503 # handle that properly.
3505 config['MTU'] = 1500 # TODO
3507 if self._stateGet() not in (XEN_API_VM_POWER_STATE_HALTED,):
3508 xennode = XendNode.instance()
3509 rx_bps, tx_bps = xennode.get_vif_util(self.domid, devid)
3510 config['io_read_kbs'] = rx_bps/1024
3511 config['io_write_kbs'] = tx_bps/1024
3512 rx, tx = xennode.get_vif_stat(self.domid, devid)
3513 config['io_total_read_kbs'] = rx/1024
3514 config['io_total_write_kbs'] = tx/1024
3515 else:
3516 config['io_read_kbs'] = 0.0
3517 config['io_write_kbs'] = 0.0
3518 config['io_total_read_kbs'] = 0.0
3519 config['io_total_write_kbs'] = 0.0
3521 config['security_label'] = config.get('security_label', '')
3523 if dev_class == 'vbd':
3525 if self._stateGet() not in (XEN_API_VM_POWER_STATE_HALTED,):
3526 controller = self.getDeviceController(dev_class)
3527 devid, _1, _2 = controller.getDeviceDetails(config)
3528 xennode = XendNode.instance()
3529 rd_blkps, wr_blkps = xennode.get_vbd_util(self.domid, devid)
3530 config['io_read_kbs'] = rd_blkps
3531 config['io_write_kbs'] = wr_blkps
3532 else:
3533 config['io_read_kbs'] = 0.0
3534 config['io_write_kbs'] = 0.0
3536 config['VDI'] = config.get('VDI', '')
3537 config['device'] = config.get('dev', '')
3538 if ':' in config['device']:
3539 vbd_name, vbd_type = config['device'].split(':', 1)
3540 config['device'] = vbd_name
3541 if vbd_type == 'cdrom':
3542 config['type'] = XEN_API_VBD_TYPE[0]
3543 else:
3544 config['type'] = XEN_API_VBD_TYPE[1]
3546 config['driver'] = 'paravirtualised' # TODO
3547 config['image'] = config.get('uname', '')
3549 if config.get('mode', 'r') == 'r':
3550 config['mode'] = 'RO'
3551 else:
3552 config['mode'] = 'RW'
3554 if dev_class == 'vtpm':
3555 if not config.has_key('type'):
3556 config['type'] = 'paravirtualised' # TODO
3557 if not config.has_key('backend'):
3558 config['backend'] = "00000000-0000-0000-0000-000000000000"
3560 return config
3562 def get_dev_property(self, dev_class, dev_uuid, field):
3563 config = self.get_dev_xenapi_config(dev_class, dev_uuid)
3564 try:
3565 return config[field]
3566 except KeyError:
3567 raise XendError('Invalid property for device: %s' % field)
3569 def set_dev_property(self, dev_class, dev_uuid, field, value):
3570 self.info['devices'][dev_uuid][1][field] = value
3572 def get_vcpus_util(self):
3573 vcpu_util = {}
3574 xennode = XendNode.instance()
3575 if 'VCPUs_max' in self.info and self.domid != None:
3576 for i in range(0, self.info['VCPUs_max']):
3577 util = xennode.get_vcpu_util(self.domid, i)
3578 vcpu_util[str(i)] = util
3580 return vcpu_util
    # Device UUID lists per class; empty when none are configured.
    def get_consoles(self):
        return self.info.get('console_refs', [])

    def get_vifs(self):
        return self.info.get('vif_refs', [])

    def get_vbds(self):
        return self.info.get('vbd_refs', [])

    def get_vtpms(self):
        return self.info.get('vtpm_refs', [])

    # DPCI/DSCSI instances live in XendAPIStore, keyed by our VM uuid.
    def get_dpcis(self):
        return XendDPCI.get_by_VM(self.info.get('uuid'))

    def get_dscsis(self):
        return XendDSCSI.get_by_VM(self.info.get('uuid'))
    def create_vbd(self, xenapi_vbd, vdi_image_path):
        """Create a VBD using a VDI from XendStorageRepository.

        @param xenapi_vbd: vbd struct from the Xen API
        @param vdi_image_path: VDI UUID
        @rtype: string
        @return: uuid of the device
        """
        xenapi_vbd['image'] = vdi_image_path
        # 'tap'-prefixed images go through the blktap controller.
        if vdi_image_path.startswith('tap'):
            dev_uuid = self.info.device_add('tap', cfg_xenapi = xenapi_vbd)
        else:
            dev_uuid = self.info.device_add('vbd', cfg_xenapi = xenapi_vbd)

        if not dev_uuid:
            raise XendError('Failed to create device')

        # Hot-plug immediately only if the domain is live.
        if self._stateGet() in (XEN_API_VM_POWER_STATE_RUNNING,
                                XEN_API_VM_POWER_STATE_PAUSED):
            _, config = self.info['devices'][dev_uuid]

            if vdi_image_path.startswith('tap'):
                dev_control = self.getDeviceController('tap')
            else:
                dev_control = self.getDeviceController('vbd')

            try:
                devid = dev_control.createDevice(config)
                dev_control.waitForDevice(devid)
                self.info.device_update(dev_uuid,
                                        cfg_xenapi = {'devid': devid})
            except Exception, exn:
                # Roll back the config entry so a failed hotplug leaves
                # no stale device behind, then propagate the error.
                log.exception(exn)
                del self.info['devices'][dev_uuid]
                self.info['vbd_refs'].remove(dev_uuid)
                raise

        return dev_uuid
3639 def create_phantom_vbd_with_vdi(self, xenapi_vbd, vdi_image_path):
3640 """Create a VBD using a VDI from XendStorageRepository.
3642 @param xenapi_vbd: vbd struct from the Xen API
3643 @param vdi_image_path: VDI UUID
3644 @rtype: string
3645 @return: uuid of the device
3646 """
3647 xenapi_vbd['image'] = vdi_image_path
3648 dev_uuid = self.info.phantom_device_add('tap', cfg_xenapi = xenapi_vbd)
3649 if not dev_uuid:
3650 raise XendError('Failed to create device')
3652 if self._stateGet() == XEN_API_VM_POWER_STATE_RUNNING:
3653 _, config = self.info['devices'][dev_uuid]
3654 config['devid'] = self.getDeviceController('tap').createDevice(config)
3656 return config['devid']
3658 def create_vif(self, xenapi_vif):
3659 """Create VIF device from the passed struct in Xen API format.
3661 @param xenapi_vif: Xen API VIF Struct.
3662 @rtype: string
3663 @return: UUID
3664 """
3665 dev_uuid = self.info.device_add('vif', cfg_xenapi = xenapi_vif)
3666 if not dev_uuid:
3667 raise XendError('Failed to create device')
3669 if self._stateGet() in (XEN_API_VM_POWER_STATE_RUNNING,
3670 XEN_API_VM_POWER_STATE_PAUSED):
3672 _, config = self.info['devices'][dev_uuid]
3673 dev_control = self.getDeviceController('vif')
3675 try:
3676 devid = dev_control.createDevice(config)
3677 dev_control.waitForDevice(devid)
3678 self.info.device_update(dev_uuid,
3679 cfg_xenapi = {'devid': devid})
3680 except Exception, exn:
3681 log.exception(exn)
3682 del self.info['devices'][dev_uuid]
3683 self.info['vif_refs'].remove(dev_uuid)
3684 raise
3686 return dev_uuid
3688 def create_vtpm(self, xenapi_vtpm):
3689 """Create a VTPM device from the passed struct in Xen API format.
3691 @return: uuid of the device
3692 @rtype: string
3693 """
3695 if self._stateGet() not in (DOM_STATE_HALTED,):
3696 raise VmError("Can only add vTPM to a halted domain.")
3697 if self.get_vtpms() != []:
3698 raise VmError('Domain already has a vTPM.')
3699 dev_uuid = self.info.device_add('vtpm', cfg_xenapi = xenapi_vtpm)
3700 if not dev_uuid:
3701 raise XendError('Failed to create device')
3703 return dev_uuid
3705 def create_console(self, xenapi_console):
3706 """ Create a console device from a Xen API struct.
3708 @return: uuid of device
3709 @rtype: string
3710 """
3711 if self._stateGet() not in (DOM_STATE_HALTED,):
3712 raise VmError("Can only add console to a halted domain.")
3714 dev_uuid = self.info.device_add('console', cfg_xenapi = xenapi_console)
3715 if not dev_uuid:
3716 raise XendError('Failed to create device')
3718 return dev_uuid
    def set_console_other_config(self, console_uuid, other_config):
        # Replace the 'other_config' map on the given console device.
        self.info.console_update(console_uuid, 'other_config', other_config)
    def create_dpci(self, xenapi_pci):
        """Create pci device from the passed struct in Xen API format.

        @param xenapi_pci: DPCI struct from Xen API
        @rtype: bool
        #@rtype: string
        @return: True if successfully created device
        #@return: UUID
        """

        dpci_uuid = uuid.createString()

        # Flatten the options map into [key, value] pairs for sxp.
        dpci_opts = []
        opts_dict = xenapi_pci.get('options')
        for k in opts_dict.keys():
            dpci_opts.append([k, opts_dict[k]])

        # Convert xenapi to sxp
        ppci = XendAPIStore.get(xenapi_pci.get('PPCI'), 'PPCI')

        target_pci_sxp = \
            ['pci',
                ['dev',
                    ['domain', '0x%02x' % ppci.get_domain()],
                    ['bus', '0x%02x' % ppci.get_bus()],
                    ['slot', '0x%02x' % ppci.get_slot()],
                    ['func', '0x%1x' % ppci.get_func()],
                    ['vslot', '0x%02x' % xenapi_pci.get('hotplug_slot')],
                    ['opts', dpci_opts],
                    ['uuid', dpci_uuid]
                ],
                ['state', 'Initialising']
            ]

        if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:
            # Domain not running: just record the device in the config,
            # appending to any existing pci device group.
            old_pci_sxp = self._getDeviceInfo_pci(0)

            if old_pci_sxp is None:
                dev_uuid = self.info.device_add('pci', cfg_sxp = target_pci_sxp)
                if not dev_uuid:
                    raise XendError('Failed to create device')

            else:
                new_pci_sxp = ['pci']
                for existing_dev in sxp.children(old_pci_sxp, 'dev'):
                    new_pci_sxp.append(existing_dev)
                new_pci_sxp.append(sxp.child0(target_pci_sxp, 'dev'))

                dev_uuid = sxp.child_value(old_pci_sxp, 'uuid')
                self.info.device_update(dev_uuid, new_pci_sxp)

            xen.xend.XendDomain.instance().managed_config_save(self)

        else:
            # Live domain: hand the sxp to device_configure for hotplug.
            try:
                self.device_configure(target_pci_sxp)

            except Exception, exn:
                raise XendError('Failed to create device')

        return dpci_uuid
    def create_dscsi(self, xenapi_dscsi):
        """Create scsi device from the passed struct in Xen API format.

        @param xenapi_dscsi: DSCSI struct from Xen API
        @rtype: string
        @return: UUID
        """

        dscsi_uuid = uuid.createString()

        # Convert xenapi to sxp
        pscsi = XendAPIStore.get(xenapi_dscsi.get('PSCSI'), 'PSCSI')
        # The virtual host number is the first field of 'h:c:t:l'.
        devid = int(xenapi_dscsi.get('virtual_HCTL').split(':')[0])
        target_vscsi_sxp = \
            ['vscsi',
                ['dev',
                    ['devid', devid],
                    ['p-devname', pscsi.get_dev_name()],
                    ['p-dev', pscsi.get_physical_HCTL()],
                    ['v-dev', xenapi_dscsi.get('virtual_HCTL')],
                    ['state', xenbusState['Initialising']],
                    ['uuid', dscsi_uuid]
                ],
                ['feature-host', 0]
            ]

        if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:
            # Domain not running: just record the device in the config,
            # appending to the existing vscsi group for this devid if any.
            cur_vscsi_sxp = self._getDeviceInfo_vscsi(devid)

            if cur_vscsi_sxp is None:
                dev_uuid = self.info.device_add('vscsi', cfg_sxp = target_vscsi_sxp)
                if not dev_uuid:
                    raise XendError('Failed to create device')

            else:
                new_vscsi_sxp = ['vscsi', ['feature-host', 0]]
                for existing_dev in sxp.children(cur_vscsi_sxp, 'dev'):
                    new_vscsi_sxp.append(existing_dev)
                new_vscsi_sxp.append(sxp.child0(target_vscsi_sxp, 'dev'))

                dev_uuid = sxp.child_value(cur_vscsi_sxp, 'uuid')
                self.info.device_update(dev_uuid, new_vscsi_sxp)

            xen.xend.XendDomain.instance().managed_config_save(self)

        else:
            # Live domain: hand the sxp to device_configure for hotplug.
            try:
                self.device_configure(target_vscsi_sxp)

            except Exception, exn:
                raise XendError('Failed to create device')

        return dscsi_uuid
    def destroy_device_by_uuid(self, dev_type, dev_uuid):
        """Destroy the device of the given type and uuid.

        The config entry and the refs list entry are removed even when
        the live teardown fails (the 'finally' below), so the domain's
        configuration never keeps a device we tried to destroy.

        @raise XendError: if the device is unknown or has no devid
        """
        if dev_uuid not in self.info['devices']:
            raise XendError('Device does not exist')

        try:
            # Only a live domain has an actual device to tear down.
            if self._stateGet() in (XEN_API_VM_POWER_STATE_RUNNING,
                                    XEN_API_VM_POWER_STATE_PAUSED):
                _, config = self.info['devices'][dev_uuid]
                devid = config.get('devid')
                if devid != None:
                    self.getDeviceController(dev_type).destroyDevice(devid, force = False)
                else:
                    raise XendError('Unable to get devid for device: %s:%s' %
                                    (dev_type, dev_uuid))
        finally:
            del self.info['devices'][dev_uuid]
            self.info['%s_refs' % dev_type].remove(dev_uuid)
    # Per-class convenience wrappers around destroy_device_by_uuid.
    def destroy_vbd(self, dev_uuid):
        self.destroy_device_by_uuid('vbd', dev_uuid)

    def destroy_vif(self, dev_uuid):
        self.destroy_device_by_uuid('vif', dev_uuid)

    def destroy_vtpm(self, dev_uuid):
        self.destroy_device_by_uuid('vtpm', dev_uuid)
    def destroy_dpci(self, dev_uuid):
        """Destroy the passed-through PCI device with the given DPCI
        uuid, matching it by its domain:bus:slot.func name.

        @raise XendError: if no matching device is found or teardown fails
        """

        dpci = XendAPIStore.get(dev_uuid, 'DPCI')
        ppci = XendAPIStore.get(dpci.get_PPCI(), 'PPCI')

        old_pci_sxp = self._getDeviceInfo_pci(0)
        # From here on dev_uuid names the whole pci device group.
        dev_uuid = sxp.child_value(old_pci_sxp, 'uuid')
        target_dev = None
        new_pci_sxp = ['pci']
        for dev in sxp.children(old_pci_sxp, 'dev'):
            domain = int(sxp.child_value(dev, 'domain'), 16)
            bus = int(sxp.child_value(dev, 'bus'), 16)
            slot = int(sxp.child_value(dev, 'slot'), 16)
            func = int(sxp.child_value(dev, 'func'), 16)
            name = "%04x:%02x:%02x.%01x" % (domain, bus, slot, func)
            if ppci.get_name() == name:
                target_dev = dev
            else:
                # Keep every other device in the group.
                new_pci_sxp.append(dev)

        if target_dev is None:
            raise XendError('Failed to destroy device')

        target_pci_sxp = ['pci', target_dev, ['state', 'Closing']]

        if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:
            # Domain not running: update the stored config, dropping the
            # group entirely once it holds no devices.
            self.info.device_update(dev_uuid, new_pci_sxp)
            if len(sxp.children(new_pci_sxp, 'dev')) == 0:
                del self.info['devices'][dev_uuid]
            xen.xend.XendDomain.instance().managed_config_save(self)

        else:
            # Live domain: ask device_configure to close the device.
            try:
                self.device_configure(target_pci_sxp)

            except Exception, exn:
                raise XendError('Failed to destroy device')
    def destroy_dscsi(self, dev_uuid):
        """Destroy the virtual SCSI device with the given DSCSI uuid,
        matching it by its virtual HCTL within its vscsi group.

        @raise XendError: if no matching device is found or teardown fails
        """
        dscsi = XendAPIStore.get(dev_uuid, 'DSCSI')
        devid = dscsi.get_virtual_host()
        vHCTL = dscsi.get_virtual_HCTL()
        cur_vscsi_sxp = self._getDeviceInfo_vscsi(devid)
        # From here on dev_uuid names the whole vscsi device group.
        dev_uuid = sxp.child_value(cur_vscsi_sxp, 'uuid')

        target_dev = None
        new_vscsi_sxp = ['vscsi', ['feature-host', 0]]
        for dev in sxp.children(cur_vscsi_sxp, 'dev'):
            if vHCTL == sxp.child_value(dev, 'v-dev'):
                target_dev = dev
            else:
                # Keep every other device in the group.
                new_vscsi_sxp.append(dev)

        if target_dev is None:
            raise XendError('Failed to destroy device')

        target_dev.append(['state', xenbusState['Closing']])
        target_vscsi_sxp = ['vscsi', target_dev, ['feature-host', 0]]

        if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:
            # Domain not running: update the stored config, dropping the
            # group entirely once it holds no devices.
            self.info.device_update(dev_uuid, new_vscsi_sxp)
            if len(sxp.children(new_vscsi_sxp, 'dev')) == 0:
                del self.info['devices'][dev_uuid]
            xen.xend.XendDomain.instance().managed_config_save(self)

        else:
            # Live domain: ask device_configure to close the device.
            try:
                self.device_configure(target_vscsi_sxp)

            except Exception, exn:
                raise XendError('Failed to destroy device')
3943 def destroy_xapi_instances(self):
3944 """Destroy Xen-API instances stored in XendAPIStore.
3945 """
3946 # Xen-API classes based on XendBase have their instances stored
3947 # in XendAPIStore. Cleanup these instances here, if they are supposed
3948 # to be destroyed when the parent domain is dead.
3950 # Most of the virtual devices (vif, vbd, vfb, etc) are not based on
3951 # XendBase and there's no need to remove them from XendAPIStore.
3953 from xen.xend import XendDomain
3954 if XendDomain.instance().is_valid_vm(self.info.get('uuid')):
3955 # domain still exists.
3956 return
3958 # Destroy the VMMetrics instance.
3959 if XendAPIStore.get(self.metrics.get_uuid(), self.metrics.getClass()) \
3960 is not None:
3961 self.metrics.destroy()
3963 # Destroy DPCI instances.
3964 for dpci_uuid in XendDPCI.get_by_VM(self.info.get('uuid')):
3965 XendAPIStore.deregister(dpci_uuid, "DPCI")
3967 # Destroy DSCSI instances.
3968 for dscsi_uuid in XendDSCSI.get_by_VM(self.info.get('uuid')):
3969 XendAPIStore.deregister(dscsi_uuid, "DSCSI")
3971 def has_device(self, dev_class, dev_uuid):
3972 return (dev_uuid in self.info['%s_refs' % dev_class.lower()])
    def __str__(self):
        # Human-readable one-line summary; _stateGet() yields an index
        # into DOM_STATES.
        return '<domain id=%s name=%s memory=%s state=%s>' % \
               (str(self.domid), self.info['name_label'],
                str(self.info['memory_dynamic_max']), DOM_STATES[self._stateGet()])

    # repr() and str() intentionally render identically.
    __repr__ = __str__