2006-06-29 03:50:17 +08:00
|
|
|
#
|
|
|
|
# Copyright (C) 2006 Red Hat, Inc.
|
|
|
|
# Copyright (C) 2006 Daniel P. Berrange <berrange@redhat.com>
|
|
|
|
#
|
|
|
|
# This program is free software; you can redistribute it and/or modify
|
|
|
|
# it under the terms of the GNU General Public License as published by
|
|
|
|
# the Free Software Foundation; either version 2 of the License, or
|
|
|
|
# (at your option) any later version.
|
|
|
|
#
|
|
|
|
# This program is distributed in the hope that it will be useful,
|
|
|
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
# GNU General Public License for more details.
|
|
|
|
#
|
|
|
|
# You should have received a copy of the GNU General Public License
|
|
|
|
# along with this program; if not, write to the Free Software
|
2007-11-21 00:12:20 +08:00
|
|
|
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
|
|
|
|
# MA 02110-1301 USA.
|
2006-06-29 03:50:17 +08:00
|
|
|
#
|
2006-06-14 22:59:40 +08:00
|
|
|
|
2013-04-12 05:16:33 +08:00
|
|
|
# pylint: disable=E0611
|
2012-05-14 21:24:56 +08:00
|
|
|
from gi.repository import GObject
|
2013-04-12 05:16:33 +08:00
|
|
|
# pylint: enable=E0611
|
2012-05-14 21:24:56 +08:00
|
|
|
|
2006-10-26 00:18:06 +08:00
|
|
|
import logging
|
2010-12-11 00:47:07 +08:00
|
|
|
import os
|
2010-01-07 02:09:33 +08:00
|
|
|
import re
|
2012-01-28 07:31:21 +08:00
|
|
|
import socket
|
2010-01-07 02:09:33 +08:00
|
|
|
import threading
|
2011-04-29 05:11:04 +08:00
|
|
|
import time
|
2012-01-28 07:31:21 +08:00
|
|
|
import traceback
|
2010-01-07 02:09:33 +08:00
|
|
|
|
|
|
|
import libvirt
|
2008-02-23 00:48:29 +08:00
|
|
|
import virtinst
|
2006-06-14 22:59:40 +08:00
|
|
|
|
2009-07-12 09:23:16 +08:00
|
|
|
from virtManager import util
|
2012-02-10 20:54:32 +08:00
|
|
|
from virtManager import connectauth
|
2011-04-19 00:39:53 +08:00
|
|
|
from virtManager.baseclass import vmmGObject
|
2006-06-15 05:52:49 +08:00
|
|
|
from virtManager.domain import vmmDomain
|
2009-11-20 06:35:27 +08:00
|
|
|
from virtManager.interface import vmmInterface
|
2009-12-01 05:16:43 +08:00
|
|
|
from virtManager.mediadev import vmmMediaDevice
|
2012-02-10 20:54:32 +08:00
|
|
|
from virtManager.netdev import vmmNetDevice
|
|
|
|
from virtManager.network import vmmNetwork
|
2011-04-10 07:32:30 +08:00
|
|
|
from virtManager.nodedev import vmmNodeDevice
|
2012-02-10 20:54:32 +08:00
|
|
|
from virtManager.storagepool import vmmStoragePool
|
2006-06-14 22:59:40 +08:00
|
|
|
|
2012-01-28 07:31:21 +08:00
|
|
|
|
2010-12-10 01:37:48 +08:00
|
|
|
class vmmConnection(vmmGObject):
|
2012-05-14 21:24:56 +08:00
|
|
|
    # GObject signals emitted by this connection.  Most carry the changed
    # object's key (UUID or name) as a string; "mediadev-added" carries the
    # vmmMediaDevice object itself, and "connect-error" carries
    # (error text, details, is_auth_error-ish bool -- see emitters for exact
    # meaning; TODO confirm against callers).
    __gsignals__ = {
        "vm-added": (GObject.SignalFlags.RUN_FIRST, None, [str]),
        "vm-removed": (GObject.SignalFlags.RUN_FIRST, None, [str]),
        "net-added": (GObject.SignalFlags.RUN_FIRST, None, [str]),
        "net-removed": (GObject.SignalFlags.RUN_FIRST, None, [str]),
        "net-started": (GObject.SignalFlags.RUN_FIRST, None, [str]),
        "net-stopped": (GObject.SignalFlags.RUN_FIRST, None, [str]),
        "pool-added": (GObject.SignalFlags.RUN_FIRST, None, [str]),
        "pool-removed": (GObject.SignalFlags.RUN_FIRST, None, [str]),
        "pool-started": (GObject.SignalFlags.RUN_FIRST, None, [str]),
        "pool-stopped": (GObject.SignalFlags.RUN_FIRST, None, [str]),
        "interface-added": (GObject.SignalFlags.RUN_FIRST, None, [str]),
        "interface-removed": (GObject.SignalFlags.RUN_FIRST, None, [str]),
        "interface-started": (GObject.SignalFlags.RUN_FIRST, None, [str]),
        "interface-stopped": (GObject.SignalFlags.RUN_FIRST, None, [str]),
        "nodedev-added": (GObject.SignalFlags.RUN_FIRST, None, [str]),
        "nodedev-removed": (GObject.SignalFlags.RUN_FIRST, None, [str]),
        "mediadev-added": (GObject.SignalFlags.RUN_FIRST, None, [object]),
        "mediadev-removed": (GObject.SignalFlags.RUN_FIRST, None, [str]),
        "resources-sampled": (GObject.SignalFlags.RUN_FIRST, None, []),
        "state-changed": (GObject.SignalFlags.RUN_FIRST, None, []),
        "connect-error": (GObject.SignalFlags.RUN_FIRST, None, [str, str, bool]),
    }
|
2006-06-15 01:52:46 +08:00
|
|
|
|
2007-09-10 10:57:24 +08:00
|
|
|
    # Coarse connection lifecycle states (see get_state_text for UI labels).
    # INACTIVE means the connection is open but polling is paused.
    STATE_DISCONNECTED = 0
    STATE_CONNECTING = 1
    STATE_ACTIVE = 2
    STATE_INACTIVE = 3
|
|
|
|
|
2013-07-04 01:56:43 +08:00
|
|
|
    def __init__(self, uri):
        """
        Wrap a single libvirt connection.

        @param uri: libvirt connection URI.  None or "xen" (any case) is
            normalized to "xen:///" before the backend is created.
        """
        vmmGObject.__init__(self)

        self._uri = uri
        # Historical shorthand: treat a missing/bare "xen" URI as local Xen.
        if self._uri is None or self._uri.lower() == "xen":
            self._uri = "xen:///"

        self.state = self.STATE_DISCONNECTED
        # Background thread performing the libvirt open(); None when idle.
        self.connectThread = None
        # Error text from the last failed connect attempt, if any.
        self.connectError = None
        # Serializes tick/poll passes against each other.
        self._ticklock = threading.Lock()
        # virtinst wrapper that owns the actual libvirt handle.
        self._backend = virtinst.VirtualConnection(self._uri)

        # Lazily filled capability caches (None == not probed yet).
        self._caps = None
        self._caps_xml = None

        # Tri-state feature probes: None = unknown, then True/False once
        # checked via check_conn_support().
        self._network_capable = None
        self._storage_capable = None
        self._interface_capable = None
        self._nodedev_capable = None

        # Cached XML dump flags per object type, see _get_flags_helper().
        self._xml_flags = {}
        # Cached per-connection support lookups keyed by SUPPORT_* constant.
        self._support_dict = {}

        # Host device objects: name -> vmmNodeDevice
        self.nodedevs = {}
        # Physical network interfaces: name (eth0) -> vmmNetDevice
        self.netdevs = {}
        # Physical media devices: vmmMediaDevice.key -> vmmMediaDevice
        self.mediadevs = {}
        # Host network interfaces: name -> vmmInterface
        self.interfaces = {}
        # Connection Storage pools: UUID -> vmmStoragePool
        self.pools = {}
        # Virtual networks: UUID -> vmmNetwork object
        self.nets = {}
        # Virtual machines. UUID -> vmmDomain object
        self.vms = {}
        # Resource utilization statistics
        self.record = []
        # Raw tuple from libvirt getInfo(); populated by the tick loop.
        self.hostinfo = None

        # Netdev polling backend selection state, see _init_netdev().
        self.netdev_initialized = False
        self.netdev_error = ""
        self.netdev_use_libvirt = False

        # Media device polling backend selection state, see _init_mediadev().
        self.mediadev_initialized = False
        self.mediadev_error = ""
        self.mediadev_use_libvirt = False
|
2009-03-10 04:21:32 +08:00
|
|
|
|
2013-07-05 20:59:58 +08:00
|
|
|
|
2009-11-26 06:07:12 +08:00
|
|
|
#################
|
|
|
|
# Init routines #
|
|
|
|
#################
|
|
|
|
|
|
|
|
    def _init_netdev(self):
        """
        Determine how we will be polling for net devices (HAL or libvirt)

        Prefers libvirt (needs both nodedev and interface APIs); records a
        human-readable reason in self.netdev_error when libvirt can't be
        used, in which case callers fall back to HAL enumeration.
        """
        if self.is_nodedev_capable() and self.is_interface_capable():
            try:
                # Probe once up front so a broken interface API is caught
                # here rather than during every poll.
                self._build_libvirt_netdev_list()
                self.netdev_use_libvirt = True
            except Exception, e:
                self.netdev_error = _("Could not build physical interface "
                                      "list via libvirt: %s") % str(e)
        else:
            self.netdev_error = _("Libvirt version does not support "
                                  "physical interface listing.")

        self.netdev_initialized = True
        if self.netdev_error:
            logging.debug(self.netdev_error)
        else:
            if self.netdev_use_libvirt:
                logging.debug("Using libvirt API for netdev enumeration")
            else:
                logging.debug("Using HAL for netdev enumeration")
|
|
|
|
|
2009-12-11 09:04:26 +08:00
|
|
|
    def _init_mediadev(self):
        """
        Decide how optical/media devices are enumerated (libvirt vs HAL).

        With nodedev support we piggyback on our own nodedev-added/removed
        signals; otherwise self.mediadev_error is set and HAL is used.
        """
        if self.is_nodedev_capable():
            try:
                self.connect("nodedev-added", self._nodedev_mediadev_added)
                self.connect("nodedev-removed", self._nodedev_mediadev_removed)
                self.mediadev_use_libvirt = True
            except Exception, e:
                self.mediadev_error = _("Could not build media "
                                        "list via libvirt: %s") % str(e)
        else:
            self.mediadev_error = _("Libvirt version does not support "
                                    "media listing.")

        self.mediadev_initialized = True
        if self.mediadev_error:
            logging.debug(self.mediadev_error)
        else:
            if self.mediadev_use_libvirt:
                logging.debug("Using libvirt API for mediadev enumeration")
            else:
                logging.debug("Using HAL for mediadev enumeration")
|
2009-11-26 06:07:12 +08:00
|
|
|
|
2013-04-16 07:34:10 +08:00
|
|
|
|
2009-11-26 06:07:12 +08:00
|
|
|
########################
|
|
|
|
# General data getters #
|
|
|
|
########################
|
2009-11-26 05:12:03 +08:00
|
|
|
|
2009-11-26 05:13:46 +08:00
|
|
|
    def get_uri(self):
        # Normalized connection URI (see __init__ for the "xen" shorthand).
        return self._uri

    def get_backend(self):
        # The virtinst.VirtualConnection wrapping the libvirt handle.
        return self._backend

    def invalidate_caps(self):
        # Drop cached capabilities so the next access re-fetches them.
        return self._backend.invalidate_caps()

    # Delegate capabilities lookup straight to the backend.
    caps = property(lambda self: getattr(self, "_backend").caps)
|
2009-11-26 05:13:46 +08:00
|
|
|
|
|
|
|
def get_host_info(self):
|
|
|
|
return self.hostinfo
|
|
|
|
|
|
|
|
def pretty_host_memory_size(self):
|
2013-07-05 20:59:58 +08:00
|
|
|
if not self._backend.is_open():
|
2009-11-26 05:13:46 +08:00
|
|
|
return ""
|
2011-04-11 05:08:23 +08:00
|
|
|
return util.pretty_mem(self.host_memory_size())
|
2009-11-26 05:13:46 +08:00
|
|
|
|
|
|
|
def host_memory_size(self):
|
2013-07-05 20:59:58 +08:00
|
|
|
if not self._backend.is_open():
|
2009-11-26 05:13:46 +08:00
|
|
|
return 0
|
2010-12-11 00:47:07 +08:00
|
|
|
return self.hostinfo[1] * 1024
|
2009-11-26 05:13:46 +08:00
|
|
|
|
|
|
|
def host_architecture(self):
|
2013-07-05 20:59:58 +08:00
|
|
|
if not self._backend.is_open():
|
2009-11-26 05:13:46 +08:00
|
|
|
return ""
|
|
|
|
return self.hostinfo[0]
|
|
|
|
|
|
|
|
def host_active_processor_count(self):
|
2013-07-05 20:59:58 +08:00
|
|
|
if not self._backend.is_open():
|
2009-11-26 05:13:46 +08:00
|
|
|
return 0
|
|
|
|
return self.hostinfo[2]
|
|
|
|
|
|
|
|
def host_maximum_processor_count(self):
|
2013-07-05 20:59:58 +08:00
|
|
|
if not self._backend.is_open():
|
2009-11-26 05:13:46 +08:00
|
|
|
return 0
|
|
|
|
return (self.hostinfo[4] * self.hostinfo[5] *
|
|
|
|
self.hostinfo[6] * self.hostinfo[7])
|
|
|
|
|
2009-12-01 00:56:41 +08:00
|
|
|
    def connect(self, name, callback, *args):
        """
        Register a signal handler, then replay "added" events for objects
        that already exist, so late subscribers see the full current set.
        """
        handle_id = vmmGObject.connect(self, name, callback, *args)

        # NOTE: these emits go to *all* handlers of the signal, not just the
        # one registered above -- existing listeners will see duplicates.
        if name == "vm-added":
            for uuid in self.vms.keys():
                self.emit("vm-added", uuid)
        elif name == "mediadev-added":
            for dev in self.mediadevs.values():
                self.emit("mediadev-added", dev)
        elif name == "nodedev-added":
            for key in self.nodedevs.keys():
                self.emit("nodedev-added", key)

        return handle_id
|
|
|
|
|
|
|
|
##########################
|
|
|
|
# URI + hostname helpers #
|
|
|
|
##########################
|
2009-11-16 04:17:03 +08:00
|
|
|
|
|
|
|
    def get_qualified_hostname(self):
        """
        Best available fully-qualified hostname for the connection:
        libvirt's getHostname() if supported, else the URI hostname for
        remote connections, else a local reverse DNS lookup.
        """
        if self.check_conn_support(self._backend.SUPPORT_CONN_GETHOSTNAME):
            return self._backend.getHostname()

        uri_hostname = self.get_uri_hostname()
        if self.is_remote() and uri_hostname.lower() != "localhost":
            return uri_hostname

        # This can throw an exception, so beware when calling!
        return socket.gethostbyaddr(socket.gethostname())[0]
|
2009-11-16 04:17:03 +08:00
|
|
|
|
2007-09-10 10:57:24 +08:00
|
|
|
def get_short_hostname(self):
|
|
|
|
hostname = self.get_hostname()
|
|
|
|
offset = hostname.find(".")
|
|
|
|
if offset > 0 and not hostname[0].isdigit():
|
|
|
|
return hostname[0:offset]
|
|
|
|
return hostname
|
|
|
|
|
|
|
|
def get_hostname(self, resolveLocal=False):
|
2010-12-10 00:22:35 +08:00
|
|
|
ignore = resolveLocal
|
2009-11-16 04:17:03 +08:00
|
|
|
try:
|
|
|
|
return self.get_qualified_hostname()
|
|
|
|
except:
|
|
|
|
return self.get_uri_hostname()
|
2006-11-06 23:34:54 +08:00
|
|
|
|
2013-07-06 08:36:28 +08:00
|
|
|
    # URI introspection helpers, all delegated to the virtinst backend.
    # Each property evaluates to the backend's bound method, so callers
    # invoke them as methods: conn.is_remote(), conn.get_transport(), etc.
    get_uri_hostname = property(lambda s:
                                getattr(s, "_backend").get_uri_hostname)
    get_transport = property(lambda s:
                             getattr(s, "_backend").get_uri_transport)
    get_driver = property(lambda s: getattr(s, "_backend").get_uri_driver)
    is_container = property(lambda s: getattr(s, "_backend").is_container)
    is_lxc = property(lambda s: getattr(s, "_backend").is_lxc)
    is_openvz = property(lambda s: getattr(s, "_backend").is_openvz)
    is_xen = property(lambda s: getattr(s, "_backend").is_xen)
    is_remote = property(lambda s: getattr(s, "_backend").is_remote)
    is_qemu = property(lambda s: getattr(s, "_backend").is_qemu)
    is_qemu_system = property(lambda s: getattr(s, "_backend").is_qemu_system)
    is_qemu_session = property(lambda s:
                               getattr(s, "_backend").is_qemu_session)
    is_test_conn = property(lambda s: getattr(s, "_backend").is_test)
    is_session_uri = property(lambda s: getattr(s, "_backend").is_session_uri)
|
2009-03-10 04:16:45 +08:00
|
|
|
|
2010-12-11 07:03:51 +08:00
|
|
|
|
2010-02-11 09:26:40 +08:00
|
|
|
# Connection capabilities debug helpers
|
2011-07-23 01:13:26 +08:00
|
|
|
def rhel6_defaults(self, emulator):
|
2011-07-23 00:22:09 +08:00
|
|
|
if not self.is_qemu_system():
|
|
|
|
return True
|
|
|
|
if not str(emulator).startswith("/usr/libexec"):
|
|
|
|
return True
|
2011-07-23 01:13:26 +08:00
|
|
|
return self.config.rhel6_defaults
|
2011-07-23 00:22:09 +08:00
|
|
|
|
2011-09-27 06:53:00 +08:00
|
|
|
def rhel6_defaults_caps(self):
|
2013-07-07 02:12:13 +08:00
|
|
|
for guest in self.caps.guests:
|
2011-09-27 06:53:00 +08:00
|
|
|
for dom in guest.domains:
|
|
|
|
if dom.emulator.startswith("/usr/libexec"):
|
|
|
|
return self.config.rhel6_defaults
|
|
|
|
return True
|
|
|
|
|
2010-02-11 09:26:40 +08:00
|
|
|
|
2013-07-07 02:12:13 +08:00
|
|
|
####################################
|
|
|
|
# Connection pretty print routines #
|
|
|
|
####################################
|
2010-02-11 09:26:40 +08:00
|
|
|
|
2010-12-12 12:51:20 +08:00
|
|
|
    def _get_pretty_desc(self, active, shorthost, show_trans):
        """
        Build the human readable connection label "host (HV)".

        @param active: if True and this is QEMU, append "/KVM" when the
            capabilities report KVM availability.
        @param shorthost: truncate the hostname at the first dot (unless
            it looks like an IP address).
        @param show_trans: include transport, username and port details.
        """
        def match_whole_string(orig, reg):
            # True only if the regex consumes the entire string.
            match = re.match(reg, orig)
            if not match:
                return False

            return ((match.end() - match.start()) == len(orig))

        def is_ip_addr(orig):
            # Loose check: digits and dots only (no octet validation).
            return match_whole_string(orig, "[0-9.]+")

        (scheme, username, hostname,
         path, ignore, ignore) = virtinst.util.uri_split(self.get_uri())

        hv = ""
        rest = ""
        transport = ""
        port = ""
        # Split "qemu+ssh" style schemes into driver + transport.
        if scheme.count("+"):
            transport = scheme.split("+")[1]
            scheme = scheme.split("+")[0]

        # Peel an explicit port off the hostname.
        if hostname.count(":"):
            port = hostname.split(":")[1]
            hostname = hostname.split(":")[0]

        if hostname:
            if shorthost and not is_ip_addr(hostname):
                rest = hostname.split(".")[0]
            else:
                rest = hostname
        else:
            rest = "localhost"

        # Canonical display names for known drivers.
        pretty_map = {
            "esx" : "ESX",
            "gsx" : "GSX",
            "libxl" : "libxl",
            "lxc" : "LXC",
            "openvz" : "OpenVZ",
            "phyp" : "phyp",
            "qemu" : "QEMU",
            "test" : "test",
            "uml" : "UML",
            "vbox" : "VBox",
            "vmware" : "VMWare",
            "xen" : "xen",
            "xenapi" : "XenAPI",
        }

        hv = scheme
        if scheme in pretty_map:
            hv = pretty_map[scheme]

        if hv == "QEMU" and active and self.caps.is_kvm_available():
            hv += "/KVM"

        if show_trans:
            if transport:
                hv += "+" + transport
            if username:
                hostname = username + "@" + hostname
            if port:
                hostname += ":" + port

        # Flag non-default URI paths: /session becomes "Usermode",
        # anything else (besides /system and /) is shown verbatim.
        if path and path != "/system" and path != "/":
            if path == "/session":
                hv += " Usermode"
            else:
                hv += " %s" % os.path.basename(path)

        return "%s (%s)" % (rest, hv)
|
2009-07-21 02:47:50 +08:00
|
|
|
|
2010-12-12 12:51:20 +08:00
|
|
|
    def get_pretty_desc_inactive(self, shorthost=True, transport=False):
        # Label variant that never appends "/KVM".
        return self._get_pretty_desc(False, shorthost, transport)

    def get_pretty_desc_active(self, shorthost=True, transport=False):
        # Label variant that appends "/KVM" when capabilities allow it.
        return self._get_pretty_desc(True, shorthost, transport)
|
2009-03-10 04:16:45 +08:00
|
|
|
|
2009-11-26 05:13:46 +08:00
|
|
|
|
|
|
|
#######################
|
|
|
|
# API support helpers #
|
|
|
|
#######################
|
|
|
|
|
2013-07-06 23:20:28 +08:00
|
|
|
    # Mirror every virtinst SUPPORT_* constant onto this class so callers
    # can write self._backend.SUPPORT_X or vmmConnection.SUPPORT_X alike.
    for _supportname in [_supportname for _supportname in
                         dir(virtinst.VirtualConnection) if
                         _supportname.startswith("SUPPORT_")]:
        locals()[_supportname] = getattr(virtinst.VirtualConnection,
                                         _supportname)

    # Thin delegations to the backend's feature probes.
    def check_conn_support(self, *args):
        return self._backend.check_conn_support(*args)
    def check_conn_hv_support(self, *args):
        return self._backend.check_conn_hv_support(*args)
    def check_domain_support(self, *args):
        return self._backend.check_domain_support(*args)
    def check_pool_support(self, *args):
        return self._backend.check_pool_support(*args)
    def check_nodedev_support(self, *args):
        return self._backend.check_nodedev_support(*args)
    def check_interface_support(self, *args):
        return self._backend.check_interface_support(*args)
    def check_stream_support(self, *args):
        return self._backend.check_stream_support(*args)
|
|
|
|
|
2009-11-26 05:13:46 +08:00
|
|
|
    def is_storage_capable(self):
        """
        Lazily probe and cache whether storage pool APIs are available.
        On the first successful probe this also tries to create the
        default storage pool as a side effect.
        """
        if self._storage_capable is None:
            self._storage_capable = self.check_conn_support(
                                        self._backend.SUPPORT_CONN_STORAGE)
            if self._storage_capable is False:
                logging.debug("Connection doesn't seem to support storage "
                              "APIs. Skipping all storage polling.")
            else:
                # Try to create the default storage pool
                try:
                    util.build_default_pool(self)
                except Exception, e:
                    # Best effort only; missing default pool is not fatal.
                    logging.debug("Building default pool failed: %s", str(e))

        return self._storage_capable
|
2009-11-26 05:13:46 +08:00
|
|
|
|
2011-04-10 09:51:50 +08:00
|
|
|
def is_network_capable(self):
|
2013-07-07 05:24:51 +08:00
|
|
|
if self._network_capable is None:
|
|
|
|
self._network_capable = self.check_conn_support(
|
2013-07-06 23:20:28 +08:00
|
|
|
self._backend.SUPPORT_CONN_NETWORK)
|
2013-07-07 05:24:51 +08:00
|
|
|
if self._network_capable is False:
|
2011-04-10 09:51:50 +08:00
|
|
|
logging.debug("Connection doesn't seem to support network "
|
|
|
|
"APIs. Skipping all network polling.")
|
|
|
|
|
2013-07-07 05:24:51 +08:00
|
|
|
return self._network_capable
|
2011-04-10 09:51:50 +08:00
|
|
|
|
|
|
|
def is_interface_capable(self):
|
2013-07-07 05:24:51 +08:00
|
|
|
if self._interface_capable is None:
|
|
|
|
self._interface_capable = self.check_conn_support(
|
2013-07-06 23:20:28 +08:00
|
|
|
self._backend.SUPPORT_CONN_INTERFACE)
|
2013-07-07 05:24:51 +08:00
|
|
|
if self._interface_capable is False:
|
2011-04-10 09:51:50 +08:00
|
|
|
logging.debug("Connection doesn't seem to support interface "
|
|
|
|
"APIs. Skipping all interface polling.")
|
|
|
|
|
2013-07-07 05:24:51 +08:00
|
|
|
return self._interface_capable
|
2011-04-10 09:51:50 +08:00
|
|
|
|
2009-11-26 05:13:46 +08:00
|
|
|
def is_nodedev_capable(self):
|
2012-11-08 21:15:02 +08:00
|
|
|
if self._nodedev_capable is None:
|
2013-07-06 23:20:28 +08:00
|
|
|
self._nodedev_capable = self.check_conn_support(
|
|
|
|
self._backend.SUPPORT_CONN_NODEDEV)
|
2009-11-26 03:50:27 +08:00
|
|
|
return self._nodedev_capable
|
2009-11-26 05:13:46 +08:00
|
|
|
|
2010-02-26 08:35:01 +08:00
|
|
|
    def _get_flags_helper(self, obj, key, check_func):
        """
        Cached lookup of (inactive, active) XML dump flags for an object
        type.  check_func is only invoked on the first call per key; its
        result is memoized in self._xml_flags.
        """
        ignore = obj
        flags_dict = self._xml_flags.get(key)

        if flags_dict is None:
            # Flags not probed yet for this key: run the (possibly
            # expensive) support checks once and cache the result.
            inact, act = check_func()

            flags_dict = {}
            flags_dict["active"] = act
            flags_dict["inactive"] = inact

            self._xml_flags[key] = flags_dict

        active_flags = flags_dict["active"]
        inactive_flags = flags_dict["inactive"]

        return (inactive_flags, active_flags)
|
|
|
|
|
|
|
|
    def get_dom_flags(self, vm):
        """
        (inactive, active) XML dump flags usable for domains on this
        connection, probed once and cached under the "domain" key.
        """
        key = "domain"

        def check_func():
            act = 0
            inact = 0

            # INACTIVE is only meaningful for the inactive XML dump.
            if self.check_domain_support(vm,
                                self._backend.SUPPORT_DOMAIN_XML_INACTIVE):
                inact = libvirt.VIR_DOMAIN_XML_INACTIVE
            else:
                logging.debug("Domain XML inactive flag not supported.")

            # SECURE applies to both dumps.
            if self.check_domain_support(vm,
                                self._backend.SUPPORT_DOMAIN_XML_SECURE):
                inact |= libvirt.VIR_DOMAIN_XML_SECURE
                act = libvirt.VIR_DOMAIN_XML_SECURE
            else:
                logging.debug("Domain XML secure flag not supported.")

            # UPDATE_CPU is keyed off host-model CPU support.
            if self.check_domain_support(vm,
                                self._backend.SUPPORT_DOMAIN_CPU_HOST_MODEL):
                inact |= libvirt.VIR_DOMAIN_XML_UPDATE_CPU
                act |= libvirt.VIR_DOMAIN_XML_UPDATE_CPU
            else:
                logging.debug("Domain XML update flag not supported.")

            return inact, act

        return self._get_flags_helper(vm, key, check_func)
|
2009-11-26 05:13:46 +08:00
|
|
|
|
2010-05-13 00:57:32 +08:00
|
|
|
def get_dom_managedsave_supported(self, vm):
|
2013-07-06 23:20:28 +08:00
|
|
|
key = self._backend.SUPPORT_DOMAIN_MANAGED_SAVE
|
2010-05-13 00:57:32 +08:00
|
|
|
if key not in self._support_dict:
|
2013-07-06 23:20:28 +08:00
|
|
|
val = self.check_domain_support(vm, key)
|
2012-01-17 11:04:40 +08:00
|
|
|
logging.debug("Connection managed save support: %s", val)
|
2010-05-13 00:57:32 +08:00
|
|
|
self._support_dict[key] = val
|
|
|
|
|
|
|
|
return self._support_dict[key]
|
|
|
|
|
2010-02-26 08:35:01 +08:00
|
|
|
    def get_interface_flags(self, iface):
        """
        (inactive, active) XML dump flags for host interfaces, probed once
        and cached under the "interface" key.
        """
        key = "interface"

        def check_func():
            act = 0
            inact = 0

            if self.check_interface_support(iface,
                                self._backend.SUPPORT_INTERFACE_XML_INACTIVE):
                inact = libvirt.VIR_INTERFACE_XML_INACTIVE
            else:
                logging.debug("Interface XML inactive flag not supported.")

            return (inact, act)

        return self._get_flags_helper(iface, key, check_func)
|
2009-11-26 05:13:46 +08:00
|
|
|
|
2013-07-06 23:20:28 +08:00
|
|
|
|
2009-11-26 05:13:46 +08:00
|
|
|
###################################
|
|
|
|
# Connection state getter/setters #
|
|
|
|
###################################
|
|
|
|
|
|
|
|
    def _change_state(self, newstate):
        # Only emit when the state actually changes, so listeners never
        # see redundant "state-changed" notifications.
        if self.state != newstate:
            self.state = newstate
            self.emit("state-changed")

    def get_state(self):
        # Current STATE_* constant.
        return self.state
|
|
|
|
|
|
|
|
def get_state_text(self):
|
|
|
|
if self.state == self.STATE_DISCONNECTED:
|
|
|
|
return _("Disconnected")
|
|
|
|
elif self.state == self.STATE_CONNECTING:
|
|
|
|
return _("Connecting")
|
|
|
|
elif self.state == self.STATE_ACTIVE:
|
2013-07-04 01:56:43 +08:00
|
|
|
return _("Active")
|
2009-11-26 05:13:46 +08:00
|
|
|
elif self.state == self.STATE_INACTIVE:
|
|
|
|
return _("Inactive")
|
|
|
|
else:
|
|
|
|
return _("Unknown")
|
|
|
|
|
|
|
|
def pause(self):
|
|
|
|
if self.state != self.STATE_ACTIVE:
|
|
|
|
return
|
|
|
|
self._change_state(self.STATE_INACTIVE)
|
|
|
|
|
|
|
|
def resume(self):
|
|
|
|
if self.state != self.STATE_INACTIVE:
|
|
|
|
return
|
|
|
|
self._change_state(self.STATE_ACTIVE)
|
|
|
|
|
|
|
|
    # Convenience predicates over the STATE_* lifecycle.
    def is_active(self):
        return self.state == self.STATE_ACTIVE

    def is_paused(self):
        return self.state == self.STATE_INACTIVE

    def is_disconnected(self):
        return self.state == self.STATE_DISCONNECTED

    def is_connecting(self):
        return self.state == self.STATE_CONNECTING
|
|
|
|
|
|
|
|
#################################
|
|
|
|
# Libvirt object lookup methods #
|
|
|
|
#################################
|
2006-06-15 02:36:26 +08:00
|
|
|
|
2009-11-26 06:07:12 +08:00
|
|
|
    def _build_libvirt_netdev_list(self):
        """
        Build {name: vmmNetDevice} for physical NICs by merging two
        libvirt data sources: the interface API (bridge topology) and the
        nodedev API (authoritative MAC addresses from hardware).
        """
        bridges = []
        netdev_list = {}

        def interface_to_netdev(interface):
            name = interface.get_name()
            mac = interface.get_mac()
            is_bridge = interface.is_bridge()
            slave_names = interface.get_slave_names()

            if is_bridge and slave_names:
                # Remember the bridge so its slaves can be tagged below.
                bridges.append((name, slave_names))
            else:
                netdev_list[name] = vmmNetDevice(name, mac, is_bridge, None)

        def nodedev_to_netdev(nodedev):
            name = nodedev.interface
            mac = nodedev.address

            if name not in netdev_list.keys():
                netdev_list[name] = vmmNetDevice(name, mac, False, None)
            else:
                # Believe this info over libvirt interface APIs, since
                # this comes from the hardware
                if mac:
                    netdev_list[name].mac = mac

        for name, iface in self.interfaces.items():
            interface_to_netdev(iface)

        for nodedev in self.get_nodedevs("net"):
            nodedev_to_netdev(nodedev)

        # Mark NetDevices as bridged where appropriate
        for bridge_name, slave_names in bridges:
            for name, netdev in netdev_list.items():
                if name not in slave_names:
                    continue

                # XXX: Can a physical device be in two bridges?
                netdev.bridge = bridge_name
                netdev.shared = True
                break

        # XXX: How to handle added/removed signals to clients?
        return netdev_list
|
|
|
|
|
2006-06-15 04:20:06 +08:00
|
|
|
    # Direct lookups into the cached object maps; each raises KeyError
    # when the key is unknown.
    def get_vm(self, uuid):
        return self.vms[uuid]
    def get_net(self, uuid):
        return self.nets[uuid]
    def get_net_device(self, path):
        return self.netdevs[path]
    def get_pool(self, uuid):
        return self.pools[uuid]
    def get_interface(self, name):
        return self.interfaces[name]
    def get_nodedev(self, name):
        return self.nodedevs[name]
|
2011-04-08 06:08:29 +08:00
|
|
|
def get_nodedevs(self, devtype=None, devcap=None):
|
2009-03-10 04:20:23 +08:00
|
|
|
retdevs = []
|
2011-04-10 07:32:30 +08:00
|
|
|
for dev in self.nodedevs.values():
|
|
|
|
vdev = dev.get_virtinst_obj()
|
2009-11-26 03:50:27 +08:00
|
|
|
if devtype and vdev.device_type != devtype:
|
2009-03-10 04:20:23 +08:00
|
|
|
continue
|
|
|
|
|
2009-11-26 03:50:27 +08:00
|
|
|
if devcap:
|
|
|
|
if (not hasattr(vdev, "capability_type") or
|
|
|
|
vdev.capability_type != devcap):
|
|
|
|
continue
|
|
|
|
|
2009-03-10 04:20:23 +08:00
|
|
|
retdevs.append(vdev)
|
|
|
|
|
|
|
|
return retdevs
|
|
|
|
|
2013-04-30 22:53:10 +08:00
|
|
|
def get_nodedevs_number(self, devtype, vendor, product):
|
|
|
|
count = 0
|
|
|
|
devs = self.get_nodedevs(devtype)
|
|
|
|
|
|
|
|
for dev in devs:
|
|
|
|
if vendor == dev.vendor_id and \
|
|
|
|
product == dev.product_id:
|
|
|
|
count += 1
|
|
|
|
|
|
|
|
logging.debug("There are %d node devices with "
|
|
|
|
"vendorId: %s, productId: %s",
|
|
|
|
count, vendor, product)
|
|
|
|
|
|
|
|
return count
|
|
|
|
|
2009-11-26 05:13:46 +08:00
|
|
|
def get_net_by_name(self, name):
|
|
|
|
for net in self.nets.values():
|
|
|
|
if net.get_name() == name:
|
|
|
|
return net
|
2009-03-09 03:31:15 +08:00
|
|
|
|
2009-03-10 04:16:45 +08:00
|
|
|
def get_pool_by_path(self, path):
|
|
|
|
for pool in self.pools.values():
|
|
|
|
if pool.get_target_path() == path:
|
|
|
|
return pool
|
|
|
|
return None
|
|
|
|
|
2009-03-10 04:17:09 +08:00
|
|
|
def get_pool_by_name(self, name):
|
|
|
|
for p in self.pools.values():
|
|
|
|
if p.get_name() == name:
|
|
|
|
return p
|
|
|
|
return None
|
|
|
|
|
|
|
|
def get_vol_by_path(self, path):
|
|
|
|
for pool in self.pools.values():
|
|
|
|
for vol in pool.get_volumes().values():
|
|
|
|
if vol.get_path() == path:
|
|
|
|
return vol
|
|
|
|
return None
|
|
|
|
|
2009-11-26 05:13:46 +08:00
|
|
|
    # Key listings over the cached object maps.
    def list_vm_uuids(self):
        return self.vms.keys()
    def list_net_uuids(self):
        return self.nets.keys()
    def list_net_device_paths(self):
        # Update netdev list: with libvirt enumeration the cache is
        # rebuilt on every call rather than event-driven.
        if self.netdev_use_libvirt:
            self.netdevs = self._build_libvirt_netdev_list()
        return self.netdevs.keys()
    def list_pool_uuids(self):
        return self.pools.keys()
    def list_interface_names(self):
        return self.interfaces.keys()
|
|
|
|
|
|
|
|
|
|
|
|
###################################
|
|
|
|
# Libvirt object creation methods #
|
|
|
|
###################################
|
|
|
|
|
|
|
|
    def create_network(self, xml, start=True, autostart=True):
        """
        Define a virtual network from XML, optionally starting it and
        setting autostart.  On any failure after the define, the network
        is undefined again and the original exception re-raised.
        """
        # Define network
        net = self._backend.networkDefineXML(xml)

        try:
            if start:
                net.create()
            net.setAutostart(autostart)
        except:
            # Roll back the define so we don't leave a half-configured
            # network behind, then propagate the original error.
            net.undefine()
            raise

        return net
|
2009-11-26 05:13:46 +08:00
|
|
|
|
2011-03-24 04:56:12 +08:00
|
|
|
def rename_vm(self, domainobj, origxml, newxml):
    """Rename a domain by undefining it and redefining it under new XML.

    @domainobj: domain wrapper being renamed (must provide delete() and
        change_name_backend())
    @origxml: the domain's current XML, used to roll back on failure
    @newxml: XML carrying the new name

    If defining the new XML fails, the original XML is redefined; if
    that rollback also fails a RuntimeError describing both errors is
    raised, otherwise the original rename error is re-raised.
    """
    # Undefine old domain
    domainobj.delete()

    newobj = None
    try:
        try:
            # Redefine new domain
            newobj = self.define_domain(newxml)
        except Exception, renameerr:
            try:
                logging.exception("Error defining new name XML")
                # Attempt rollback: redefine with the original XML
                newobj = self.define_domain(origxml)
            except Exception, fixerr:
                logging.exception("Failed to redefine original domain!")
                raise RuntimeError(
                    _("Domain rename failed. Attempting to recover also "
                      "failed.\n\n"
                      "Original error: %s\n\n"
                      "Recover error: %s" %
                      (str(renameerr), str(fixerr))))
            # Rollback worked: surface the original rename error
            raise
    finally:
        if newobj:
            # Reinsert handle into new domain
            domainobj.change_name_backend(newobj)
|
|
|
|
|
2009-11-26 05:13:46 +08:00
|
|
|
def define_domain(self, xml):
    """Persistently define a domain from *xml*; returns the raw libvirt
    domain handle."""
    return self._backend.defineXML(xml)


def define_interface(self, xml):
    """Persistently define a host interface from *xml* (flags fixed
    to 0). Returns nothing."""
    self._backend.interfaceDefineXML(xml, 0)
|
2009-11-26 05:13:46 +08:00
|
|
|
|
|
|
|
def restore(self, frm):
    """Restore a domain from the save-image file *frm*, then try to
    delete the file.

    Deletion is best-effort: any failure is only logged, since the
    restore itself already succeeded.
    """
    self._backend.restore(frm)
    try:
        # FIXME: This isn't correct in the remote case. Why do we even
        # do this? Seems like we should provide an option for this
        # to the user.
        os.remove(frm)
    except:
        logging.debug("Couldn't remove save file '%s' used for restore",
                      frm)
|
|
|
|
|
|
|
|
####################
|
|
|
|
# Update listeners #
|
|
|
|
####################
|
|
|
|
|
2009-12-11 09:04:26 +08:00
|
|
|
# Generic media device helpers
|
|
|
|
def _remove_mediadev(self, key):
|
2011-04-12 04:19:47 +08:00
|
|
|
self.mediadevs[key].cleanup()
|
2009-12-11 09:04:26 +08:00
|
|
|
del(self.mediadevs[key])
|
|
|
|
self.emit("mediadev-removed", key)
|
|
|
|
def _add_mediadev(self, key, dev):
|
|
|
|
self.mediadevs[key] = dev
|
|
|
|
self.emit("mediadev-added", dev)
|
2009-12-01 05:16:43 +08:00
|
|
|
|
2009-11-26 05:13:46 +08:00
|
|
|
def _netdev_added(self, ignore, netdev):
|
|
|
|
name = netdev.get_name()
|
2010-12-11 00:47:07 +08:00
|
|
|
if name in self.netdevs:
|
2009-11-30 23:12:45 +08:00
|
|
|
return
|
|
|
|
|
2009-11-26 05:13:46 +08:00
|
|
|
self.netdevs[name] = netdev
|
|
|
|
|
2009-12-11 09:04:26 +08:00
|
|
|
# Optical HAL listener
|
2009-12-01 00:56:41 +08:00
|
|
|
def _optical_added(self, ignore, dev):
|
|
|
|
key = dev.get_key()
|
2010-12-11 00:47:07 +08:00
|
|
|
if key in self.mediadevs:
|
2009-12-01 00:56:41 +08:00
|
|
|
return
|
2009-11-26 05:13:46 +08:00
|
|
|
|
2009-12-11 09:04:26 +08:00
|
|
|
self._add_mediadev(key, dev)
|
2009-12-01 05:16:43 +08:00
|
|
|
|
2011-07-23 01:54:40 +08:00
|
|
|
def _nodedev_mediadev_added(self, ignore1, name):
|
2010-12-11 00:47:07 +08:00
|
|
|
if name in self.mediadevs:
|
2009-12-01 05:16:43 +08:00
|
|
|
return
|
|
|
|
|
|
|
|
vobj = self.get_nodedev(name)
|
2011-04-10 09:56:05 +08:00
|
|
|
mediadev = vmmMediaDevice.mediadev_from_nodedev(vobj)
|
2009-12-01 05:16:43 +08:00
|
|
|
if not mediadev:
|
|
|
|
return
|
|
|
|
|
2009-12-11 09:04:26 +08:00
|
|
|
self._add_mediadev(name, mediadev)
|
2009-12-01 05:16:43 +08:00
|
|
|
|
2011-07-23 01:54:40 +08:00
|
|
|
def _nodedev_mediadev_removed(self, ignore1, name):
|
2010-12-11 00:47:07 +08:00
|
|
|
if name not in self.mediadevs:
|
2009-12-01 05:16:43 +08:00
|
|
|
return
|
|
|
|
|
2009-12-11 09:04:26 +08:00
|
|
|
self._remove_mediadev(name)
|
2009-11-26 05:13:46 +08:00
|
|
|
|
|
|
|
######################################
|
|
|
|
# Connection closing/opening methods #
|
|
|
|
######################################
|
|
|
|
|
|
|
|
def get_autoconnect(self):
    """Whether this connection should be opened automatically at app
    startup, as stored in the config backend."""
    uri = self.get_uri()
    return self.config.get_conn_autoconnect(uri)


def set_autoconnect(self, val):
    """Persist the autoconnect preference for this connection's URI."""
    uri = self.get_uri()
    self.config.set_conn_autoconnect(uri, val)
|
2009-11-26 05:13:46 +08:00
|
|
|
|
|
|
|
def close(self):
    """Close the libvirt handle, clean up and drop every cached object
    (nodedevs, netdevs, mediadevs, interfaces, pools, nets, vms), clear
    the stats history and flip to STATE_DISCONNECTED."""
    self._backend.close()
    self.record = []

    # Same teardown order as before: node devices first, VMs last
    for attrname in ["nodedevs", "netdevs", "mediadevs",
                     "interfaces", "pools", "nets", "vms"]:
        for obj in getattr(self, attrname).values():
            obj.cleanup()
        setattr(self, attrname, {})

    self._change_state(self.STATE_DISCONNECTED)
|
|
|
|
|
2011-07-24 09:16:54 +08:00
|
|
|
def _cleanup(self):
|
2011-04-13 21:27:02 +08:00
|
|
|
self.close()
|
2011-07-24 03:01:30 +08:00
|
|
|
self.connectError = None
|
2011-04-13 21:27:02 +08:00
|
|
|
|
2010-11-30 07:00:35 +08:00
|
|
|
def open(self, sync=False):
    """Open the libvirt connection.

    @sync: when True, connect in the calling thread; otherwise spawn a
        background daemon thread so the UI is not blocked.

    No-op unless we are currently disconnected. The result (success or
    stored error) is reported asynchronously from the open thread.
    """
    if self.state != self.STATE_DISCONNECTED:
        return

    self.connectError = None
    self._change_state(self.STATE_CONNECTING)

    if sync:
        logging.debug("Opening connection synchronously: %s",
                      self.get_uri())
        self._open_thread()
    else:
        logging.debug("Scheduling background open thread for " +
                      self.get_uri())
        self.connectThread = threading.Thread(target=self._open_thread,
                                              name="Connect %s" % self.get_uri())
        # Daemon thread: a hung connect must not block app exit
        self.connectThread.setDaemon(True)
        self.connectThread.start()
|
2007-09-11 08:56:01 +08:00
|
|
|
|
2013-07-05 20:59:58 +08:00
|
|
|
def _do_creds_password(self, creds):
|
2008-02-01 00:39:10 +08:00
|
|
|
try:
|
2012-02-10 20:54:32 +08:00
|
|
|
return connectauth.creds_dialog(creds)
|
2009-04-04 02:58:51 +08:00
|
|
|
except Exception, e:
|
2008-02-01 00:39:10 +08:00
|
|
|
# Detailed error message, in English so it can be Googled.
|
2011-05-04 01:11:33 +08:00
|
|
|
self.connectError = (
|
|
|
|
"Failed to get credentials for '%s':\n%s\n%s" %
|
|
|
|
(self.get_uri(), str(e), "".join(traceback.format_exc())))
|
2008-02-01 00:39:10 +08:00
|
|
|
return -1
|
2007-09-11 08:56:01 +08:00
|
|
|
|
2009-01-27 00:10:39 +08:00
|
|
|
def _open_thread(self):
    """Worker that actually opens the libvirt connection.

    Runs a retry loop: on a GSSAPI "no credentials cache" failure it
    tries to acquire a Kerberos TGT and reconnects; every other outcome
    (success, cancelled auth, or error) exits the loop. The result is
    handed to the UI thread via idle_add(_open_notify).
    """
    logging.debug("Background 'open connection' thread is running")

    while True:
        libexc = None
        exc = None
        tb = None
        warnconsole = False
        try:
            self._backend.open(self._do_creds_password)
        except libvirt.libvirtError, libexc:
            tb = "".join(traceback.format_exc())
        except Exception, exc:
            tb = "".join(traceback.format_exc())

        # Prefer the libvirt error object when we have one: it carries
        # error codes we inspect below
        if libexc:
            exc = libexc

        if not exc:
            self.state = self.STATE_ACTIVE
            break

        self.state = self.STATE_DISCONNECTED

        # User dismissed the auth dialog: not an error worth reporting.
        # getattr() guards against older libvirt lacking the constant.
        if (libexc and
            (libexc.get_error_code() ==
             getattr(libvirt, "VIR_ERR_AUTH_CANCELLED", None))):
            logging.debug("User cancelled auth, not raising any error.")
            break

        # Local policykit auth failure: flag the UI to warn about a
        # missing console session
        if (libexc and
            libexc.get_error_code() == libvirt.VIR_ERR_AUTH_FAILED and
            "not authorized" in libexc.get_error_message().lower()):
            logging.debug("Looks like we might have failed policykit "
                          "auth. Checking to see if we have a valid "
                          "console session")
            if (not self.is_remote() and
                not connectauth.do_we_have_session()):
                warnconsole = True

        # Kerberos: no ticket cache. Try to acquire a TGT and retry.
        if (libexc and
            libexc.get_error_code() == libvirt.VIR_ERR_AUTH_FAILED and
            "GSSAPI Error" in libexc.get_error_message() and
            "No credentials cache found" in libexc.get_error_message()):
            if connectauth.acquire_tgt():
                continue

        self.connectError = (str(exc), tb, warnconsole)
        break

    # We want to kill off this thread asap, so schedule an
    # idle event to inform the UI of result
    logging.debug("Background open thread complete, scheduling notify")
    self.idle_add(self._open_notify)
    self.connectThread = None
|
|
|
|
|
|
|
|
def _open_notify(self):
    """Idle callback run after _open_thread completes: emit the state
    change, start polling on success, or forward any stored connect
    error tuple to the UI."""
    logging.debug("Notifying open result")

    self.idle_emit("state-changed")

    if self.state == self.STATE_ACTIVE:
        logging.debug("%s capabilities:\n%s",
                      self.get_uri(), self.caps.xml)
        # Prime all object caches with a first poll
        self.tick()

    if self.state == self.STATE_DISCONNECTED:
        if self.connectError:
            self.idle_emit("connect-error", *self.connectError)
        self.connectError = None
|
2007-09-11 08:56:01 +08:00
|
|
|
|
2006-10-11 05:24:59 +08:00
|
|
|
|
2009-11-26 05:13:46 +08:00
|
|
|
#######################
|
|
|
|
# Tick/Update methods #
|
|
|
|
#######################
|
2006-06-14 22:59:40 +08:00
|
|
|
|
2011-04-10 09:51:50 +08:00
|
|
|
def _poll_helper(self,
                 origlist, typename, check_support,
                 active_list, inactive_list,
                 lookup_func, build_func):
    """
    Helper routine for old style split API libvirt polling.
    @origlist: Pre-existing mapping of objects, with key->obj mapping
        objects must have an is_active and set_active API
    @typename: string describing type of objects we are polling for use
        in debug messages.
    @active_list: Function that returns the list of active objects
    @inactive_list: Function that returns the list of inactive objects
    @lookup_func: Function to get an object handle for the passed name
    @build_func: Function that builds a new object class. It is passed
        args of (raw libvirt object, key (usually UUID), bool is_active)

    Returns a tuple (stop, start, origlist, new, current):
        stop/start: keys whose active state flipped this tick
        origlist: leftover entries = objects that disappeared
        new: keys first seen this tick
        current: the full refreshed key->obj mapping

    NOTE: origlist is mutated in place — every still-present key is
    deleted from it, so what remains after the loops is the removed set.
    """
    current = {}
    start = []
    stop = []
    new = []
    newActiveNames = []
    newInactiveNames = []

    # Without support, report everything unchanged/empty
    if not check_support():
        return (stop, start, origlist, new, current)

    # List failures are logged but tolerated: we just see fewer objects
    try:
        newActiveNames = active_list()
    except Exception, e:
        logging.debug("Unable to list active %ss: %s", typename, e)
    try:
        newInactiveNames = inactive_list()
    except Exception, e:
        logging.debug("Unable to list inactive %ss: %s", typename, e)

    def check_obj(key, is_active):
        if key not in origlist:
            try:
                obj = lookup_func(key)
            except Exception, e:
                logging.debug("Could not fetch %s '%s': %s",
                              typename, key, e)
                return

            # Object is brand new this tick period
            current[key] = build_func(obj, key, is_active)
            new.append(key)

            if is_active:
                start.append(key)
        else:
            # Previously known object, see if it changed state
            current[key] = origlist[key]

            if current[key].is_active() != is_active:
                current[key].set_active(is_active)

                if is_active:
                    start.append(key)
                else:
                    stop.append(key)

            del origlist[key]

    for name in newActiveNames:
        try:
            check_obj(name, True)
        except:
            logging.exception("Couldn't fetch active "
                              "%s '%s'", typename, name)

    for name in newInactiveNames:
        try:
            check_obj(name, False)
        except:
            logging.exception("Couldn't fetch inactive "
                              "%s '%s'", typename, name)

    return (stop, start, origlist, new, current)
|
2009-11-26 03:50:27 +08:00
|
|
|
|
2011-04-10 09:51:50 +08:00
|
|
|
def _update_nets(self):
|
2011-10-14 04:59:15 +08:00
|
|
|
orig = self.nets.copy()
|
2011-04-10 09:51:50 +08:00
|
|
|
name = "network"
|
2013-07-05 20:59:58 +08:00
|
|
|
active_list = self._backend.listNetworks
|
|
|
|
inactive_list = self._backend.listDefinedNetworks
|
2011-04-10 09:51:50 +08:00
|
|
|
check_support = self.is_network_capable
|
2013-07-05 20:59:58 +08:00
|
|
|
lookup_func = self._backend.networkLookupByName
|
2013-07-07 05:49:42 +08:00
|
|
|
build_func = (lambda obj, key, is_active:
|
|
|
|
vmmNetwork(self, obj, key, is_active))
|
2011-04-10 09:51:50 +08:00
|
|
|
|
|
|
|
return self._poll_helper(orig, name, check_support,
|
|
|
|
active_list, inactive_list,
|
2013-07-07 05:49:42 +08:00
|
|
|
lookup_func, build_func)
|
2009-11-26 03:50:27 +08:00
|
|
|
|
2011-04-10 09:51:50 +08:00
|
|
|
def _update_pools(self):
|
2011-10-14 04:59:15 +08:00
|
|
|
orig = self.pools.copy()
|
2011-04-10 09:51:50 +08:00
|
|
|
name = "pool"
|
2013-07-05 20:59:58 +08:00
|
|
|
active_list = self._backend.listStoragePools
|
|
|
|
inactive_list = self._backend.listDefinedStoragePools
|
2011-04-10 09:51:50 +08:00
|
|
|
check_support = self.is_storage_capable
|
2013-07-05 20:59:58 +08:00
|
|
|
lookup_func = self._backend.storagePoolLookupByName
|
2013-07-07 05:49:42 +08:00
|
|
|
build_func = (lambda obj, key, is_active:
|
|
|
|
vmmStoragePool(self, obj, key, is_active))
|
2011-04-10 09:51:50 +08:00
|
|
|
|
|
|
|
return self._poll_helper(orig, name, check_support,
|
|
|
|
active_list, inactive_list,
|
2013-07-07 05:49:42 +08:00
|
|
|
lookup_func, build_func)
|
2009-11-26 03:50:27 +08:00
|
|
|
|
2011-04-10 09:51:50 +08:00
|
|
|
def _update_interfaces(self):
|
2011-10-14 04:59:15 +08:00
|
|
|
orig = self.interfaces.copy()
|
2011-04-10 09:51:50 +08:00
|
|
|
name = "interface"
|
2013-07-05 20:59:58 +08:00
|
|
|
active_list = self._backend.listInterfaces
|
|
|
|
inactive_list = self._backend.listDefinedInterfaces
|
2011-04-10 09:51:50 +08:00
|
|
|
check_support = self.is_interface_capable
|
2013-07-05 20:59:58 +08:00
|
|
|
lookup_func = self._backend.interfaceLookupByName
|
2013-07-07 05:49:42 +08:00
|
|
|
build_func = (lambda obj, key, is_active:
|
|
|
|
vmmInterface(self, obj, key, is_active))
|
2009-11-26 03:50:27 +08:00
|
|
|
|
2011-04-10 09:51:50 +08:00
|
|
|
return self._poll_helper(orig, name, check_support,
|
|
|
|
active_list, inactive_list,
|
2013-07-07 05:49:42 +08:00
|
|
|
lookup_func, build_func)
|
2009-11-26 03:50:27 +08:00
|
|
|
|
|
|
|
|
2011-04-10 09:51:50 +08:00
|
|
|
def _update_nodedevs(self):
|
2011-10-14 04:59:15 +08:00
|
|
|
orig = self.nodedevs.copy()
|
2011-04-10 09:51:50 +08:00
|
|
|
name = "nodedev"
|
2013-07-05 20:59:58 +08:00
|
|
|
active_list = lambda: self._backend.listDevices(None, 0)
|
2011-04-10 09:51:50 +08:00
|
|
|
inactive_list = lambda: []
|
|
|
|
check_support = self.is_nodedev_capable
|
2013-07-05 20:59:58 +08:00
|
|
|
lookup_func = self._backend.nodeDeviceLookupByName
|
2013-07-07 05:49:42 +08:00
|
|
|
build_func = lambda obj, key, ignore: vmmNodeDevice(self, obj, key)
|
2011-04-10 09:51:50 +08:00
|
|
|
|
|
|
|
return self._poll_helper(orig, name, check_support,
|
|
|
|
active_list, inactive_list,
|
2013-07-07 05:49:42 +08:00
|
|
|
lookup_func, build_func)
|
2009-11-20 06:35:27 +08:00
|
|
|
|
2008-07-10 05:00:49 +08:00
|
|
|
def _update_vms(self):
    """Poll libvirt for domain changes.

    Returns (new, origlist, current):
        new: UUIDs first seen this tick
        origlist: leftover previous entries = domains that disappeared
        current: refreshed uuid->vm mapping

    Unlike _poll_helper-based polling, active domains are listed by
    numeric ID and inactive ones by name, so the two sets are resolved
    to UUIDs separately.
    """
    # We can't easily use _poll_helper here because the domain API
    # doesn't always return names like other objects, it returns
    # IDs for active VMs

    newActiveIDs = []
    newInactiveNames = []
    oldActiveIDs = {}
    oldInactiveNames = {}

    origlist = self.vms.copy()
    current = {}
    new = []

    # Build list of previous vms with proper id/name mappings
    for uuid in origlist:
        vm = origlist[uuid]
        if vm.is_active():
            oldActiveIDs[vm.get_id()] = vm
        else:
            oldInactiveNames[vm.get_name()] = vm

    try:
        newActiveIDs = self._backend.listDomainsID()
    except Exception, e:
        logging.debug("Unable to list active domains: %s", e)

    try:
        newInactiveNames = self._backend.listDefinedDomains()
    except Exception, e:
        logging.exception("Unable to list inactive domains: %s", e)

    def add_vm(vm):
        # Carry a known vm object into the new mapping
        uuid = vm.get_uuid()

        current[uuid] = vm
        del(origlist[uuid])

    def check_new(rawvm, uuid):
        # Reuse an existing wrapper if we know the UUID (state change),
        # else wrap the raw libvirt domain as brand new
        if uuid in origlist:
            vm = origlist[uuid]
            del(origlist[uuid])
        else:
            vm = vmmDomain(self, rawvm, uuid)
            new.append(uuid)

        current[uuid] = vm

    for _id in newActiveIDs:
        if _id in oldActiveIDs:
            # No change, copy across existing VM object
            vm = oldActiveIDs[_id]
            add_vm(vm)
        else:
            # Check if domain is brand new, or old one that changed state
            try:
                vm = self._backend.lookupByID(_id)
                uuid = util.uuidstr(vm.UUID())

                check_new(vm, uuid)
            except:
                logging.exception("Couldn't fetch domain id '%s'", _id)

    for name in newInactiveNames:
        if name in oldInactiveNames:
            # No change, copy across existing VM object
            vm = oldInactiveNames[name]
            add_vm(vm)
        else:
            # Check if domain is brand new, or old one that changed state
            try:
                vm = self._backend.lookupByName(name)
                uuid = util.uuidstr(vm.UUID())

                check_new(vm, uuid)
            except:
                logging.exception("Couldn't fetch domain '%s'", name)

    return (new, origlist, current)
|
2006-10-10 01:28:13 +08:00
|
|
|
|
2008-07-10 05:00:49 +08:00
|
|
|
def tick(self, noStatsUpdate=False):
    """Serialized entry point for _tick: holds the tick lock so only one
    poll/update cycle runs at a time.

    @noStatsUpdate: forwarded to _tick (skip the stats recalculation).

    Uses ``with`` rather than manual acquire()/release() in try/finally:
    the old form ran release() even when acquire() itself failed, which
    would release a lock we never held.
    """
    with self._ticklock:
        self._tick(noStatsUpdate)
|
|
|
|
|
|
|
|
def _tick(self, noStatsUpdate=False):
|
2008-07-10 05:00:49 +08:00
|
|
|
""" main update function: polls for new objects, updates stats, ..."""
|
|
|
|
if self.state != self.STATE_ACTIVE:
|
|
|
|
return
|
2006-10-10 01:28:13 +08:00
|
|
|
|
2013-07-05 20:59:58 +08:00
|
|
|
self.hostinfo = self._backend.getInfo()
|
2008-10-07 01:21:06 +08:00
|
|
|
|
2008-07-10 05:00:49 +08:00
|
|
|
# Poll for new virtual network objects
|
2011-04-12 04:19:47 +08:00
|
|
|
(startNets, stopNets, oldNets,
|
|
|
|
newNets, self.nets) = self._update_nets()
|
2006-06-14 22:59:40 +08:00
|
|
|
|
2008-08-08 05:37:16 +08:00
|
|
|
# Update pools
|
|
|
|
(stopPools, startPools, oldPools,
|
|
|
|
newPools, self.pools) = self._update_pools()
|
|
|
|
|
2009-11-20 06:35:27 +08:00
|
|
|
# Update interfaces
|
|
|
|
(stopInterfaces, startInterfaces, oldInterfaces,
|
|
|
|
newInterfaces, self.interfaces) = self._update_interfaces()
|
|
|
|
|
2009-11-26 03:50:27 +08:00
|
|
|
# Update nodedevice list
|
2011-04-10 09:51:50 +08:00
|
|
|
(ignore, ignore, oldNodedevs,
|
|
|
|
newNodedevs, self.nodedevs) = self._update_nodedevs()
|
2009-11-26 03:50:27 +08:00
|
|
|
|
2008-07-10 05:00:49 +08:00
|
|
|
# Poll for changed/new/removed VMs
|
2011-04-16 02:13:05 +08:00
|
|
|
(newVMs, oldVMs, self.vms) = self._update_vms()
|
2008-07-10 05:00:49 +08:00
|
|
|
|
2009-07-12 09:23:16 +08:00
|
|
|
def tick_send_signals():
|
|
|
|
"""
|
|
|
|
Responsible for signaling the UI for any updates. All possible UI
|
|
|
|
updates need to go here to enable threading that doesn't block the
|
|
|
|
app with long tick operations.
|
|
|
|
"""
|
2010-02-12 04:25:41 +08:00
|
|
|
# Connection closed out from under us
|
2013-07-05 20:59:58 +08:00
|
|
|
if not self._backend.is_open():
|
2010-02-12 04:25:41 +08:00
|
|
|
return
|
|
|
|
|
2010-02-09 00:44:04 +08:00
|
|
|
# Make sure device polling is setup
|
|
|
|
if not self.netdev_initialized:
|
|
|
|
self._init_netdev()
|
|
|
|
|
|
|
|
if not self.mediadev_initialized:
|
|
|
|
self._init_mediadev()
|
2009-07-12 09:23:16 +08:00
|
|
|
|
|
|
|
# Update VM states
|
|
|
|
for uuid in oldVMs:
|
2011-07-23 01:54:40 +08:00
|
|
|
self.emit("vm-removed", uuid)
|
2011-04-12 04:19:47 +08:00
|
|
|
oldVMs[uuid].cleanup()
|
2009-07-12 09:23:16 +08:00
|
|
|
for uuid in newVMs:
|
2011-07-23 01:54:40 +08:00
|
|
|
self.emit("vm-added", uuid)
|
2009-07-12 09:23:16 +08:00
|
|
|
|
|
|
|
# Update virtual network states
|
|
|
|
for uuid in oldNets:
|
2011-07-23 01:54:40 +08:00
|
|
|
self.emit("net-removed", uuid)
|
2011-04-12 04:19:47 +08:00
|
|
|
oldNets[uuid].cleanup()
|
2009-07-12 09:23:16 +08:00
|
|
|
for uuid in newNets:
|
2011-07-23 01:54:40 +08:00
|
|
|
self.emit("net-added", uuid)
|
2009-07-12 09:23:16 +08:00
|
|
|
for uuid in startNets:
|
2011-07-23 01:54:40 +08:00
|
|
|
self.emit("net-started", uuid)
|
2009-07-12 09:23:16 +08:00
|
|
|
for uuid in stopNets:
|
2011-07-23 01:54:40 +08:00
|
|
|
self.emit("net-stopped", uuid)
|
2009-07-12 09:23:16 +08:00
|
|
|
|
2009-11-20 06:35:27 +08:00
|
|
|
# Update storage pool states
|
2009-07-12 09:23:16 +08:00
|
|
|
for uuid in oldPools:
|
2011-07-23 01:54:40 +08:00
|
|
|
self.emit("pool-removed", uuid)
|
2011-04-12 04:19:47 +08:00
|
|
|
oldPools[uuid].cleanup()
|
2009-07-12 09:23:16 +08:00
|
|
|
for uuid in newPools:
|
2011-07-23 01:54:40 +08:00
|
|
|
self.emit("pool-added", uuid)
|
2009-07-12 09:23:16 +08:00
|
|
|
for uuid in startPools:
|
2011-07-23 01:54:40 +08:00
|
|
|
self.emit("pool-started", uuid)
|
2009-07-12 09:23:16 +08:00
|
|
|
for uuid in stopPools:
|
2011-07-23 01:54:40 +08:00
|
|
|
self.emit("pool-stopped", uuid)
|
2009-07-12 09:23:16 +08:00
|
|
|
|
2009-11-20 06:35:27 +08:00
|
|
|
# Update interface states
|
|
|
|
for name in oldInterfaces:
|
2011-07-23 01:54:40 +08:00
|
|
|
self.emit("interface-removed", name)
|
2011-04-13 00:05:36 +08:00
|
|
|
oldInterfaces[name].cleanup()
|
2009-11-20 06:35:27 +08:00
|
|
|
for name in newInterfaces:
|
2011-07-23 01:54:40 +08:00
|
|
|
self.emit("interface-added", name)
|
2009-11-20 06:35:27 +08:00
|
|
|
for name in startInterfaces:
|
2011-07-23 01:54:40 +08:00
|
|
|
self.emit("interface-started", name)
|
2009-11-20 06:35:27 +08:00
|
|
|
for name in stopInterfaces:
|
2011-07-23 01:54:40 +08:00
|
|
|
self.emit("interface-stopped", name)
|
2009-11-20 06:35:27 +08:00
|
|
|
|
2009-11-26 03:50:27 +08:00
|
|
|
# Update nodedev list
|
|
|
|
for name in oldNodedevs:
|
2011-07-23 01:54:40 +08:00
|
|
|
self.emit("nodedev-removed", name)
|
2011-04-13 00:05:36 +08:00
|
|
|
oldNodedevs[name].cleanup()
|
2009-11-26 03:50:27 +08:00
|
|
|
for name in newNodedevs:
|
2011-07-23 01:54:40 +08:00
|
|
|
self.emit("nodedev-added", name)
|
2009-11-20 06:35:27 +08:00
|
|
|
|
2012-02-11 03:07:51 +08:00
|
|
|
self.idle_add(tick_send_signals)
|
2008-08-08 05:37:16 +08:00
|
|
|
|
2006-10-10 01:28:13 +08:00
|
|
|
# Finally, we sample each domain
|
2011-04-29 05:11:04 +08:00
|
|
|
now = time.time()
|
2006-06-14 22:59:40 +08:00
|
|
|
|
2006-10-03 23:53:07 +08:00
|
|
|
updateVMs = self.vms
|
|
|
|
if noStatsUpdate:
|
2008-07-10 05:00:49 +08:00
|
|
|
updateVMs = newVMs
|
2006-10-03 23:53:07 +08:00
|
|
|
|
2008-07-10 05:00:49 +08:00
|
|
|
for uuid in updateVMs:
|
2009-09-17 04:02:19 +08:00
|
|
|
vm = self.vms[uuid]
|
|
|
|
try:
|
|
|
|
vm.tick(now)
|
|
|
|
except Exception, e:
|
2012-01-17 11:04:40 +08:00
|
|
|
logging.exception("Tick for VM '%s' failed", vm.get_name())
|
2012-01-30 00:26:24 +08:00
|
|
|
if (isinstance(e, libvirt.libvirtError) and
|
2012-02-01 08:07:32 +08:00
|
|
|
(getattr(e, "get_error_code")() ==
|
|
|
|
libvirt.VIR_ERR_SYSTEM_ERROR)):
|
2012-01-30 00:26:24 +08:00
|
|
|
# Try a simple getInfo call to see if conn was dropped
|
2013-07-05 20:59:58 +08:00
|
|
|
self._backend.getInfo()
|
2012-01-30 00:26:24 +08:00
|
|
|
logging.debug("vm tick raised system error but "
|
|
|
|
"connection doesn't seem to have dropped. "
|
|
|
|
"Ignoring.")
|
2007-03-28 07:52:00 +08:00
|
|
|
|
2012-02-09 03:15:15 +08:00
|
|
|
for dev in self.mediadevs.values():
|
|
|
|
dev.tick()
|
|
|
|
|
2007-03-28 07:52:00 +08:00
|
|
|
if not noStatsUpdate:
|
2011-04-11 06:26:40 +08:00
|
|
|
self._recalculate_stats(now, updateVMs)
|
2007-03-28 07:52:00 +08:00
|
|
|
|
2011-04-18 23:12:36 +08:00
|
|
|
self.idle_emit("resources-sampled")
|
2009-07-12 09:23:16 +08:00
|
|
|
|
2006-06-14 22:59:40 +08:00
|
|
|
return 1
|
|
|
|
|
2011-04-11 06:26:40 +08:00
|
|
|
def _recalculate_stats(self, now, vms):
    """Aggregate per-VM stats into a new record at the head of
    self.record.

    @now: timestamp of this sample (time.time())
    @vms: uuid->vm mapping to sample; inactive VMs are skipped

    The history list is trimmed to the configured length; CPU usage is
    derived from the cpuTime delta since the previous record (the 1e9
    factor implies cpuTime is in nanoseconds — matches libvirt CPU
    time reporting) and clamped, like memory percent, to 0..100.
    """
    if not self._backend.is_open():
        return

    # Drop history beyond the configured window
    expected = self.config.get_stats_history_length()
    current = len(self.record)
    if current > expected:
        del self.record[expected:current]

    mem = 0
    cpuTime = 0
    rdRate = 0
    wrRate = 0
    rxRate = 0
    txRate = 0
    # Fall back to 10.0 so graph scaling never divides by zero
    diskMaxRate = self.disk_io_max_rate() or 10.0
    netMaxRate = self.network_traffic_max_rate() or 10.0

    for uuid in vms:
        vm = vms[uuid]
        if not vm.is_active():
            continue

        cpuTime += vm.cpu_time()
        mem += vm.stats_memory()
        rdRate += vm.disk_read_rate()
        wrRate += vm.disk_write_rate()
        rxRate += vm.network_rx_rate()
        txRate += vm.network_tx_rate()

        netMaxRate = max(netMaxRate, vm.network_traffic_max_rate())
        diskMaxRate = max(diskMaxRate, vm.disk_io_max_rate())

    pcentHostCpu = 0
    pcentMem = mem * 100.0 / self.host_memory_size()

    # CPU percent needs a previous sample to compute a delta against
    if len(self.record) > 0:
        prevTimestamp = self.record[0]["timestamp"]
        host_cpus = self.host_active_processor_count()

        pcentHostCpu = ((cpuTime) * 100.0 /
                        ((now - prevTimestamp) *
                         1000.0 * 1000.0 * 1000.0 * host_cpus))

    pcentHostCpu = max(0.0, min(100.0, pcentHostCpu))
    pcentMem = max(0.0, min(100.0, pcentMem))

    newStats = {
        "timestamp": now,
        "memory": mem,
        "memoryPercent": pcentMem,
        "cpuTime": cpuTime,
        "cpuHostPercent": pcentHostCpu,
        "diskRdRate" : rdRate,
        "diskWrRate" : wrRate,
        "netRxRate" : rxRate,
        "netTxRate" : txRate,
        "diskMaxRate" : diskMaxRate,
        "netMaxRate" : netMaxRate,
    }

    # Newest sample lives at index 0
    self.record.insert(0, newStats)
|
|
|
|
|
2009-11-26 05:13:46 +08:00
|
|
|
|
|
|
|
########################
|
|
|
|
# Stats getter methods #
|
|
|
|
########################
|
2012-05-14 21:24:56 +08:00
|
|
|
|
2011-07-12 09:22:50 +08:00
|
|
|
def _vector_helper(self, record_name):
|
2007-03-28 07:52:00 +08:00
|
|
|
vector = []
|
|
|
|
stats = self.record
|
2010-12-11 00:47:07 +08:00
|
|
|
for i in range(self.config.get_stats_history_length() + 1):
|
2007-03-28 07:52:00 +08:00
|
|
|
if i < len(stats):
|
2011-07-12 09:22:50 +08:00
|
|
|
vector.append(stats[i][record_name] / 100.0)
|
2007-03-28 07:52:00 +08:00
|
|
|
else:
|
|
|
|
vector.append(0)
|
|
|
|
return vector
|
|
|
|
|
2013-04-12 20:26:21 +08:00
|
|
|
def stats_memory_vector(self):
    """History vector of aggregate VM memory usage percent (0..1)."""
    return self._vector_helper("memoryPercent")


def host_cpu_time_vector(self):
    """History vector of host CPU usage percent (0..1)."""
    return self._vector_helper("cpuHostPercent")
# Guest CPU history is reported identically to host CPU history
guest_cpu_time_vector = host_cpu_time_vector
|
2011-07-12 09:22:50 +08:00
|
|
|
|
|
|
|
def host_cpu_time_vector_limit(self, limit):
    """host_cpu_time_vector() truncated to at most *limit* samples."""
    return self.host_cpu_time_vector()[:limit]
guest_cpu_time_vector_limit = host_cpu_time_vector_limit
|
2013-04-12 20:26:21 +08:00
|
|
|
|
|
|
|
def disk_io_vector_limit(self, ignore):
|
2011-04-15 00:20:02 +08:00
|
|
|
return [0.0]
|
2013-04-12 20:26:21 +08:00
|
|
|
def network_traffic_vector_limit(self, ignore):
|
2011-04-15 00:20:02 +08:00
|
|
|
return [0.0]
|
|
|
|
|
|
|
|
def _get_record_helper(self, record_name):
|
|
|
|
if len(self.record) == 0:
|
|
|
|
return 0
|
|
|
|
return self.record[0][record_name]
|
|
|
|
|
|
|
|
def stats_memory(self):
    """Most recent aggregate VM memory usage sample."""
    return self._get_record_helper("memory")

def pretty_stats_memory(self):
    """Human-readable rendering of stats_memory()."""
    return util.pretty_mem(self.stats_memory())


def host_cpu_time_percentage(self):
    """Most recent host CPU usage percentage sample."""
    return self._get_record_helper("cpuHostPercent")
# Guest CPU percent is reported identically to host CPU percent
guest_cpu_time_percentage = host_cpu_time_percentage
|
2011-04-15 00:20:02 +08:00
|
|
|
|
2008-10-19 03:21:33 +08:00
|
|
|
def network_rx_rate(self):
|
2011-04-10 11:03:01 +08:00
|
|
|
return self._get_record_helper("netRxRate")
|
2008-10-19 03:21:33 +08:00
|
|
|
def network_tx_rate(self):
|
2011-04-10 11:03:01 +08:00
|
|
|
return self._get_record_helper("netTxRate")
|
2008-10-19 03:21:33 +08:00
|
|
|
def network_traffic_rate(self):
|
|
|
|
return self.network_tx_rate() + self.network_rx_rate()
|
2011-07-26 03:09:42 +08:00
|
|
|
def network_traffic_max_rate(self):
|
|
|
|
return self._get_record_helper("netMaxRate")
|
2011-04-15 00:20:02 +08:00
|
|
|
|
2008-10-19 03:21:33 +08:00
|
|
|
def disk_read_rate(self):
|
2011-04-10 11:03:01 +08:00
|
|
|
return self._get_record_helper("diskRdRate")
|
2008-10-19 03:21:33 +08:00
|
|
|
def disk_write_rate(self):
|
2011-04-10 11:03:01 +08:00
|
|
|
return self._get_record_helper("diskWrRate")
|
2008-10-19 03:21:33 +08:00
|
|
|
def disk_io_rate(self):
|
|
|
|
return self.disk_read_rate() + self.disk_write_rate()
|
2011-07-26 03:09:42 +08:00
|
|
|
def disk_io_max_rate(self):
|
|
|
|
return self._get_record_helper("diskMaxRate")
|