Add base conn object and bump minimum libvirt version to 0.6.0

This base connection object will be used to simplify the API in various
places, reduce libvirt API calls, and better share code between virtinst
and virt-manager. For now it just centralizes connection opening.

This also exposed various places where our handling of older libvirt
was busted, so raise our minimum supported libvirt version to 0.6.0, the
first version that supports threaded client requests.
Cole Robinson 2013-07-05 08:59:58 -04:00
parent 35758d0fb7
commit be2d9ddcb4
35 changed files with 374 additions and 419 deletions
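
For orientation, a rough before/after of the calling convention this commit introduces, pieced together from the hunks below. Names follow the diff; treat this as an illustrative sketch, not the exact code.

import virtinst.cli

# Before: callers opened raw handles themselves and passed libvirt.virConnect
# objects around (virt-manager code reached for self.conn.vmm directly):
#
#     import libvirt
#     conn = libvirt.open("test:///default")

# After: opening is centralized in the new virtinst.VirtualConnection wrapper,
# which cli.getConnection() constructs and opens with an auth callback.
conn = virtinst.cli.getConnection("test:///default")

# virt-manager's vmmConnection now keeps one of these as self._backend and
# hands it out via get_backend(), so UI code passes that instead of .vmm,
# e.g. (hypothetical call site):
#
#     disk = virtinst.VirtualDisk(conn=self.conn.get_backend(), path=path, size=size)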


@ -15,7 +15,6 @@
# MA 02110-1301 USA.
import unittest
import libvirt
import virtinst
import virtinst.cli
import virtinst.ImageParser
@ -32,8 +31,8 @@ qemuuri = "__virtinst_test__test:///default,caps=%s/tests/capabilities-xml/capab
class TestImageParser(unittest.TestCase):
basedir = "tests/image-xml/"
conn = libvirt.open("test:///default")
qemuconn = virtinst.cli.open_test_uri(qemuuri)
conn = utils.open_testdefault()
qemuconn = virtinst.cli.getConnection(qemuuri)
caps = virtinst.CapabilitiesParser.parse(conn.getCapabilities())
qemucaps = virtinst.CapabilitiesParser.parse(qemuconn.getCapabilities())
@ -107,7 +106,7 @@ class TestImageParser(unittest.TestCase):
utils.reset_conn()
def testImage2XML(self):
# Build libvirt XML from the image xml
# Build guest XML from the image xml
self._image2XMLhelper("image.xml", ["image-xenpv32.xml",
"image-xenfv32.xml"])
self._image2XMLhelper("image-kernel.xml", ["image-xenpv32-kernel.xml"])


@ -19,9 +19,8 @@ import unittest
import virtinst.Storage
from virtinst.Storage import StoragePool, StorageVolume
from tests import utils
import libvirt
from tests import utils
# pylint: disable=W0212
# Access to protected member, needed to unittest stuff
@ -134,7 +133,7 @@ def createVol(poolobj, volname=None, input_vol=None, clone_vol=None):
class TestStorage(unittest.TestCase):
def setUp(self):
self.conn = libvirt.open("test:///default")
self.conn = utils.open_testdefault()
def testDirPool(self):
poolobj = createPool(self.conn, StoragePool.TYPE_DIR, "pool-dir")


@ -32,7 +32,6 @@ from virtinst.OSDistro import SLDistro
from virtinst.OSDistro import UbuntuDistro
from virtinst.OSDistro import MandrivaDistro
import libvirt
import urlgrabber.progress
# pylint: disable=W0212
@ -224,7 +223,7 @@ urls = {
}
testconn = libvirt.open("test:///default")
testconn = utils.open_testdefault()
testguest = virtinst.Guest(testconn, installer=virtinst.DistroInstaller())


@ -58,6 +58,10 @@ def _make_uri(base, connver=None, libver=None):
return base
def open_testdefault():
return virtinst.cli.getConnection("test:///default")
def open_testdriver():
return virtinst.cli.getConnection(_testuri)


@ -802,7 +802,7 @@ class TestXMLConfig(unittest.TestCase):
self._compare(g, "boot-many-devices", False)
def testCpuset(self):
normaltest = libvirt.open("test:///default")
normaltest = utils.open_testdefault()
utils.set_conn(normaltest)
g = utils.get_basic_fullyvirt_guest()


@ -44,7 +44,8 @@ GObject.threads_init()
def _show_startup_error(msg, details):
if logging_setup:
logging.exception("Error starting virt-manager")
logging.debug("Error starting virt-manager: %s\n%s", msg, details,
exc_info=True)
from virtManager.error import vmmErrorDialog
err = vmmErrorDialog()
title = _("Error starting Virtual Machine Manager")
@ -243,6 +244,12 @@ def main():
cliconfig.__version__,
os.path.join(cliconfig.asset_dir, "ui"),
options.testfirstrun)
if not config.support_threading:
_show_startup_error(
_("virt-manager requires libvirt 0.6.0 or later."), "")
return
virtManager.util.running_config = config
config.default_qemu_user = cliconfig.default_qemu_user
config.rhel6_defaults = cliconfig.rhel_enable_unsupported_opts
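
A note on the version gate above: libvirt encodes its version as major * 1000000 + minor * 1000 + micro, so 0.6.0 is 6000, the value support_threading() compares against in virtinst/support.py (see the hunk at the end of this diff). A minimal sketch of that check, assuming _local_lib_ver() simply wraps libvirt.getVersion():

import libvirt

def support_threading():
    # 0.6.0 -> 0 * 1000000 + 6 * 1000 + 0 = 6000. Older libvirt serializes
    # client requests, which is why async operations previously had to
    # duplicate the connection (the dup_conn() helpers removed below).
    return bool(libvirt.getVersion() >= 6000)

if not support_threading():
    raise RuntimeError("virt-manager requires libvirt 0.6.0 or later.")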


@ -408,7 +408,7 @@ class vmmAddHardware(vmmGObjectUI):
"pci")
add_hw_option("Video", "video-display", PAGE_VIDEO,
virtinst.support.check_conn_support(
self.conn.vmm,
self.conn.get_backend(),
virtinst.support.SUPPORT_CONN_DOMAIN_VIDEO),
_("Libvirt version does not support video devices."))
add_hw_option("Watchdog", "device_pci", PAGE_WATCHDOG,
@ -416,7 +416,7 @@ class vmmAddHardware(vmmGObjectUI):
_("Not supported for this guest type."))
add_hw_option("Filesystem", Gtk.STOCK_DIRECTORY, PAGE_FILESYSTEM,
virtinst.support.check_conn_hv_support(
self.conn.vmm,
self.conn.get_backend(),
virtinst.support.SUPPORT_CONN_HV_FILESYSTEM,
self.vm.get_hv_type()),
_("Not supported for this hypervisor/libvirt "
@ -1048,7 +1048,7 @@ class vmmAddHardware(vmmGObjectUI):
return
devtype = src.get_model()[src.get_active()][0]
conn = self.conn.vmm
conn = self.conn.get_backend()
self._dev = VirtualTPMDevice.get_dev_instance(conn,
devtype)
@ -1073,7 +1073,7 @@ class vmmAddHardware(vmmGObjectUI):
chartype = self.get_char_type()
devtype = src.get_model()[src.get_active()][0]
conn = self.conn.vmm
conn = self.conn.get_backend()
self._dev = VirtualCharDevice.get_dev_instance(conn,
chartype,
@ -1167,13 +1167,8 @@ class vmmAddHardware(vmmGObjectUI):
def do_file_allocate(asyncjob, disk):
meter = asyncjob.get_meter()
# If creating disk via storage API, we need to thread
# off a new connection
if disk.vol_install:
newconn = util.dup_lib_conn(disk.conn)
disk.conn = newconn
logging.debug("Starting background file allocate process")
disk.setup_dev(self.conn.vmm, meter=meter)
disk.setup_dev(self.conn.get_backend(), meter=meter)
logging.debug("Allocation completed")
progWin = vmmAsyncJob(do_file_allocate,
@ -1190,7 +1185,7 @@ class vmmAddHardware(vmmGObjectUI):
self._dev.creating_storage()):
return self._storage_progress()
return self._dev.setup_dev(self.conn.vmm)
return self._dev.setup_dev(self.conn.get_backend())
def add_device(self):
ret = self.setup_device()
@ -1282,6 +1277,8 @@ class vmmAddHardware(vmmGObjectUI):
cache = self.get_config_disk_cache()
fmt = self.get_config_disk_format()
controller_model = None
conn = self.conn.get_backend()
if bus == "virtio-scsi":
bus = "scsi"
controller_model = "virtio-scsi"
@ -1309,11 +1306,9 @@ class vmmAddHardware(vmmGObjectUI):
ret = True
try:
do_exist = virtinst.VirtualDisk.path_exists(
self.conn.vmm, ideal)
do_exist = virtinst.VirtualDisk.path_exists(conn, ideal)
ret = virtinst.VirtualDisk.path_in_use_by(self.conn.vmm,
ideal)
ret = virtinst.VirtualDisk.path_in_use_by(conn, ideal)
except:
logging.exception("Error checking default path usage")
@ -1326,7 +1321,7 @@ class vmmAddHardware(vmmGObjectUI):
if do_use:
diskpath = ideal
disk = virtinst.VirtualDisk(conn=self.conn.vmm,
disk = virtinst.VirtualDisk(conn=conn,
path=diskpath,
size=disksize,
sparse=sparse,
@ -1369,7 +1364,7 @@ class vmmAddHardware(vmmGObjectUI):
return False
# Disk collision
if disk.is_conflict_disk(self.conn.vmm):
if disk.is_conflict_disk(conn):
res = self.err.yes_no(_('Disk "%s" is already in use by another '
'guest!' % disk.path),
_("Do you really want to use the disk?"))
@ -1383,7 +1378,7 @@ class vmmAddHardware(vmmGObjectUI):
disk.vmm_controller = None
if (controller_model == "virtio-scsi") and (bus == "scsi"):
controllers = self.vm.get_controller_devices()
controller = VirtualControllerSCSI(conn=self.conn.vmm)
controller = VirtualControllerSCSI(conn=conn)
controller.set_model(controller_model)
disk.vmm_controller = controller
for d in controllers:
@ -1423,7 +1418,7 @@ class vmmAddHardware(vmmGObjectUI):
def validate_page_input(self):
ignore, inp_type, inp_bus = self.get_config_input()
dev = virtinst.VirtualInputDevice(self.conn.vmm)
dev = virtinst.VirtualInputDevice(self.conn.get_backend())
dev.type = inp_type
dev.bus = inp_bus
@ -1436,7 +1431,7 @@ class vmmAddHardware(vmmGObjectUI):
"sdl": virtinst.VirtualGraphics.TYPE_SDL}[graphics]
self._dev = virtinst.VirtualGraphics(type=_type,
conn=self.conn.vmm)
conn=self.conn.get_backend())
try:
self._dev.port = self.get_config_graphics_port()
self._dev.tlsPort = self.get_config_graphics_tls_port()
@ -1449,7 +1444,7 @@ class vmmAddHardware(vmmGObjectUI):
def validate_page_sound(self):
smodel = self.get_config_sound_model()
try:
self._dev = virtinst.VirtualAudio(conn=self.conn.vmm,
self._dev = virtinst.VirtualAudio(conn=self.conn.get_backend(),
model=smodel)
except Exception, e:
return self.err.val_err(_("Sound device parameter error"), e)
@ -1479,7 +1474,7 @@ class vmmAddHardware(vmmGObjectUI):
try:
self._dev = virtinst.VirtualHostDevice.device_from_node(
conn=self.conn.vmm,
conn=self.conn.get_backend(),
name=nodedev_name,
nodedev=nodedev,
is_dup=is_dup)
@ -1491,7 +1486,7 @@ class vmmAddHardware(vmmGObjectUI):
modebox = self.widget("char-mode")
devbox = self.widget("char-device-type")
devtype = devbox.get_model()[devbox.get_active()][0]
conn = self.conn.vmm
conn = self.conn.get_backend()
devclass = VirtualCharDevice.get_dev_instance(conn, chartype, devtype)
@ -1533,7 +1528,7 @@ class vmmAddHardware(vmmGObjectUI):
chartype.capitalize(), e)
def validate_page_video(self):
conn = self.conn.vmm
conn = self.conn.get_backend()
model = self.get_config_video_model()
try:
@ -1543,7 +1538,7 @@ class vmmAddHardware(vmmGObjectUI):
return self.err.val_err(_("Video device parameter error"), e)
def validate_page_watchdog(self):
conn = self.conn.vmm
conn = self.conn.get_backend()
model = self.get_config_watchdog_model()
action = self.get_config_watchdog_action()
@ -1555,7 +1550,7 @@ class vmmAddHardware(vmmGObjectUI):
return self.err.val_err(_("Watchdog parameter error"), e)
def validate_page_filesystem(self):
conn = self.conn.vmm
conn = self.conn.get_backend()
source = self.widget("fs-source").get_text()
target = self.widget("fs-target").get_text()
mode = self.get_config_fs_mode()
@ -1600,7 +1595,7 @@ class vmmAddHardware(vmmGObjectUI):
return False
def validate_page_smartcard(self):
conn = self.conn.vmm
conn = self.conn.get_backend()
mode = self.get_config_smartcard_mode()
try:
@ -1609,7 +1604,7 @@ class vmmAddHardware(vmmGObjectUI):
return self.err.val_err(_("Smartcard device parameter error"), e)
def validate_page_usbredir(self):
conn = self.conn.vmm
conn = self.conn.get_backend()
stype = self.get_config_usbredir_type()
host = self.get_config_usbredir_host()
service = self.get_config_usbredir_service()
@ -1625,7 +1620,7 @@ class vmmAddHardware(vmmGObjectUI):
str(e))
def validate_page_tpm(self):
conn = self.conn.vmm
conn = self.conn.get_backend()
typ = self.get_config_tpm_type()
device_path = self.widget("tpm-device-path").get_text()


@ -29,7 +29,6 @@ from gi.repository import Gdk
from virtManager.baseclass import vmmGObjectUI
from virtManager.asyncjob import vmmAsyncJob
from virtManager.storagebrowse import vmmStorageBrowser
from virtManager import util
import virtinst
from virtinst import Cloner
@ -73,7 +72,8 @@ def can_we_clone(conn, vol, path):
elif vol:
# Managed storage
if not virtinst.Storage.is_create_vol_from_supported(conn.vmm):
if not virtinst.Storage.is_create_vol_from_supported(
conn.get_backend()):
if conn.is_remote() or not os.access(path, os.R_OK):
msg = _("Connection does not support managed storage cloning.")
else:
@ -255,7 +255,7 @@ class vmmCloneVM(vmmGObjectUI):
self.clone_design = self.build_new_clone_design()
def build_new_clone_design(self, new_name=None):
design = Cloner(self.conn.vmm)
design = Cloner(self.conn.get_backend())
design.original_guest = self.orig_vm.get_name()
if not new_name:
new_name = design.generate_clone_name()
@ -302,9 +302,9 @@ class vmmCloneVM(vmmGObjectUI):
net_type = net.type
# Generate a new MAC
obj = VirtualNetworkInterface(conn=self.conn.vmm,
obj = VirtualNetworkInterface(conn=self.conn.get_backend(),
type=VirtualNetworkInterface.TYPE_USER)
obj.setup(self.conn.vmm)
obj.setup(self.conn.get_backend())
newmac = obj.macaddr
@ -676,7 +676,7 @@ class vmmCloneVM(vmmGObjectUI):
row = self.net_list[orig]
try:
VirtualNetworkInterface(conn=self.conn.vmm,
VirtualNetworkInterface(conn=self.conn.get_backend(),
type=VirtualNetworkInterface.TYPE_USER,
macaddr=new)
row[NETWORK_INFO_NEW_MAC] = new
@ -819,20 +819,10 @@ class vmmCloneVM(vmmGObjectUI):
self.conn.tick(noStatsUpdate=True)
def _async_clone(self, asyncjob):
newconn = None
try:
self.orig_vm.set_cloning(True)
# Open a seperate connection to install on since this is async
logging.debug("Threading off connection to clone VM.")
newconn = util.dup_conn(self.conn).vmm
meter = asyncjob.get_meter()
self.clone_design.orig_connection = newconn
for d in self.clone_design.clone_disks:
d.conn = newconn
self.clone_design.setup()
self.clone_design.start_duplicate(meter)
finally:


@ -88,12 +88,10 @@ class vmmConnection(vmmGObject):
self.connectThread = None
self.connectError = None
self._ticklock = threading.Lock()
self.vmm = None
self._backend = virtinst.VirtualConnection(self._uri)
self._caps = None
self._caps_xml = None
self._is_virtinst_test_uri = virtinst.cli.is_virtinst_test_uri(
self._uri)
self.network_capable = None
self._storage_capable = None
@ -129,6 +127,7 @@ class vmmConnection(vmmGObject):
self.mediadev_error = ""
self.mediadev_use_libvirt = False
#################
# Init routines #
#################
@ -186,6 +185,8 @@ class vmmConnection(vmmGObject):
def get_uri(self):
return self._uri
def get_backend(self):
return self._backend
def invalidate_caps(self):
self._caps_xml = None
@ -193,7 +194,7 @@ class vmmConnection(vmmGObject):
def _check_caps(self):
if not (self._caps_xml or self._caps):
self._caps_xml = self.vmm.getCapabilities()
self._caps_xml = self._backend.getCapabilities()
self._caps = virtinst.CapabilitiesParser.parse(self._caps_xml)
def get_capabilities_xml(self):
@ -207,33 +208,33 @@ class vmmConnection(vmmGObject):
return self._caps
def get_max_vcpus(self, _type):
return virtinst.util.get_max_vcpus(self.vmm, _type)
return virtinst.util.get_max_vcpus(self._backend, _type)
def get_host_info(self):
return self.hostinfo
def pretty_host_memory_size(self):
if self.vmm is None:
if not self._backend.is_open():
return ""
return util.pretty_mem(self.host_memory_size())
def host_memory_size(self):
if self.vmm is None:
if not self._backend.is_open():
return 0
return self.hostinfo[1] * 1024
def host_architecture(self):
if self.vmm is None:
if not self._backend.is_open():
return ""
return self.hostinfo[0]
def host_active_processor_count(self):
if self.vmm is None:
if not self._backend.is_open():
return 0
return self.hostinfo[2]
def host_maximum_processor_count(self):
if self.vmm is None:
if not self._backend.is_open():
return 0
return (self.hostinfo[4] * self.hostinfo[5] *
self.hostinfo[6] * self.hostinfo[7])
@ -258,9 +259,9 @@ class vmmConnection(vmmGObject):
##########################
def get_qualified_hostname(self):
if virtinst.support.check_conn_support(self.vmm,
if virtinst.support.check_conn_support(self._backend,
virtinst.support.SUPPORT_CONN_GETHOSTNAME):
return self.vmm.getHostname()
return self._backend.getHostname()
uri_hostname = self.get_uri_hostname()
if self.is_remote() and uri_hostname.lower() != "localhost":
@ -299,7 +300,7 @@ class vmmConnection(vmmGObject):
return self.is_lxc() or self.is_openvz()
def is_lxc(self):
if self._is_virtinst_test_uri:
if self._backend.is_virtinst_test_uri:
self.get_uri().count(",lxc")
return uriutil.uri_split(self.get_uri())[0].startswith("lxc")
@ -308,14 +309,14 @@ class vmmConnection(vmmGObject):
return uriutil.uri_split(self.get_uri())[0].startswith("openvz")
def is_xen(self):
if self._is_virtinst_test_uri:
if self._backend.is_virtinst_test_uri:
return self.get_uri().count(",xen")
scheme = uriutil.uri_split(self.get_uri())[0]
return scheme.startswith("xen")
def is_qemu(self):
if self._is_virtinst_test_uri:
if self._backend.is_virtinst_test_uri:
return self.get_uri().count(",qemu")
scheme = uriutil.uri_split(self.get_uri())[0]
@ -465,7 +466,7 @@ class vmmConnection(vmmGObject):
def is_storage_capable(self):
if self._storage_capable is None:
self._storage_capable = virtinst.util.is_storage_capable(self.vmm)
self._storage_capable = virtinst.util.is_storage_capable(self._backend)
if self._storage_capable is False:
logging.debug("Connection doesn't seem to support storage "
"APIs. Skipping all storage polling.")
@ -481,7 +482,7 @@ class vmmConnection(vmmGObject):
def is_network_capable(self):
if self.network_capable is None:
self.network_capable = virtinst.support.check_conn_support(
self.vmm,
self._backend,
virtinst.support.SUPPORT_CONN_NETWORK)
if self.network_capable is False:
logging.debug("Connection doesn't seem to support network "
@ -492,7 +493,7 @@ class vmmConnection(vmmGObject):
def is_interface_capable(self):
if self.interface_capable is None:
self.interface_capable = virtinst.support.check_conn_support(
self.vmm,
self._backend,
virtinst.support.SUPPORT_CONN_INTERFACE)
if self.interface_capable is False:
logging.debug("Connection doesn't seem to support interface "
@ -502,7 +503,7 @@ class vmmConnection(vmmGObject):
def is_nodedev_capable(self):
if self._nodedev_capable is None:
self._nodedev_capable = virtinst.NodeDeviceParser.is_nodedev_capable(self.vmm)
self._nodedev_capable = virtinst.NodeDeviceParser.is_nodedev_capable(self._backend)
return self._nodedev_capable
def _get_flags_helper(self, obj, key, check_func):
@ -765,7 +766,7 @@ class vmmConnection(vmmGObject):
def create_network(self, xml, start=True, autostart=True):
# Define network
net = self.vmm.networkDefineXML(xml)
net = self._backend.networkDefineXML(xml)
try:
if start:
@ -805,12 +806,12 @@ class vmmConnection(vmmGObject):
domainobj.change_name_backend(newobj)
def define_domain(self, xml):
return self.vmm.defineXML(xml)
return self._backend.defineXML(xml)
def define_interface(self, xml):
self.vmm.interfaceDefineXML(xml, 0)
self._backend.interfaceDefineXML(xml, 0)
def restore(self, frm):
self.vmm.restore(frm)
self._backend.restore(frm)
try:
# FIXME: This isn't correct in the remote case. Why do we even
# do this? Seems like we should provide an option for this
@ -879,7 +880,7 @@ class vmmConnection(vmmGObject):
for dev in devs.values():
dev.cleanup()
self.vmm = None
self._backend.close()
self.record = []
cleanup(self.nodedevs)
@ -909,15 +910,6 @@ class vmmConnection(vmmGObject):
self.close()
self.connectError = None
def _open_dev_conn(self, uri):
"""
Allow using virtinsts connection hacking to fake capabilities
and other reproducible/testable behavior
"""
if not self._is_virtinst_test_uri:
return
return virtinst.cli.open_test_uri(uri)
def open(self, sync=False):
if self.state != self.STATE_DISCONNECTED:
return
@ -937,18 +929,8 @@ class vmmConnection(vmmGObject):
self.connectThread.setDaemon(True)
self.connectThread.start()
def _do_creds(self, creds, cbdata):
"""
Generic libvirt openAuth callback
"""
ignore = cbdata
def _do_creds_password(self, creds):
try:
for cred in creds:
if cred[0] == libvirt.VIR_CRED_EXTERNAL:
logging.debug("Don't know how to handle external cred %s",
cred[2])
return -1
return connectauth.creds_dialog(creds)
except Exception, e:
# Detailed error message, in English so it can be Googled.
@ -957,23 +939,6 @@ class vmmConnection(vmmGObject):
(self.get_uri(), str(e), "".join(traceback.format_exc())))
return -1
def _try_open(self):
vmm = self._open_dev_conn(self.get_uri())
if vmm:
return vmm
if virtinst.support.support_openauth():
vmm = libvirt.openAuth(self.get_uri(),
[[libvirt.VIR_CRED_AUTHNAME,
libvirt.VIR_CRED_PASSPHRASE,
libvirt.VIR_CRED_EXTERNAL],
self._do_creds, None],
flags)
else:
vmm = libvirt.open(self.get_uri())
return vmm
def _open_thread(self):
logging.debug("Background 'open connection' thread is running")
@ -983,7 +948,7 @@ class vmmConnection(vmmGObject):
tb = None
warnconsole = False
try:
self.vmm = self._try_open()
self._backend.open(self._do_creds_password)
except libvirt.libvirtError, libexc:
tb = "".join(traceback.format_exc())
except Exception, exc:
@ -1123,10 +1088,10 @@ class vmmConnection(vmmGObject):
def _update_nets(self):
orig = self.nets.copy()
name = "network"
active_list = self.vmm.listNetworks
inactive_list = self.vmm.listDefinedNetworks
active_list = self._backend.listNetworks
inactive_list = self._backend.listDefinedNetworks
check_support = self.is_network_capable
lookup_func = self.vmm.networkLookupByName
lookup_func = self._backend.networkLookupByName
build_class = vmmNetwork
return self._poll_helper(orig, name, check_support,
@ -1136,10 +1101,10 @@ class vmmConnection(vmmGObject):
def _update_pools(self):
orig = self.pools.copy()
name = "pool"
active_list = self.vmm.listStoragePools
inactive_list = self.vmm.listDefinedStoragePools
active_list = self._backend.listStoragePools
inactive_list = self._backend.listDefinedStoragePools
check_support = self.is_storage_capable
lookup_func = self.vmm.storagePoolLookupByName
lookup_func = self._backend.storagePoolLookupByName
build_class = vmmStoragePool
return self._poll_helper(orig, name, check_support,
@ -1149,10 +1114,10 @@ class vmmConnection(vmmGObject):
def _update_interfaces(self):
orig = self.interfaces.copy()
name = "interface"
active_list = self.vmm.listInterfaces
inactive_list = self.vmm.listDefinedInterfaces
active_list = self._backend.listInterfaces
inactive_list = self._backend.listDefinedInterfaces
check_support = self.is_interface_capable
lookup_func = self.vmm.interfaceLookupByName
lookup_func = self._backend.interfaceLookupByName
build_class = vmmInterface
return self._poll_helper(orig, name, check_support,
@ -1163,10 +1128,10 @@ class vmmConnection(vmmGObject):
def _update_nodedevs(self):
orig = self.nodedevs.copy()
name = "nodedev"
active_list = lambda: self.vmm.listDevices(None, 0)
active_list = lambda: self._backend.listDevices(None, 0)
inactive_list = lambda: []
check_support = self.is_nodedev_capable
lookup_func = self.vmm.nodeDeviceLookupByName
lookup_func = self._backend.nodeDeviceLookupByName
build_class = (lambda conn, obj, key, ignore:
vmmNodeDevice(conn, obj, key))
@ -1196,12 +1161,12 @@ class vmmConnection(vmmGObject):
oldInactiveNames[vm.get_name()] = vm
try:
newActiveIDs = self.vmm.listDomainsID()
newActiveIDs = self._backend.listDomainsID()
except Exception, e:
logging.debug("Unable to list active domains: %s", e)
try:
newInactiveNames = self.vmm.listDefinedDomains()
newInactiveNames = self._backend.listDefinedDomains()
except Exception, e:
logging.exception("Unable to list inactive domains: %s", e)
@ -1234,7 +1199,7 @@ class vmmConnection(vmmGObject):
else:
# Check if domain is brand new, or old one that changed state
try:
vm = self.vmm.lookupByID(_id)
vm = self._backend.lookupByID(_id)
uuid = util.uuidstr(vm.UUID())
check_new(vm, uuid)
@ -1250,7 +1215,7 @@ class vmmConnection(vmmGObject):
else:
# Check if domain is brand new, or old one that changed state
try:
vm = self.vmm.lookupByName(name)
vm = self._backend.lookupByName(name)
uuid = util.uuidstr(vm.UUID())
check_new(vm, uuid)
@ -1271,7 +1236,7 @@ class vmmConnection(vmmGObject):
if self.state != self.STATE_ACTIVE:
return
self.hostinfo = self.vmm.getInfo()
self.hostinfo = self._backend.getInfo()
# Poll for new virtual network objects
(startNets, stopNets, oldNets,
@ -1299,7 +1264,7 @@ class vmmConnection(vmmGObject):
app with long tick operations.
"""
# Connection closed out from under us
if not self.vmm:
if not self._backend.is_open():
return
# Make sure device polling is setup
@ -1375,7 +1340,7 @@ class vmmConnection(vmmGObject):
(getattr(e, "get_error_code")() ==
libvirt.VIR_ERR_SYSTEM_ERROR)):
# Try a simple getInfo call to see if conn was dropped
self.vmm.getInfo()
self._backend.getInfo()
logging.debug("vm tick raised system error but "
"connection doesn't seem to have dropped. "
"Ignoring.")
@ -1391,7 +1356,7 @@ class vmmConnection(vmmGObject):
return 1
def _recalculate_stats(self, now, vms):
if self.vmm is None:
if not self._backend.is_open():
return
expected = self.config.get_stats_history_length()


@ -462,7 +462,8 @@ class vmmCreate(vmmGObjectUI):
can_storage = (is_local or is_storage_capable)
is_pv = (self.capsguest.os_type == "xen")
is_container = self.conn.is_container()
can_remote_url = virtinst.support.check_stream_support(self.conn.vmm,
can_remote_url = virtinst.support.check_stream_support(
self.conn.get_backend(),
virtinst.support.SUPPORT_STREAM_UPLOAD)
# Install Options
@ -823,12 +824,12 @@ class vmmCreate(vmmGObjectUI):
break
(newg, newdom) = virtinst.CapabilitiesParser.guest_lookup(
conn=self.conn.vmm,
caps=self.caps,
os_type=gtype,
typ=dtype,
accelerated=True,
arch=arch)
conn=self.conn.get_backend(),
caps=self.caps,
os_type=gtype,
typ=dtype,
accelerated=True,
arch=arch)
if (self.capsguest and self.capsdomain and
(newg.arch == self.capsguest.arch and
@ -1487,7 +1488,7 @@ class vmmCreate(vmmGObjectUI):
name = self.get_config_name()
try:
g = virtinst.Guest(self.conn.vmm)
g = virtinst.Guest(self.conn.get_backend())
g.name = name
except Exception, e:
return self.err.val_err(_("Invalid System Name"), e)
@ -1647,6 +1648,7 @@ class vmmCreate(vmmGObjectUI):
def validate_storage_page(self, oldguest=None):
use_storage = self.widget("enable-storage").get_active()
instcd = self.get_config_install_page() == INSTALL_PAGE_ISO
conn = self.conn.get_backend()
# CD/ISO install and no disks implies LiveCD
if instcd:
@ -1685,11 +1687,8 @@ class vmmCreate(vmmGObjectUI):
ret = True
try:
do_exist = virtinst.VirtualDisk.path_exists(
self.conn.vmm, ideal)
ret = virtinst.VirtualDisk.path_in_use_by(self.conn.vmm,
ideal)
do_exist = virtinst.VirtualDisk.path_exists(conn, ideal)
ret = virtinst.VirtualDisk.path_in_use_by(conn, ideal)
except:
logging.exception("Error checking default path usage")
@ -1705,7 +1704,7 @@ class vmmCreate(vmmGObjectUI):
if not diskpath:
return self.err.val_err(_("A storage path must be specified."))
disk = virtinst.VirtualDisk(conn=self.conn.vmm,
disk = virtinst.VirtualDisk(conn=conn,
path=diskpath,
size=disksize,
sparse=sparse)
@ -1782,7 +1781,7 @@ class vmmCreate(vmmGObjectUI):
# Interesting methods
def build_installer(self, instclass):
installer = instclass(conn=self.conn.vmm,
installer = instclass(conn=self.conn.get_backend(),
type=self.capsdomain.hypervisor_type,
os_type=self.capsguest.os_type)
installer.arch = self.capsguest.arch
@ -1908,11 +1907,6 @@ class vmmCreate(vmmGObjectUI):
meter = asyncjob.get_meter()
logging.debug("Starting background install process")
guest.conn = util.dup_conn(self.conn).vmm
for dev in guest.get_all_devices():
dev.conn = guest.conn
guest.start_install(False, meter=meter)
logging.debug("Install completed")


@ -559,9 +559,11 @@ class vmmCreateInterface(vmmGObjectUI):
name = _("No interface selected")
if itype == Interface.Interface.INTERFACE_TYPE_BRIDGE:
name = Interface.Interface.find_free_name(self.conn.vmm, "br")
name = Interface.Interface.find_free_name(self.conn.get_backend(),
"br")
elif itype == Interface.Interface.INTERFACE_TYPE_BOND:
name = Interface.Interface.find_free_name(self.conn.vmm, "bond")
name = Interface.Interface.find_free_name(self.conn.get_backend(),
"bond")
else:
ifaces = self.get_config_selected_interfaces()
if len(ifaces) > 0:
@ -921,7 +923,7 @@ class vmmCreateInterface(vmmGObjectUI):
return self.err.val_err(_("An interface must be selected"))
try:
iobj = iclass(name, self.conn.vmm)
iobj = iclass(name, self.conn.get_backend())
iobj.start_mode = start
check_conflict = False
@ -1128,8 +1130,5 @@ class vmmCreateInterface(vmmGObjectUI):
def do_install(self, asyncjob, activate):
meter = asyncjob.get_meter()
self.interface.conn = util.dup_conn(self.conn).vmm
self.interface.install(meter, create=activate)
logging.debug("Install completed")


@ -277,9 +277,10 @@ class vmmCreatePool(vmmGObjectUI):
plist = []
try:
plist = Storage.StoragePool.pool_list_from_sources(self.conn.vmm,
name, pool_type,
host=host)
plist = Storage.StoragePool.pool_list_from_sources(
self.conn.get_backend(),
name, pool_type,
host=host)
except Exception:
logging.exception("Pool enumeration failed")
@ -454,12 +455,7 @@ class vmmCreatePool(vmmGObjectUI):
self.close()
def _async_pool_create(self, asyncjob, build):
newconn = None
# Open a seperate connection to install on since this is async
newconn = util.dup_lib_conn(self._pool.conn)
meter = asyncjob.get_meter()
self._pool.conn = newconn
logging.debug("Starting backround pool creation.")
poolobj = self._pool.install(create=True, meter=meter, build=build)
@ -501,7 +497,7 @@ class vmmCreatePool(vmmGObjectUI):
if page == PAGE_NAME:
typ = self.get_config_type()
name = self.get_config_name()
conn = self.conn.vmm
conn = self.conn.get_backend()
try:
self._pool_class = Storage.StoragePool.get_pool_class(typ)


@ -26,7 +26,6 @@ from gi.repository import Gtk
from gi.repository import Gdk
# pylint: enable=E0611
from virtManager import util
from virtManager.baseclass import vmmGObjectUI
from virtManager.asyncjob import vmmAsyncJob
@ -250,10 +249,10 @@ class vmmCreateVolume(vmmGObjectUI):
self.close()
def _async_vol_create(self, asyncjob):
newconn = util.dup_conn(self.conn).vmm
conn = self.conn.get_backend()
# Lookup different pool obj
newpool = newconn.storagePoolLookupByName(self.parent_pool.get_name())
newpool = conn.storagePoolLookupByName(self.parent_pool.get_name())
self.vol.pool = newpool
meter = asyncjob.get_meter()


@ -168,7 +168,6 @@ class vmmDeleteDialog(vmmGObjectUI):
self.close()
def _async_delete(self, asyncjob, paths):
newconn = None
storage_errors = []
details = ""
@ -177,16 +176,14 @@ class vmmDeleteDialog(vmmGObjectUI):
logging.debug("Forcing VM '%s' power off.", self.vm.get_name())
self.vm.destroy()
# Open a seperate connection to install on since this is async
logging.debug("Threading off connection to delete vol.")
newconn = util.dup_conn(self.conn).vmm
conn = self.conn.get_backend()
meter = asyncjob.get_meter()
for path in paths:
try:
logging.debug("Deleting path: %s", path)
meter.start(text=_("Deleting path '%s'") % path)
self._async_delete_path(newconn, path, meter)
self._async_delete_path(conn, path, meter)
except Exception, e:
storage_errors.append((str(e),
"".join(traceback.format_exc())))
@ -382,7 +379,7 @@ def do_we_default(conn, vm_name, vol, path, ro, shared):
info = append_str(info, _("Storage is marked as shareable."))
try:
names = virtinst.VirtualDisk.path_in_use_by(conn.vmm, path)
names = virtinst.VirtualDisk.path_in_use_by(conn.get_backend(), path)
if len(names) > 1:
namestr = ""


@ -1825,7 +1825,7 @@ class vmmDetails(vmmGObjectUI):
def generate_cpuset(self):
mem = int(self.vm.get_memory()) / 1024 / 1024
return virtinst.Guest.generate_cpuset(self.conn.vmm, mem)
return virtinst.Guest.generate_cpuset(self.conn.get_backend(), mem)
# VCPUS
def config_get_vcpus(self):
@ -1867,7 +1867,7 @@ class vmmDetails(vmmGObjectUI):
def config_cpu_copy_host(self, src_ignore):
# Update UI with output copied from host
try:
CPU = virtinst.CPU(self.vm.conn.vmm)
CPU = virtinst.CPU(self.vm.conn.get_backend())
CPU.copy_host_cpu()
self._refresh_cpu_config(CPU)
@ -2202,7 +2202,8 @@ class vmmDetails(vmmGObjectUI):
try:
new_text = new_text.strip()
vcpu_num = int(row[0])
pinlist = virtinst.Guest.cpuset_str_to_tuple(conn.vmm, new_text)
pinlist = virtinst.Guest.cpuset_str_to_tuple(
conn.get_backend(), new_text)
except Exception, e:
self.err.val_err(_("Error building pin list"), e)
return
@ -2223,7 +2224,8 @@ class vmmDetails(vmmGObjectUI):
if self.vm.vcpu_pinning() == cpuset:
return
pinlist = virtinst.Guest.cpuset_str_to_tuple(conn.vmm, cpuset)
pinlist = virtinst.Guest.cpuset_str_to_tuple(
conn.get_backend(), cpuset)
for row in vcpu_model:
vcpu_num = row[0]
self.vm.pin_vcpu(int(vcpu_num), pinlist)


@ -407,7 +407,7 @@ class vmmDomain(vmmLibvirtObject):
return self._guest
def _build_guest(self, xml):
return virtinst.Guest(self.conn.vmm,
return virtinst.Guest(self.conn.get_backend(),
parsexml=xml,
caps=self.conn.get_capabilities())
@ -1289,13 +1289,14 @@ class vmmDomain(vmmLibvirtObject):
flags |= libvirt.VIR_MIGRATE_PEER2PEER
flags |= libvirt.VIR_MIGRATE_TUNNELLED
destconn = destconn.get_backend().libvirtconn
logging.debug("Migrating: conn=%s flags=%s dname=%s uri=%s rate=%s",
destconn.vmm, flags, newname, interface, rate)
destconn, flags, newname, interface, rate)
if meter:
start_job_progress_thread(self, meter, _("Migrating domain"))
self._backend.migrate(destconn.vmm, flags, newname, interface, rate)
self._backend.migrate(destconn, flags, newname, interface, rate)
def define_cb():
newxml = self.get_xml(inactive=True)


@ -796,8 +796,10 @@ class vmmEngine(vmmGObject):
if vm.getjobinfo_supported:
_cancel_cb = (self._save_cancel, vm)
progWin = vmmAsyncJob(self._save_callback,
[vm, path],
def cb(asyncjob):
vm.save(path, meter=asyncjob.get_meter())
progWin = vmmAsyncJob(cb, [],
_("Saving Virtual Machine"),
_("Saving virtual machine memory to disk "),
src.topwin, cancel_cb=_cancel_cb)
@ -822,13 +824,6 @@ class vmmEngine(vmmGObject):
asyncjob.job_canceled = True
return
def _save_callback(self, asyncjob, vm, file_to_save):
conn = util.dup_conn(vm.conn)
newvm = conn.get_vm(vm.get_uuid())
meter = asyncjob.get_meter()
newvm.save(file_to_save, meter=meter)
def _do_restore_domain(self, src, uri):
conn = self._lookup_conn(uri)
if conn.is_remote():
@ -844,11 +839,7 @@ class vmmEngine(vmmGObject):
if not path:
return
def cb():
newconn = util.dup_conn(conn)
newconn.restore(path)
vmmAsyncJob.simple_async_noshow(cb, [], src,
vmmAsyncJob.simple_async_noshow(conn.restore, [path], src,
_("Error restoring domain"))
def _do_destroy_domain(self, src, uri, uuid):


@ -984,7 +984,8 @@ class vmmHost(vmmGObjectUI):
namestr = None
try:
if path:
names = VirtualDisk.path_in_use_by(self.conn.vmm, path)
names = VirtualDisk.path_in_use_by(self.conn.get_backend(),
path)
namestr = ", ".join(names)
if not namestr:
namestr = None


@ -531,10 +531,10 @@ class vmmMigrateDialog(vmmGObjectUI):
secure, max_downtime):
meter = asyncjob.get_meter()
srcconn = util.dup_conn(origvm.conn)
dstconn = util.dup_conn(origdconn)
srcconn = origvm.conn
dstconn = origdconn
vminst = srcconn.vmm.lookupByName(origvm.get_name())
vminst = srcconn.get_backend().lookupByName(origvm.get_name())
vm = vmmDomain(srcconn, vminst, vminst.UUID())
logging.debug("Migrating vm=%s from %s to %s", vm.get_name(),


@ -59,7 +59,7 @@ class vmmNetwork(vmmLibvirtObject):
def _XMLDesc(self, flags):
return self.net.XMLDesc(flags)
def _define(self, xml):
return self.conn.vmm.networkDefineXML(xml)
return self.conn.get_backend().networkDefineXML(xml)
def set_active(self, state):
self.active = state


@ -202,7 +202,8 @@ class LibvirtConsoleConnection(ConsoleConnection):
if not name:
raise RuntimeError(_("Cannot open a device with no alias name"))
self.stream = self.conn.vmm.newStream(libvirt.VIR_STREAM_NONBLOCK)
self.stream = self.conn.get_backend().newStream(
libvirt.VIR_STREAM_NONBLOCK)
self.vm.open_console(name, self.stream)


@ -340,7 +340,7 @@ class vmmStorageBrowser(vmmGObjectUI):
try:
if path:
names = virtinst.VirtualDisk.path_in_use_by(
self.conn.vmm, path)
self.conn.get_backend(), path)
namestr = ", ".join(names)
if not namestr:
namestr = None


@ -52,7 +52,7 @@ class vmmStoragePool(vmmLibvirtObject):
def _XMLDesc(self, flags):
return self.pool.XMLDesc(flags)
def _define(self, xml):
return self.conn.vmm.storagePoolDefineXML(xml, 0)
return self.conn.get_backend().storagePoolDefineXML(xml, 0)
def set_active(self, state):
@ -91,7 +91,7 @@ class vmmStoragePool(vmmLibvirtObject):
return self.pool.autostart()
def get_target_path(self):
return util.xpath(self.get_xml(), "/pool/target/path")
return util.xpath(self.get_xml(), "/pool/target/path") or ""
def get_allocation(self):
return long(util.xpath(self.get_xml(), "/pool/allocation"))


@ -164,7 +164,7 @@ def populate_video_combo(vm, video_dev, no_default=None):
if v.model_type == "qxl"])
video_dev_model.clear()
tmpdev = virtinst.VirtualVideoDevice(vm.conn.vmm)
tmpdev = virtinst.VirtualVideoDevice(vm.conn.get_backend())
for m in tmpdev.model_types:
if not vm.rhel6_defaults():
if m == "qxl" and not has_spice and not has_qxl:
@ -641,7 +641,8 @@ def populate_network_list(net_list, conn, show_direct_interfaces=True):
bridge_name = name
brlabel = _("(Empty bridge)")
else:
if (show_direct_interfaces and virtinst.support.check_conn_support(conn.vmm,
if (show_direct_interfaces and
virtinst.support.check_conn_support(conn.get_backend(),
virtinst.support.SUPPORT_CONN_HV_DIRECT_INTERFACE)):
sensitive = True
nettype = VirtualNetworkInterface.TYPE_DIRECT
@ -713,7 +714,7 @@ def validate_network(parent, conn, nettype, devname, macaddr, model=None):
# Make sure VirtualNetwork is running
if (nettype == VirtualNetworkInterface.TYPE_VIRTUAL and
devname not in conn.vmm.listNetworks()):
devname not in conn.get_backend().listNetworks()):
res = err_dial.yes_no(_("Virtual Network is not active."),
_("Virtual Network '%s' is not active. "
@ -724,7 +725,7 @@ def validate_network(parent, conn, nettype, devname, macaddr, model=None):
# Try to start the network
try:
virnet = conn.vmm.networkLookupByName(devname)
virnet = conn.get_backend().networkLookupByName(devname)
virnet.create()
logging.info("Started network '%s'", devname)
except Exception, e:
@ -744,7 +745,7 @@ def validate_network(parent, conn, nettype, devname, macaddr, model=None):
elif nettype == VirtualNetworkInterface.TYPE_USER:
pass
net = VirtualNetworkInterface(conn=conn.vmm,
net = VirtualNetworkInterface(conn=conn.get_backend(),
type=nettype,
bridge=bridge,
network=netname,
@ -759,7 +760,7 @@ def validate_network(parent, conn, nettype, devname, macaddr, model=None):
return err_dial.val_err(_("Error with network parameters."), e)
# Make sure there is no mac address collision
isfatal, errmsg = net.is_conflict_net(conn.vmm)
isfatal, errmsg = net.is_conflict_net(conn.get_backend())
if isfatal:
return err_dial.val_err(_("Mac address collision."), errmsg)
elif errmsg is not None:
@ -775,8 +776,8 @@ def validate_network(parent, conn, nettype, devname, macaddr, model=None):
def generate_macaddr(conn):
newmac = ""
try:
net = VirtualNetworkInterface(conn=conn.vmm)
net.setup(conn.vmm)
net = VirtualNetworkInterface(conn=conn.get_backend())
net.setup(conn.get_backend())
newmac = net.macaddr
except:
pass
@ -983,7 +984,8 @@ def check_path_search_for_qemu(parent, conn, path):
user = util.running_config.default_qemu_user
skip_paths = util.running_config.get_perms_fix_ignore()
broken_paths = VirtualDisk.check_path_search_for_user(conn.vmm, path, user)
broken_paths = VirtualDisk.check_path_search_for_user(conn.get_backend(),
path, user)
for p in broken_paths:
if p in skip_paths:
broken_paths.remove(p)
@ -1005,7 +1007,8 @@ def check_path_search_for_qemu(parent, conn, path):
return
logging.debug("Attempting to correct permission issues.")
errors = VirtualDisk.fix_path_search_for_user(conn.vmm, path, user)
errors = VirtualDisk.fix_path_search_for_user(conn.get_backend(),
path, user)
if not errors:
return


@ -23,7 +23,6 @@ import libvirt
import logging
import os.path
import virtManager
import virtinst
running_config = None
@ -46,7 +45,7 @@ def build_default_pool(vmmconn):
"""
Helper to build the 'default' storage pool
"""
conn = vmmconn.vmm
conn = vmmconn.get_backend()
path = get_default_pool_path(vmmconn)
name = get_default_pool_name(vmmconn)
@ -255,44 +254,6 @@ def browse_local(parent, dialog_name, conn, start_folder=None,
return ret
def dup_lib_conn(libconn):
conn = _dup_all_conn(None, libconn)
if isinstance(conn, virtManager.connection.vmmConnection):
return conn.vmm
return conn
def dup_conn(conn):
return _dup_all_conn(conn, None)
def _dup_all_conn(conn, libconn):
if libconn:
uri = libconn.getURI()
is_test = uri.startswith("test")
vmm = libconn
else:
is_test = conn.is_test_conn()
uri = conn.get_uri()
vmm = conn.vmm
if is_test:
# Skip duplicating a test conn, since it doesn't maintain state
# between instances
return conn or vmm
if running_config.support_threading:
# Libvirt 0.6.0 implemented client side request threading: this
# removes the need to actually duplicate the connection.
return conn or vmm
logging.debug("Duplicating connection for async operation.")
newconn = virtManager.connection.vmmConnection(uri)
newconn.open(sync=True)
return newconn
def pretty_hv(gtype, domtype):
"""
Convert XML <domain type='foo'> and <os><type>bar</type>


@ -712,8 +712,7 @@ def guest_lookup(conn, caps=None, os_type=None, arch=None, typ=None,
This function throws C{ValueError}s if any of the requested values are
not found.
@param conn: virConnect instance
@type conn: libvirt.virConnect
@param conn: libvirt connection
@param caps: Optional L{Capabilities} instance (saves a lookup)
@type caps: L{Capabilities}
@param typ: Virtualization type ('hvm', 'xen', ...)


@ -110,8 +110,6 @@ class Interface(object):
def _get_conn(self):
return self._conn
def _set_conn(self, val):
if not isinstance(val, libvirt.virConnect):
raise ValueError(_("'conn' must be a libvirt connection object."))
if not support.check_conn_support(val, support.SUPPORT_CONN_INTERFACE):
raise ValueError(_("Passed connection is not libvirt interface "
"capable"))


@ -401,7 +401,6 @@ def is_nodedev_capable(conn):
Check if the passed libvirt connection supports host device routines
@param conn: Connection to check
@type conn: libvirt.virConnect
@rtype: C{bool}
"""
@ -413,7 +412,6 @@ def is_pci_detach_capable(conn):
Check if the passed libvirt connection support pci device Detach/Reset
@param conn: Connection to check
@type conn: libvirt.virConnect
@rtype: C{bool}
"""


@ -148,8 +148,6 @@ class StorageObject(object):
def get_conn(self):
return self._conn
def set_conn(self, val):
if not isinstance(val, libvirt.virConnect):
raise ValueError(_("'conn' must be a libvirt connection object."))
if not util.is_storage_capable(val):
raise ValueError(_("Passed connection is not libvirt storage "
"capable"))
@ -987,7 +985,7 @@ class StorageVolume(StorageObject):
"""
@param name: Name for the new storage volume
@param capacity: Total size of the new volume (in bytes)
@param conn: optional virConnect instance to lookup pool_name on
@param conn: optional connection instance to lookup pool_name on
@param pool_name: optional pool_name to install on
@param pool: virStoragePool object to install on
@param allocation: amount of storage to actually allocate (default 0)


@ -76,7 +76,6 @@ class VirtualDevice(XMLBuilderDomain):
Initialize device state
@param conn: libvirt connection to validate device against
@type conn: virConnect
"""
XMLBuilderDomain.__init__(self, conn, parsexml, parsexmlnode,
caps=caps)


@ -22,7 +22,6 @@
import copy
import threading
import libvirt
import libxml2
from virtinst import CapabilitiesParser
@ -397,7 +396,7 @@ class XMLBuilderDomain(object):
Initialize state
@param conn: libvirt connection to validate device against
@type conn: virConnect
@type conn: VirtualConnection
@param parsexml: Optional XML string to parse
@type parsexml: C{str}
@param parsexmlnode: Option xpathNode to use
@ -453,8 +452,6 @@ class XMLBuilderDomain(object):
def get_conn(self):
return self._conn
def set_conn(self, val):
if not isinstance(val, libvirt.virConnect):
raise ValueError(_("'conn' must be a virConnect instance."))
self._conn = val
self._conn_uri = self._conn.getURI()
self.__remote = uriutil.is_uri_remote(self._conn_uri, conn=self._conn)


@ -49,3 +49,4 @@ from virtinst.CloneManager import Cloner
from virtinst.Clock import Clock
from virtinst.CPU import CPU, CPUFeature
from virtinst.Seclabel import Seclabel
from virtinst.connection import VirtualConnection


@ -25,10 +25,8 @@ import logging
import logging.handlers
import optparse
import os
import re
import shlex
import sys
import tempfile
import traceback
import libvirt
@ -236,142 +234,15 @@ def setupLogging(appname, debug_stdout, do_quiet, cli_app=True):
# Libvirt connection helpers #
#######################################
_virtinst_uri_magic = "__virtinst_test__"
def is_virtinst_test_uri(uri):
return uri and uri.startswith(_virtinst_uri_magic)
def open_test_uri(uri):
"""
This hack allows us to fake various drivers via passing a magic
URI string to virt-*. Helps with testing
"""
uri = uri.replace(_virtinst_uri_magic, "")
ret = uri.split(",", 1)
uri = ret[0]
opts = parse_optstr(len(ret) > 1 and ret[1] or "")
conn = open_connection(uri)
def sanitize_xml(xml):
import difflib
orig = xml
xml = re.sub("arch='.*'", "arch='i686'", xml)
xml = re.sub("domain type='.*'", "domain type='test'", xml)
xml = re.sub("machine type='.*'", "", xml)
xml = re.sub(">exe<", ">hvm<", xml)
logging.debug("virtinst test sanitizing diff\n:%s",
"\n".join(difflib.unified_diff(orig.split("\n"),
xml.split("\n"))))
return xml
# Need tmpfile names to be deterministic
if "predictable" in opts:
setattr(conn, "_virtinst__fake_conn_predictable", True)
def fakemkstemp(prefix, *args, **kwargs):
ignore = args
ignore = kwargs
filename = os.path.join(".", prefix)
return os.open(filename, os.O_RDWR | os.O_CREAT), filename
tempfile.mkstemp = fakemkstemp
# Fake remote status
if "remote" in opts:
setattr(conn, "_virtinst__fake_conn_remote", True)
# Fake capabilities
if "caps" in opts:
capsxml = file(opts["caps"]).read()
conn.getCapabilities = lambda: capsxml
if ("qemu" in opts) or ("xen" in opts) or ("lxc" in opts):
conn.getVersion = lambda: 10000000000
origcreate = conn.createLinux
origdefine = conn.defineXML
def newcreate(xml, flags):
xml = sanitize_xml(xml)
return origcreate(xml, flags)
def newdefine(xml):
xml = sanitize_xml(xml)
return origdefine(xml)
conn.createLinux = newcreate
conn.defineXML = newdefine
if "qemu" in opts:
conn.getURI = lambda: "qemu+abc:///system"
if "xen" in opts:
conn.getURI = lambda: "xen+abc:///"
if "lxc" in opts:
conn.getURI = lambda: "lxc+abc:///"
# These need to come after the HV setter, since that sets a default
# conn version
if "connver" in opts:
ver = int(opts["connver"])
def newconnversion():
return ver
conn.getVersion = newconnversion
if "libver" in opts:
ver = int(opts["libver"])
def newlibversion(drv=None):
if drv:
return (ver, ver)
return ver
libvirt.getVersion = newlibversion
setattr(conn, "_virtinst__fake_conn", True)
return conn
def getConnection(uri):
# Hack to facilitate virtinst unit testing
if is_virtinst_test_uri(uri):
return open_test_uri(uri)
logging.debug("Requesting libvirt URI %s", (uri or "default"))
conn = open_connection(uri)
conn = virtinst.VirtualConnection(uri)
conn.open(_do_creds_authname)
logging.debug("Received libvirt URI %s", conn.getURI())
return conn
def open_connection(uri):
open_flags = 0
valid_auth_options = [libvirt.VIR_CRED_AUTHNAME,
libvirt.VIR_CRED_PASSPHRASE,
libvirt.VIR_CRED_EXTERNAL]
authcb = do_creds
authcb_data = None
return libvirt.openAuth(uri, [valid_auth_options, authcb, authcb_data],
open_flags)
def do_creds(creds, cbdata):
try:
return _do_creds(creds, cbdata)
except:
logging.debug("Error in creds callback.", exc_info=True)
raise
def _do_creds(creds, cbdata_ignore):
for cred in creds:
if cred[0] == libvirt.VIR_CRED_EXTERNAL:
print_stderr("Don't know to do handle external cred %s" % cred[2])
return -1
return _do_creds_authname(creds)
# SASL username/pass auth
def _do_creds_authname(creds):
retindex = 4
@ -387,11 +258,10 @@ def _do_creds_authname(creds):
import getpass
res = getpass.getpass(prompt)
else:
print_stderr("Unknown auth type in creds callback: %d" % credtype)
return -1
raise RuntimeError("Unknown auth type in creds callback: %d" %
credtype)
cred[retindex] = res
return 0

virtinst/connection.py (new file, 193 lines)

@ -0,0 +1,193 @@
#
# Copyright 2013 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301 USA.
import logging
import os
import re
import libvirt
from virtinst.cli import parse_optstr
_virtinst_uri_magic = "__virtinst_test__"
class VirtualConnection(object):
"""
Wrapper for libvirt connection that provides various bits like
- caching static data
- lookup for API feature support
- simplified API wrappers that handle new and old ways of doing things
"""
def __init__(self, uri):
self._uri = uri
self._libvirtconn = None
self.is_virtinst_test_uri = uri and uri.startswith(_virtinst_uri_magic)
# Just proxy virConnect access for now
def __getattr__(self, attr):
if attr in self.__dict__:
return self.__dict__[attr]
libvirtconn = self.__dict__.get("_libvirtconn")
return getattr(libvirtconn, attr)
##############
# Properties #
##############
uri = property(lambda self: getattr(self, "_uri"))
libvirtconn = property(lambda self: getattr(self, "_libvirtconn"))
##############
# Public API #
##############
def close(self):
self._libvirtconn = None
def is_open(self):
return bool(self._libvirtconn)
def open(self, passwordcb):
open_flags = 0
valid_auth_options = [libvirt.VIR_CRED_AUTHNAME,
libvirt.VIR_CRED_PASSPHRASE]
authcb = self._auth_cb
authcb_data = passwordcb
testopts = []
uri = self.uri
if self.is_virtinst_test_uri:
uri = uri.replace(_virtinst_uri_magic, "")
ret = uri.split(",", 1)
uri = ret[0]
testopts = parse_optstr(len(ret) > 1 and ret[1] or "")
conn = libvirt.openAuth(uri,
[valid_auth_options, authcb,
(authcb_data, valid_auth_options)],
open_flags)
if testopts:
self._fixup_virtinst_test_uri(conn, testopts)
self._libvirtconn = conn
###################
# Private helpers #
###################
def _auth_cb(self, creds, (passwordcb, passwordcreds)):
for cred in creds:
if cred[0] not in passwordcreds:
raise RuntimeError("Unknown cred type '%s', expected only "
"%s" % (cred[0], passwordcreds))
return passwordcb(creds)
def _fixup_virtinst_test_uri(self, conn, opts):
"""
This hack allows us to fake various drivers via passing a magic
URI string to virt-*. Helps with testing
"""
def sanitize_xml(xml):
import difflib
orig = xml
xml = re.sub("arch='.*'", "arch='i686'", xml)
xml = re.sub("domain type='.*'", "domain type='test'", xml)
xml = re.sub("machine type='.*'", "", xml)
xml = re.sub(">exe<", ">hvm<", xml)
logging.debug("virtinst test sanitizing diff\n:%s",
"\n".join(difflib.unified_diff(orig.split("\n"),
xml.split("\n"))))
return xml
# Need tmpfile names to be deterministic
if "predictable" in opts:
opts.pop("predictable")
import tempfile
setattr(self, "_virtinst__fake_conn_predictable", True)
def fakemkstemp(prefix, *args, **kwargs):
ignore = args
ignore = kwargs
filename = os.path.join(".", prefix)
return os.open(filename, os.O_RDWR | os.O_CREAT), filename
tempfile.mkstemp = fakemkstemp
# Fake remote status
if "remote" in opts:
opts.pop("remote")
setattr(self, "_virtinst__fake_conn_remote", True)
# Fake capabilities
if "caps" in opts:
capsxml = file(opts.pop("caps")).read()
conn.getCapabilities = lambda: capsxml
if ("qemu" in opts) or ("xen" in opts) or ("lxc" in opts):
conn.getVersion = lambda: 10000000000
origcreate = conn.createLinux
origdefine = conn.defineXML
def newcreate(xml, flags):
xml = sanitize_xml(xml)
return origcreate(xml, flags)
def newdefine(xml):
xml = sanitize_xml(xml)
return origdefine(xml)
conn.createLinux = newcreate
conn.defineXML = newdefine
if "qemu" in opts:
opts.pop("qemu")
conn.getURI = lambda: "qemu+abc:///system"
if "xen" in opts:
opts.pop("xen")
conn.getURI = lambda: "xen+abc:///"
if "lxc" in opts:
opts.pop("lxc")
conn.getURI = lambda: "lxc+abc:///"
# These need to come after the HV setter, since that sets a default
# conn version
if "connver" in opts:
ver = int(opts.pop("connver"))
def newconnversion():
return ver
conn.getVersion = newconnversion
if "libver" in opts:
ver = int(opts.pop("libver"))
def newlibversion(drv=None):
if drv:
return (ver, ver)
return ver
libvirt.getVersion = newlibversion
if opts:
raise RuntimeError("Unhandled virtinst test uri options %s" % opts)
setattr(self, "_virtinst__fake_conn", True)


@ -428,7 +428,6 @@ def _check_support(conn, feature, data=None):
the passed connection.
@param conn: Libvirt connection to check feature on
@type conn: virConnect
@param feature: Feature type to check support for
@type feature: One of the SUPPORT_* flags
@param data: Option libvirt object to use in feature checking
@ -437,12 +436,15 @@ def _check_support(conn, feature, data=None):
@returns: True if feature is supported, False otherwise
"""
# Temporary hack to make this work
if "VirtualConnection" in repr(conn):
conn = getattr(conn, "_libvirtconn")
if "VirtualConnection" in repr(data):
data = getattr(data, "_libvirtconn")
support_info = _support_dict[feature]
key_list = support_info.keys()
if not isinstance(conn, libvirt.virConnect):
raise ValueError(_("'conn' must be a virConnect instance."))
def get_value(key):
if key in key_list:
key_list.remove(key)
@ -502,8 +504,9 @@ def _check_support(conn, feature, data=None):
if object_name:
classobj = _get_command(object_name)
if not isinstance(data, classobj):
raise ValueError("Passed obj with args must be of type " +
str(classobj))
raise ValueError(
"Passed obj %s with args must be of type %s, was %s" %
(data, str(classobj), type(data)))
cmd = _get_command(function_name, obj=data)
@ -604,10 +607,6 @@ def support_threading():
return bool(_local_lib_ver() >= 6000)
def support_openauth():
return bool(_local_lib_ver() >= 4000)
def check_conn_support(conn, feature):
return _check_support(conn, feature, conn)