#
# Copyright 2008, 2013, 2015 Red Hat, Inc.
# Cole Robinson <crobinso@redhat.com>
#
# This work is licensed under the GNU GPLv2 or later.
# See the COPYING file in the top-level directory.

import os
import logging
import threading

import libvirt

from .xmlbuilder import XMLBuilder, XMLChildProperty, XMLProperty
from . import util


_DEFAULT_DEV_TARGET = "/dev"
_DEFAULT_LVM_TARGET_BASE = "/dev/"
_DEFAULT_SCSI_TARGET = "/dev/disk/by-path"
_DEFAULT_MPATH_TARGET = "/dev/mapper"


class _StoragePermissions(XMLBuilder):
    XML_NAME = "permissions"
    _XML_PROP_ORDER = ["mode", "owner", "group", "label"]

    mode = XMLProperty("./mode")
    owner = XMLProperty("./owner")
    group = XMLProperty("./group")
    label = XMLProperty("./label")


class _StorageObject(XMLBuilder):
    """
    Base class for building any libvirt storage object.

    Not intended to be instantiated directly.
    """

    ######################
    # Validation helpers #
    ######################

    def _check_name_collision(self, name):
        raise NotImplementedError()

    def _validate_name(self, name):
        if name == self.name:
            return
        util.validate_name(_("Storage object"), name)
        self._check_name_collision(name)
        return name


    ##############
    # Properties #
    ##############

    name = XMLProperty("./name", validate_cb=_validate_name)
    permissions = XMLChildProperty(_StoragePermissions,
                                   relative_xpath="./target",
                                   is_single=True)


def _get_default_pool_path(conn):
    path = "/var/lib/libvirt/images"
    if conn.is_session_uri():
        path = os.path.expanduser("~/.local/share/libvirt/images")
    return path


class _EnumerateSource(XMLBuilder):
    XML_NAME = "source"


class _EnumerateSources(XMLBuilder):
    XML_NAME = "sources"
    sources = XMLChildProperty(_EnumerateSource)


class _Host(XMLBuilder):
    _XML_PROP_ORDER = ["name", "port"]
    XML_NAME = "host"

    name = XMLProperty("./@name")
    port = XMLProperty("./@port", is_int=True)


class StoragePool(_StorageObject):
    """
    Base class for building and installing libvirt storage pool xml
    """
    # @group Types: TYPE_*
    TYPE_DIR = "dir"
    TYPE_FS = "fs"
    TYPE_NETFS = "netfs"
    TYPE_LOGICAL = "logical"
    TYPE_DISK = "disk"
    TYPE_ISCSI = "iscsi"
    TYPE_SCSI = "scsi"
    TYPE_MPATH = "mpath"
    TYPE_GLUSTER = "gluster"
    TYPE_RBD = "rbd"
    TYPE_SHEEPDOG = "sheepdog"
    TYPE_ZFS = "zfs"

    # Pool type descriptions for use in higher level programs
    _descs = {}
    _descs[TYPE_DIR] = _("Filesystem Directory")
    _descs[TYPE_FS] = _("Pre-Formatted Block Device")
    _descs[TYPE_NETFS] = _("Network Exported Directory")
    _descs[TYPE_LOGICAL] = _("LVM Volume Group")
    _descs[TYPE_DISK] = _("Physical Disk Device")
    _descs[TYPE_ISCSI] = _("iSCSI Target")
    _descs[TYPE_SCSI] = _("SCSI Host Adapter")
    _descs[TYPE_MPATH] = _("Multipath Device Enumerator")
    _descs[TYPE_GLUSTER] = _("Gluster Filesystem")
    _descs[TYPE_RBD] = _("RADOS Block Device/Ceph")
    _descs[TYPE_SHEEPDOG] = _("Sheepdog Filesystem")
    _descs[TYPE_ZFS] = _("ZFS Pool")

    @staticmethod
    def get_pool_types():
        """
        Return list of appropriate pool types
        """
        return list(StoragePool._descs.keys())

    @staticmethod
    def get_pool_type_desc(pool_type):
        """
        Return human readable description for passed pool type
        """
        return StoragePool._descs.get(pool_type, "%s pool" % pool_type)
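
    # Usage sketch (illustrative only; kept as a comment so nothing runs at
    # import time):
    #
    #   for ptype in StoragePool.get_pool_types():
    #       print(ptype, "->", StoragePool.get_pool_type_desc(ptype))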

    @staticmethod
    def pool_list_from_sources(conn, pool_type, host=None):
        """
        Return a list of StoragePool instances built from libvirt's pool
        source enumeration (if supported).

        :param conn: Libvirt connection
        :param pool_type: Pool type string from I{Types}
        :param host: Optional host string to poll for sources
        """
        if host:
            source_xml = "<source><host name='%s'/></source>" % host
        else:
            source_xml = "<source/>"

        try:
            xml = conn.findStoragePoolSources(pool_type, source_xml, 0)
        except libvirt.libvirtError as e:
            if util.is_error_nosupport(e):
                return []
            raise

        ret = []
        sources = _EnumerateSources(conn, xml)
        for source in sources.sources:
            source_xml = source.get_xml()

            pool_xml = "<pool>\n%s\n</pool>" % (
                util.xml_indent(source_xml, 2))
            parseobj = StoragePool(conn, parsexml=pool_xml)
            parseobj.type = pool_type

            obj = StoragePool(conn)
            obj.type = pool_type
            obj.source_path = parseobj.source_path
            for h in parseobj.hosts:
                parseobj.remove_child(h)
                obj.add_child(h)
            obj.source_name = parseobj.source_name
            obj.format = parseobj.format

            ret.append(obj)
        return ret
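
    # Usage sketch (illustrative only; assumes an open virtinst connection
    # ``conn`` and a reachable NFS server name, both hypothetical here; kept
    # as a comment so nothing runs at import time):
    #
    #   pools = StoragePool.pool_list_from_sources(
    #       conn, StoragePool.TYPE_NETFS, host="nfs.example.com")
    #   for p in pools:
    #       print(p.source_path, [h.name for h in p.hosts])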

    @staticmethod
    def build_default_pool(conn):
        """
        Helper to build the 'default' storage pool
        """
        if not conn.check_support(conn.SUPPORT_CONN_STORAGE):
            return

        pool = None
        name = "default"
        path = _get_default_pool_path(conn)
        if conn.is_session_uri() and not os.path.exists(path):
            os.makedirs(path)

        try:
            pool = conn.storagePoolLookupByName(name)
        except libvirt.libvirtError:
            # Try default pool path when "default" name fails
            pool = StoragePool.lookup_pool_by_path(conn, path)

        if pool:
            # This is a libvirt pool object so create a StoragePool from it
            return StoragePool(conn, parsexml=pool.XMLDesc(0))

        try:
            logging.debug("Attempting to build default pool with target '%s'",
                          path)
            defpool = StoragePool(conn)
            defpool.type = defpool.TYPE_DIR
            defpool.name = name
            defpool.target_path = path
            defpool.install(build=True, create=True, autostart=True)
            return defpool
        except Exception as e:
            raise RuntimeError(
                _("Couldn't create default storage pool '%s': %s") %
                (path, str(e)))
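
    # Usage sketch (illustrative only; ``conn`` is a hypothetical open
    # virtinst connection; kept as a comment so nothing runs at import time):
    #
    #   defpool = StoragePool.build_default_pool(conn)
    #   if defpool:
    #       print("default pool target:", defpool.target_path)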

    @staticmethod
    def manage_path(conn, path):
        """
        If the passed path is managed, look up its storage objects.
        If the passed path isn't managed, attempt to manage it if we can.

        :returns: (vol, parent pool) tuple
        """
        from . import diskbackend
        return diskbackend.manage_path(conn, path)

    @staticmethod
    def get_default_dir(conn, build=False):
        """
        Return the default storage dir. If there's a 'default' pool,
        report that. If there's no default pool, return the dir we would
        use for the default.
        """
        path = _get_default_pool_path(conn)
        if (not conn.is_remote() and
            not conn.check_support(conn.SUPPORT_CONN_STORAGE)):
            if build and not os.path.exists(path):
                os.makedirs(path)
            return path

        try:
            for pool in conn.fetch_all_pools():
                if pool.name == "default":
                    return pool.target_path
        except Exception:
            pass

        if build:
            return StoragePool.build_default_pool(conn).target_path
        return _get_default_pool_path(conn)
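
    # Usage sketch (illustrative only; ``conn`` is a hypothetical open
    # virtinst connection; kept as a comment so nothing runs at import time):
    #
    #   default_dir = StoragePool.get_default_dir(conn)
    #   print("new volumes default to:", default_dir)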

    @staticmethod
    def lookup_pool_by_path(conn, path):
        """
        Return the first pool with a matching target path, whether active
        or inactive. This iterates over all pools and dumps their xml,
        so it is NOT quick.

        :returns: virStoragePool object if found, None otherwise
        """
        if not conn.check_support(conn.SUPPORT_CONN_STORAGE):
            return None

        for pool in conn.fetch_all_pools():
            xml_path = pool.target_path
            if xml_path is not None and os.path.abspath(xml_path) == path:
                return conn.storagePoolLookupByName(pool.name)
        return None

    @staticmethod
    def find_free_name(conn, basename, **kwargs):
        """
        Finds a name similar (or equal) to passed 'basename' that is not
        in use by another pool. Extra params are passed to generate_name
        """
        def cb(name):
            for pool in conn.fetch_all_pools():
                if pool.name == name:
                    return True
            return False

        kwargs["lib_collision"] = False
        return util.generate_name(basename, cb, **kwargs)
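
    # Usage sketch (illustrative only; ``conn`` is a hypothetical open
    # virtinst connection; kept as a comment so nothing runs at import time):
    #
    #   name = StoragePool.find_free_name(conn, "default-pool")
    #   # Returns "default-pool", or a numbered variant if that name is taken.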


    ######################
    # Validation helpers #
    ######################

    def _check_name_collision(self, name):
        pool = None
        try:
            pool = self.conn.storagePoolLookupByName(name)
        except libvirt.libvirtError:
            pass
        if pool:
            raise ValueError(_("Name '%s' already in use by another pool.") %
                             name)

    def default_target_path(self):
        if not self.supports_property("target_path"):
            return None
        if (self.type == self.TYPE_DIR or
            self.type == self.TYPE_NETFS or
            self.type == self.TYPE_FS):
            return os.path.join(self.get_default_dir(self.conn), self.name)
        if self.type == self.TYPE_LOGICAL:
            name = self.name
            if self.source_name:
                name = self.source_name
            return _DEFAULT_LVM_TARGET_BASE + name
        if self.type == self.TYPE_DISK:
            return _DEFAULT_DEV_TARGET
        if self.type == self.TYPE_ISCSI or self.type == self.TYPE_SCSI:
            return _DEFAULT_SCSI_TARGET
        if self.type == self.TYPE_MPATH:
            return _DEFAULT_MPATH_TARGET
        raise RuntimeError("No default target_path for type=%s" % self.type)

    def _type_to_source_prop(self):
        if (self.type == self.TYPE_NETFS or
            self.type == self.TYPE_GLUSTER):
            return "_source_dir"
        elif self.type == self.TYPE_SCSI:
            return "_source_adapter"
        else:
            return "_source_device"

    def _get_source(self):
        return getattr(self, self._type_to_source_prop())
    def _set_source(self, val):
        return setattr(self, self._type_to_source_prop(), val)
    source_path = property(_get_source, _set_source)

    def default_source_name(self):
        srcname = None

        if not self.supports_property("source_name"):
            srcname = None
        elif self.type == StoragePool.TYPE_NETFS:
            srcname = self.name
        elif self.type == StoragePool.TYPE_RBD:
            srcname = "rbd"
        elif self.type == StoragePool.TYPE_GLUSTER:
            srcname = "gv0"
        elif ("target_path" in self._propstore and
              self.target_path and
              self.target_path.startswith(_DEFAULT_LVM_TARGET_BASE)):
            # If there is a target path, parse it for an expected VG
            # location, and pull the name from there
            vg = self.target_path[len(_DEFAULT_LVM_TARGET_BASE):]
            srcname = vg.split("/", 1)[0]

        return srcname


    ##############
    # Properties #
    ##############

    XML_NAME = "pool"
    _XML_PROP_ORDER = ["name", "type", "uuid",
                       "capacity", "allocation", "available",
                       "format", "hosts",
                       "_source_dir", "_source_adapter", "_source_device",
                       "source_name", "target_path",
                       "permissions",
                       "auth_type", "auth_username", "auth_secret_uuid"]


    _source_dir = XMLProperty("./source/dir/@path")
    _source_adapter = XMLProperty("./source/adapter/@name")
    _source_device = XMLProperty("./source/device/@path")

    type = XMLProperty("./@type")
    uuid = XMLProperty("./uuid")

    capacity = XMLProperty("./capacity", is_int=True)
    allocation = XMLProperty("./allocation", is_int=True)
    available = XMLProperty("./available", is_int=True)

    format = XMLProperty("./source/format/@type")
    iqn = XMLProperty("./source/initiator/iqn/@name")
    source_name = XMLProperty("./source/name")

    auth_type = XMLProperty("./source/auth/@type")
    auth_username = XMLProperty("./source/auth/@username")
    auth_secret_uuid = XMLProperty("./source/auth/secret/@uuid")

    target_path = XMLProperty("./target/path")

    hosts = XMLChildProperty(_Host, relative_xpath="./source")


    ######################
    # Public API helpers #
    ######################

    def supports_property(self, propname):
        users = {
            "source_path": [self.TYPE_FS, self.TYPE_NETFS, self.TYPE_LOGICAL,
                            self.TYPE_DISK, self.TYPE_ISCSI, self.TYPE_SCSI,
                            self.TYPE_GLUSTER],
            "source_name": [self.TYPE_LOGICAL, self.TYPE_GLUSTER,
                            self.TYPE_RBD, self.TYPE_SHEEPDOG, self.TYPE_ZFS],
            "hosts": [self.TYPE_NETFS, self.TYPE_ISCSI, self.TYPE_GLUSTER,
                      self.TYPE_RBD, self.TYPE_SHEEPDOG],
            "format": [self.TYPE_FS, self.TYPE_NETFS, self.TYPE_DISK],
            "iqn": [self.TYPE_ISCSI],
            "target_path": [self.TYPE_DIR, self.TYPE_FS, self.TYPE_NETFS,
                            self.TYPE_LOGICAL, self.TYPE_DISK, self.TYPE_ISCSI,
                            self.TYPE_SCSI, self.TYPE_MPATH]
        }

        if users.get(propname):
            return self.type in users[propname]
        return hasattr(self, propname)

    def list_formats(self):
        if self.type == self.TYPE_FS:
            return ["auto", "ext2", "ext3", "ext4", "ufs", "iso9660", "udf",
                    "gfs", "gfs2", "vfat", "hfs+", "xfs"]
        if self.type == self.TYPE_NETFS:
            return ["auto", "nfs", "glusterfs"]
        if self.type == self.TYPE_DISK:
            return ["auto", "bsd", "dos", "dvh", "gpt", "mac", "pc98", "sun"]
        return []

    def supports_volume_creation(self):
        return self.type in [
            StoragePool.TYPE_DIR, StoragePool.TYPE_FS,
            StoragePool.TYPE_NETFS, StoragePool.TYPE_LOGICAL,
            StoragePool.TYPE_DISK,
            StoragePool.TYPE_RBD, StoragePool.TYPE_SHEEPDOG,
            StoragePool.TYPE_ZFS]
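
    # Usage sketch (illustrative only; ``pool`` is a hypothetical StoragePool
    # instance; kept as a comment so nothing runs at import time):
    #
    #   if pool.supports_property("format"):
    #       print("valid source formats:", pool.list_formats())
    #   print("can create volumes:", pool.supports_volume_creation())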

    def get_disk_type(self):
        if (self.type == StoragePool.TYPE_DISK or
            self.type == StoragePool.TYPE_LOGICAL or
            self.type == StoragePool.TYPE_SCSI or
            self.type == StoragePool.TYPE_MPATH or
            self.type == StoragePool.TYPE_ZFS):
            return StorageVolume.TYPE_BLOCK
        if (self.type == StoragePool.TYPE_GLUSTER or
            self.type == StoragePool.TYPE_RBD or
            self.type == StoragePool.TYPE_ISCSI or
            self.type == StoragePool.TYPE_SHEEPDOG):
            return StorageVolume.TYPE_NETWORK
        return StorageVolume.TYPE_FILE


    ##################
    # Build routines #
    ##################

    def validate(self):
        if not self.target_path:
            self.target_path = self.default_target_path()
        if not self.source_name:
            self.source_name = self.default_source_name()
        if not self.format and self.supports_property("format"):
            self.format = "auto"

        if self.supports_property("hosts") and not self.hosts:
            raise RuntimeError(_("Hostname is required"))
        if (self.supports_property("source_path") and
            self.type != self.TYPE_LOGICAL and
            not self.source_path):
            raise RuntimeError(_("Source path is required"))

        if (self.type == self.TYPE_DISK and self.format == "auto"):
            # There is no explicit "auto" type for disk pools, but leaving out
            # the format type seems to do the job for existing formatted disks
            self.format = None

    def install(self, meter=None, create=False, build=False, autostart=False):
        """
        Install storage pool xml.
        """
        if (self.type == self.TYPE_LOGICAL and
            build and not self.source_path):
            raise ValueError(_("Must explicitly specify source path if "
                               "building pool"))
        if (self.type == self.TYPE_DISK and
            build and self.format == "auto"):
            raise ValueError(_("Must explicitly specify disk format if "
                               "formatting disk device."))

        xml = self.get_xml()
        logging.debug("Creating storage pool '%s' with xml:\n%s",
                      self.name, xml)

        meter = util.ensure_meter(meter)

        try:
            pool = self.conn.storagePoolDefineXML(xml, 0)
        except Exception as e:
            raise RuntimeError(_("Could not define storage pool: %s") % str(e))

        errmsg = None
        if build:
            try:
                pool.build(libvirt.VIR_STORAGE_POOL_BUILD_NEW)
            except Exception as e:
                errmsg = _("Could not build storage pool: %s") % str(e)

        if create and not errmsg:
            try:
                pool.create(0)
            except Exception as e:
                errmsg = _("Could not start storage pool: %s") % str(e)

        if autostart and not errmsg:
            try:
                pool.setAutostart(True)
            except Exception as e:
                errmsg = _("Could not set pool autostart flag: %s") % str(e)

        if errmsg:
            # Try and clean up the leftover pool
            try:
                pool.undefine()
            except Exception as e:
                logging.debug("Error cleaning up pool after failure: %s",
                              str(e))
            raise RuntimeError(errmsg)

        self.conn.cache_new_pool(pool)

        return pool
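
    # Usage sketch (illustrative only; ``conn`` is a hypothetical open
    # virtinst connection and the path is an example; kept as a comment so
    # nothing runs at import time):
    #
    #   pool = StoragePool(conn)
    #   pool.type = StoragePool.TYPE_DIR
    #   pool.name = "scratch"
    #   pool.target_path = "/var/lib/libvirt/scratch"
    #   pool.validate()
    #   virpool = pool.install(build=True, create=True, autostart=True)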


class StorageVolume(_StorageObject):
    """
    Base class for building and installing libvirt storage volume xml
    """
    ALL_FORMATS = ["raw", "bochs", "cloop", "dmg", "iso", "qcow",
                   "qcow2", "qed", "vmdk", "vpc", "fat", "vhd", "vdi"]

    @staticmethod
    def get_file_extension_for_format(fmt):
        if not fmt:
            return ""
        if fmt == "raw":
            return ".img"
        return "." + fmt

    @staticmethod
    def find_free_name(pool_object, basename, **kwargs):
        """
        Finds a name similar (or equal) to passed 'basename' that is not
        in use by another volume. Extra params are passed to generate_name
        """
        pool_object.refresh(0)
        return util.generate_name(basename,
                                  pool_object.storageVolLookupByName,
                                  **kwargs)

    TYPE_FILE = getattr(libvirt, "VIR_STORAGE_VOL_FILE", 0)
    TYPE_BLOCK = getattr(libvirt, "VIR_STORAGE_VOL_BLOCK", 1)
    TYPE_DIR = getattr(libvirt, "VIR_STORAGE_VOL_DIR", 2)
    TYPE_NETWORK = getattr(libvirt, "VIR_STORAGE_VOL_NETWORK", 3)
    TYPE_NETDIR = getattr(libvirt, "VIR_STORAGE_VOL_NETDIR", 4)


    def __init__(self, *args, **kwargs):
        _StorageObject.__init__(self, *args, **kwargs)

        self._input_vol = None
        self._pool = None
        self._pool_xml = None
        self._reflink = False

        self._install_finished = threading.Event()


    ######################
    # Non XML properties #
    ######################

    def _get_pool(self):
        return self._pool
    def _set_pool(self, newpool):
        if newpool.info()[0] != libvirt.VIR_STORAGE_POOL_RUNNING:
            raise ValueError(_("pool '%s' must be active.") % newpool.name())
        self._pool = newpool
        self._pool_xml = StoragePool(self.conn,
                                     parsexml=self._pool.XMLDesc(0))
    pool = property(_get_pool, _set_pool)

    def _get_input_vol(self):
        return self._input_vol
    def _set_input_vol(self, vol):
        if vol is None:
            self._input_vol = None
            return

        if not isinstance(vol, libvirt.virStorageVol):
            raise ValueError(_("input_vol must be a virStorageVol"))

        self._input_vol = vol
    input_vol = property(_get_input_vol, _set_input_vol)

    def _get_reflink(self):
        return self._reflink
    def _set_reflink(self, reflink):
        self._reflink = reflink
    reflink = property(_get_reflink, _set_reflink)

    def sync_input_vol(self, only_format=False):
        # Pull parameters from input vol into this class
        parsevol = StorageVolume(self.conn,
                                 parsexml=self._input_vol.XMLDesc(0))

        self.format = parsevol.format
        self.capacity = parsevol.capacity
        self.allocation = parsevol.allocation
        if only_format:
            return
        self.pool = self._input_vol.storagePoolLookupByVolume()


    ##########################
    # XML validation helpers #
    ##########################

    def _check_name_collision(self, name):
        vol = None
        try:
            vol = self.pool.storageVolLookupByName(name)
        except libvirt.libvirtError:
            pass
        if vol:
            raise ValueError(_("Name '%s' already in use by another volume.") %
                             name)

    def _get_vol_type(self):
        if self.type:
            if self.type == "file":
                return self.TYPE_FILE
            elif self.type == "block":
                return self.TYPE_BLOCK
            elif self.type == "dir":
                return self.TYPE_DIR
            elif self.type == "network":
                return self.TYPE_NETWORK
        return self._pool_xml.get_disk_type()
    file_type = property(_get_vol_type)


    ##################
    # XML properties #
    ##################

    XML_NAME = "volume"
    _XML_PROP_ORDER = ["name", "key", "capacity", "allocation", "format",
                       "target_path", "permissions"]

    type = XMLProperty("./@type")
    key = XMLProperty("./key")
    capacity = XMLProperty("./capacity", is_int=True)
    allocation = XMLProperty("./allocation", is_int=True)
    format = XMLProperty("./target/format/@type")
    target_path = XMLProperty("./target/path")
    backing_store = XMLProperty("./backingStore/path")
    backing_format = XMLProperty("./backingStore/format/@type")
    lazy_refcounts = XMLProperty(
        "./target/features/lazy_refcounts", is_bool=True)


    def _detect_backing_store_format(self):
        logging.debug("Attempting to detect format for backing_store=%s",
                      self.backing_store)
        vol, pool = StoragePool.manage_path(self.conn, self.backing_store)

        if not vol:
            logging.debug("Didn't find any volume for backing_store")
            return None

        # Only set backing format for volumes that support
        # the 'format' parameter as we know it, like qcow2 etc.
        volxml = StorageVolume(self.conn, vol.XMLDesc(0))
        volxml.pool = pool
        logging.debug("Found backing store volume XML:\n%s",
                      volxml.get_xml())

        if volxml.supports_property("format"):
            logging.debug("Returning format=%s", volxml.format)
            return volxml.format

        logging.debug("backing_store volume doesn't appear to have "
                      "a file format we can specify, returning None")
        return None


    ######################
    # Public API helpers #
    ######################

    def _supports_format(self):
        if self.file_type == self.TYPE_FILE:
            return True
        if self._pool_xml.type == StoragePool.TYPE_GLUSTER:
            return True
        return False

    def supports_property(self, propname):
        if propname == "format":
            return self._supports_format()
        return hasattr(self, propname)

    def list_formats(self):
        if self._supports_format():
            return self.ALL_FORMATS
        return []

    def list_create_formats(self):
        if self._supports_format():
            return ["raw", "qcow", "qcow2", "qed", "vmdk", "vpc", "vdi"]
        return None
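
    # Usage sketch (illustrative only; ``vol`` is a hypothetical StorageVolume
    # with its pool already set; kept as a comment so nothing runs at import
    # time):
    #
    #   fmt = "qcow2" if "qcow2" in (vol.list_create_formats() or []) else "raw"
    #   ext = StorageVolume.get_file_extension_for_format(fmt)
    #   print("would name the file something like", "disk" + ext)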


    ##################
    # Build routines #
    ##################

    def validate(self):
        if not self.format and self.file_type == self.TYPE_FILE:
            self.format = "raw"
        if self._prop_is_unset("lazy_refcounts") and self.format == "qcow2":
            self.lazy_refcounts = self.conn.check_support(
                self.conn.SUPPORT_CONN_QCOW2_LAZY_REFCOUNTS)

        if self._pool_xml.type == StoragePool.TYPE_LOGICAL:
            if self.allocation != self.capacity:
                logging.warning(_("Sparse logical volumes are not supported, "
                                  "setting allocation equal to capacity"))
                self.allocation = self.capacity

        isfatal, errmsg = self.is_size_conflict()
        if isfatal:
            raise ValueError(errmsg)
        if errmsg:
            logging.warning(errmsg)
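
    # Usage sketch for the install flow below (illustrative only; ``conn`` and
    # ``virpool`` are a hypothetical open virtinst connection and an active
    # virStoragePool; kept as a comment so nothing runs at import time):
    #
    #   vol = StorageVolume(conn)
    #   vol.pool = virpool
    #   vol.name = StorageVolume.find_free_name(virpool, "disk.qcow2")
    #   vol.format = "qcow2"
    #   vol.capacity = 10 * 1024 * 1024 * 1024
    #   vol.validate()
    #   virvol = vol.install()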

    def install(self, meter=None):
        """
        Build and install storage volume from xml
        """
        if self.backing_store and not self.backing_format:
            self.backing_format = self._detect_backing_store_format()

        xml = self.get_xml()
        logging.debug("Creating storage volume '%s' with xml:\n%s",
                      self.name, xml)

        t = threading.Thread(target=self._progress_thread,
                             name="Checking storage allocation",
                             args=(meter,))
        t.setDaemon(True)

        meter = util.ensure_meter(meter)

        cloneflags = 0
        createflags = 0
        if (self.format == "qcow2" and
            not self.backing_store and
            not self.conn.is_really_test() and
            self.conn.check_support(
                self.conn.SUPPORT_POOL_METADATA_PREALLOC, self.pool)):
            createflags |= libvirt.VIR_STORAGE_VOL_CREATE_PREALLOC_METADATA

        if self.reflink:
            cloneflags |= getattr(libvirt,
                                  "VIR_STORAGE_VOL_CREATE_REFLINK", 1)

        try:
            self._install_finished.clear()
            t.start()
            meter.start(size=self.capacity,
                        text=_("Allocating '%s'") % self.name)

            if self.input_vol:
                vol = self.pool.createXMLFrom(xml, self.input_vol, cloneflags)
            else:
                logging.debug("Using vol create flags=%s", createflags)
                vol = self.pool.createXML(xml, createflags)

            self._install_finished.set()
            t.join()
            meter.end(self.capacity)
            logging.debug("Storage volume '%s' install complete.",
                          self.name)
            return vol
        except Exception as e:
            logging.debug("Error creating storage volume", exc_info=True)
            raise RuntimeError("Couldn't create storage volume "
                               "'%s': '%s'" % (self.name, str(e)))

    def _progress_thread(self, meter):
        vol = None
        if not meter:
            return

        while True:
            try:
                if not vol:
                    vol = self.pool.storageVolLookupByName(self.name)
                vol.info()
                break
            except Exception:
                if self._install_finished.wait(.2):
                    break

        if vol is None:
            logging.debug("Couldn't lookup storage volume in prog thread.")
            return

        while True:
            ignore, ignore, alloc = vol.info()
            meter.update(alloc)
            if self._install_finished.wait(1):
                break

    def is_size_conflict(self):
        """
        Report if requested size exceeds its pool's available amount

        :returns: 2 element tuple:
            1. True if collision is fatal, false otherwise
            2. String message if some collision was encountered.
        """
        if not self.pool:
            return (False, "")

        # pool info is [pool state, capacity, allocation, available]
        avail = self.pool.info()[3]
        if self.allocation > avail:
            return (True, _("There is not enough free space on the storage "
                            "pool to create the volume. "
                            "(%d M requested allocation > %d M available)") %
                    ((self.allocation // (1024 * 1024)),
                     (avail // (1024 * 1024))))
        elif self.capacity > avail:
            return (False, _("The requested volume capacity will exceed the "
                             "available pool space when the volume is fully "
                             "allocated. "
                             "(%d M requested capacity > %d M available)") %
                    ((self.capacity // (1024 * 1024)),
                     (avail // (1024 * 1024))))
        return (False, "")