#
# Storage lookup/creation helpers
#
# Copyright 2013 Red Hat, Inc.
#
# This work is licensed under the GNU GPLv2 or later.
# See the COPYING file in the top-level directory.

import os
import re
import stat
import subprocess

import libvirt

from .logger import log
from .storage import StoragePool, StorageVolume
from . import xmlutil


def _lookup_vol_by_path(conn, path):
    """
    Try to find a volume matching the full passed path. Call info() on
    it to ensure the volume wasn't removed behind libvirt's back
    """
    try:
        vol = conn.storageVolLookupByPath(path)
        vol.info()
        return vol, None
    except libvirt.libvirtError as e:
        # test_urls trigger empty errors here, because Python
        # garbage collection kicks in after the failure but before
        # we read the error code, and the libvirt virStoragePoolFree
        # public entry point clears the cached error. So ignore
        # an empty error code
        if (e.get_error_code() and
                e.get_error_code() != libvirt.VIR_ERR_NO_STORAGE_VOL):
            raise  # pragma: no cover
        return None, e


def _lookup_vol_by_basename(pool, path):
    """
    Try to look up a volume for 'path' in parent 'pool' by its filename.
    This sometimes works in cases where full volume path lookup doesn't,
    since not all libvirt storage backends implement path lookup.
    """
    name = os.path.basename(path)
    if name in pool.listVolumes():
        return pool.storageVolLookupByName(name)
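# Illustrative note (hypothetical path, not part of the module): for
# "/var/lib/libvirt/images/disk.img" the basename "disk.img" is checked
# against pool.listVolumes(); if it's absent, the function implicitly
# returns None.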


def _get_block_size(path):  # pragma: no cover
    try:
        fd = os.open(path, os.O_RDONLY)
        # os.SEEK_END is not present on all systems
        size = os.lseek(fd, 0, 2)
        os.close(fd)
    except Exception:
        size = 0
    return size


def _get_size(path):
    if not os.path.exists(path):
        return 0
    if _stat_is_block(path):
        return _get_block_size(path)  # pragma: no cover
    return os.path.getsize(path)


def _stat_is_block(path):
    if not os.path.exists(path):
        return False
    return stat.S_ISBLK(os.stat(path)[stat.ST_MODE])


def _check_if_path_managed(conn, path):
    """
    Try to look up storage objects for the passed path.

    Returns (volume, parent pool). Only one is returned at a time.
    """
    vol, ignore = _lookup_vol_by_path(conn, path)
    if vol:
        return vol, vol.storagePoolLookupByVolume()

    pool = StoragePool.lookup_pool_by_path(conn, os.path.dirname(path))
    if not pool:
        return None, None

    # We have the parent pool, but didn't find a volume on first lookup
    # attempt. Refresh the pool and try again, in case we were just out
    # of date or the pool was inactive.
    try:
        StoragePool.ensure_pool_is_running(pool, refresh=True)
        vol, verr = _lookup_vol_by_path(conn, path)
        if verr:
            try:
                vol = _lookup_vol_by_basename(pool, path)
            except Exception:  # pragma: no cover
                pass
    except Exception as e:  # pragma: no cover
        vol = None
        pool = None
        verr = str(e)

    if not vol and not pool and verr:  # pragma: no cover
        raise ValueError(_("Cannot use storage %(path)s: %(err)s") %
                         {'path': path, 'err': verr})

    return vol, pool


def _can_auto_manage(path):
    path = path or ""
    skip_prefixes = ["/dev", "/sys", "/proc"]

    if path_is_url(path):
        return False

    for prefix in skip_prefixes:
        if path.startswith(prefix + "/") or path == prefix:
            return False
    return True
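# Illustrative note (hypothetical paths): "/var/lib/libvirt/images/a.img"
# is auto-manageable, while "/dev/sda", "/proc/cpuinfo", or URL-style
# paths like "rbd://pool/vol" are rejected.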


def _get_storage_search_path(path):
    # If the passed path is one of our artificial rbd:// style
    # URIs, parse out the path component, since that is what is needed
    # for looking up storage volumes by target path
    from .uri import URI
    uriobj = URI(path)
    if uriobj.scheme == "rbd":
        return uriobj.path.strip("/")
    return path
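# Illustrative note (hypothetical URI): for an artificial path like
# "rbd://host/pool/myvol" this should return "pool/myvol", the form used
# when matching volume target paths; ordinary local paths pass through
# unchanged.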


def manage_path(conn, path):
    """
    If path is not managed, try to create a storage pool to probe the path
    """
    if not conn.support.conn_storage():
        return None, None, None  # pragma: no cover
    if not path:
        return None, None, None

    if not path_is_url(path) and not path_is_network_vol(conn, path):
        path = os.path.abspath(path)

    searchpath = _get_storage_search_path(path)
    vol, pool = _check_if_path_managed(conn, searchpath)
    if vol or pool or not _can_auto_manage(path):
        return path, vol, pool

    dirname = os.path.dirname(path)
    poolname = os.path.basename(dirname).replace(" ", "_")
    if not poolname:
        poolname = "dirpool"
    poolname = StoragePool.find_free_name(conn, poolname)
    log.debug("Attempting to build pool=%s target=%s", poolname, dirname)

    poolxml = StoragePool(conn)
    poolxml.name = poolname
    poolxml.type = poolxml.TYPE_DIR
    poolxml.target_path = dirname
    pool = poolxml.install(build=False, create=True, autostart=True)

    vol = _lookup_vol_by_basename(pool, path)
    return path, vol, pool
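# Illustrative note (hypothetical call): manage_path(conn,
# "/home/user/VMs/new.img") returns (abspath, vol_or_None, pool); if
# "/home/user/VMs" isn't already tracked, a directory pool named after
# it ("VMs") is defined, started, and set to autostart.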


def path_is_url(path):
    """
    Detect if path is a URL
    """
    return bool(re.match(r"[a-zA-Z]+(\+[a-zA-Z]+)?://.*", path or ""))
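# Illustrative note: "http://example.com/disk.img" and scheme+transport
# forms like "rbd+tcp://host/vol" match this pattern, while plain
# filesystem paths such as "/var/lib/libvirt/images/disk.img" do not.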


def path_is_network_vol(conn, path):
    """
    Detect if path is a network volume such as rbd, gluster, etc
    """
    for volxml in conn.fetch_all_vols():
        if path and volxml.target_path == path:
            return volxml.type == "network"
    return False


def _get_dev_type(path, vol_xml, vol_object, pool_xml, remote):
    """
    Try to get device type for volume.
    """
    if vol_xml and vol_xml.type:
        return vol_xml.type

    if pool_xml:
        t = pool_xml.get_disk_type()
        if t == StorageVolume.TYPE_BLOCK:
            return "block"
        elif t == StorageVolume.TYPE_NETWORK:
            return "network"

    if vol_object:  # pragma: no cover
        # This path is hard to test, because test suite XML always has
        # the vol_xml.type set
        t = vol_object.info()[0]
        if t == StorageVolume.TYPE_FILE:
            return "file"
        elif t == StorageVolume.TYPE_BLOCK:
            return "block"
        elif t == StorageVolume.TYPE_NETWORK:
            return "network"

    if path:
        if path_is_url(path):
            return "network"

        if remote:
            if not _can_auto_manage(path):
                # Just a heuristic: if this path is one of the ones
                # we don't try to auto-import, then consider it a
                # block device, because managing those correctly is difficult
                return "block"

        else:
            if os.path.isdir(path):
                return "dir"
            elif _stat_is_block(path):
                return "block"  # pragma: no cover

    return "file"


def path_definitely_exists(conn, path):
    """
    Return True if the path certainly exists, False if we are unsure.
    See DeviceDisk entry point for more details
    """
    if path is None:
        return False

    try:
        (vol, pool) = _check_if_path_managed(conn, path)
        ignore = pool
        if vol:
            return True

        if not conn.is_remote():
            return os.path.exists(path)
    except Exception:  # pragma: no cover
        pass

    return False


#########################
# ACL/path perm helpers #
#########################

SETFACL = "setfacl"


def _fix_perms_acl(dirname, username):
    cmd = [SETFACL, "--modify", "user:%s:x" % username, dirname]
    proc = subprocess.Popen(cmd,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    out, err = proc.communicate()

    log.debug("Ran command '%s'", cmd)
    if out or err:
        log.debug("out=%s\nerr=%s", out, err)

    if proc.returncode != 0:
        raise ValueError(err)
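# Illustrative note (hypothetical arguments): _fix_perms_acl("/home/user",
# "qemu") runs the equivalent of `setfacl --modify user:qemu:x /home/user`,
# granting the qemu user search (execute) access to that directory.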


def _fix_perms_chmod(dirname):
    log.debug("Setting +x on %s", dirname)
    mode = os.stat(dirname).st_mode
    newmode = mode | stat.S_IXOTH
    os.chmod(dirname, newmode)
    if os.stat(dirname).st_mode != newmode:
        # Trying to change perms on vfat at least doesn't work
        # but also doesn't seem to error. Try and detect that
        raise ValueError(  # pragma: no cover
            _("Permissions on '%s' did not stick") % dirname)


def set_dirs_searchable(dirlist, username):
    useacl = True
    errdict = {}
    for dirname in dirlist:
        if useacl:
            try:
                _fix_perms_acl(dirname, username)
                continue
            except Exception as e:
                log.debug("setfacl failed: %s", e)
                log.debug("trying chmod")
                useacl = False

        try:
            # If we reach here, ACL setting failed, try chmod
            _fix_perms_chmod(dirname)
        except Exception as e:  # pragma: no cover
            errdict[dirname] = str(e)

    return errdict


def _is_dir_searchable(dirname, uid, username):
    """
    Check if passed directory is searchable by uid
    """
    try:
        statinfo = os.stat(dirname)
    except OSError:  # pragma: no cover
        return False

    if uid == statinfo.st_uid:
        flag = stat.S_IXUSR
    elif uid == statinfo.st_gid:
        flag = stat.S_IXGRP  # pragma: no cover
    else:
        flag = stat.S_IXOTH

    if bool(statinfo.st_mode & flag):
        return True

    # Check POSIX ACL (since that is what we use to 'fix' access)
    cmd = ["getfacl", dirname]
    try:
        proc = subprocess.Popen(cmd,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        out, err = proc.communicate()
    except OSError:  # pragma: no cover
        log.debug("Didn't find the getfacl command.")
        return False

    if proc.returncode != 0:  # pragma: no cover
        log.debug("Cmd '%s' failed: %s", cmd, err)
        return False

    pattern = "user:%s:..x" % username
    return bool(re.search(pattern.encode("utf-8", "replace"), out))


def is_path_searchable(path, uid, username):
    """
    Check each dir component of the passed path, see if they are
    searchable by the uid/username, and return a list of paths
    which aren't searchable
    """
    if os.path.isdir(path):
        dirname = path
        base = "-"
    else:
        dirname, base = os.path.split(path)

    fixlist = []
    while base:
        if not _is_dir_searchable(dirname, uid, username):
            fixlist.append(dirname)
        dirname, base = os.path.split(dirname)

    return fixlist
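# Illustrative note (hypothetical path): is_path_searchable(
# "/home/user/VMs/disk.img", uid, "qemu") checks "/home/user/VMs",
# "/home/user", "/home", and "/" in turn, returning the subset that the
# uid/username cannot search.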


##############################################
# Classes for tracking storage media details #
##############################################

class _StorageBase(object):
    """
    Storage base class, defining the API used by DeviceDisk
    """
    def __init__(self, conn):
        self._conn = conn
        self._parent_pool_xml = None

    def get_size(self):
        raise NotImplementedError()
    def get_dev_type(self):
        raise NotImplementedError()
    def get_driver_type(self):
        raise NotImplementedError()
    def get_vol_install(self):
        raise NotImplementedError()
    def get_vol_object(self):
        raise NotImplementedError()
    def get_parent_pool(self):
        raise NotImplementedError()
    def get_parent_pool_xml(self):
        if not self._parent_pool_xml and self.get_parent_pool():
            self._parent_pool_xml = StoragePool(self._conn,
                    parsexml=self.get_parent_pool().XMLDesc(0))
        return self._parent_pool_xml
    def validate(self):
        raise NotImplementedError()
    def get_path(self):
        raise NotImplementedError()
    def is_stub(self):
        return False

    # Storage creation routines
    def is_size_conflict(self):
        raise NotImplementedError()
    def will_create_storage(self):
        raise NotImplementedError()

    def create(self, meter):
        ignore = meter  # pragma: no cover
        raise xmlutil.DevError(
                "%s can't create storage" % self.__class__.__name__)


class _StorageCreator(_StorageBase):
    """
    Base object for classes that will actually create storage on disk
    """
    def __init__(self, conn):
        _StorageBase.__init__(self, conn)

        self._pool = None
        self._vol_install = None
        self._path = None
        self._size = None
        self._dev_type = None


    ##############
    # Public API #
    ##############

    def create(self, meter):
        raise NotImplementedError
    def validate(self):
        raise NotImplementedError
    def get_size(self):
        raise NotImplementedError

    def get_path(self):
        if self._vol_install and not self._path:
            xmlobj = StoragePool(self._conn,
                    parsexml=self._vol_install.pool.XMLDesc(0))
            if self.get_dev_type() == "network":
                self._path = self._vol_install.name
            else:
                self._path = os.path.join(
                        xmlobj.target_path, self._vol_install.name)
        return self._path

    def get_vol_install(self):
        return self._vol_install
    def get_vol_xml(self):
        return self._vol_install

    def get_dev_type(self):
        if not self._dev_type:
            self._dev_type = _get_dev_type(self._path, self._vol_install, None,
                                           self.get_parent_pool_xml(),
                                           self._conn.is_remote())
        return self._dev_type

    def get_driver_type(self):
        if self._vol_install:
            if self._vol_install.supports_format():
                return self._vol_install.format
        return "raw"

    def will_create_storage(self):
        return True
    def get_vol_object(self):
        return None
    def get_parent_pool(self):
        if self._vol_install:
            return self._vol_install.pool
        return None
    def exists(self):
        return False


class ManagedStorageCreator(_StorageCreator):
    """
    Handles storage creation via libvirt APIs. All the actual creation
    logic lives in StorageVolume; this is mostly about pulling out bits
    from that class and mapping them to DeviceDisk elements
    """
    def __init__(self, conn, vol_install):
        _StorageCreator.__init__(self, conn)

        self._pool = vol_install.pool
        self._vol_install = vol_install

    def create(self, meter):
        return self._vol_install.install(meter=meter)
    def is_size_conflict(self):
        return self._vol_install.is_size_conflict()
    def validate(self):
        return self._vol_install.validate()
    def get_size(self):
        # capacity is in bytes; convert to GiB
        return float(self._vol_install.capacity) / 1024.0 / 1024.0 / 1024.0


class CloneStorageCreator(_StorageCreator):
    """
    Handles manually copying local files for Cloner

    Many clone scenarios will use libvirt storage APIs, which will use
    the ManagedStorageCreator
    """
    def __init__(self, conn, output_path, input_path, size, sparse):
        _StorageCreator.__init__(self, conn)

        self._path = output_path
        self._output_path = output_path
        self._input_path = input_path
        self._size = size
        self._sparse = sparse

    def get_size(self):
        return self._size

    def is_size_conflict(self):
        ret = False
        msg = None
        if self.get_dev_type() == "block":
            avail = _get_size(self._path)  # pragma: no cover
        else:
            vfs = os.statvfs(os.path.dirname(os.path.abspath(self._path)))
            avail = vfs.f_frsize * vfs.f_bavail
        need = int(self._size) * 1024 * 1024 * 1024
        if need > avail:  # pragma: no cover
            if self._sparse:
                msg = _("The filesystem will not have enough free space"
                        " to fully allocate the sparse file when the guest"
                        " is running.")
            else:
                ret = True
                msg = _("There is not enough free space to create the disk.")

            if msg:
                msg += " "
                msg += (_("%(mem1)s M requested > %(mem2)s M available") %
                        {"mem1": (need // (1024 * 1024)),
                         "mem2": (avail // (1024 * 1024))})
        return (ret, msg)

    def validate(self):
        if self._size is None:  # pragma: no cover
            raise ValueError(_("size is required for non-existent disk "
                               "'%s'") % self.get_path())

        err, msg = self.is_size_conflict()
        if err:
            raise ValueError(msg)  # pragma: no cover
        if msg:
            log.warning(msg)  # pragma: no cover

    def create(self, meter):
        text = (_("Cloning %(srcfile)s") %
                {'srcfile': os.path.basename(self._input_path)})

        size_bytes = int(self.get_size() * 1024 * 1024 * 1024)
        meter.start(text, size_bytes)

        # Plain file clone
        self._clone_local(meter, size_bytes)

    def _clone_local(self, meter, size_bytes):
        if self._input_path == "/dev/null":  # pragma: no cover
            # Not really sure why this check is here,
            # but keeping for compat
            log.debug("Source dev was /dev/null. Skipping")
            return
        if self._input_path == self._output_path:
            log.debug("Source and destination are the same. Skipping.")
            return

        # If the destination file already exists and the sparse flag is
        # set, the existing file takes priority: only create and truncate
        # a new sparse file when the destination doesn't exist yet.

        if (not os.path.exists(self._output_path) and self._sparse):
            clone_block_size = 4096
            sparse = True
            fd = None
            try:
                fd = os.open(self._output_path, os.O_WRONLY | os.O_CREAT,
                             0o640)
                os.ftruncate(fd, size_bytes)
            finally:
                if fd:
                    os.close(fd)
        else:
            clone_block_size = 1024 * 1024 * 10
            sparse = False

        log.debug("Local Cloning %s to %s, sparse=%s, block_size=%s",
                  self._input_path, self._output_path,
                  sparse, clone_block_size)

        # In sparse mode clone_block_size is 4096, matching this buffer,
        # so a block of all zeros can be skipped with a seek instead of
        # a write.
        zeros = b'\0' * 4096

        src_fd, dst_fd = None, None
        try:
            try:
                src_fd = os.open(self._input_path, os.O_RDONLY)
                dst_fd = os.open(self._output_path,
                                 os.O_WRONLY | os.O_CREAT, 0o640)

                i = 0
                while True:
                    data = os.read(src_fd, clone_block_size)
                    s = len(data)
                    if s == 0:
                        meter.end()
                        break
                    # check sequence of zeros
                    if sparse and zeros == data:
                        os.lseek(dst_fd, s, 1)
                    else:
                        b = os.write(dst_fd, data)
                        if s != b:  # pragma: no cover
                            meter.end()
                            break
                    i += s
                    if i < size_bytes:
                        meter.update(i)
            except OSError as e:  # pragma: no cover
                log.debug("Error while cloning", exc_info=True)
                msg = (_("Error cloning diskimage "
                         "%(inputpath)s to %(outputpath)s: %(error)s") %
                       {"inputpath": self._input_path,
                        "outputpath": self._output_path,
                        "error": str(e)})
                raise RuntimeError(msg) from None
        finally:
            if src_fd is not None:
                os.close(src_fd)
            if dst_fd is not None:
                os.close(dst_fd)
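# Illustrative note (hypothetical values): CloneStorageCreator(conn,
# "/tmp/clone.img", "/tmp/base.img", size=10, sparse=True) would, on
# create(meter), copy base.img block by block, seeking past all-zero
# blocks so the destination stays sparse; size is in GiB.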


class StorageBackendStub(_StorageBase):
    """
    Class representing a storage path for a parsed XML disk, that we
    don't want to do slow resolving of unless requested
    """
    def __init__(self, conn, path, dev_type, driver_type):
        _StorageBase.__init__(self, conn)
        self._path = path
        self._dev_type = dev_type
        self._driver_type = driver_type


    def get_path(self):
        return self._path
    def get_vol_object(self):
        return None
    def get_vol_xml(self):
        return None
    def get_parent_pool(self):
        return None
    def get_size(self):
        return 0
    def exists(self):
        return True
    def get_dev_type(self):
        return self._dev_type
    def get_driver_type(self):
        return self._driver_type

    def validate(self):
        return
    def get_vol_install(self):
        return None
    def is_size_conflict(self):
        return (False, None)
    def is_stub(self):
        return True
    def will_create_storage(self):
        return False


class StorageBackend(_StorageBase):
    """
    Class that carries all the info about any existing storage that
    the disk references
    """
    def __init__(self, conn, path, vol_object, parent_pool):
        _StorageBase.__init__(self, conn)

        self._vol_object = vol_object
        self._parent_pool = parent_pool
        self._path = path

        if self._vol_object is not None:
            self._path = None

        if self._vol_object and not self._parent_pool:
            raise xmlutil.DevError(
                    "parent_pool must be specified")

        # Cached bits
        self._vol_xml = None
        self._parent_pool_xml = None
        self._exists = None
        self._size = None
        self._dev_type = None


    ##############
    # Public API #
    ##############

    def get_path(self):
        if self._vol_object:
            return self.get_vol_xml().target_path
        return self._path

    def get_vol_object(self):
        return self._vol_object
    def get_vol_xml(self):
        if self._vol_xml is None:
            self._vol_xml = StorageVolume(self._conn,
                    parsexml=self._vol_object.XMLDesc(0))
            self._vol_xml.pool = self._parent_pool
        return self._vol_xml

    def get_parent_pool(self):
        return self._parent_pool

    def get_size(self):
        """
        Return size of existing storage
        """
        if self._size is None:
            ret = 0
            if self._vol_object:
                ret = self.get_vol_xml().capacity
            elif self._path:
                ret = _get_size(self._path)
            self._size = (float(ret) / 1024.0 / 1024.0 / 1024.0)
        return self._size

    def exists(self):
        if self._exists is None:
            if self._vol_object:
                self._exists = True
            elif self._path is None:
                self._exists = True
            elif (not self.get_dev_type() == "network" and
                  not self._conn.is_remote() and
                  os.path.exists(self._path)):
                self._exists = True
            elif self._parent_pool:
                self._exists = False
            elif self.get_dev_type() == "network":
                self._exists = True
            elif (self._conn.is_remote() and
                  not _can_auto_manage(self._path)):
                # This allows users to pass /dev/sdX and we don't try to
                # validate it exists on the remote connection, since
                # autopooling /dev is perilous. Libvirt will error if
                # the device doesn't exist.
                self._exists = True
            else:
                self._exists = False
        return self._exists

    def get_dev_type(self):
        """
        Return disk 'type' value per storage settings
        """
        if self._dev_type is None:
            vol_xml = None
            if self._vol_object:
                vol_xml = self.get_vol_xml()
            self._dev_type = _get_dev_type(self._path, vol_xml, self._vol_object,
                                           self.get_parent_pool_xml(),
                                           self._conn.is_remote())
        return self._dev_type

    def get_driver_type(self):
        if self._vol_object:
            if self.get_vol_xml().supports_format():
                return self.get_vol_xml().format
            return "raw"
        return None

    def validate(self):
        return
    def get_vol_install(self):
        return None
    def is_size_conflict(self):
        return (False, None)
    def will_create_storage(self):
        return False