ovl: generalize the lower_fs[] array

Rename the lower_fs[] array to fs[], extend its size by one, and use index
fsid (instead of fsid-1) to access the fs[] array.

Initialize fs[0] with the upper fs values. fsid 0 is reserved even with a
lower-only overlay, so fs[0] remains null in that case. A toy sketch of the
new indexing follows the commit metadata below.

Signed-off-by: Amir Goldstein <amir73il@gmail.com>
Signed-off-by: Miklos Szeredi <mszeredi@redhat.com>
Authored by Amir Goldstein on 2020-01-14 21:59:22 +02:00; committed by Miklos Szeredi
parent 0f831ec85e
commit 07f1e59637
3 changed files with 46 additions and 46 deletions
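To make the new indexing concrete, here is a toy user-space sketch (made-up names such as toy_sb, toy_ofs and get_fsid, not the kernel code) of the scheme the commit message describes: fs[] is indexed directly by fsid, slot 0 is pre-reserved for the upper fs, and a lower layer whose filesystem is already in the table reuses that entry's index:

```c
#include <stdio.h>

/* Toy stand-ins for the kernel's super_block / struct ovl_sb / struct ovl_fs */
struct toy_sb { const char *type; };

struct toy_ofs {
	unsigned int numfs;		/* unique fs among layers, incl. upper */
	const struct toy_sb *fs[8];	/* fs[0] is reserved for the upper fs */
};

/*
 * Mirrors only the indexing logic of ovl_get_fsid() (uuid checks and anon
 * bdev allocation are omitted): return an existing fsid or append a new one.
 */
static int get_fsid(struct toy_ofs *ofs, const struct toy_sb *sb)
{
	unsigned int i;

	for (i = 0; i < ofs->numfs; i++)
		if (ofs->fs[i] == sb)
			return i;

	ofs->fs[ofs->numfs] = sb;
	return ofs->numfs++;
}

int main(void)
{
	struct toy_sb ext4 = { "ext4" }, xfs = { "xfs" };
	/* like ovl_get_layers(): reserve fsid 0 for the upper fs up front */
	struct toy_ofs ofs = { .numfs = 1, .fs = { &ext4 } };

	printf("lower on ext4 -> fsid %d\n", get_fsid(&ofs, &ext4));	/* 0 */
	printf("lower on xfs  -> fsid %d\n", get_fsid(&ofs, &xfs));	/* 1 */
	printf("numfs = %u\n", ofs.numfs);				/* 2 */
	return 0;
}
```

With a lower-only overlay the reserved slot simply stays empty, matching the "fs[0] remains null" note in the message above.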

fs/overlayfs/inode.c

@@ -75,8 +75,7 @@ int ovl_setattr(struct dentry *dentry, struct iattr *attr)
 	return err;
 }
 
-static int ovl_map_dev_ino(struct dentry *dentry, struct kstat *stat,
-			   struct ovl_layer *lower_layer)
+static int ovl_map_dev_ino(struct dentry *dentry, struct kstat *stat, int fsid)
 {
 	bool samefs = ovl_same_fs(dentry->d_sb);
 	unsigned int xinobits = ovl_xino_bits(dentry->d_sb);
@@ -103,9 +102,7 @@ static int ovl_map_dev_ino(struct dentry *dentry, struct kstat *stat,
 			pr_warn_ratelimited("inode number too big (%pd2, ino=%llu, xinobits=%d)\n",
 					    dentry, stat->ino, xinobits);
 		} else {
-			if (lower_layer)
-				stat->ino |= ((u64)lower_layer->fsid) << shift;
-
+			stat->ino |= ((u64)fsid) << shift;
 			stat->dev = dentry->d_sb->s_dev;
 			return 0;
 		}
@@ -124,7 +121,7 @@ static int ovl_map_dev_ino(struct dentry *dentry, struct kstat *stat,
 		 */
 		stat->dev = dentry->d_sb->s_dev;
 		stat->ino = dentry->d_inode->i_ino;
-	} else if (lower_layer && lower_layer->fsid) {
+	} else if (fsid) {
 		/*
 		 * For non-samefs setup, if we cannot map all layers st_ino
 		 * to a unified address space, we need to make sure that st_dev
@@ -132,7 +129,7 @@ static int ovl_map_dev_ino(struct dentry *dentry, struct kstat *stat,
 		 * lower layers use the unique anonymous bdev assigned to the
 		 * lower fs.
 		 */
-		stat->dev = lower_layer->fs->pseudo_dev;
+		stat->dev = OVL_FS(dentry->d_sb)->fs[fsid].pseudo_dev;
 	}
 
 	return 0;
@@ -147,7 +144,7 @@ int ovl_getattr(const struct path *path, struct kstat *stat,
 	const struct cred *old_cred;
 	bool is_dir = S_ISDIR(dentry->d_inode->i_mode);
 	bool samefs = ovl_same_fs(dentry->d_sb);
-	struct ovl_layer *lower_layer = NULL;
+	int fsid = 0;
 	int err;
 	bool metacopy_blocks = false;
 
@@ -170,7 +167,7 @@ int ovl_getattr(const struct path *path, struct kstat *stat,
 	 */
 	if (!is_dir || ovl_same_dev(dentry->d_sb)) {
 		if (!OVL_TYPE_UPPER(type)) {
-			lower_layer = ovl_layer_lower(dentry);
+			fsid = ovl_layer_lower(dentry)->fsid;
 		} else if (OVL_TYPE_ORIGIN(type)) {
 			struct kstat lowerstat;
 			u32 lowermask = STATX_INO | STATX_BLOCKS |
@@ -200,13 +197,13 @@ int ovl_getattr(const struct path *path, struct kstat *stat,
 			if (ovl_test_flag(OVL_INDEX, d_inode(dentry)) ||
 			    (!ovl_verify_lower(dentry->d_sb) &&
 			     (is_dir || lowerstat.nlink == 1))) {
-				lower_layer = ovl_layer_lower(dentry);
+				fsid = ovl_layer_lower(dentry)->fsid;
 				/*
 				 * Cannot use origin st_dev;st_ino because
 				 * origin inode content may differ from overlay
 				 * inode content.
 				 */
-				if (samefs || lower_layer->fsid)
+				if (samefs || fsid)
 					stat->ino = lowerstat.ino;
 			}
 
@@ -241,7 +238,7 @@ int ovl_getattr(const struct path *path, struct kstat *stat,
 		}
 	}
 
-	err = ovl_map_dev_ino(dentry, stat, lower_layer);
+	err = ovl_map_dev_ino(dentry, stat, fsid);
 	if (err)
 		goto out;
 

fs/overlayfs/ovl_entry.h

@@ -46,10 +46,10 @@ struct ovl_path {
 struct ovl_fs {
 	struct vfsmount *upper_mnt;
 	unsigned int numlayer;
-	/* Number of unique lower sb that differ from upper sb */
-	unsigned int numlowerfs;
+	/* Number of unique fs among layers including upper fs */
+	unsigned int numfs;
 	struct ovl_layer *layers;
-	struct ovl_sb *lower_fs;
+	struct ovl_sb *fs;
 	/* workbasedir is the path at workdir= mount option */
 	struct dentry *workbasedir;
 	/* workdir is the 'work' directory under workbasedir */
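For orientation, a small illustrative sketch (assumed toy_* names, not the kernel structs) of how the renamed fields fit together after this patch: every layer, the upper one included, now carries a valid fs pointer, because ovl_get_layers() sets layers[i].fs = &fs[layers[i].fsid] unconditionally, and layers that share the upper filesystem all resolve to fs[0]:

```c
#include <stdio.h>

/* Toy equivalents of struct ovl_sb and struct ovl_layer */
struct toy_fs    { unsigned int pseudo_dev; };
struct toy_layer { int fsid; const struct toy_fs *fs; };	/* fs == &fs[fsid] */

int main(void)
{
	/* numfs = 2: fs[0] is the upper fs, fs[1] a distinct lower fs */
	struct toy_fs fs[2] = {
		{ .pseudo_dev = 0x801 },	/* real upper st_dev */
		{ .pseudo_dev = 0x2e },		/* anon bdev of the lower fs */
	};
	/*
	 * Upper layer, a lower layer sharing the upper fs, and a lower layer
	 * on its own fs -- the first two share fsid 0 and thus st_dev.
	 */
	struct toy_layer layers[3] = {
		{ .fsid = 0, .fs = &fs[0] },
		{ .fsid = 0, .fs = &fs[0] },
		{ .fsid = 1, .fs = &fs[1] },
	};

	for (int i = 0; i < 3; i++)
		printf("layer %d: fsid=%d st_dev=%#x\n",
		       i, layers[i].fsid, layers[i].fs->pseudo_dev);
	return 0;
}
```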

fs/overlayfs/super.c

@@ -228,10 +228,11 @@ static void ovl_free_fs(struct ovl_fs *ofs)
 		iput(ofs->layers[i].trap);
 		mntput(ofs->layers[i].mnt);
 	}
-	for (i = 0; i < ofs->numlowerfs; i++)
-		free_anon_bdev(ofs->lower_fs[i].pseudo_dev);
 	kfree(ofs->layers);
-	kfree(ofs->lower_fs);
+	/* fs[0].pseudo_dev is either null or real upper st_dev */
+	for (i = 1; i < ofs->numfs; i++)
+		free_anon_bdev(ofs->fs[i].pseudo_dev);
+	kfree(ofs->fs);
 
 	kfree(ofs->config.lowerdir);
 	kfree(ofs->config.upperdir);
@@ -1259,7 +1260,7 @@ static bool ovl_lower_uuid_ok(struct ovl_fs *ofs, const uuid_t *uuid)
 	if (!ofs->config.nfs_export && !ofs->upper_mnt)
 		return true;
 
-	for (i = 0; i < ofs->numlowerfs; i++) {
+	for (i = 1; i < ofs->numfs; i++) {
 		/*
 		 * We use uuid to associate an overlay lower file handle with a
 		 * lower layer, so we can accept lower fs with null uuid as long
@@ -1267,8 +1268,8 @@ static bool ovl_lower_uuid_ok(struct ovl_fs *ofs, const uuid_t *uuid)
 		 * if we detect multiple lower fs with the same uuid, we
 		 * disable lower file handle decoding on all of them.
 		 */
-		if (uuid_equal(&ofs->lower_fs[i].sb->s_uuid, uuid)) {
-			ofs->lower_fs[i].bad_uuid = true;
+		if (uuid_equal(&ofs->fs[i].sb->s_uuid, uuid)) {
+			ofs->fs[i].bad_uuid = true;
 			return false;
 		}
 	}
@@ -1284,13 +1285,9 @@ static int ovl_get_fsid(struct ovl_fs *ofs, const struct path *path)
 	int err;
 	bool bad_uuid = false;
 
-	/* fsid 0 is reserved for upper fs even with non upper overlay */
-	if (ofs->upper_mnt && ofs->upper_mnt->mnt_sb == sb)
-		return 0;
-
-	for (i = 0; i < ofs->numlowerfs; i++) {
-		if (ofs->lower_fs[i].sb == sb)
-			return i + 1;
+	for (i = 0; i < ofs->numfs; i++) {
+		if (ofs->fs[i].sb == sb)
+			return i;
 	}
 
 	if (!ovl_lower_uuid_ok(ofs, &sb->s_uuid)) {
@@ -1311,12 +1308,11 @@ static int ovl_get_fsid(struct ovl_fs *ofs, const struct path *path)
 		return err;
 	}
 
-	ofs->lower_fs[ofs->numlowerfs].sb = sb;
-	ofs->lower_fs[ofs->numlowerfs].pseudo_dev = dev;
-	ofs->lower_fs[ofs->numlowerfs].bad_uuid = bad_uuid;
-	ofs->numlowerfs++;
+	ofs->fs[ofs->numfs].sb = sb;
+	ofs->fs[ofs->numfs].pseudo_dev = dev;
+	ofs->fs[ofs->numfs].bad_uuid = bad_uuid;
 
-	return ofs->numlowerfs;
+	return ofs->numfs++;
 }
 
 static int ovl_get_layers(struct super_block *sb, struct ovl_fs *ofs,
@@ -1331,17 +1327,27 @@ static int ovl_get_layers(struct super_block *sb, struct ovl_fs *ofs,
 	if (ofs->layers == NULL)
 		goto out;
 
-	ofs->lower_fs = kcalloc(numlower, sizeof(struct ovl_sb),
-				GFP_KERNEL);
-	if (ofs->lower_fs == NULL)
+	ofs->fs = kcalloc(numlower + 1, sizeof(struct ovl_sb), GFP_KERNEL);
+	if (ofs->fs == NULL)
 		goto out;
 
-	/* idx 0 is reserved for upper fs even with lower only overlay */
+	/* idx/fsid 0 are reserved for upper fs even with lower only overlay */
+	ofs->numfs++;
 	ofs->layers[0].mnt = ofs->upper_mnt;
 	ofs->layers[0].idx = 0;
 	ofs->layers[0].fsid = 0;
 	ofs->numlayer = 1;
 
+	/*
+	 * All lower layers that share the same fs as upper layer, use the real
+	 * upper st_dev.
+	 */
+	if (ofs->upper_mnt) {
+		ofs->fs[0].sb = ofs->upper_mnt->mnt_sb;
+		ofs->fs[0].pseudo_dev = ofs->upper_mnt->mnt_sb->s_dev;
+	}
+
 	for (i = 0; i < numlower; i++) {
 		struct vfsmount *mnt;
 		struct inode *trap;
@@ -1379,10 +1385,7 @@ static int ovl_get_layers(struct super_block *sb, struct ovl_fs *ofs,
 		ofs->layers[ofs->numlayer].mnt = mnt;
 		ofs->layers[ofs->numlayer].idx = ofs->numlayer;
 		ofs->layers[ofs->numlayer].fsid = fsid;
-		if (fsid) {
-			ofs->layers[ofs->numlayer].fs =
-						&ofs->lower_fs[fsid - 1];
-		}
+		ofs->layers[ofs->numlayer].fs = &ofs->fs[fsid];
 		ofs->numlayer++;
 	}
 
@@ -1394,18 +1397,18 @@ static int ovl_get_layers(struct super_block *sb, struct ovl_fs *ofs,
 	 * bits reserved for fsid, it emits a warning and uses the original
 	 * inode number.
 	 */
-	if (!ofs->numlowerfs || (ofs->numlowerfs == 1 && !ofs->upper_mnt)) {
+	if (ofs->numfs - !ofs->upper_mnt == 1) {
 		if (ofs->config.xino == OVL_XINO_ON)
 			pr_info("\"xino=on\" is useless with all layers on same fs, ignore.\n");
 		ofs->xino_mode = 0;
 	} else if (ofs->config.xino == OVL_XINO_ON && ofs->xino_mode < 0) {
 		/*
-		 * This is a roundup of number of bits needed for numlowerfs+1
-		 * (i.e. ilog2(numlowerfs+1 - 1) + 1). fsid 0 is reserved for
-		 * upper fs even with non upper overlay.
+		 * This is a roundup of number of bits needed for encoding
+		 * fsid, where fsid 0 is reserved for upper fs even with
+		 * lower only overlay.
 		 */
 		BUILD_BUG_ON(ilog2(OVL_MAX_STACK) > 31);
-		ofs->xino_mode = ilog2(ofs->numlowerfs) + 1;
+		ofs->xino_mode = ilog2(ofs->numfs - 1) + 1;
 	}
 
 	if (ofs->xino_mode > 0) {
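As a worked example of the xino_mode arithmetic in the last hunk, here is a small user-space sketch (ilog2() below is a stand-in for the kernel macro of the same name, and the sample numfs values are made up): ilog2(numfs - 1) + 1 is the number of high inode bits needed to encode fsids 0..numfs-1.

```c
#include <stdio.h>

/* User-space stand-in for the kernel's ilog2(): floor(log2(n)) for n > 0 */
static int ilog2(unsigned int n)
{
	int bits = -1;

	while (n) {
		n >>= 1;
		bits++;
	}
	return bits;
}

int main(void)
{
	/*
	 * numfs counts the fs[] slots: the reserved upper slot 0 plus one per
	 * unique lower fs. The else-if branch above only runs when the layers
	 * span more than one filesystem, so numfs >= 2 here.
	 */
	for (unsigned int numfs = 2; numfs <= 5; numfs++)
		printf("numfs=%u -> xino_mode=%d bits (fsids 0..%u)\n",
		       numfs, ilog2(numfs - 1) + 1, numfs - 1);
	return 0;
}
```

For instance, numfs=3 and numfs=4 both yield 2 bits, which is the "roundup" the rewritten comment refers to.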