nilfs2: add ioctl which limits range of segment to be allocated

This adds a new ioctl command which limits the range of segments to be
allocated.  This is intended to gather data within a range of the
partition before shrinking the filesystem, or to control the location
of new logs for some purpose.

If a range is specified through the ioctl, the segment allocator of
nilfs tries to allocate new segments from that range unless no free
segments are available there.
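The intended use from userspace looks roughly like the sketch below (not
part of this patch): fill a __u64[2] array with the first and last byte
offsets of the allowed area and pass it to the new ioctl.  It assumes the
NILFS_IOCTL_SET_ALLOC_RANGE definition added below is visible through
<linux/nilfs2_fs.h>, and that, like the other nilfs2 ioctls, the command
can be issued on a descriptor opened on any file or directory of the
mounted filesystem (the mount point path used here is hypothetical).

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/types.h>
#include <linux/nilfs2_fs.h>	/* assumed location of the ioctl definitions */

int main(int argc, char *argv[])
{
	/* restrict new segment allocation to the first 1 GiB of the device */
	__u64 range[2] = { 0, 1ULL << 30 };
	int fd, ret;

	fd = open(argc > 1 ? argv[1] : "/mnt/nilfs2", O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	ret = ioctl(fd, NILFS_IOCTL_SET_ALLOC_RANGE, range);
	if (ret < 0)
		perror("NILFS_IOCTL_SET_ALLOC_RANGE");
	close(fd);
	return ret < 0;
}

The new handler requires CAP_SYS_ADMIN and rejects an end offset beyond
the device size with ERANGE.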

Signed-off-by: Ryusuke Konishi <konishi.ryusuke@lab.ntt.co.jp>
Author: Ryusuke Konishi
Date:   2011-05-05 01:23:57 +09:00
Commit: 619205da5b (parent 56eb553885)

4 changed files with 100 additions and 10 deletions

fs/nilfs2/ioctl.c

@@ -698,6 +698,38 @@ static int nilfs_ioctl_sync(struct inode *inode, struct file *filp,
 	return 0;
 }
 
+static int nilfs_ioctl_set_alloc_range(struct inode *inode, void __user *argp)
+{
+	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
+	__u64 range[2];
+	__u64 minseg, maxseg;
+	unsigned long segbytes;
+	int ret = -EPERM;
+
+	if (!capable(CAP_SYS_ADMIN))
+		goto out;
+
+	ret = -EFAULT;
+	if (copy_from_user(range, argp, sizeof(__u64[2])))
+		goto out;
+
+	ret = -ERANGE;
+	if (range[1] > i_size_read(inode->i_sb->s_bdev->bd_inode))
+		goto out;
+
+	segbytes = nilfs->ns_blocks_per_segment * nilfs->ns_blocksize;
+
+	minseg = range[0] + segbytes - 1;
+	do_div(minseg, segbytes);
+	maxseg = NILFS_SB2_OFFSET_BYTES(range[1]);
+	do_div(maxseg, segbytes);
+	maxseg--;
+
+	ret = nilfs_sufile_set_alloc_range(nilfs->ns_sufile, minseg, maxseg);
+out:
+	return ret;
+}
+
 static int nilfs_ioctl_get_info(struct inode *inode, struct file *filp,
 				unsigned int cmd, void __user *argp,
 				size_t membsz,
@@ -763,6 +795,8 @@ long nilfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 		return nilfs_ioctl_clean_segments(inode, filp, cmd, argp);
 	case NILFS_IOCTL_SYNC:
 		return nilfs_ioctl_sync(inode, filp, cmd, argp);
+	case NILFS_IOCTL_SET_ALLOC_RANGE:
+		return nilfs_ioctl_set_alloc_range(inode, argp);
 	default:
 		return -ENOTTY;
 	}
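The minseg/maxseg computation above converts the requested byte range into
an inclusive segment-number range: the start offset is rounded up to the
next segment boundary, and the end offset is rounded down so that the
segment which would hold the secondary superblock at that offset is
excluded.  A small standalone sketch of that arithmetic follows
(illustration only; the 8 MiB segment size, the example byte range, and
the SB2_OFFSET_BYTES macro body are assumptions, the macro written to
mimic what NILFS_SB2_OFFSET_BYTES() is expected to yield):

#include <stdio.h>
#include <stdint.h>

/* Hypothetical geometry for the example: 8 MiB segments. */
#define SEGBYTES		(8ULL << 20)
/* Assumed equivalent of NILFS_SB2_OFFSET_BYTES(): one 4 KiB block below
 * the 4 KiB-aligned end of the given size. */
#define SB2_OFFSET_BYTES(size)	((((size) >> 12) - 1) << 12)

int main(void)
{
	uint64_t range[2] = { 1ULL << 30, 4ULL << 30 };	/* bytes [1 GiB, 4 GiB] */

	uint64_t minseg = (range[0] + SEGBYTES - 1) / SEGBYTES;	/* round start up */
	uint64_t maxseg = SB2_OFFSET_BYTES(range[1]) / SEGBYTES - 1;	/* round end down */

	/* prints "segments 128..510" for the values above */
	printf("segments %llu..%llu\n",
	       (unsigned long long)minseg, (unsigned long long)maxseg);
	return 0;
}

With these numbers the allocatable region becomes segments 128 through 510
inclusive, which is what nilfs_sufile_set_alloc_range() then records as
allocmin/allocmax.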

fs/nilfs2/sufile.c

@@ -33,7 +33,9 @@
 
 struct nilfs_sufile_info {
 	struct nilfs_mdt_info mi;
-	unsigned long ncleansegs;
+	unsigned long ncleansegs;/* number of clean segments */
+	__u64 allocmin;		/* lower limit of allocatable segment range */
+	__u64 allocmax;		/* upper limit of allocatable segment range */
 };
 
 static inline struct nilfs_sufile_info *NILFS_SUI(struct inode *sufile)
@@ -247,6 +249,35 @@ int nilfs_sufile_update(struct inode *sufile, __u64 segnum, int create,
 	return ret;
 }
 
+/**
+ * nilfs_sufile_set_alloc_range - limit range of segment to be allocated
+ * @sufile: inode of segment usage file
+ * @start: minimum segment number of allocatable region (inclusive)
+ * @end: maximum segment number of allocatable region (inclusive)
+ *
+ * Return Value: On success, 0 is returned.  On error, one of the
+ * following negative error codes is returned.
+ *
+ * %-ERANGE - invalid segment region
+ */
+int nilfs_sufile_set_alloc_range(struct inode *sufile, __u64 start, __u64 end)
+{
+	struct nilfs_sufile_info *sui = NILFS_SUI(sufile);
+	__u64 nsegs;
+	int ret = -ERANGE;
+
+	down_write(&NILFS_MDT(sufile)->mi_sem);
+	nsegs = nilfs_sufile_get_nsegments(sufile);
+
+	if (start <= end && end < nsegs) {
+		sui->allocmin = start;
+		sui->allocmax = end;
+		ret = 0;
+	}
+	up_write(&NILFS_MDT(sufile)->mi_sem);
+	return ret;
+}
+
 /**
  * nilfs_sufile_alloc - allocate a segment
  * @sufile: inode of segment usage file
@@ -269,11 +300,12 @@ int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
 	struct buffer_head *header_bh, *su_bh;
 	struct nilfs_sufile_header *header;
 	struct nilfs_segment_usage *su;
+	struct nilfs_sufile_info *sui = NILFS_SUI(sufile);
 	size_t susz = NILFS_MDT(sufile)->mi_entry_size;
 	__u64 segnum, maxsegnum, last_alloc;
 	void *kaddr;
-	unsigned long nsegments, ncleansegs, nsus;
-	int ret, i, j;
+	unsigned long nsegments, ncleansegs, nsus, cnt;
+	int ret, j;
 
 	down_write(&NILFS_MDT(sufile)->mi_sem);
 
@@ -287,13 +319,31 @@ int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
 	kunmap_atomic(kaddr, KM_USER0);
 
 	nsegments = nilfs_sufile_get_nsegments(sufile);
+	maxsegnum = sui->allocmax;
 	segnum = last_alloc + 1;
-	maxsegnum = nsegments - 1;
-	for (i = 0; i < nsegments; i += nsus) {
-		if (segnum >= nsegments) {
-			/* wrap around */
-			segnum = 0;
-			maxsegnum = last_alloc;
+	if (segnum < sui->allocmin || segnum > sui->allocmax)
+		segnum = sui->allocmin;
+
+	for (cnt = 0; cnt < nsegments; cnt += nsus) {
+		if (segnum > maxsegnum) {
+			if (cnt < sui->allocmax - sui->allocmin + 1) {
+				/*
+				 * wrap around in the limited region.
+				 * if allocation started from
+				 * sui->allocmin, this never happens.
+				 */
+				segnum = sui->allocmin;
+				maxsegnum = last_alloc;
+			} else if (segnum > sui->allocmin &&
+				   sui->allocmax + 1 < nsegments) {
+				segnum = sui->allocmax + 1;
+				maxsegnum = nsegments - 1;
+			} else if (sui->allocmin > 0) {
+				segnum = 0;
+				maxsegnum = sui->allocmin - 1;
+			} else {
+				break; /* never happens */
+			}
 		}
 		ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 1,
 							   &su_bh);
@@ -319,7 +369,7 @@ int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
 			header->sh_last_alloc = cpu_to_le64(segnum);
 			kunmap_atomic(kaddr, KM_USER0);
 
-			NILFS_SUI(sufile)->ncleansegs--;
+			sui->ncleansegs--;
 			nilfs_mdt_mark_buffer_dirty(header_bh);
 			nilfs_mdt_mark_buffer_dirty(su_bh);
 			nilfs_mdt_mark_dirty(sufile);
@@ -679,6 +729,9 @@ int nilfs_sufile_read(struct super_block *sb, size_t susize,
 	kunmap_atomic(kaddr, KM_USER0);
 	brelse(header_bh);
 
+	sui->allocmax = nilfs_sufile_get_nsegments(sufile) - 1;
+	sui->allocmin = 0;
+
 	unlock_new_inode(sufile);
  out:
 	*inodep = sufile;

fs/nilfs2/sufile.h

@@ -36,6 +36,7 @@ static inline unsigned long nilfs_sufile_get_nsegments(struct inode *sufile)
 unsigned long nilfs_sufile_get_ncleansegs(struct inode *sufile);
 
+int nilfs_sufile_set_alloc_range(struct inode *sufile, __u64 start, __u64 end);
 int nilfs_sufile_alloc(struct inode *, __u64 *);
 int nilfs_sufile_mark_dirty(struct inode *sufile, __u64 segnum);
 int nilfs_sufile_set_segment_usage(struct inode *sufile, __u64 segnum,

include/linux/nilfs2_fs.h

@@ -845,5 +845,7 @@ struct nilfs_bdesc {
 	_IOR(NILFS_IOCTL_IDENT, 0x8A, __u64)
 #define NILFS_IOCTL_RESIZE  \
 	_IOW(NILFS_IOCTL_IDENT, 0x8B, __u64)
+#define NILFS_IOCTL_SET_ALLOC_RANGE  \
+	_IOW(NILFS_IOCTL_IDENT, 0x8C, __u64[2])
 
 #endif	/* _LINUX_NILFS_FS_H */