ext4: drop ec_type from the ext4_ext_cache structure
We can encode the ec_type information by using ec_len == 0 to denote EXT4_EXT_CACHE_NO, ec_start == 0 to denote EXT4_EXT_CACHE_GAP, and if neither is true, then the cache type must be EXT4_EXT_CACHE_EXTENT. This allows us to reduce the size of struct ext4_ext_cache (and hence of ext4_inode_info, which embeds it) by another 8 bytes: ec_type is 4 bytes, plus another 4 bytes of padding.

Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
commit b05e6ae58a
parent 01f49d0b9d
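For reference, the encoding described above can be read back from the two remaining fields as follows. This is a minimal sketch, not part of the patch: the helper name and enum are hypothetical, and the kernel typedefs are expanded (ext4_fsblk_t as unsigned long long, ext4_lblk_t as unsigned int) so the snippet stands alone.

struct ext4_ext_cache {
	unsigned long long	ec_start;	/* ext4_fsblk_t */
	unsigned int		ec_block;	/* ext4_lblk_t */
	unsigned int		ec_len;		/* must be 32bit to return holes */
};

enum { CACHE_INVALID, CACHE_GAP, CACHE_EXTENT };

/* Hypothetical helper: classify a cached entry using only ec_len/ec_start. */
static inline int ext4_ext_cache_state(const struct ext4_ext_cache *cex)
{
	if (cex->ec_len == 0)
		return CACHE_INVALID;	/* was EXT4_EXT_CACHE_NO */
	if (cex->ec_start == 0)
		return CACHE_GAP;	/* null mapping, was EXT4_EXT_CACHE_GAP */
	return CACHE_EXTENT;		/* real mapping, was EXT4_EXT_CACHE_EXTENT */
}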
fs/ext4/ext4.h
@@ -738,12 +738,13 @@ do { \
 
 /*
  * storage for cached extent
+ * If ec_len == 0, then the cache is invalid.
+ * If ec_start == 0, then the cache represents a gap (null mapping)
  */
 struct ext4_ext_cache {
 	ext4_fsblk_t	ec_start;
 	ext4_lblk_t	ec_block;
 	__u32		ec_len;	/* must be 32bit to return holes */
-	__u32		ec_type;
 };
 
 /*
fs/ext4/ext4_extents.h
@@ -119,10 +119,6 @@ struct ext4_ext_path {
  * structure for external API
  */
 
-#define EXT4_EXT_CACHE_NO	0
-#define EXT4_EXT_CACHE_GAP	1
-#define EXT4_EXT_CACHE_EXTENT	2
-
 /*
  * to be called by ext4_ext_walk_space()
  * negative retcode - error
@@ -197,7 +193,7 @@ static inline unsigned short ext_depth(struct inode *inode)
 static inline void
 ext4_ext_invalidate_cache(struct inode *inode)
 {
-	EXT4_I(inode)->i_cached_extent.ec_type = EXT4_EXT_CACHE_NO;
+	EXT4_I(inode)->i_cached_extent.ec_len = 0;
 }
 
 static inline void ext4_ext_mark_uninitialized(struct ext4_extent *ext)
fs/ext4/extents.c
@@ -1894,12 +1894,10 @@ static int ext4_ext_walk_space(struct inode *inode, ext4_lblk_t block,
 			cbex.ec_block = start;
 			cbex.ec_len = end - start;
 			cbex.ec_start = 0;
-			cbex.ec_type = EXT4_EXT_CACHE_GAP;
 		} else {
 			cbex.ec_block = le32_to_cpu(ex->ee_block);
 			cbex.ec_len = ext4_ext_get_actual_len(ex);
 			cbex.ec_start = ext4_ext_pblock(ex);
-			cbex.ec_type = EXT4_EXT_CACHE_EXTENT;
 		}
 
 		if (unlikely(cbex.ec_len == 0)) {
@@ -1939,13 +1937,12 @@ static int ext4_ext_walk_space(struct inode *inode, ext4_lblk_t block,
 
 static void
 ext4_ext_put_in_cache(struct inode *inode, ext4_lblk_t block,
-			__u32 len, ext4_fsblk_t start, int type)
+			__u32 len, ext4_fsblk_t start)
 {
 	struct ext4_ext_cache *cex;
 	BUG_ON(len == 0);
 	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
 	cex = &EXT4_I(inode)->i_cached_extent;
-	cex->ec_type = type;
 	cex->ec_block = block;
 	cex->ec_len = len;
 	cex->ec_start = start;
@@ -1998,15 +1995,18 @@ ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
 	}
 
 	ext_debug(" -> %u:%lu\n", lblock, len);
-	ext4_ext_put_in_cache(inode, lblock, len, 0, EXT4_EXT_CACHE_GAP);
+	ext4_ext_put_in_cache(inode, lblock, len, 0);
 }
 
+/*
+ * Return 0 if cache is invalid; 1 if the cache is valid
+ */
 static int
 ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block,
 			struct ext4_extent *ex)
 {
 	struct ext4_ext_cache *cex;
-	int ret = EXT4_EXT_CACHE_NO;
+	int ret = 0;
 
 	/*
 	 * We borrow i_block_reservation_lock to protect i_cached_extent
@@ -2015,11 +2015,9 @@ ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block,
 	cex = &EXT4_I(inode)->i_cached_extent;
 
 	/* has cache valid data? */
-	if (cex->ec_type == EXT4_EXT_CACHE_NO)
+	if (cex->ec_len == 0)
 		goto errout;
 
-	BUG_ON(cex->ec_type != EXT4_EXT_CACHE_GAP &&
-			cex->ec_type != EXT4_EXT_CACHE_EXTENT);
 	if (in_range(block, cex->ec_block, cex->ec_len)) {
 		ex->ee_block = cpu_to_le32(cex->ec_block);
 		ext4_ext_store_pblock(ex, cex->ec_start);
@@ -2027,7 +2025,7 @@ ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block,
 		ext_debug("%u cached by %u:%u:%llu\n",
 				block,
 				cex->ec_block, cex->ec_len, cex->ec_start);
-		ret = cex->ec_type;
+		ret = 1;
 	}
 errout:
 	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
@@ -3298,7 +3296,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
 	struct ext4_extent_header *eh;
 	struct ext4_extent newex, *ex;
 	ext4_fsblk_t newblock;
-	int err = 0, depth, ret, cache_type;
+	int err = 0, depth, ret;
 	unsigned int allocated = 0;
 	struct ext4_allocation_request ar;
 	ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio;
@@ -3307,9 +3305,8 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
 		  map->m_lblk, map->m_len, inode->i_ino);
 
 	/* check in cache */
-	cache_type = ext4_ext_in_cache(inode, map->m_lblk, &newex);
-	if (cache_type) {
-		if (cache_type == EXT4_EXT_CACHE_GAP) {
+	if (ext4_ext_in_cache(inode, map->m_lblk, &newex)) {
+		if (!newex.ee_start_lo && !newex.ee_start_hi) {
 			if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
 				/*
 				 * block isn't allocated yet and
@@ -3318,7 +3315,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
 				goto out2;
 			}
 			/* we should allocate requested block */
-		} else if (cache_type == EXT4_EXT_CACHE_EXTENT) {
+		} else {
 			/* block is already allocated */
 			newblock = map->m_lblk
 				   - le32_to_cpu(newex.ee_block)
@@ -3327,8 +3324,6 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
 			allocated = ext4_ext_get_actual_len(&newex) -
 				(map->m_lblk - le32_to_cpu(newex.ee_block));
 			goto out;
-		} else {
-			BUG();
 		}
 	}
 
@@ -3379,8 +3374,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
 		/* Do not put uninitialized extent in the cache */
 		if (!ext4_ext_is_uninitialized(ex)) {
 			ext4_ext_put_in_cache(inode, ee_block,
-						ee_len, ee_start,
-						EXT4_EXT_CACHE_EXTENT);
+						ee_len, ee_start);
 			goto out;
 		}
 		ret = ext4_ext_handle_uninitialized_extents(handle,
@@ -3512,8 +3506,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
 	 * when it is _not_ an uninitialized extent.
 	 */
 	if ((flags & EXT4_GET_BLOCKS_UNINIT_EXT) == 0) {
-		ext4_ext_put_in_cache(inode, map->m_lblk, allocated, newblock,
-						EXT4_EXT_CACHE_EXTENT);
+		ext4_ext_put_in_cache(inode, map->m_lblk, allocated, newblock);
 		ext4_update_inode_fsync_trans(handle, inode, 1);
 	} else
 		ext4_update_inode_fsync_trans(handle, inode, 0);
@@ -3789,7 +3782,7 @@ static int ext4_ext_fiemap_cb(struct inode *inode, struct ext4_ext_path *path,
 
 	logical = (__u64)newex->ec_block << blksize_bits;
 
-	if (newex->ec_type == EXT4_EXT_CACHE_GAP) {
+	if (newex->ec_start == 0) {
 		pgoff_t offset;
 		struct page *page;
 		struct buffer_head *bh = NULL;