Merge branch 'upstream-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jlbec/ocfs2
* 'upstream-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jlbec/ocfs2:
  ocfs2: Fix system inodes cache overflow.
  ocfs2: Hold ip_lock when set/clear flags for indexed dir.
  ocfs2: Adjust masklog flag values
  Ocfs2: Teach 'coherency=full' O_DIRECT writes to correctly up_read i_alloc_sem.
  ocfs2/dlm: Migrate lockres with no locks if it has a reference
commit eda4b716ea
fs/ocfs2/aops.c
@@ -573,11 +573,14 @@ static void ocfs2_dio_end_io(struct kiocb *iocb,
 	/* this io's submitter should not have unlocked this before we could */
 	BUG_ON(!ocfs2_iocb_is_rw_locked(iocb));
 
+	if (ocfs2_iocb_is_sem_locked(iocb)) {
+		up_read(&inode->i_alloc_sem);
+		ocfs2_iocb_clear_sem_locked(iocb);
+	}
+
 	ocfs2_iocb_clear_rw_locked(iocb);
 
 	level = ocfs2_iocb_rw_locked_level(iocb);
-	if (!level)
-		up_read(&inode->i_alloc_sem);
 	ocfs2_rw_unlock(inode, level);
 
 	if (is_async)
fs/ocfs2/aops.h
@@ -68,8 +68,27 @@ static inline void ocfs2_iocb_set_rw_locked(struct kiocb *iocb, int level)
 	else
 		clear_bit(1, (unsigned long *)&iocb->private);
 }
+
+/*
+ * Using a named enum representing lock types in terms of #N bit stored in
+ * iocb->private, which is going to be used for communication between
+ * ocfs2_dio_end_io() and ocfs2_file_aio_write/read().
+ */
+enum ocfs2_iocb_lock_bits {
+	OCFS2_IOCB_RW_LOCK = 0,
+	OCFS2_IOCB_RW_LOCK_LEVEL,
+	OCFS2_IOCB_SEM,
+	OCFS2_IOCB_NUM_LOCKS
+};
+
 #define ocfs2_iocb_clear_rw_locked(iocb) \
-	clear_bit(0, (unsigned long *)&iocb->private)
+	clear_bit(OCFS2_IOCB_RW_LOCK, (unsigned long *)&iocb->private)
 #define ocfs2_iocb_rw_locked_level(iocb) \
-	test_bit(1, (unsigned long *)&iocb->private)
+	test_bit(OCFS2_IOCB_RW_LOCK_LEVEL, (unsigned long *)&iocb->private)
+#define ocfs2_iocb_set_sem_locked(iocb) \
+	set_bit(OCFS2_IOCB_SEM, (unsigned long *)&iocb->private)
+#define ocfs2_iocb_clear_sem_locked(iocb) \
+	clear_bit(OCFS2_IOCB_SEM, (unsigned long *)&iocb->private)
+#define ocfs2_iocb_is_sem_locked(iocb) \
+	test_bit(OCFS2_IOCB_SEM, (unsigned long *)&iocb->private)
 #endif /* OCFS2_FILE_H */
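
The enum above replaces the magic bit numbers 0 and 1 with named bits and adds OCFS2_IOCB_SEM so the I/O submitter and ocfs2_dio_end_io() can agree on who still owes the up_read of i_alloc_sem. A minimal userspace sketch of that handshake follows; struct fake_iocb, set_flag/clear_flag/test_flag and the main() harness are illustrative stand-ins, not kernel code (the kernel uses the atomic set_bit/clear_bit/test_bit on iocb->private):

    #include <assert.h>
    #include <stdio.h>

    /* stand-in for struct kiocb: only the private word the flag bits live in */
    struct fake_iocb { unsigned long private; };

    enum fake_iocb_lock_bits { RW_LOCK = 0, RW_LOCK_LEVEL, SEM, NUM_LOCKS };

    #define set_flag(iocb, bit)   ((iocb)->private |=  (1UL << (bit)))
    #define clear_flag(iocb, bit) ((iocb)->private &= ~(1UL << (bit)))
    #define test_flag(iocb, bit)  (!!((iocb)->private & (1UL << (bit))))

    int main(void)
    {
            struct fake_iocb iocb = { 0 };

            /* submitter: takes i_alloc_sem and records that fact in the iocb */
            set_flag(&iocb, SEM);

            /* completion handler: drops the semaphore only if it is still owed */
            if (test_flag(&iocb, SEM)) {
                    /* up_read(&inode->i_alloc_sem) would happen here in the kernel */
                    clear_flag(&iocb, SEM);
            }

            assert(!test_flag(&iocb, SEM));
            printf("i_alloc_sem handed off and released exactly once\n");
            return 0;
    }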
fs/ocfs2/cluster/masklog.c
@@ -113,10 +113,11 @@ static struct mlog_attribute mlog_attrs[MLOG_MAX_BITS] = {
 	define_mask(QUOTA),
 	define_mask(REFCOUNT),
 	define_mask(BASTS),
+	define_mask(RESERVATIONS),
+	define_mask(CLUSTER),
 	define_mask(ERROR),
 	define_mask(NOTICE),
 	define_mask(KTHREAD),
-	define_mask(RESERVATIONS),
 };
 
 static struct attribute *mlog_attr_ptrs[MLOG_MAX_BITS] = {NULL, };
fs/ocfs2/cluster/masklog.h
@@ -81,7 +81,7 @@
 #include <linux/sched.h>
 
 /* bits that are frequently given and infrequently matched in the low word */
-/* NOTE: If you add a flag, you need to also update mlog.c! */
+/* NOTE: If you add a flag, you need to also update masklog.c! */
 #define ML_ENTRY	0x0000000000000001ULL /* func call entry */
 #define ML_EXIT		0x0000000000000002ULL /* func call exit */
 #define ML_TCP		0x0000000000000004ULL /* net cluster/tcp.c */
@@ -114,13 +114,14 @@
 #define ML_XATTR	0x0000000020000000ULL /* ocfs2 extended attributes */
 #define ML_QUOTA	0x0000000040000000ULL /* ocfs2 quota operations */
 #define ML_REFCOUNT	0x0000000080000000ULL /* refcount tree operations */
-#define ML_BASTS	0x0000001000000000ULL /* dlmglue asts and basts */
+#define ML_BASTS	0x0000000100000000ULL /* dlmglue asts and basts */
+#define ML_RESERVATIONS	0x0000000200000000ULL /* ocfs2 alloc reservations */
+#define ML_CLUSTER	0x0000000400000000ULL /* cluster stack */
+
 /* bits that are infrequently given and frequently matched in the high word */
-#define ML_ERROR	0x0000000100000000ULL /* sent to KERN_ERR */
-#define ML_NOTICE	0x0000000200000000ULL /* sent to KERN_NOTICE */
-#define ML_KTHREAD	0x0000000400000000ULL /* kernel thread activity */
-#define ML_RESERVATIONS	0x0000000800000000ULL /* ocfs2 alloc reservations */
-#define ML_CLUSTER	0x0000001000000000ULL /* cluster stack */
+#define ML_ERROR	0x1000000000000000ULL /* sent to KERN_ERR */
+#define ML_NOTICE	0x2000000000000000ULL /* sent to KERN_NOTICE */
+#define ML_KTHREAD	0x4000000000000000ULL /* kernel thread activity */
 
 #define MLOG_INITIAL_AND_MASK (ML_ERROR|ML_NOTICE)
 #define MLOG_INITIAL_NOT_MASK (ML_ENTRY|ML_EXIT)
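
The renumbering matters because, in the removed lines above, ML_BASTS and ML_CLUSTER share the value 0x0000001000000000ULL, so a mask that matches one also matches the other; the new layout gives every flag its own bit and moves ERROR/NOTICE/KTHREAD to the top nibble. A small check program as a sketch (hypothetical harness; the values are copied from the hunk above):

    #include <stdio.h>

    /* values copied from the removed (OLD_) and added (NEW_) lines above */
    #define OLD_ML_BASTS    0x0000001000000000ULL
    #define OLD_ML_CLUSTER  0x0000001000000000ULL
    #define NEW_ML_BASTS    0x0000000100000000ULL
    #define NEW_ML_CLUSTER  0x0000000400000000ULL

    int main(void)
    {
            /* old layout: a line tagged ML_BASTS also matches an ML_CLUSTER mask */
            printf("old BASTS/CLUSTER collide: %d\n",
                   (OLD_ML_BASTS & OLD_ML_CLUSTER) != 0);
            /* new layout: every mask owns a distinct bit */
            printf("new BASTS/CLUSTER collide: %d\n",
                   (NEW_ML_BASTS & NEW_ML_CLUSTER) != 0);
            return 0;
    }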
fs/ocfs2/dir.c
@@ -2461,8 +2461,10 @@ static int ocfs2_dx_dir_attach_index(struct ocfs2_super *osb,
 
 	di->i_dx_root = cpu_to_le64(dr_blkno);
 
+	spin_lock(&OCFS2_I(dir)->ip_lock);
 	OCFS2_I(dir)->ip_dyn_features |= OCFS2_INDEXED_DIR_FL;
 	di->i_dyn_features = cpu_to_le16(OCFS2_I(dir)->ip_dyn_features);
+	spin_unlock(&OCFS2_I(dir)->ip_lock);
 
 	ocfs2_journal_dirty(handle, di_bh);
 
@@ -4466,8 +4468,10 @@ static int ocfs2_dx_dir_remove_index(struct inode *dir,
 		goto out_commit;
 	}
 
+	spin_lock(&OCFS2_I(dir)->ip_lock);
 	OCFS2_I(dir)->ip_dyn_features &= ~OCFS2_INDEXED_DIR_FL;
 	di->i_dyn_features = cpu_to_le16(OCFS2_I(dir)->ip_dyn_features);
+	spin_unlock(&OCFS2_I(dir)->ip_lock);
 	di->i_dx_root = cpu_to_le64(0ULL);
 
 	ocfs2_journal_dirty(handle, di_bh);
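
Both dir.c hunks wrap the read-modify-write of ip_dyn_features in ip_lock, the same lock other updaters of that field take; without it, two concurrent updates can both read the old value and one of the new flags is lost. A compressed userspace model of the locked pattern, as a sketch only (pthread mutex standing in for the ocfs2 spinlock; struct fake_inode, set_feature() and the flag values are illustrative, not the kernel API):

    #include <pthread.h>
    #include <stdio.h>

    /* stand-in for OCFS2_I(dir): just the feature word and its lock */
    struct fake_inode {
            pthread_mutex_t ip_lock;        /* spin_lock(&oi->ip_lock) in the kernel */
            unsigned short  ip_dyn_features;
    };

    #define INDEXED_DIR_FL  0x0004          /* placeholder bit values */
    #define INLINE_DATA_FL  0x0001

    /* the pattern the hunks introduce: lock, read-modify-write, publish, unlock */
    static void set_feature(struct fake_inode *oi, unsigned short fl)
    {
            pthread_mutex_lock(&oi->ip_lock);
            oi->ip_dyn_features |= fl;
            /* di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features) happens here */
            pthread_mutex_unlock(&oi->ip_lock);
    }

    int main(void)
    {
            struct fake_inode dir = { PTHREAD_MUTEX_INITIALIZER, 0 };
            set_feature(&dir, INDEXED_DIR_FL);
            set_feature(&dir, INLINE_DATA_FL);
            printf("ip_dyn_features = 0x%04x\n", (unsigned)dir.ip_dyn_features);
            return 0;
    }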
fs/ocfs2/dlm/dlmmaster.c
@@ -2346,7 +2346,8 @@ static void dlm_deref_lockres_worker(struct dlm_work_item *item, void *data)
  */
 static int dlm_is_lockres_migrateable(struct dlm_ctxt *dlm,
 				      struct dlm_lock_resource *res,
-				      int *numlocks)
+				      int *numlocks,
+				      int *hasrefs)
 {
 	int ret;
 	int i;
@@ -2356,6 +2357,9 @@ static int dlm_is_lockres_migrateable(struct dlm_ctxt *dlm,
 
 	assert_spin_locked(&res->spinlock);
 
+	*numlocks = 0;
+	*hasrefs = 0;
+
 	ret = -EINVAL;
 	if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
 		mlog(0, "cannot migrate lockres with unknown owner!\n");
@@ -2386,7 +2390,13 @@ static int dlm_is_lockres_migrateable(struct dlm_ctxt *dlm,
 	}
 
 	*numlocks = count;
-	mlog(0, "migrateable lockres having %d locks\n", *numlocks);
+
+	count = find_next_bit(res->refmap, O2NM_MAX_NODES, 0);
+	if (count < O2NM_MAX_NODES)
+		*hasrefs = 1;
+
+	mlog(0, "%s: res %.*s, Migrateable, locks %d, refs %d\n", dlm->name,
+	     res->lockname.len, res->lockname.name, *numlocks, *hasrefs);
 
 leave:
 	return ret;
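
dlm_is_lockres_migrateable() now reports not only the lock count but also whether any remote node still appears in res->refmap: find_next_bit() returns O2NM_MAX_NODES when the bitmap is empty, so a smaller return value means some node still holds a reference and the lockres is still worth migrating. A userspace model of that test follows; the hand-rolled find_next_bit() and the MAX_NODES value of 255 are assumptions for illustration only:

    #include <stdio.h>
    #include <limits.h>

    #define MAX_NODES 255                      /* stands in for O2NM_MAX_NODES */
    #define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)
    #define BITMAP_LONGS ((MAX_NODES + BITS_PER_LONG - 1) / BITS_PER_LONG)

    /* simplified find_next_bit: first set bit at or after 'start', or 'size' */
    static unsigned int find_next_bit(const unsigned long *map,
                                      unsigned int size, unsigned int start)
    {
            for (unsigned int i = start; i < size; i++)
                    if (map[i / BITS_PER_LONG] & (1UL << (i % BITS_PER_LONG)))
                            return i;
            return size;
    }

    int main(void)
    {
            unsigned long refmap[BITMAP_LONGS] = { 0 };
            int hasrefs;

            refmap[0] |= 1UL << 7;             /* pretend node 7 holds a reference */

            /* same shape as the added code: any set bit means "has references" */
            hasrefs = find_next_bit(refmap, MAX_NODES, 0) < MAX_NODES;
            printf("hasrefs = %d\n", hasrefs);
            return 0;
    }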
@@ -2408,7 +2418,7 @@ static int dlm_migrate_lockres(struct dlm_ctxt *dlm,
 	const char *name;
 	unsigned int namelen;
 	int mle_added = 0;
-	int numlocks;
+	int numlocks, hasrefs;
 	int wake = 0;
 
 	if (!dlm_grab(dlm))
@@ -2417,13 +2427,13 @@ static int dlm_migrate_lockres(struct dlm_ctxt *dlm,
 	name = res->lockname.name;
 	namelen = res->lockname.len;
 
-	mlog(0, "migrating %.*s to %u\n", namelen, name, target);
+	mlog(0, "%s: Migrating %.*s to %u\n", dlm->name, namelen, name, target);
 
 	/*
 	 * ensure this lockres is a proper candidate for migration
 	 */
 	spin_lock(&res->spinlock);
-	ret = dlm_is_lockres_migrateable(dlm, res, &numlocks);
+	ret = dlm_is_lockres_migrateable(dlm, res, &numlocks, &hasrefs);
 	if (ret < 0) {
 		spin_unlock(&res->spinlock);
 		goto leave;
@@ -2431,10 +2441,8 @@ static int dlm_migrate_lockres(struct dlm_ctxt *dlm,
 	spin_unlock(&res->spinlock);
 
 	/* no work to do */
-	if (numlocks == 0) {
-		mlog(0, "no locks were found on this lockres! done!\n");
+	if (numlocks == 0 && !hasrefs)
 		goto leave;
-	}
 
 	/*
 	 * preallocate up front
@@ -2459,14 +2467,14 @@ static int dlm_migrate_lockres(struct dlm_ctxt *dlm,
 	 * find a node to migrate the lockres to
 	 */
 
-	mlog(0, "picking a migration node\n");
 	spin_lock(&dlm->spinlock);
 	/* pick a new node */
 	if (!test_bit(target, dlm->domain_map) ||
 	    target >= O2NM_MAX_NODES) {
 		target = dlm_pick_migration_target(dlm, res);
 	}
-	mlog(0, "node %u chosen for migration\n", target);
+	mlog(0, "%s: res %.*s, Node %u chosen for migration\n", dlm->name,
+	     namelen, name, target);
 
 	if (target >= O2NM_MAX_NODES ||
 	    !test_bit(target, dlm->domain_map)) {
@@ -2667,7 +2675,7 @@ int dlm_empty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
 {
 	int ret;
 	int lock_dropped = 0;
-	int numlocks;
+	int numlocks, hasrefs;
 
 	spin_lock(&res->spinlock);
 	if (res->owner != dlm->node_num) {
@@ -2681,8 +2689,8 @@ int dlm_empty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
 	}
 
 	/* No need to migrate a lockres having no locks */
-	ret = dlm_is_lockres_migrateable(dlm, res, &numlocks);
-	if (ret >= 0 && numlocks == 0) {
+	ret = dlm_is_lockres_migrateable(dlm, res, &numlocks, &hasrefs);
+	if (ret >= 0 && numlocks == 0 && !hasrefs) {
 		spin_unlock(&res->spinlock);
 		goto leave;
 	}
@@ -2915,6 +2923,12 @@ static u8 dlm_pick_migration_target(struct dlm_ctxt *dlm,
 		}
 		queue++;
 	}
+
+	nodenum = find_next_bit(res->refmap, O2NM_MAX_NODES, 0);
+	if (nodenum < O2NM_MAX_NODES) {
+		spin_unlock(&res->spinlock);
+		return nodenum;
+	}
 	spin_unlock(&res->spinlock);
 	mlog(0, "have not found a suitable target yet! checking domain map\n");
 
fs/ocfs2/file.c
@@ -2241,11 +2241,15 @@ static ssize_t ocfs2_file_aio_write(struct kiocb *iocb,
 
 	mutex_lock(&inode->i_mutex);
 
+	ocfs2_iocb_clear_sem_locked(iocb);
+
 relock:
 	/* to match setattr's i_mutex -> i_alloc_sem -> rw_lock ordering */
 	if (direct_io) {
 		down_read(&inode->i_alloc_sem);
 		have_alloc_sem = 1;
+		/* communicate with ocfs2_dio_end_io */
+		ocfs2_iocb_set_sem_locked(iocb);
 	}
 
 	/*
@@ -2382,8 +2386,10 @@ static ssize_t ocfs2_file_aio_write(struct kiocb *iocb,
 		ocfs2_rw_unlock(inode, rw_level);
 
 out_sems:
-	if (have_alloc_sem)
+	if (have_alloc_sem) {
 		up_read(&inode->i_alloc_sem);
+		ocfs2_iocb_clear_sem_locked(iocb);
+	}
 
 	mutex_unlock(&inode->i_mutex);
 
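
Together with the aops.c and aops.h hunks, the write path now records in the iocb that it holds i_alloc_sem, and whichever path still sees that mark, ocfs2_dio_end_io() for completed async O_DIRECT I/O or the out_sems cleanup above, performs the single up_read and clears the mark, so 'coherency=full' O_DIRECT writes no longer miss the release. A reduced model of that "exactly one of us unlocks" idea, as a sketch only (hypothetical names, plain ints instead of an rw_semaphore):

    #include <assert.h>
    #include <stdio.h>

    static int sem_held;    /* models inode->i_alloc_sem being read-held */
    static int sem_flag;    /* models the OCFS2_IOCB_SEM bit in iocb->private */

    static void release_if_flagged(const char *who)
    {
            /* both the completion handler and the submitter's cleanup path run
             * this; only the first one to see the flag actually unlocks */
            if (sem_flag) {
                    assert(sem_held);
                    sem_held = 0;
                    sem_flag = 0;
                    printf("%s dropped i_alloc_sem\n", who);
            }
    }

    int main(void)
    {
            /* submitter: down_read() plus setting the flag, as in the relock: hunk */
            sem_held = 1;
            sem_flag = 1;

            release_if_flagged("dio_end_io");   /* completion path */
            release_if_flagged("out_sems");     /* cleanup path: now a no-op */

            assert(!sem_held);
            return 0;
    }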
@@ -2527,6 +2533,8 @@ static ssize_t ocfs2_file_aio_read(struct kiocb *iocb,
 		goto bail;
 	}
 
+	ocfs2_iocb_clear_sem_locked(iocb);
+
 	/*
 	 * buffered reads protect themselves in ->readpage(). O_DIRECT reads
 	 * need locks to protect pending reads from racing with truncate.
@@ -2534,6 +2542,7 @@ static ssize_t ocfs2_file_aio_read(struct kiocb *iocb,
 	if (filp->f_flags & O_DIRECT) {
 		down_read(&inode->i_alloc_sem);
 		have_alloc_sem = 1;
+		ocfs2_iocb_set_sem_locked(iocb);
 
 		ret = ocfs2_rw_lock(inode, 0);
 		if (ret < 0) {
@@ -2575,8 +2584,10 @@ static ssize_t ocfs2_file_aio_read(struct kiocb *iocb,
 	}
 
 bail:
-	if (have_alloc_sem)
+	if (have_alloc_sem) {
 		up_read(&inode->i_alloc_sem);
+		ocfs2_iocb_clear_sem_locked(iocb);
+	}
 	if (rw_level != -1)
 		ocfs2_rw_unlock(inode, rw_level);
 	mlog_exit(ret);
fs/ocfs2/ocfs2_fs.h
@@ -350,7 +350,7 @@ enum {
 #define OCFS2_LAST_LOCAL_SYSTEM_INODE LOCAL_GROUP_QUOTA_SYSTEM_INODE
 	NUM_SYSTEM_INODES
 };
-#define NUM_GLOBAL_SYSTEM_INODES OCFS2_LAST_GLOBAL_SYSTEM_INODE
+#define NUM_GLOBAL_SYSTEM_INODES OCFS2_FIRST_LOCAL_SYSTEM_INODE
 #define NUM_LOCAL_SYSTEM_INODES \
 		(NUM_SYSTEM_INODES - OCFS2_FIRST_LOCAL_SYSTEM_INODE)
 
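
The one-line fix matters because NUM_GLOBAL_SYSTEM_INODES is used as the count of global system inodes (for example when sizing and walking their cache), and the global slots occupy indices 0 through OCFS2_FIRST_LOCAL_SYSTEM_INODE - 1. OCFS2_LAST_GLOBAL_SYSTEM_INODE is the index of the last such slot, one less than the count, so the old definition left the final global system inode outside the cache bounds. A toy enum with the same shape (slot names and values are illustrative, not the real ocfs2 numbering):

    #include <stdio.h>

    /* illustrative slot layout, not the real ocfs2 numbering */
    enum {
            BAD_BLOCK_SYSTEM_INODE = 0,      /* global slots ... */
            GLOBAL_INODE_ALLOC_SYSTEM_INODE,
            GLOBAL_BITMAP_SYSTEM_INODE,
            LAST_GLOBAL_SYSTEM_INODE = GLOBAL_BITMAP_SYSTEM_INODE,
            FIRST_LOCAL_SYSTEM_INODE,        /* == number of global slots */
            LOCAL_ALLOC_SYSTEM_INODE = FIRST_LOCAL_SYSTEM_INODE,
            NUM_SYSTEM_INODES
    };

    int main(void)
    {
            /* the buggy definition: an index used where a count is needed */
            printf("old NUM_GLOBAL = %d (misses slot %d)\n",
                   LAST_GLOBAL_SYSTEM_INODE, LAST_GLOBAL_SYSTEM_INODE);
            /* the fixed definition: count of slots 0..FIRST_LOCAL-1 */
            printf("new NUM_GLOBAL = %d\n", FIRST_LOCAL_SYSTEM_INODE);
            return 0;
    }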