dax fixes for 5.13-rc2
- Fix a hang condition (missed wakeups with virtiofs when invalidating
  entries)

Merge tag 'dax-fixes-5.13-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm

Pull dax fixes from Dan Williams:
 "A fix for a hang condition due to missed wakeups in the filesystem-dax
  core when exercised by virtiofs. This bug has been there from the
  beginning, but the condition has not triggered on other filesystems
  since they hold a lock over invalidation events"

* tag 'dax-fixes-5.13-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm:
  dax: Wake up all waiters after invalidating dax entry
  dax: Add a wakeup mode parameter to put_unlocked_entry()
  dax: Add an enum for specifying dax wakup mode
This commit is contained in:

  commit 393f42f113

 fs/dax.c | 35 +++++++++++++++++++++++------------
 1 file changed, 23 insertions(+), 12 deletions(-)
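The missed-wakeup scenario is easiest to see outside the kernel. Below is a
minimal userspace analogy, not part of the change itself: pthread condition
variables stand in for the DAX entry waitqueue, and a boolean stands in for
the entry in the xarray. Once the entry has been invalidated, the
invalidating thread has to wake every waiter, because no later unlock will
ever come along to wake the rest.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool entry_present = true;   /* stands in for the DAX entry in the xarray */
static bool entry_locked = true;    /* stands in for the locked-entry bit */

static void *waiter(void *arg)
{
        pthread_mutex_lock(&lock);
        /* Sleep until the entry is unlocked or disappears entirely. */
        while (entry_present && entry_locked)
                pthread_cond_wait(&cond, &lock);
        pthread_mutex_unlock(&lock);
        printf("waiter %ld proceeds\n", (long)arg);
        return NULL;
}

int main(void)
{
        pthread_t threads[4];

        for (long i = 0; i < 4; i++)
                pthread_create(&threads[i], NULL, waiter, (void *)i);

        pthread_mutex_lock(&lock);
        entry_present = false;          /* the entry is invalidated */
        /*
         * Wake everyone: with the entry gone there will never be another
         * unlock to pass the wakeup along.  A single pthread_cond_signal()
         * here is the moral equivalent of the old wake-one behaviour and
         * can leave waiters sleeping forever.
         */
        pthread_cond_broadcast(&cond);
        pthread_mutex_unlock(&lock);

        for (int i = 0; i < 4; i++)
                pthread_join(threads[i], NULL);
        return 0;
}

Compile with cc -pthread; replacing the broadcast with pthread_cond_signal()
reproduces the hang in miniature, since only one waiter is ever woken and
nobody remains to wake the others.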
diff --git a/fs/dax.c b/fs/dax.c
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -144,6 +144,16 @@ struct wait_exceptional_entry_queue {
         struct exceptional_entry_key key;
 };
 
+/**
+ * enum dax_wake_mode: waitqueue wakeup behaviour
+ * @WAKE_ALL: wake all waiters in the waitqueue
+ * @WAKE_NEXT: wake only the first waiter in the waitqueue
+ */
+enum dax_wake_mode {
+        WAKE_ALL,
+        WAKE_NEXT,
+};
+
 static wait_queue_head_t *dax_entry_waitqueue(struct xa_state *xas,
                 void *entry, struct exceptional_entry_key *key)
 {
@@ -182,7 +192,8 @@ static int wake_exceptional_entry_func(wait_queue_entry_t *wait,
  * The important information it's conveying is whether the entry at
  * this index used to be a PMD entry.
  */
-static void dax_wake_entry(struct xa_state *xas, void *entry, bool wake_all)
+static void dax_wake_entry(struct xa_state *xas, void *entry,
+                           enum dax_wake_mode mode)
 {
         struct exceptional_entry_key key;
         wait_queue_head_t *wq;
@@ -196,7 +207,7 @@ static void dax_wake_entry(struct xa_state *xas, void *entry, bool wake_all)
          * must be in the waitqueue and the following check will see them.
          */
         if (waitqueue_active(wq))
-                __wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
+                __wake_up(wq, TASK_NORMAL, mode == WAKE_ALL ? 0 : 1, &key);
 }
 
 /*
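For reference, the third argument to __wake_up() is nr_exclusive: 0 wakes
every exclusive waiter on the queue, while 1 wakes only the first. A
throwaway userspace mock (hypothetical names, not the fs/dax.c code) of how
the new enum is translated into that argument:

#include <stdio.h>

enum dax_wake_mode { WAKE_ALL, WAKE_NEXT };

/* Hypothetical stand-in for dax_wake_entry(): only the mode-to-count
 * translation is modelled here. */
static void mock_wake_entry(enum dax_wake_mode mode)
{
        int nr_exclusive = (mode == WAKE_ALL) ? 0 : 1;  /* 0 == wake them all */

        printf("__wake_up(wq, TASK_NORMAL, %d, &key)\n", nr_exclusive);
}

int main(void)
{
        mock_wake_entry(WAKE_NEXT);     /* ordinary unlock: hand off to one waiter */
        mock_wake_entry(WAKE_ALL);      /* invalidation: nobody is left to hand off to */
        return 0;
}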
@@ -264,11 +275,11 @@ static void wait_entry_unlocked(struct xa_state *xas, void *entry)
         finish_wait(wq, &ewait.wait);
 }
 
-static void put_unlocked_entry(struct xa_state *xas, void *entry)
+static void put_unlocked_entry(struct xa_state *xas, void *entry,
+                               enum dax_wake_mode mode)
 {
-        /* If we were the only waiter woken, wake the next one */
         if (entry && !dax_is_conflict(entry))
-                dax_wake_entry(xas, entry, false);
+                dax_wake_entry(xas, entry, mode);
 }
 
 /*
@@ -286,7 +297,7 @@ static void dax_unlock_entry(struct xa_state *xas, void *entry)
         old = xas_store(xas, entry);
         xas_unlock_irq(xas);
         BUG_ON(!dax_is_locked(old));
-        dax_wake_entry(xas, entry, false);
+        dax_wake_entry(xas, entry, WAKE_NEXT);
 }
 
 /*
@@ -524,7 +535,7 @@ static void *grab_mapping_entry(struct xa_state *xas,
 
                 dax_disassociate_entry(entry, mapping, false);
                 xas_store(xas, NULL);   /* undo the PMD join */
-                dax_wake_entry(xas, entry, true);
+                dax_wake_entry(xas, entry, WAKE_ALL);
                 mapping->nrpages -= PG_PMD_NR;
                 entry = NULL;
                 xas_set(xas, index);
@@ -622,7 +633,7 @@ struct page *dax_layout_busy_page_range(struct address_space *mapping,
                 entry = get_unlocked_entry(&xas, 0);
                 if (entry)
                         page = dax_busy_page(entry);
-                put_unlocked_entry(&xas, entry);
+                put_unlocked_entry(&xas, entry, WAKE_NEXT);
                 if (page)
                         break;
                 if (++scanned % XA_CHECK_SCHED)
@@ -664,7 +675,7 @@ static int __dax_invalidate_entry(struct address_space *mapping,
         mapping->nrpages -= 1UL << dax_entry_order(entry);
         ret = 1;
 out:
-        put_unlocked_entry(&xas, entry);
+        put_unlocked_entry(&xas, entry, WAKE_ALL);
         xas_unlock_irq(&xas);
         return ret;
 }
@@ -937,13 +948,13 @@ static int dax_writeback_one(struct xa_state *xas, struct dax_device *dax_dev,
         xas_lock_irq(xas);
         xas_store(xas, entry);
         xas_clear_mark(xas, PAGECACHE_TAG_DIRTY);
-        dax_wake_entry(xas, entry, false);
+        dax_wake_entry(xas, entry, WAKE_NEXT);
 
         trace_dax_writeback_one(mapping->host, index, count);
         return ret;
 
  put_unlocked:
-        put_unlocked_entry(xas, entry);
+        put_unlocked_entry(xas, entry, WAKE_NEXT);
         return ret;
 }
 
@@ -1684,7 +1695,7 @@ dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order)
         /* Did we race with someone splitting entry or so? */
         if (!entry || dax_is_conflict(entry) ||
             (order == 0 && !dax_is_pte_entry(entry))) {
-                put_unlocked_entry(&xas, entry);
+                put_unlocked_entry(&xas, entry, WAKE_NEXT);
                 xas_unlock_irq(&xas);
                 trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf,
                                                       VM_FAULT_NOPAGE);