author | Linus Torvalds <torvalds@linux-foundation.org> | 2018-12-31 09:46:39 -0800
---|---|---
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2018-12-31 09:46:39 -0800
commit | 2a1a2c1a76cf89aaeb98a89179c2942c7882f68a (patch) |
tree | 07373c743c4f43085d7e3a89b83a92e1fa48c6c8 |
parent | 9ab97aea85cca43a6aedc90e0d1feba91eebe1ad (diff) |
parent | d8a706414af4827fc0b4b1c0c631c607351938b9 (diff) |
Merge tag 'dax-fix-4.21' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm
Pull dax fix from Dan Williams:
"Clean up unnecessary usage of prepare_to_wait_exclusive().
While I feel a bit silly sending a single-commit pull request, there is
nothing else queued up for dax this cycle. This change has shipped in
-next for multiple releases"
* tag 'dax-fix-4.21' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm:
dax: Use non-exclusive wait in wait_entry_unlocked()
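
For context on what the patch switches between, below is a minimal, hypothetical sketch; it is not taken from fs/dax.c, and the `example_*` names are invented for illustration. With `prepare_to_wait_exclusive()`, a plain `wake_up()` wakes only one exclusive waiter, so a waiter that may never take over the lock itself has to pass the wake-up along; with `prepare_to_wait()`, a wake-up reaches every waiter and no hand-off is needed.

```c
/* Hypothetical illustration only; not part of fs/dax.c. */
#include <linux/wait.h>
#include <linux/sched.h>

static DECLARE_WAIT_QUEUE_HEAD(example_wq);
static bool example_ready;

/*
 * Non-exclusive wait, the form the patch adopts: every waiter is
 * linked as a non-exclusive entry, so a single wake_up(&example_wq)
 * on the waker side wakes all of them. A waiter that returns without
 * making progress does not have to wake anyone else.
 */
static void example_wait_nonexclusive(void)
{
	DEFINE_WAIT(wait);

	prepare_to_wait(&example_wq, &wait, TASK_UNINTERRUPTIBLE);
	if (!example_ready)
		schedule();
	finish_wait(&example_wq, &wait);
}

/*
 * Exclusive wait, the pattern being removed: wake_up() wakes a single
 * exclusive waiter, so a task that is woken but cannot guarantee it
 * will perform the next unlock must re-arm the queue by waking the
 * next waiter itself, or the remaining waiters can stall.
 */
static void example_wait_exclusive(void)
{
	DEFINE_WAIT(wait);

	prepare_to_wait_exclusive(&example_wq, &wait, TASK_UNINTERRUPTIBLE);
	if (!example_ready)
		schedule();
	finish_wait(&example_wq, &wait);

	if (waitqueue_active(&example_wq))
		__wake_up(&example_wq, TASK_NORMAL, 1, NULL);
}
```

In the dax case the non-exclusive form lets wait_entry_unlocked() return without ever acquiring the entry lock, for example when the inode dies first, without leaving other waiters stranded, which is exactly the situation the new in-code comment in the diff below describes.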
-rw-r--r-- | fs/dax.c | 16 |
1 file changed, 7 insertions, 9 deletions
```diff
@@ -246,18 +246,16 @@ static void wait_entry_unlocked(struct xa_state *xas, void *entry)
 	ewait.wait.func = wake_exceptional_entry_func;
 
 	wq = dax_entry_waitqueue(xas, entry, &ewait.key);
-	prepare_to_wait_exclusive(wq, &ewait.wait, TASK_UNINTERRUPTIBLE);
+	/*
+	 * Unlike get_unlocked_entry() there is no guarantee that this
+	 * path ever successfully retrieves an unlocked entry before an
+	 * inode dies. Perform a non-exclusive wait in case this path
+	 * never successfully performs its own wake up.
+	 */
+	prepare_to_wait(wq, &ewait.wait, TASK_UNINTERRUPTIBLE);
 	xas_unlock_irq(xas);
 	schedule();
 	finish_wait(wq, &ewait.wait);
-
-	/*
-	 * Entry lock waits are exclusive. Wake up the next waiter since
-	 * we aren't sure we will acquire the entry lock and thus wake
-	 * the next waiter up on unlock.
-	 */
-	if (waitqueue_active(wq))
-		__wake_up(wq, TASK_NORMAL, 1, &ewait.key);
 }
 
 static void put_unlocked_entry(struct xa_state *xas, void *entry)
```