author    | Andreas Gruenbacher <agruenba@redhat.com> | 2024-04-15 11:23:04 +0200
committer | Andreas Gruenbacher <agruenba@redhat.com> | 2024-04-24 19:48:20 +0200
commit    | 9947a06d29c0a30da88cdc6376ca5fd87083e130 (patch)
tree      | 687ac555f1706ab8e7968f93f2eb246db3c74e02 /fs/gfs2
parent    | 1cd28e15864054f3c48baee9eecda1c0441c48ac (diff)
gfs2: do_xmote fixes
Function do_xmote() is called with the glock spinlock held. Commit
86934198eefa added a 'goto skip_inval' statement near the beginning of
the function, jumping to a point further below at which the glock
spinlock is expected not to be held anymore. It then added code at that
point which requires the glock spinlock to be held. This doesn't make
sense; fix this up by dropping and retaking the spinlock where needed.
In addition, when ->lm_lock() returned an error, do_xmote() didn't fail
the locking operation, and simply left the glock hanging; fix that as
well. (This is a much older error.)
Fixes: 86934198eefa ("gfs2: Clear flags when withdraw prevents xmote")
Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
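
To illustrate the locking pattern this fix establishes (callbacks that may
sleep run with the spinlock dropped, the spinlock is retaken before shared
glock state is touched, and a failed backend request fails the locking
operation instead of leaving the lock in limbo), here is a small standalone
sketch. It uses a pthread mutex in place of the glock spinlock; struct glock,
sync_unlocked() and backend_lock() are invented stand-ins for illustration
only, not GFS2 or kernel APIs.

#include <pthread.h>
#include <stdio.h>

/* Toy stand-in for struct gfs2_glock; the mutex plays the role of gl->gl_lockref.lock. */
struct glock {
	pthread_mutex_t lock;
	int state;
};

/* Stand-in for ->go_sync()/->go_inval(): may sleep, so it must run without the lock. */
static int sync_unlocked(struct glock *gl)
{
	(void)gl;
	return 0;			/* pretend the sync succeeded */
}

/* Stand-in for ->lm_lock(): the backend request also runs without the lock. */
static int backend_lock(struct glock *gl, int target)
{
	(void)gl;
	(void)target;
	return -1;			/* pretend the backend request failed */
}

/* Called with gl->lock held; returns with gl->lock held, like do_xmote(). */
static void do_transition(struct glock *gl, int target)
{
	int ret;

	pthread_mutex_unlock(&gl->lock);	/* drop the lock around the sleeping callback */
	ret = sync_unlocked(gl);
	pthread_mutex_lock(&gl->lock);		/* retake it before touching shared state */
	if (ret) {
		target = gl->state;		/* sync failed: skip the backend request */
		goto out;
	}

	pthread_mutex_unlock(&gl->lock);	/* the backend request runs unlocked too */
	ret = backend_lock(gl, target);
	pthread_mutex_lock(&gl->lock);
	if (ret) {
		fprintf(stderr, "backend_lock failed: %d\n", ret);
		target = gl->state;		/* fail the request: keep the old state */
	}
out:
	gl->state = target;			/* complete the transition with the lock held */
}

int main(void)
{
	struct glock gl = { .lock = PTHREAD_MUTEX_INITIALIZER, .state = 0 };

	pthread_mutex_lock(&gl.lock);
	do_transition(&gl, 1);
	pthread_mutex_unlock(&gl.lock);
	printf("final state: %d\n", gl.state);
	return 0;
}

The sketch builds with 'cc -pthread'; its only purpose is to show the shape of
the control flow described in the commit message, not GFS2 behaviour.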
Diffstat (limited to 'fs/gfs2')
-rw-r--r-- | fs/gfs2/glock.c | 44
1 file changed, 25 insertions(+), 19 deletions(-)
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index b1a2862d431d..fa374617d2f1 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -721,6 +721,7 @@ __acquires(&gl->gl_lockref.lock)
 {
 	const struct gfs2_glock_operations *glops = gl->gl_ops;
 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
 	unsigned int lck_flags = (unsigned int)(gh ? gh->gh_flags : 0);
 	int ret;
 
@@ -749,6 +750,9 @@ __acquires(&gl->gl_lockref.lock)
 	    (gl->gl_state == LM_ST_EXCLUSIVE) ||
 	    (lck_flags & (LM_FLAG_TRY|LM_FLAG_TRY_1CB)))
 		clear_bit(GLF_BLOCKING, &gl->gl_flags);
+	if (!glops->go_inval && !glops->go_sync)
+		goto skip_inval;
+
 	spin_unlock(&gl->gl_lockref.lock);
 	if (glops->go_sync) {
 		ret = glops->go_sync(gl);
@@ -761,6 +765,7 @@ __acquires(&gl->gl_lockref.lock)
 				fs_err(sdp, "Error %d syncing glock \n", ret);
 				gfs2_dump_glock(NULL, gl, true);
 			}
+			spin_lock(&gl->gl_lockref.lock);
 			goto skip_inval;
 		}
 	}
@@ -781,9 +786,10 @@ __acquires(&gl->gl_lockref.lock)
 		glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA);
 		clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
 	}
+	spin_lock(&gl->gl_lockref.lock);
 
 skip_inval:
-	gfs2_glock_hold(gl);
+	gl->gl_lockref.count++;
 	/*
 	 * Check for an error encountered since we called go_sync and go_inval.
 	 * If so, we can't withdraw from the glock code because the withdraw
@@ -825,37 +831,37 @@ skip_inval:
 			 */
 			clear_bit(GLF_LOCK, &gl->gl_flags);
 			clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
-			gfs2_glock_queue_work(gl, GL_GLOCK_DFT_HOLD);
-			goto out;
+			__gfs2_glock_queue_work(gl, GL_GLOCK_DFT_HOLD);
+			return;
 		} else {
 			clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
 		}
 	}
 
-	if (sdp->sd_lockstruct.ls_ops->lm_lock) {
-		struct lm_lockstruct *ls = &sdp->sd_lockstruct;
+	if (ls->ls_ops->lm_lock) {
+		spin_unlock(&gl->gl_lockref.lock);
+		ret = ls->ls_ops->lm_lock(gl, target, lck_flags);
+		spin_lock(&gl->gl_lockref.lock);
 
-		/* lock_dlm */
-		ret = sdp->sd_lockstruct.ls_ops->lm_lock(gl, target, lck_flags);
 		if (ret == -EINVAL && gl->gl_target == LM_ST_UNLOCKED &&
 		    target == LM_ST_UNLOCKED &&
 		    test_bit(DFL_UNMOUNT, &ls->ls_recover_flags)) {
-			spin_lock(&gl->gl_lockref.lock);
-			finish_xmote(gl, target);
-			__gfs2_glock_queue_work(gl, 0);
-			spin_unlock(&gl->gl_lockref.lock);
+			/*
+			 * The lockspace has been released and the lock has
+			 * been unlocked implicitly.
+			 */
 		} else if (ret) {
 			fs_err(sdp, "lm_lock ret %d\n", ret);
-			GLOCK_BUG_ON(gl, !gfs2_withdrawing_or_withdrawn(sdp));
+			target = gl->gl_state | LM_OUT_ERROR;
+		} else {
+			/* The operation will be completed asynchronously. */
+			return;
 		}
-	} else { /* lock_nolock */
-		spin_lock(&gl->gl_lockref.lock);
-		finish_xmote(gl, target);
-		__gfs2_glock_queue_work(gl, 0);
-		spin_unlock(&gl->gl_lockref.lock);
 	}
-out:
-	spin_lock(&gl->gl_lockref.lock);
+
+	/* Complete the operation now. */
+	finish_xmote(gl, target);
+	__gfs2_glock_queue_work(gl, 0);
 }
 
 /**
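
For reference, after this patch the tail of do_xmote() reads roughly as
follows (reassembled from the hunks above, with the comments abbreviated):

	if (ls->ls_ops->lm_lock) {
		spin_unlock(&gl->gl_lockref.lock);
		ret = ls->ls_ops->lm_lock(gl, target, lck_flags);
		spin_lock(&gl->gl_lockref.lock);

		if (ret == -EINVAL && gl->gl_target == LM_ST_UNLOCKED &&
		    target == LM_ST_UNLOCKED &&
		    test_bit(DFL_UNMOUNT, &ls->ls_recover_flags)) {
			/* lockspace released; lock already unlocked implicitly */
		} else if (ret) {
			fs_err(sdp, "lm_lock ret %d\n", ret);
			target = gl->gl_state | LM_OUT_ERROR;	/* fail the request */
		} else {
			/* completed asynchronously via the dlm reply */
			return;
		}
	}

	/* Complete the operation now. */
	finish_xmote(gl, target);
	__gfs2_glock_queue_work(gl, 0);

Both synchronous completion paths (lock_nolock, and a failed ->lm_lock() call)
now fall through to a single finish_xmote()/__gfs2_glock_queue_work() pair at
the end, and an ->lm_lock() error is reported by or-ing LM_OUT_ERROR into the
current state, so finish_xmote() sees the request as failed rather than the
glock simply being left hanging.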