| author | Andreas Gruenbacher <agruenba@redhat.com> | 2022-06-02 15:15:02 -0500 |
|---|---|---|
| committer | Andreas Gruenbacher <agruenba@redhat.com> | 2022-06-29 16:54:59 +0200 |
| commit | de3f906f0af0c3f5d862b07df4d020c9322cd9c3 (patch) | |
| tree | d3357624a5f0cd3f9377a14b6c7e3ae7365d2004 /fs/gfs2 | |
| parent | 53d69132958f7e144973d02ad0f0798386219efd (diff) | |
gfs2: Revert 'Fix "truncate in progress" hang'
Now that interrupted truncates are completed in the context of the
process taking the glock, there is no need for the glock state engine to
delegate that task to gfs2_quotad or for quotad to perform those
truncates anymore. Get rid of the obsolete associated infrastructure.
Reverts commit 813e0c46c9e2 ("GFS2: Fix "truncate in progress" hang").
Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
Signed-off-by: Bob Peterson <rpeterso@redhat.com>
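Editor's note: the control-flow change the message describes is easy to model. The sketch below is a toy, userspace-only C program, not GFS2 code; the names toy_inode, acquire_inode_lock and resume_truncate are invented for illustration. It shows the shape of the new behaviour: the thread that takes the lock finishes the interrupted truncate itself, so the shared work list, the daemon wakeup, and the extra wait disappear.

/*
 * Toy userspace model of the change described above -- not GFS2 code.
 * All names (toy_inode, acquire_inode_lock, resume_truncate) are invented.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_inode {
        pthread_mutex_t lock;           /* stands in for the inode glock */
        bool trunc_in_progress;         /* models GFS2_DIF_TRUNC_IN_PROG */
        long size;
};

/* Models gfs2_truncatei_resume(): finish an interrupted truncate. */
static int resume_truncate(struct toy_inode *ip)
{
        ip->size = 0;
        ip->trunc_in_progress = false;
        return 0;
}

/*
 * Models the patched instantiate path: the lock taker resumes the
 * truncate in its own context, so no daemon thread, shared work list,
 * or extra wait queue is needed any more.
 */
static int acquire_inode_lock(struct toy_inode *ip)
{
        int error = 0;

        pthread_mutex_lock(&ip->lock);
        if (ip->trunc_in_progress)
                error = resume_truncate(ip);
        pthread_mutex_unlock(&ip->lock);
        return error;
}

int main(void)
{
        struct toy_inode ip = {
                .lock = PTHREAD_MUTEX_INITIALIZER,
                .trunc_in_progress = true,
                .size = 4096,
        };

        if (acquire_inode_lock(&ip) == 0)
                printf("truncate resumed inline, size now %ld\n", ip.size);
        return 0;
}

In the actual patch the same shape appears in inode_go_instantiate(), which now calls gfs2_truncatei_resume() directly instead of queueing the inode on sd_trunc_list and waiting for gfs2_quotad.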
Diffstat (limited to 'fs/gfs2')
-rw-r--r-- | fs/gfs2/glock.c      | 41 |
-rw-r--r-- | fs/gfs2/glock.h      |  2 |
-rw-r--r-- | fs/gfs2/glops.c      | 11 |
-rw-r--r-- | fs/gfs2/incore.h     |  3 |
-rw-r--r-- | fs/gfs2/main.c       |  1 |
-rw-r--r-- | fs/gfs2/ops_fstype.c |  2 |
-rw-r--r-- | fs/gfs2/quota.c      | 28 |
7 files changed, 8 insertions(+), 80 deletions(-)
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 41bee3db8c0d..347c7bc1fae3 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -518,8 +518,7 @@ again:
  * do_promote - promote as many requests as possible on the current queue
  * @gl: The glock
  *
- * Returns: 1 if there is a blocked holder at the head of the list, or 2
- *          if a type specific operation is underway.
+ * Returns: 1 if there is a blocked holder at the head of the list
  */
 
 static int do_promote(struct gfs2_glock *gl)
@@ -627,7 +626,6 @@ static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
         const struct gfs2_glock_operations *glops = gl->gl_ops;
         struct gfs2_holder *gh;
         unsigned state = ret & LM_OUT_ST_MASK;
-        int rv;
 
         spin_lock(&gl->gl_lockref.lock);
         trace_gfs2_glock_state_change(gl, state);
@@ -685,6 +683,8 @@ retry:
                 gfs2_demote_wake(gl);
         if (state != LM_ST_UNLOCKED) {
                 if (glops->go_xmote_bh) {
+                        int rv;
+
                         spin_unlock(&gl->gl_lockref.lock);
                         rv = glops->go_xmote_bh(gl);
                         spin_lock(&gl->gl_lockref.lock);
@@ -693,13 +693,10 @@ retry:
                                 goto out;
                         }
                 }
-                rv = do_promote(gl);
-                if (rv == 2)
-                        goto out_locked;
+                do_promote(gl);
         }
 out:
         clear_bit(GLF_LOCK, &gl->gl_flags);
-out_locked:
         spin_unlock(&gl->gl_lockref.lock);
 }
 
@@ -856,7 +853,6 @@ __releases(&gl->gl_lockref.lock)
 __acquires(&gl->gl_lockref.lock)
 {
         struct gfs2_holder *gh = NULL;
-        int ret;
 
         if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
                 return;
@@ -875,18 +871,14 @@ __acquires(&gl->gl_lockref.lock)
         } else {
                 if (test_bit(GLF_DEMOTE, &gl->gl_flags))
                         gfs2_demote_wake(gl);
-                ret = do_promote(gl);
-                if (ret == 0)
+                if (do_promote(gl) == 0)
                         goto out_unlock;
-                if (ret == 2)
-                        goto out;
                 gh = find_first_waiter(gl);
                 gl->gl_target = gh->gh_state;
                 if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
                         do_error(gl, 0); /* Fail queued try locks */
         }
         do_xmote(gl, gh, gl->gl_target);
-out:
         return;
 
 out_sched:
@@ -2213,29 +2205,6 @@ void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
         glock_hash_walk(dump_glock_func, sdp);
 }
 
-void gfs2_glock_finish_truncate(struct gfs2_inode *ip)
-{
-        struct gfs2_glock *gl = ip->i_gl;
-        int ret;
-
-        ret = gfs2_truncatei_resume(ip);
-        gfs2_glock_assert_withdraw(gl, ret == 0);
-
-        spin_lock(&gl->gl_lockref.lock);
-        clear_bit(GLF_LOCK, &gl->gl_flags);
-        run_queue(gl, 1);
-        wake_up_glock(gl);
-        spin_unlock(&gl->gl_lockref.lock);
-}
-
-void gfs2_wait_truncate(struct gfs2_inode *ip)
-{
-        struct gfs2_glock *gl = ip->i_gl;
-        wait_queue_head_t *wq = glock_waitqueue(&gl->gl_name);
-
-        wait_event(*wq, !(ip->i_diskflags & GFS2_DIF_TRUNC_IN_PROG));
-}
-
 static const char *state2str(unsigned state)
 {
         switch(state) {
diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h
index 2796d5414ec9..5aed8b500cf5 100644
--- a/fs/gfs2/glock.h
+++ b/fs/gfs2/glock.h
@@ -274,8 +274,6 @@ extern void gfs2_cancel_delete_work(struct gfs2_glock *gl);
 extern bool gfs2_delete_work_queued(const struct gfs2_glock *gl);
 extern void gfs2_flush_delete_work(struct gfs2_sbd *sdp);
 extern void gfs2_gl_hash_clear(struct gfs2_sbd *sdp);
-extern void gfs2_glock_finish_truncate(struct gfs2_inode *ip);
-extern void gfs2_wait_truncate(struct gfs2_inode *ip);
 extern void gfs2_glock_thaw(struct gfs2_sbd *sdp);
 extern void gfs2_glock_add_to_lru(struct gfs2_glock *gl);
 extern void gfs2_glock_free(struct gfs2_glock *gl);
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index 6bc096610654..c387f80ca65e 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -488,7 +488,6 @@ int gfs2_inode_refresh(struct gfs2_inode *ip)
 static int inode_go_instantiate(struct gfs2_holder *gh)
 {
         struct gfs2_glock *gl = gh->gh_gl;
-        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
         struct gfs2_inode *ip = gl->gl_object;
         int error = 0;
 
@@ -504,14 +503,8 @@ static int inode_go_instantiate(struct gfs2_holder *gh)
 
         if ((ip->i_diskflags & GFS2_DIF_TRUNC_IN_PROG) &&
             (gl->gl_state == LM_ST_EXCLUSIVE) &&
-            (gh->gh_state == LM_ST_EXCLUSIVE)) {
-                spin_lock(&sdp->sd_trunc_lock);
-                if (list_empty(&ip->i_trunc_list))
-                        list_add(&ip->i_trunc_list, &sdp->sd_trunc_list);
-                spin_unlock(&sdp->sd_trunc_lock);
-                wake_up(&sdp->sd_quota_wait);
-                gfs2_wait_truncate(ip);
-        }
+            (gh->gh_state == LM_ST_EXCLUSIVE))
+                error = gfs2_truncatei_resume(ip);
 
 out:
         return error;
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 8c00fb389ae5..9e319c8f9efd 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -396,7 +396,6 @@ struct gfs2_inode {
         atomic_t i_sizehint; /* hint of the write size */
         struct rw_semaphore i_rw_mutex;
         struct list_head i_ordered;
-        struct list_head i_trunc_list;
         __be64 *i_hash_cache;
         u32 i_entries;
         u32 i_diskflags;
@@ -784,8 +783,6 @@ struct gfs2_sbd {
         struct mutex sd_quota_mutex;
         struct mutex sd_quota_sync_mutex;
         wait_queue_head_t sd_quota_wait;
-        struct list_head sd_trunc_list;
-        spinlock_t sd_trunc_lock;
 
         unsigned int sd_quota_slots;
         unsigned long *sd_quota_bitmap;
diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c
index 244187e3e70f..d94791527dcb 100644
--- a/fs/gfs2/main.c
+++ b/fs/gfs2/main.c
@@ -38,7 +38,6 @@ static void gfs2_init_inode_once(void *foo)
         inode_init_once(&ip->i_inode);
         atomic_set(&ip->i_sizehint, 0);
         init_rwsem(&ip->i_rw_mutex);
-        INIT_LIST_HEAD(&ip->i_trunc_list);
         INIT_LIST_HEAD(&ip->i_ordered);
         ip->i_qadata = NULL;
         gfs2_holder_mark_uninitialized(&ip->i_rgd_gh);
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index c9b423c874a3..549879929c84 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -106,8 +106,6 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb)
         mutex_init(&sdp->sd_quota_mutex);
         mutex_init(&sdp->sd_quota_sync_mutex);
         init_waitqueue_head(&sdp->sd_quota_wait);
-        INIT_LIST_HEAD(&sdp->sd_trunc_list);
-        spin_lock_init(&sdp->sd_trunc_lock);
         spin_lock_init(&sdp->sd_bitmap_lock);
 
         INIT_LIST_HEAD(&sdp->sd_sc_inodes_list);
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index 59d727a4ae2c..a6667e8d781f 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -1517,25 +1517,6 @@ static void quotad_check_timeo(struct gfs2_sbd *sdp, const char *msg,
         }
 }
 
-static void quotad_check_trunc_list(struct gfs2_sbd *sdp)
-{
-        struct gfs2_inode *ip;
-
-        while(1) {
-                ip = NULL;
-                spin_lock(&sdp->sd_trunc_lock);
-                if (!list_empty(&sdp->sd_trunc_list)) {
-                        ip = list_first_entry(&sdp->sd_trunc_list,
-                                              struct gfs2_inode, i_trunc_list);
-                        list_del_init(&ip->i_trunc_list);
-                }
-                spin_unlock(&sdp->sd_trunc_lock);
-                if (ip == NULL)
-                        return;
-                gfs2_glock_finish_truncate(ip);
-        }
-}
-
 void gfs2_wake_up_statfs(struct gfs2_sbd *sdp) {
         if (!sdp->sd_statfs_force_sync) {
                 sdp->sd_statfs_force_sync = 1;
@@ -1558,7 +1539,6 @@ int gfs2_quotad(void *data)
         unsigned long quotad_timeo = 0;
         unsigned long t = 0;
         DEFINE_WAIT(wait);
-        int empty;
 
         while (!kthread_should_stop()) {
 
@@ -1579,19 +1559,13 @@ int gfs2_quotad(void *data)
                 quotad_check_timeo(sdp, "sync", gfs2_quota_sync, t,
                                    &quotad_timeo, &tune->gt_quota_quantum);
 
-                /* Check for & recover partially truncated inodes */
-                quotad_check_trunc_list(sdp);
-
                 try_to_freeze();
 
 bypass:
                 t = min(quotad_timeo, statfs_timeo);
 
                 prepare_to_wait(&sdp->sd_quota_wait, &wait, TASK_INTERRUPTIBLE);
-                spin_lock(&sdp->sd_trunc_lock);
-                empty = list_empty(&sdp->sd_trunc_list);
-                spin_unlock(&sdp->sd_trunc_lock);
-                if (empty && !sdp->sd_statfs_force_sync)
+                if (!sdp->sd_statfs_force_sync)
                         t -= schedule_timeout(t);
                 else
                         t = 0;