author     Andreas Gruenbacher <agruenba@redhat.com>    2022-12-09 18:28:20 +0100
committer  Andreas Gruenbacher <agruenba@redhat.com>    2023-01-31 22:40:24 +0100
commit     3056dc46559bfe3fc4b79771dcbc2d003f9fd313 (patch)
tree       7a34174aadbcdf5bc3e65e975b45b6a349be448c /fs/gfs2
parent     228804a35caa7edae4a81049281e7f106dea1ad1 (diff)
gfs2: Get rid of GLF_PENDING_DELETE flag
Get rid of the GLF_PENDING_DELETE glock flag introduced by commit a0e3cc65fa29 ("gfs2: Turn gl_delete into a delayed work"). The only use of that flag is to prevent the iopen glock from being demoted (i.e., unlocked) while delete work is pending. It turns out that demoting the iopen glock while delete work is pending is perfectly fine; we only need to make sure that the glock isn't being freed while still in use. This is ensured by the previous patch because delete_work_func() owns a reference while the work is queued or running.

With these changes, gfs2_queue_delete_work() no longer takes the glock spin lock, so we can use it in iopen_go_callback() instead of open-coding it there.

Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
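The lifetime rule the patch relies on can be illustrated with a minimal, self-contained userspace sketch. This is not gfs2 code: the names (demo_glock, demo_queue_delete_work, demo_delete_work, demo_glock_put) are hypothetical, and a plain atomic refcount plus a pthread stand in for gl_lockref and the delayed work. It only models the invariant stated above: whoever queues the delete work hands it a reference, and the work function drops that reference when it finishes, so the object cannot be freed while the work is queued or running even if the lock is demoted in the meantime.

/* Standalone model of the "work owns a reference" rule; not kernel code. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_glock {
        atomic_int refcount;            /* stands in for gl->gl_lockref */
        pthread_t delete_worker;        /* stands in for gl->gl_delete */
        int work_queued;
};

static void demo_glock_put(struct demo_glock *gl)
{
        /* Free the object when the last reference is dropped. */
        if (atomic_fetch_sub(&gl->refcount, 1) == 1) {
                printf("last reference dropped, freeing object\n");
                free(gl);
        }
}

static void *demo_delete_work(void *arg)
{
        struct demo_glock *gl = arg;

        /* The object is pinned by the reference handed over at queue time,
         * so it is safe to use here even if the lock was demoted meanwhile. */
        printf("delete work running\n");
        demo_glock_put(gl);             /* drop the work's reference */
        return NULL;
}

/* Returns 1 if the work was queued; on failure the caller must undo the
 * reference it took, mirroring the error path in iopen_go_callback(). */
static int demo_queue_delete_work(struct demo_glock *gl)
{
        if (gl->work_queued)
                return 0;
        if (pthread_create(&gl->delete_worker, NULL, demo_delete_work, gl))
                return 0;
        gl->work_queued = 1;
        return 1;
}

int main(void)
{
        struct demo_glock *gl = calloc(1, sizeof(*gl));

        if (!gl)
                return 1;
        atomic_init(&gl->refcount, 1);          /* caller's reference */

        atomic_fetch_add(&gl->refcount, 1);     /* reference for the work */
        if (demo_queue_delete_work(gl))
                pthread_join(gl->delete_worker, NULL);
        else
                demo_glock_put(gl);             /* queueing failed: undo */

        demo_glock_put(gl);                     /* drop the caller's reference */
        return 0;
}

Built with -pthread, the sketch always drops the last reference after the work has finished. That ordering guarantee is what allows the patch below to remove GLF_PENDING_DELETE and iopen_go_demote_ok() without risking a use-after-free.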
Diffstat (limited to 'fs/gfs2')
-rw-r--r--    fs/gfs2/glock.c     26
-rw-r--r--    fs/gfs2/glock.h      1
-rw-r--r--    fs/gfs2/glops.c      9
-rw-r--r--    fs/gfs2/incore.h     3
4 files changed, 5 insertions, 34 deletions
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index fbfbf7a2feac..8d55616488aa 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -984,10 +984,6 @@ static void delete_work_func(struct work_struct *work)
         struct inode *inode;
         u64 no_addr = gl->gl_name.ln_number;
 
-        spin_lock(&gl->gl_lockref.lock);
-        clear_bit(GLF_PENDING_DELETE, &gl->gl_flags);
-        spin_unlock(&gl->gl_lockref.lock);
-
         if (test_bit(GLF_DEMOTE, &gl->gl_flags)) {
                 /*
                  * If we can evict the inode, give the remote node trying to
@@ -2064,28 +2060,14 @@ static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp)
 bool gfs2_queue_delete_work(struct gfs2_glock *gl, unsigned long delay)
 {
-        bool queued;
-
-        spin_lock(&gl->gl_lockref.lock);
-        queued = queue_delayed_work(gfs2_delete_workqueue,
-                                    &gl->gl_delete, delay);
-        if (queued)
-                set_bit(GLF_PENDING_DELETE, &gl->gl_flags);
-        spin_unlock(&gl->gl_lockref.lock);
-        return queued;
+        return queue_delayed_work(gfs2_delete_workqueue,
+                                  &gl->gl_delete, delay);
 }
 
 void gfs2_cancel_delete_work(struct gfs2_glock *gl)
 {
-        if (cancel_delayed_work(&gl->gl_delete)) {
-                clear_bit(GLF_PENDING_DELETE, &gl->gl_flags);
+        if (cancel_delayed_work(&gl->gl_delete))
                 gfs2_glock_put(gl);
-        }
-}
-
-bool gfs2_delete_work_queued(const struct gfs2_glock *gl)
-{
-        return test_bit(GLF_PENDING_DELETE, &gl->gl_flags);
 }
 
 static void flush_delete_work(struct gfs2_glock *gl)
@@ -2307,8 +2289,6 @@ static const char *gflags2str(char *buf, const struct gfs2_glock *gl)
                 *p++ = 'o';
         if (test_bit(GLF_BLOCKING, gflags))
                 *p++ = 'b';
-        if (test_bit(GLF_PENDING_DELETE, gflags))
-                *p++ = 'P';
         if (test_bit(GLF_FREEING, gflags))
                 *p++ = 'x';
         if (test_bit(GLF_INSTANTIATE_NEEDED, gflags))
diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h
index f37ac087e2c1..17b05d51977c 100644
--- a/fs/gfs2/glock.h
+++ b/fs/gfs2/glock.h
@@ -270,7 +270,6 @@ extern void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state);
 extern void gfs2_glock_complete(struct gfs2_glock *gl, int ret);
 extern bool gfs2_queue_delete_work(struct gfs2_glock *gl, unsigned long delay);
 extern void gfs2_cancel_delete_work(struct gfs2_glock *gl);
-extern bool gfs2_delete_work_queued(const struct gfs2_glock *gl);
 extern void gfs2_flush_delete_work(struct gfs2_sbd *sdp);
 extern void gfs2_gl_hash_clear(struct gfs2_sbd *sdp);
 extern void gfs2_gl_dq_holders(struct gfs2_sbd *sdp);
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index d78b61ecc1cd..68676bc78171 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -651,17 +651,11 @@ static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
         if (gl->gl_demote_state == LM_ST_UNLOCKED &&
             gl->gl_state == LM_ST_SHARED && ip) {
                 gl->gl_lockref.count++;
-                if (!queue_delayed_work(gfs2_delete_workqueue,
-                                        &gl->gl_delete, 0))
+                if (!gfs2_queue_delete_work(gl, 0))
                         gl->gl_lockref.count--;
         }
 }
 
-static int iopen_go_demote_ok(const struct gfs2_glock *gl)
-{
-        return !gfs2_delete_work_queued(gl);
-}
-
 /**
  * inode_go_free - wake up anyone waiting for dlm's unlock ast to free it
  * @gl: glock being freed
@@ -767,7 +761,6 @@ const struct gfs2_glock_operations gfs2_iopen_glops = {
         .go_type = LM_TYPE_IOPEN,
         .go_callback = iopen_go_callback,
         .go_dump = inode_go_dump,
-        .go_demote_ok = iopen_go_demote_ok,
         .go_flags = GLOF_LRU | GLOF_NONDISK,
         .go_subclass = 1,
 };
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index c26765080f28..cd886364b11d 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -329,8 +329,7 @@ enum {
         GLF_LRU                         = 13,
         GLF_OBJECT                      = 14, /* Used only for tracing */
         GLF_BLOCKING                    = 15,
-        GLF_PENDING_DELETE              = 17,
-        GLF_FREEING                     = 18, /* Wait for glock to be freed */
+        GLF_FREEING                     = 16, /* Wait for glock to be freed */
 };
 
 struct gfs2_glock {