author     Omar Sandoval <osandov@fb.com>                  2018-08-21 21:54:59 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2018-08-22 10:52:46 -0700
commit     0b172f845ff963ab15e2d861dc155e2ab13241e9 (patch)
tree       6d3771499f11584251f78cf918eda90ccfc82de1 /fs
parent     bf53183164dbba00b342df7d2215b33007ed83ed (diff)
proc/kcore: replace kclist_lock rwlock with rwsem
Now we only need kclist_lock from user context and at fs init time, and the
following changes need to sleep while holding the kclist_lock.

Link: http://lkml.kernel.org/r/521ba449ebe921d905177410fee9222d07882f0d.1531953780.git.osandov@fb.com
Signed-off-by: Omar Sandoval <osandov@fb.com>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Alexey Dobriyan <adobriyan@gmail.com>
Cc: Bhupesh Sharma <bhsharma@redhat.com>
Cc: Eric Biederman <ebiederm@xmission.com>
Cc: James Morse <james.morse@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
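For context, here is a minimal sketch of what the rwlock-to-rwsem conversion buys; it is not taken from kcore.c, and the lock and function names are made up for illustration. A rwlock_t is a spinning lock, so code holding it must not sleep and any allocation under it is limited to GFP_ATOMIC, whereas a struct rw_semaphore may be held across sleeping operations such as a GFP_KERNEL allocation. This patch only swaps the lock type; the follow-up changes mentioned in the message rely on being able to sleep under it.

#include <linux/rwsem.h>
#include <linux/slab.h>

/* Hypothetical lock, standing in for kclist_lock after the conversion. */
static DECLARE_RWSEM(example_lock);

/* Reader side: with an rwsem held, a sleeping allocation is permitted. */
static void *example_reader(size_t len)
{
	void *buf;

	down_read(&example_lock);        /* was: read_lock(&example_lock)   */
	buf = kzalloc(len, GFP_KERNEL);  /* may sleep; not allowed under a rwlock */
	up_read(&example_lock);          /* was: read_unlock(&example_lock) */

	return buf;
}

Note that this particular patch still passes GFP_ATOMIC to kzalloc() in read_kcore(), as visible in the diff below; the GFP_KERNEL above only illustrates what becomes legal once the lock can be held across a sleep.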
Diffstat (limited to 'fs')
-rw-r--r--  fs/proc/kcore.c  20
1 file changed, 10 insertions, 10 deletions
diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
index e83f15a4f66d..ae43a97d511d 100644
--- a/fs/proc/kcore.c
+++ b/fs/proc/kcore.c
@@ -59,7 +59,7 @@ struct memelfnote
};
static LIST_HEAD(kclist_head);
-static DEFINE_RWLOCK(kclist_lock);
+static DECLARE_RWSEM(kclist_lock);
static int kcore_need_update = 1;
/* This doesn't grab kclist_lock, so it should only be used at init time. */
@@ -117,7 +117,7 @@ static void __kcore_update_ram(struct list_head *list)
struct kcore_list *tmp, *pos;
LIST_HEAD(garbage);
- write_lock(&kclist_lock);
+ down_write(&kclist_lock);
if (xchg(&kcore_need_update, 0)) {
list_for_each_entry_safe(pos, tmp, &kclist_head, list) {
if (pos->type == KCORE_RAM
@@ -128,7 +128,7 @@ static void __kcore_update_ram(struct list_head *list)
} else
list_splice(list, &garbage);
proc_root_kcore->size = get_kcore_size(&nphdr, &size);
- write_unlock(&kclist_lock);
+ up_write(&kclist_lock);
free_kclist_ents(&garbage);
}
@@ -451,11 +451,11 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
int nphdr;
unsigned long start;
- read_lock(&kclist_lock);
+ down_read(&kclist_lock);
size = get_kcore_size(&nphdr, &elf_buflen);
if (buflen == 0 || *fpos >= size) {
- read_unlock(&kclist_lock);
+ up_read(&kclist_lock);
return 0;
}
@@ -472,11 +472,11 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
tsz = buflen;
elf_buf = kzalloc(elf_buflen, GFP_ATOMIC);
if (!elf_buf) {
- read_unlock(&kclist_lock);
+ up_read(&kclist_lock);
return -ENOMEM;
}
elf_kcore_store_hdr(elf_buf, nphdr, elf_buflen);
- read_unlock(&kclist_lock);
+ up_read(&kclist_lock);
if (copy_to_user(buffer, elf_buf + *fpos, tsz)) {
kfree(elf_buf);
return -EFAULT;
@@ -491,7 +491,7 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
if (buflen == 0)
return acc;
} else
- read_unlock(&kclist_lock);
+ up_read(&kclist_lock);
/*
* Check to see if our file offset matches with any of
@@ -504,12 +504,12 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
while (buflen) {
struct kcore_list *m;
- read_lock(&kclist_lock);
+ down_read(&kclist_lock);
list_for_each_entry(m, &kclist_head, list) {
if (start >= m->addr && start < (m->addr+m->size))
break;
}
- read_unlock(&kclist_lock);
+ up_read(&kclist_lock);
if (&m->list == &kclist_head) {
if (clear_user(buffer, tsz))