proc/kcore: replace kclist_lock rwlock with rwsem
Now we only need kclist_lock from user context and at fs init time, and the
following changes need to sleep while holding the kclist_lock.

Link: http://lkml.kernel.org/r/521ba449ebe921d905177410fee9222d07882f0d.1531953780.git.osandov@fb.com
Signed-off-by: Omar Sandoval <osandov@fb.com>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Alexey Dobriyan <adobriyan@gmail.com>
Cc: Bhupesh Sharma <bhsharma@redhat.com>
Cc: Eric Biederman <ebiederm@xmission.com>
Cc: James Morse <james.morse@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 0b172f845f
parent bf53183164
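For context, a minimal sketch of the conversion pattern the diff below applies throughout fs/proc/kcore.c. The identifiers here (example_lock, example_head, example_ent) are illustrative and not taken from the patch; the point is that, unlike a rwlock, an rw_semaphore may be held across operations that sleep (for example copy_to_user() or a blocking allocation), which is what the commit message refers to.

#include <linux/list.h>
#include <linux/rwsem.h>

/* Hypothetical list protected by a reader-writer semaphore. */
static LIST_HEAD(example_head);
/* Was: static DEFINE_RWLOCK(example_lock); */
static DECLARE_RWSEM(example_lock);

struct example_ent {
	struct list_head list;
};

/* Readers walk the list; under an rwsem they are allowed to block. */
static void example_read_side(void)
{
	struct example_ent *e;

	down_read(&example_lock);	/* was read_lock(&example_lock) */
	list_for_each_entry(e, &example_head, list) {
		/* ...may sleep here, which a rwlock would not permit... */
	}
	up_read(&example_lock);		/* was read_unlock(&example_lock) */
}

/* Writers exclude readers and other writers, and may also sleep. */
static void example_write_side(struct example_ent *ent)
{
	down_write(&example_lock);	/* was write_lock(&example_lock) */
	list_add_tail(&ent->list, &example_head);
	up_write(&example_lock);	/* was write_unlock(&example_lock) */
}

The hunks below apply exactly this substitution: read_lock()/read_unlock() become down_read()/up_read(), and write_lock()/write_unlock() become down_write()/up_write().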
fs/proc/kcore.c

@@ -59,7 +59,7 @@ struct memelfnote
 };
 
 static LIST_HEAD(kclist_head);
-static DEFINE_RWLOCK(kclist_lock);
+static DECLARE_RWSEM(kclist_lock);
 static int kcore_need_update = 1;
 
 /* This doesn't grab kclist_lock, so it should only be used at init time. */
@@ -117,7 +117,7 @@ static void __kcore_update_ram(struct list_head *list)
 	struct kcore_list *tmp, *pos;
 	LIST_HEAD(garbage);
 
-	write_lock(&kclist_lock);
+	down_write(&kclist_lock);
 	if (xchg(&kcore_need_update, 0)) {
 		list_for_each_entry_safe(pos, tmp, &kclist_head, list) {
 			if (pos->type == KCORE_RAM
@@ -128,7 +128,7 @@ static void __kcore_update_ram(struct list_head *list)
 	} else
 		list_splice(list, &garbage);
 	proc_root_kcore->size = get_kcore_size(&nphdr, &size);
-	write_unlock(&kclist_lock);
+	up_write(&kclist_lock);
 
 	free_kclist_ents(&garbage);
 }
@@ -451,11 +451,11 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
 	int nphdr;
 	unsigned long start;
 
-	read_lock(&kclist_lock);
+	down_read(&kclist_lock);
 	size = get_kcore_size(&nphdr, &elf_buflen);
 
 	if (buflen == 0 || *fpos >= size) {
-		read_unlock(&kclist_lock);
+		up_read(&kclist_lock);
 		return 0;
 	}
 
@@ -472,11 +472,11 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
 			tsz = buflen;
 		elf_buf = kzalloc(elf_buflen, GFP_ATOMIC);
 		if (!elf_buf) {
-			read_unlock(&kclist_lock);
+			up_read(&kclist_lock);
 			return -ENOMEM;
 		}
 		elf_kcore_store_hdr(elf_buf, nphdr, elf_buflen);
-		read_unlock(&kclist_lock);
+		up_read(&kclist_lock);
 		if (copy_to_user(buffer, elf_buf + *fpos, tsz)) {
 			kfree(elf_buf);
 			return -EFAULT;
@@ -491,7 +491,7 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
 		if (buflen == 0)
 			return acc;
 	} else
-		read_unlock(&kclist_lock);
+		up_read(&kclist_lock);
 
 	/*
 	 * Check to see if our file offset matches with any of
@@ -504,12 +504,12 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
 	while (buflen) {
 		struct kcore_list *m;
 
-		read_lock(&kclist_lock);
+		down_read(&kclist_lock);
 		list_for_each_entry(m, &kclist_head, list) {
 			if (start >= m->addr && start < (m->addr+m->size))
 				break;
 		}
-		read_unlock(&kclist_lock);
+		up_read(&kclist_lock);
 
 		if (&m->list == &kclist_head) {
 			if (clear_user(buffer, tsz))