linux/security/keys/proc.c

// SPDX-License-Identifier: GPL-2.0-or-later
/* procfs files for key database enumeration
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <asm/errno.h>
#include "internal.h"
static void *proc_keys_start(struct seq_file *p, loff_t *_pos);
static void *proc_keys_next(struct seq_file *p, void *v, loff_t *_pos);
static void proc_keys_stop(struct seq_file *p, void *v);
static int proc_keys_show(struct seq_file *m, void *v);
static const struct seq_operations proc_keys_ops = {
.start = proc_keys_start,
.next = proc_keys_next,
.stop = proc_keys_stop,
.show = proc_keys_show,
};
static void *proc_key_users_start(struct seq_file *p, loff_t *_pos);
static void *proc_key_users_next(struct seq_file *p, void *v, loff_t *_pos);
static void proc_key_users_stop(struct seq_file *p, void *v);
static int proc_key_users_show(struct seq_file *m, void *v);

static const struct seq_operations proc_key_users_ops = {
        .start  = proc_key_users_start,
        .next   = proc_key_users_next,
        .stop   = proc_key_users_stop,
        .show   = proc_key_users_show,
};

/*
 * Declare the /proc files.
 */
static int __init key_proc_init(void)
{
        struct proc_dir_entry *p;

        p = proc_create_seq("keys", 0, NULL, &proc_keys_ops);
        if (!p)
                panic("Cannot create /proc/keys\n");

        p = proc_create_seq("key-users", 0, NULL, &proc_key_users_ops);
        if (!p)
                panic("Cannot create /proc/key-users\n");

        return 0;
}

__initcall(key_proc_init);

/*
 * Implement "/proc/keys" to provide a list of the keys on the system that
 * grant View permission to the caller.
 */
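/*
 * Advance to the next key in the serial tree that is visible to the caller,
 * skipping keys whose owner has no uid mapping in the caller's user
 * namespace.
 */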
static struct rb_node *key_serial_next(struct seq_file *p, struct rb_node *n)
{
        struct user_namespace *user_ns = seq_user_ns(p);

        n = rb_next(n);
        while (n) {
                struct key *key = rb_entry(n, struct key, serial_node);
                if (kuid_has_mapping(user_ns, key->user->uid))
                        break;
                n = rb_next(n);
        }
        return n;
}

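/*
 * Find the first visible key with a serial number greater than or equal to
 * the requested id: walk down the tree remembering the smallest key above
 * id, then step forward until a key owned by a uid mapped in the caller's
 * user namespace is found.
 */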
static struct key *find_ge_key(struct seq_file *p, key_serial_t id)
{
        struct user_namespace *user_ns = seq_user_ns(p);
        struct rb_node *n = key_serial_tree.rb_node;
        struct key *minkey = NULL;

        while (n) {
                struct key *key = rb_entry(n, struct key, serial_node);
                if (id < key->serial) {
                        if (!minkey || minkey->serial > key->serial)
                                minkey = key;
                        n = n->rb_left;
                } else if (id > key->serial) {
                        n = n->rb_right;
                } else {
                        minkey = key;
                        break;
                }
                key = NULL;
        }

        if (!minkey)
                return NULL;

        for (;;) {
                if (kuid_has_mapping(user_ns, minkey->user->uid))
                        return minkey;
                n = rb_next(&minkey->serial_node);
                if (!n)
                        return NULL;
                minkey = rb_entry(n, struct key, serial_node);
        }
}

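/*
 * seq_file .start operation: take key_serial_lock and position the iterator
 * on the first visible key with a serial number no less than *_pos.  The
 * lock is held until proc_keys_stop() releases it.
 */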
static void *proc_keys_start(struct seq_file *p, loff_t *_pos)
        __acquires(key_serial_lock)
{
        key_serial_t pos = *_pos;
        struct key *key;

        spin_lock(&key_serial_lock);

        if (*_pos > INT_MAX)
                return NULL;
        key = find_ge_key(p, pos);
        if (!key)
                return NULL;
        *_pos = key->serial;
        return &key->serial_node;
}

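/* Extract the serial number from a node in the key serial tree. */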
static inline key_serial_t key_node_serial(struct rb_node *n)
{
        struct key *key = rb_entry(n, struct key, serial_node);
        return key->serial;
}

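/*
 * seq_file .next operation.  The position index must be advanced even when
 * the walk has run off the end of the tree; otherwise a read following an
 * lseek would re-emit the final record (see bugzilla.kernel.org #206283).
 */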
static void *proc_keys_next(struct seq_file *p, void *v, loff_t *_pos)
{
        struct rb_node *n;

        n = key_serial_next(p, v);
        if (n)
                *_pos = key_node_serial(n);
        else
                (*_pos)++;
        return n;
}

static void proc_keys_stop(struct seq_file *p, void *v)
        __releases(key_serial_lock)
{
        spin_unlock(&key_serial_lock);
}

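/*
 * seq_file .show operation: emit one line of /proc/keys, but only if the
 * caller is permitted to View the key in question.
 */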
static int proc_keys_show(struct seq_file *m, void *v)
{
        struct rb_node *_p = v;
        struct key *key = rb_entry(_p, struct key, serial_node);
        unsigned long flags;
        key_ref_t key_ref, skey_ref;
        time64_t now, expiry;
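        /* xbuf must be large enough for a 64-bit timeout rendered in weeks:
         * up to 14 digits plus the unit letter and a NUL (CVE-2016-7042)
         */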
        char xbuf[16];
        short state;
        u64 timo;
        int rc;
        struct keyring_search_context ctx = {
                .index_key              = key->index_key,
                .cred                   = m->file->f_cred,
                .match_data.cmp         = lookup_user_key_possessed,
                .match_data.raw_data    = key,
                .match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT,
                .flags                  = (KEYRING_SEARCH_NO_STATE_CHECK |
                                           KEYRING_SEARCH_RECURSE),
        };

        key_ref = make_key_ref(key, 0);

        /* determine if the key is possessed by this process (a test we can
         * skip if the key does not indicate the possessor can view it)
         */
        if (key->perm & KEY_POS_VIEW) {
                rcu_read_lock();
                skey_ref = search_cred_keyrings_rcu(&ctx);
                rcu_read_unlock();
                if (!IS_ERR(skey_ref)) {
                        key_ref_put(skey_ref);
                        key_ref = make_key_ref(key, 1);
                }
        }

        /* check whether the current task is allowed to view the key */
        rc = key_task_permission(key_ref, ctx.cred, KEY_NEED_VIEW);
        if (rc < 0)
                return 0;

        now = ktime_get_real_seconds();

        rcu_read_lock();

        /* come up with a suitable timeout value */
        expiry = READ_ONCE(key->expiry);
        if (expiry == 0) {
                memcpy(xbuf, "perm", 5);
        } else if (now >= expiry) {
                memcpy(xbuf, "expd", 5);
        } else {
                timo = expiry - now;

                if (timo < 60)
                        sprintf(xbuf, "%llus", timo);
                else if (timo < 60*60)
                        sprintf(xbuf, "%llum", div_u64(timo, 60));
                else if (timo < 60*60*24)
                        sprintf(xbuf, "%lluh", div_u64(timo, 60 * 60));
                else if (timo < 60*60*24*7)
                        sprintf(xbuf, "%llud", div_u64(timo, 60 * 60 * 24));
                else
                        sprintf(xbuf, "%lluw", div_u64(timo, 60 * 60 * 24 * 7));
        }

        state = key_read_state(key);

#define showflag(FLAGS, LETTER, FLAG) \
        ((FLAGS & (1 << FLAG)) ? LETTER : '-')

        flags = READ_ONCE(key->flags);
        seq_printf(m, "%08x %c%c%c%c%c%c%c %5d %4s %08x %5d %5d %-9.9s ",
                   key->serial,
                   state != KEY_IS_UNINSTANTIATED ? 'I' : '-',
                   showflag(flags, 'R', KEY_FLAG_REVOKED),
                   showflag(flags, 'D', KEY_FLAG_DEAD),
                   showflag(flags, 'Q', KEY_FLAG_IN_QUOTA),
                   showflag(flags, 'U', KEY_FLAG_USER_CONSTRUCT),
                   state < 0 ? 'N' : '-',
                   showflag(flags, 'i', KEY_FLAG_INVALIDATED),
                   refcount_read(&key->usage),
                   xbuf,
                   key->perm,
                   from_kuid_munged(seq_user_ns(m), key->uid),
                   from_kgid_munged(seq_user_ns(m), key->gid),
                   key->type->name);
#undef showflag

        if (key->type->describe)
                key->type->describe(key, m);
        seq_putc(m, '\n');

        rcu_read_unlock();
        return 0;
}

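/*
 * Implement "/proc/key-users" to provide a list of the key users and their
 * quota usage.
 */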
static struct rb_node *__key_user_next(struct user_namespace *user_ns, struct rb_node *n)
{
        while (n) {
                struct key_user *user = rb_entry(n, struct key_user, node);
                if (kuid_has_mapping(user_ns, user->uid))
                        break;
                n = rb_next(n);
        }
        return n;
}

static struct rb_node *key_user_next(struct user_namespace *user_ns, struct rb_node *n)
{
        return __key_user_next(user_ns, rb_next(n));
}

static struct rb_node *key_user_first(struct user_namespace *user_ns, struct rb_root *r)
{
        struct rb_node *n = rb_first(r);
        return __key_user_next(user_ns, n);
}

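/*
 * seq_file .start operation: take key_user_lock and skip forward *_pos
 * visible entries from the front of the key_user tree.
 */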
static void *proc_key_users_start(struct seq_file *p, loff_t *_pos)
        __acquires(key_user_lock)
{
        struct rb_node *_p;
        loff_t pos = *_pos;

        spin_lock(&key_user_lock);

        _p = key_user_first(seq_user_ns(p), &key_user_tree);
        while (pos > 0 && _p) {
                pos--;
                _p = key_user_next(seq_user_ns(p), _p);
        }
        return _p;
}

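/* seq_file .next operation: bump the position index and move to the next
 * visible key_user record.
 */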
static void *proc_key_users_next(struct seq_file *p, void *v, loff_t *_pos)
{
        (*_pos)++;
        return key_user_next(seq_user_ns(p), (struct rb_node *)v);
}

static void proc_key_users_stop(struct seq_file *p, void *v)
        __releases(key_user_lock)
{
        spin_unlock(&key_user_lock);
}

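/*
 * seq_file .show operation: emit one line of /proc/key-users, giving the
 * uid, the refcount on the record, total/instantiated key counts, and key
 * and byte quota consumption against the applicable root or non-root limits.
 */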
static int proc_key_users_show(struct seq_file *m, void *v)
{
        struct rb_node *_p = v;
        struct key_user *user = rb_entry(_p, struct key_user, node);
        unsigned maxkeys = uid_eq(user->uid, GLOBAL_ROOT_UID) ?
                key_quota_root_maxkeys : key_quota_maxkeys;
        unsigned maxbytes = uid_eq(user->uid, GLOBAL_ROOT_UID) ?
                key_quota_root_maxbytes : key_quota_maxbytes;

        seq_printf(m, "%5u: %5d %d/%d %d/%d %d/%d\n",
                   from_kuid_munged(seq_user_ns(m), user->uid),
                   refcount_read(&user->usage),
                   atomic_read(&user->nkeys),
                   atomic_read(&user->nikeys),
                   user->qnkeys,
                   maxkeys,
                   user->qnbytes,
                   maxbytes);
        return 0;
}