crypto: user - support incremental algorithm dumps

CRYPTO_MSG_GETALG in NLM_F_DUMP mode sometimes doesn't return all
registered crypto algorithms, because it doesn't support incremental
dumps.  crypto_dump_report() only permits itself to be called once, yet
the netlink subsystem allocates at most ~64 KiB for the skb being dumped
to.  Thus only the first recvmsg() returns data, and it may only include
a subset of the crypto algorithms even if the user buffer passed to
recvmsg() is large enough to hold all of them.

Fix this by using one of the arguments in the netlink_callback structure
to keep track of the current position in the algorithm list.  Then
userspace can do multiple recvmsg() on the socket after sending the dump
request.  This is the way netlink dumps work elsewhere in the kernel;
it's unclear why this was different (probably just an oversight).
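
(For illustration only, not part of the patch: a minimal userspace sketch of
consuming such a dump incrementally.  It assumes the uapi definitions from
<linux/cryptouser.h> and a kernel with this fix; the function name
dump_algorithms() is invented for the example and error handling is terse.)

    /*
     * Hypothetical usage sketch, not taken from the patch.
     */
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/socket.h>
    #include <linux/netlink.h>
    #include <linux/cryptouser.h>

    static int dump_algorithms(void)
    {
        struct sockaddr_nl sa = { .nl_family = AF_NETLINK };
        struct {
            struct nlmsghdr nlh;
            struct crypto_user_alg cru;
        } req;
        char buf[65536];
        int fd, len;

        fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_CRYPTO);
        if (fd < 0)
            return -1;

        memset(&req, 0, sizeof(req));
        req.nlh.nlmsg_len = sizeof(req);
        req.nlh.nlmsg_type = CRYPTO_MSG_GETALG;
        req.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;

        if (sendto(fd, &req, sizeof(req), 0,
                   (struct sockaddr *)&sa, sizeof(sa)) < 0)
            goto fail;

        /*
         * With incremental dumps, each recv() returns another batch of
         * CRYPTO_MSG_GETALG replies; keep reading until NLMSG_DONE.
         */
        for (;;) {
            struct nlmsghdr *nlh;

            len = recv(fd, buf, sizeof(buf), 0);
            if (len <= 0)
                goto fail;

            for (nlh = (struct nlmsghdr *)buf; NLMSG_OK(nlh, len);
                 nlh = NLMSG_NEXT(nlh, len)) {
                if (nlh->nlmsg_type == NLMSG_DONE) {
                    close(fd);
                    return 0;
                }
                if (nlh->nlmsg_type == NLMSG_ERROR)
                    goto fail;
                printf("%s\n",
                       ((struct crypto_user_alg *)NLMSG_DATA(nlh))->cru_name);
            }
        }
    fail:
        close(fd);
        return -1;
    }

The read loop simply keeps calling recv() until the kernel terminates the
multipart dump with NLMSG_DONE; with this fix every registered algorithm is
delivered across those batches instead of only the first ~64 KiB worth.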

Also fix an integer overflow when calculating the dump buffer size hint.
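
(Again for illustration only: the overflow is plain unsigned wraparound in the
u16 accumulator.  The 350-byte entry size and 500-algorithm count below are
assumptions for the example, not the real CRYPTO_REPORT_MAXSIZE or a real
system's algorithm count.)

    /* Illustrative only; values are assumed, not taken from the kernel. */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint16_t hint_u16 = 0;      /* old accumulator type: wraps at 65536 */
        unsigned long hint_ul = 0;  /* new accumulator type: no wrap here */
        unsigned int i;

        for (i = 0; i < 500; i++) {
            hint_u16 += 350;
            hint_ul += 350;
        }
        /* 500 * 350 = 175000, but the u16 holds 175000 % 65536 = 43928 */
        printf("u16 hint = %u\n", hint_u16);
        printf("unsigned long hint (clamped) = %lu\n",
               hint_ul > 65535 ? 65535UL : hint_ul);
        return 0;
    }

Clamping the now overflow-proof sum to 65535 keeps the hint bounded, and with
incremental dumping it is only a hint anyway: whatever doesn't fit in one skb
is delivered on a later recvmsg().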

Fixes: a38f7907b9 ("crypto: Add userspace configuration API")
Signed-off-by: Eric Biggers <ebiggers@google.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Eric Biggers, 2018-12-06 15:55:41 -08:00, committed by Herbert Xu
parent c6018e1a00
commit 0ac6b8fb23
1 changed file with 20 additions and 17 deletions

@@ -231,30 +231,33 @@ static int crypto_report(struct sk_buff *in_skb, struct nlmsghdr *in_nlh,
 
 static int crypto_dump_report(struct sk_buff *skb, struct netlink_callback *cb)
 {
-        struct crypto_alg *alg;
+        const size_t start_pos = cb->args[0];
+        size_t pos = 0;
         struct crypto_dump_info info;
-        int err;
-
-        if (cb->args[0])
-                goto out;
-
-        cb->args[0] = 1;
+        struct crypto_alg *alg;
+        int res;
 
         info.in_skb = cb->skb;
         info.out_skb = skb;
         info.nlmsg_seq = cb->nlh->nlmsg_seq;
         info.nlmsg_flags = NLM_F_MULTI;
 
+        down_read(&crypto_alg_sem);
         list_for_each_entry(alg, &crypto_alg_list, cra_list) {
-                err = crypto_report_alg(alg, &info);
-                if (err)
-                        goto out_err;
+                if (pos >= start_pos) {
+                        res = crypto_report_alg(alg, &info);
+                        if (res == -EMSGSIZE)
+                                break;
+                        if (res)
+                                goto out;
+                }
+                pos++;
         }
-
+        cb->args[0] = pos;
+        res = skb->len;
 out:
-        return skb->len;
-out_err:
-        return err;
+        up_read(&crypto_alg_sem);
+        return res;
 }
 
 static int crypto_dump_report_done(struct netlink_callback *cb)
@@ -442,7 +445,7 @@ static int crypto_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
         if ((type == (CRYPTO_MSG_GETALG - CRYPTO_MSG_BASE) &&
             (nlh->nlmsg_flags & NLM_F_DUMP))) {
                 struct crypto_alg *alg;
-                u16 dump_alloc = 0;
+                unsigned long dump_alloc = 0;
 
                 if (link->dump == NULL)
                         return -EINVAL;
@@ -450,16 +453,16 @@ static int crypto_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
                 down_read(&crypto_alg_sem);
                 list_for_each_entry(alg, &crypto_alg_list, cra_list)
                         dump_alloc += CRYPTO_REPORT_MAXSIZE;
+                up_read(&crypto_alg_sem);
 
                 {
                         struct netlink_dump_control c = {
                                 .dump = link->dump,
                                 .done = link->done,
-                                .min_dump_alloc = dump_alloc,
+                                .min_dump_alloc = min(dump_alloc, 65535UL),
                         };
                         err = netlink_dump_start(crypto_nlsk, skb, nlh, &c);
                 }
-                up_read(&crypto_alg_sem);
 
                 return err;
         }