// SPDX-License-Identifier: GPL-2.0-only
#include <linux/extable.h>
#include <linux/uaccess.h>
#include <linux/sched/debug.h>
#include <xen/xen.h>

#include <asm/fpu/internal.h>
#include <asm/traps.h>
#include <asm/kdebug.h>
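
/*
 * Fixup handlers for the x86 exception tables.  Each struct
 * exception_table_entry pairs a potentially faulting instruction with a
 * fixup address and one of the ex_handler_*() callbacks below; the
 * entries are emitted by the _ASM_EXTABLE*() family of macros (see
 * arch/x86/include/asm/asm.h).
 */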
typedef bool (*ex_handler_t)(const struct exception_table_entry *,
			     struct pt_regs *, int, unsigned long,
			     unsigned long);
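
/*
 * The ->fixup and ->handler fields are stored as 32-bit offsets relative
 * to their own location, which keeps the exception table small and
 * position independent; adding the field's own address turns the offset
 * back into an absolute pointer.
 */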
static inline unsigned long
ex_fixup_addr(const struct exception_table_entry *x)
{
	return (unsigned long)&x->fixup + x->fixup;
}

static inline ex_handler_t
ex_fixup_handler(const struct exception_table_entry *x)
{
	return (ex_handler_t)((unsigned long)&x->handler + x->handler);
}
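
/*
 * Default fixup: just resume execution at the fixup address, leaving all
 * registers untouched.
 */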
__visible bool ex_handler_default(const struct exception_table_entry *fixup,
				  struct pt_regs *regs, int trapnr,
				  unsigned long error_code,
				  unsigned long fault_addr)
{
	regs->ip = ex_fixup_addr(fixup);
	return true;
}
EXPORT_SYMBOL(ex_handler_default);
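
/*
 * Like ex_handler_default(), but additionally passes the trap number to
 * the fixup code in regs->ax.
 */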
__visible bool ex_handler_fault(const struct exception_table_entry *fixup,
				struct pt_regs *regs, int trapnr,
				unsigned long error_code,
				unsigned long fault_addr)
{
	regs->ip = ex_fixup_addr(fixup);
	regs->ax = trapnr;
	return true;
}
EXPORT_SYMBOL_GPL(ex_handler_fault);

/*
 * Handler for UD0 exception following a failed test against the
 * result of a refcount inc/dec/add/sub.
 */
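/*
 * The refcount error path (emitted into .text..refcount) loads the
 * address of the refcount into %rcx before executing UD0, so by the time
 * we get here regs->cx points at the offending refcount_t (see the
 * "lea ...,%rcx; ud0" sequence generated by the macros in
 * arch/x86/include/asm/refcount.h).
 */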
__visible bool ex_handler_refcount(const struct exception_table_entry *fixup,
				   struct pt_regs *regs, int trapnr,
				   unsigned long error_code,
				   unsigned long fault_addr)
{
	/* First unconditionally saturate the refcount. */
	*(int *)regs->cx = INT_MIN / 2;

	/*
	 * Strictly speaking, this reports the fixup destination, not
	 * the fault location, and not the actually overflowing
	 * instruction, which is the instruction before the "js", but
	 * since that instruction could be a variety of lengths, just
	 * report the location after the overflow, which should be close
	 * enough for finding the overflow, as it's at least back in
	 * the function, having returned from .text.unlikely.
	 */
	regs->ip = ex_fixup_addr(fixup);

	/*
	 * This function has been called because either a negative refcount
	 * value was seen by any of the refcount functions, or a zero
	 * refcount value was seen by refcount_dec().
	 *
	 * If we crossed from INT_MAX to INT_MIN, OF (Overflow Flag: result
	 * wrapped around) will be set.  Additionally, seeing the refcount
	 * reach 0 will set ZF (Zero Flag: result was zero).  In each of
	 * these cases we want a report, since it's a boundary condition.
	 * The SF case is not reported since it indicates post-boundary
	 * manipulations below zero or above INT_MAX.  And if none of the
	 * flags are set, something has gone very wrong, so report it.
	 */
	if (regs->flags & (X86_EFLAGS_OF | X86_EFLAGS_ZF)) {
		bool zero = regs->flags & X86_EFLAGS_ZF;

		refcount_error_report(regs, zero ? "hit zero" : "overflow");
	} else if ((regs->flags & X86_EFLAGS_SF) == 0) {
		/* Report if none of OF, ZF, nor SF are set. */
		refcount_error_report(regs, "unexpected saturation");
	}

	return true;
}
EXPORT_SYMBOL(ex_handler_refcount);

/*
 * Handler for when we fail to restore a task's FPU state.  We should never get
 * here because the FPU state of a task using the FPU (task->thread.fpu.state)
 * should always be valid.  However, past bugs have allowed userspace to set
 * reserved bits in the XSAVE area using PTRACE_SETREGSET or sys_rt_sigreturn().
 * These caused XRSTOR to fail when switching to the task, leaking the FPU
 * registers of the task previously executing on the CPU.  Mitigate this class
 * of vulnerability by restoring from the initial state (essentially, zeroing
 * out all the FPU registers) if we can't restore from the task's FPU state.
 */
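/*
 * Roughly how this handler gets attached (an illustrative sketch only;
 * the real wiring lives in the XRSTOR wrappers in asm/fpu/internal.h):
 *
 *	asm volatile("1: xrstor %[fx]\n"
 *		     "2:\n"
 *		     _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_fprestore)
 *		     : : [fx] "m" (*fpstate), "a" (lmask), "d" (hmask));
 *
 * A faulting XRSTOR resumes at label 2: once this handler has reloaded
 * the registers from init_fpstate.
 */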
__visible bool ex_handler_fprestore(const struct exception_table_entry *fixup,
				    struct pt_regs *regs, int trapnr,
				    unsigned long error_code,
				    unsigned long fault_addr)
{
	regs->ip = ex_fixup_addr(fixup);

	WARN_ONCE(1, "Bad FPU state detected at %pB, reinitializing FPU registers.",
		  (void *)instruction_pointer(regs));

	__copy_kernel_to_fpregs(&init_fpstate, -1);
	return true;
}
EXPORT_SYMBOL_GPL(ex_handler_fprestore);
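
/*
 * Fixup for faulting user access helpers (get_user()/put_user() and
 * friends).  A #GP here usually means a user access was attempted on a
 * non-canonical (i.e. wild) kernel-supplied address, so warn about it
 * once before resuming at the fixup address.
 */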
__visible bool ex_handler_uaccess(const struct exception_table_entry *fixup,
				  struct pt_regs *regs, int trapnr,
				  unsigned long error_code,
				  unsigned long fault_addr)
{
	WARN_ONCE(trapnr == X86_TRAP_GP, "General protection fault in user access. Non-canonical address?");
	regs->ip = ex_fixup_addr(fixup);
	return true;
}
EXPORT_SYMBOL(ex_handler_uaccess);
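
/*
 * Fixup for the "ext" uaccess variants: instead of failing the access,
 * record the fault in current->thread.uaccess_err and resume at the
 * fixup address.
 */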
__visible bool ex_handler_ext(const struct exception_table_entry *fixup,
			      struct pt_regs *regs, int trapnr,
			      unsigned long error_code,
			      unsigned long fault_addr)
{
	/* Special hack for uaccess_err */
	current->thread.uaccess_err = 1;
	regs->ip = ex_fixup_addr(fixup);
	return true;
}
EXPORT_SYMBOL(ex_handler_ext);
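
/*
 * Fixups for the "unsafe" MSR accessors: warn once (with a register and
 * stack dump) about the unchecked access, then make it look as if the
 * RDMSR/WRMSR had succeeded.
 */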
__visible bool ex_handler_rdmsr_unsafe(const struct exception_table_entry *fixup,
				       struct pt_regs *regs, int trapnr,
				       unsigned long error_code,
				       unsigned long fault_addr)
{
	if (pr_warn_once("unchecked MSR access error: RDMSR from 0x%x at rIP: 0x%lx (%pS)\n",
			 (unsigned int)regs->cx, regs->ip, (void *)regs->ip))
		show_stack_regs(regs);

	/* Pretend that the read succeeded and returned 0. */
	regs->ip = ex_fixup_addr(fixup);
	regs->ax = 0;
	regs->dx = 0;
	return true;
}
EXPORT_SYMBOL(ex_handler_rdmsr_unsafe);

__visible bool ex_handler_wrmsr_unsafe(const struct exception_table_entry *fixup,
				       struct pt_regs *regs, int trapnr,
				       unsigned long error_code,
				       unsigned long fault_addr)
{
	if (pr_warn_once("unchecked MSR access error: WRMSR to 0x%x (tried to write 0x%08x%08x) at rIP: 0x%lx (%pS)\n",
			 (unsigned int)regs->cx, (unsigned int)regs->dx,
			 (unsigned int)regs->ax, regs->ip, (void *)regs->ip))
		show_stack_regs(regs);

	/* Pretend that the write succeeded. */
	regs->ip = ex_fixup_addr(fixup);
	return true;
}
EXPORT_SYMBOL(ex_handler_wrmsr_unsafe);
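
/*
 * Reload %fs with the NULL selector.  On CPUs with X86_BUG_NULL_SEG,
 * loading a NULL selector does not also clear the cached segment base,
 * so __USER_DS (base 0) is loaded first to avoid leaving a stale base
 * behind.
 */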
__visible bool ex_handler_clear_fs(const struct exception_table_entry *fixup,
				   struct pt_regs *regs, int trapnr,
				   unsigned long error_code,
				   unsigned long fault_addr)
{
	if (static_cpu_has(X86_BUG_NULL_SEG))
		asm volatile ("mov %0, %%fs" : : "rm" (__USER_DS));
	asm volatile ("mov %0, %%fs" : : "rm" (0));
	return ex_handler_default(fixup, regs, trapnr, error_code, fault_addr);
}
EXPORT_SYMBOL(ex_handler_clear_fs);
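
/*
 * Report whether the instruction at @ip has an ex_handler_fault() fixup,
 * i.e. whether its fixup code expects the trap number in regs->ax.
 */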
__visible bool ex_has_fault_handler(unsigned long ip)
{
	const struct exception_table_entry *e;
	ex_handler_t handler;

	e = search_exception_tables(ip);
	if (!e)
		return false;
	handler = ex_fixup_handler(e);

	return handler == ex_handler_fault;
}
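
/*
 * Called from the trap and fault paths: look up a fixup for the faulting
 * instruction and, if one exists, invoke its handler.  Returns non-zero
 * if the exception was handled.
 */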
int fixup_exception(struct pt_regs *regs, int trapnr, unsigned long error_code,
		    unsigned long fault_addr)
{
	const struct exception_table_entry *e;
	ex_handler_t handler;

#ifdef CONFIG_PNPBIOS
	if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
		extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
		extern u32 pnp_bios_is_utter_crap;
		pnp_bios_is_utter_crap = 1;
		printk(KERN_CRIT "PNPBIOS fault.. attempting recovery.\n");
		__asm__ volatile(
			"movl %0, %%esp\n\t"
			"jmp *%1\n\t"
			: : "g" (pnp_bios_fault_esp), "g" (pnp_bios_fault_eip));
		panic("do_trap: can't hit this");
	}
#endif

	e = search_exception_tables(regs->ip);
	if (!e)
		return 0;

	handler = ex_fixup_handler(e);
	return handler(e, regs, trapnr, error_code, fault_addr);
}

extern unsigned int early_recursion_flag;

/* Restricted version used during very early boot */
void __init early_fixup_exception(struct pt_regs *regs, int trapnr)
{
	/* Ignore early NMIs. */
	if (trapnr == X86_TRAP_NMI)
		return;

	if (early_recursion_flag > 2)
		goto halt_loop;

	/*
	 * Old CPUs leave the high bits of CS on the stack
	 * undefined.  I'm not sure which CPUs do this, but at least
	 * the 486 DX works this way.
	 * Xen pv domains are not using the default __KERNEL_CS.
	 */
	if (!xen_pv_domain() && regs->cs != __KERNEL_CS)
		goto fail;

	/*
	 * The full exception fixup machinery is available as soon as
	 * the early IDT is loaded.  This means that it is the
	 * responsibility of extable users to either function correctly
	 * when handlers are invoked early or to simply avoid causing
	 * exceptions before they're ready to handle them.
	 *
	 * This is better than filtering which handlers can be used,
	 * because refusing to call a handler here is guaranteed to
	 * result in a hard-to-debug panic.
	 *
	 * Keep in mind that not all vectors actually get here.  Early
	 * page faults, for example, are special.
	 */
	if (fixup_exception(regs, trapnr, regs->orig_ax, 0))
		return;

	if (fixup_bug(regs, trapnr))
		return;

fail:
	early_printk("PANIC: early exception 0x%02x IP %lx:%lx error %lx cr2 0x%lx\n",
		     (unsigned)trapnr, (unsigned long)regs->cs, regs->ip,
		     regs->orig_ax, read_cr2());

	show_regs(regs);

halt_loop:
	while (true)
		halt();
}