powerpc/8xx: Add Kernel Userspace Access Protection

This patch adds Kernel Userspace Access Protection on the 8xx.

When a page is RO or RW, it is set RO or RW for Key 0 and No Access (NA)
for Key 1.

Up to now, the User group has been defined with Key 0 for both User and
Supervisor accesses.

By changing the group to Key 0 for User and Key 1 for Supervisor, this
patch prevents the kernel from accessing user data.

At exception entry, the kernel saves SPRN_MD_AP in the regs struct and
reapplies the protection. At exception exit, it restores SPRN_MD_AP with
the value saved on exception entry.
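
The sequence above can be sketched in C for illustration. The helper names below are hypothetical; the real implementation is the kuap_save_and_lock/kuap_restore assembly macros added in kup-8xx.h further down, but regs->kuap, mfspr()/mtspr() and MD_APG_KUAP all appear in this patch:

/* Hypothetical C rendition of the entry/exit protocol, illustration only. */
static inline void exception_entry_kuap(struct pt_regs *regs)
{
        regs->kuap = mfspr(SPRN_MD_AP);  /* save the current protection in the regs struct */
        mtspr(SPRN_MD_AP, MD_APG_KUAP);  /* reapply the protection: Supervisor gets Key 1 */
}

static inline void exception_exit_kuap(struct pt_regs *regs)
{
        mtspr(SPRN_MD_AP, regs->kuap);   /* restore the value saved at exception entry */
}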

Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
[mpe: Drop allow_read/write_to/from_user() as they're now in kup.h]
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Authored by Christophe Leroy on 2019-03-11 08:30:34 +00:00, committed by Michael Ellerman
parent 06fbe81b59
commit 2679f9bd0a
5 changed files with 81 additions and 0 deletions

@@ -5,6 +5,9 @@
#ifdef CONFIG_PPC64
#include <asm/book3s/64/kup-radix.h>
#endif
#ifdef CONFIG_PPC_8xx
#include <asm/nohash/32/kup-8xx.h>
#endif
#ifdef __ASSEMBLY__
#ifndef CONFIG_PPC_KUAP

@@ -0,0 +1,58 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_KUP_8XX_H_
#define _ASM_POWERPC_KUP_8XX_H_
#include <asm/bug.h>
#ifdef CONFIG_PPC_KUAP
#ifdef __ASSEMBLY__
.macro kuap_save_and_lock sp, thread, gpr1, gpr2, gpr3
lis \gpr2, MD_APG_KUAP@h /* only APG0 and APG1 are used */
mfspr \gpr1, SPRN_MD_AP
mtspr SPRN_MD_AP, \gpr2
stw \gpr1, STACK_REGS_KUAP(\sp)
.endm
.macro kuap_restore sp, current, gpr1, gpr2, gpr3
lwz \gpr1, STACK_REGS_KUAP(\sp)
mtspr SPRN_MD_AP, \gpr1
.endm
.macro kuap_check current, gpr
#ifdef CONFIG_PPC_KUAP_DEBUG
mfspr \gpr, SPRN_MD_AP
rlwinm \gpr, \gpr, 16, 0xffff
999: twnei \gpr, MD_APG_KUAP@h
EMIT_BUG_ENTRY 999b, __FILE__, __LINE__, (BUGFLAG_WARNING | BUGFLAG_ONCE)
#endif
.endm
#else /* !__ASSEMBLY__ */
#include <asm/reg.h>
static inline void allow_user_access(void __user *to, const void __user *from,
unsigned long size)
{
mtspr(SPRN_MD_AP, MD_APG_INIT);
}
static inline void prevent_user_access(void __user *to, const void __user *from,
unsigned long size)
{
mtspr(SPRN_MD_AP, MD_APG_KUAP);
}
static inline bool bad_kuap_fault(struct pt_regs *regs, bool is_write)
{
return WARN(!((regs->kuap ^ MD_APG_KUAP) & 0xf0000000),
"Bug: fault blocked by AP register !");
}
#endif /* !__ASSEMBLY__ */
#endif /* CONFIG_PPC_KUAP */
#endif /* _ASM_POWERPC_KUP_8XX_H_ */
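
As a usage note, these two hooks are meant to bracket every access to user memory; the real call sites are the generic allow/prevent wrappers mentioned in the [mpe] note of the commit message, not anything in this file. A minimal sketch, where example_copy_to_user() is a hypothetical name:

/* Hypothetical sketch of a user-copy path built on the hooks above. */
static inline unsigned long
example_copy_to_user(void __user *to, const void *from, unsigned long n)
{
        unsigned long ret;

        allow_user_access(to, NULL, n);    /* MD_AP = MD_APG_INIT: user pages accessible */
        ret = __copy_tofrom_user(to, (__force const void __user *)from, n);
        prevent_user_access(to, NULL, n);  /* MD_AP = MD_APG_KUAP: user pages locked again */

        return ret;
}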

@@ -121,6 +121,13 @@
*/
#define MD_APG_INIT 0x4fffffff
/*
* 0 => No user => 01 (all accesses performed according to page definition)
* 1 => User => 10 (all accesses performed according to swapped page definition)
* 2-15 => NA => 11 (all accesses performed as user iaw page definition)
*/
#define MD_APG_KUAP 0x6fffffff
/* The effective page number register. When read, contains the information
* about the last instruction TLB miss. When MD_RPN is written, bits in
* this register are used to create the TLB entry.
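
To make the encoding concrete: MD_AP holds one 2-bit field per access protection group, sixteen groups starting from the most significant bits, so MD_APG_KUAP = 0x6fffffff gives group 0 the value 01, group 1 the value 10 and groups 2-15 the value 11, matching the comment above. A small stand-alone C check of that decoding (illustration only, not kernel code):

/* Decode an 8xx MxAP value into its 16 two-bit access protection groups.
 * Group 0 sits in the two most significant bits.
 */
#include <stdio.h>

static unsigned int apg(unsigned int ap, unsigned int group)
{
        return (ap >> (30 - 2 * group)) & 3;
}

int main(void)
{
        unsigned int kuap = 0x6fffffff;   /* MD_APG_KUAP */
        unsigned int init = 0x4fffffff;   /* MD_APG_INIT */

        printf("KUAP: group0=%u group1=%u group2=%u\n",
               apg(kuap, 0), apg(kuap, 1), apg(kuap, 2));   /* prints 1, 2, 3 (01, 10, 11) */
        printf("INIT: group0=%u group1=%u group2=%u\n",
               apg(init, 0), apg(init, 1), apg(init, 2));   /* prints 1, 0, 3 (01, 00, 11) */
        return 0;
}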

@@ -225,3 +225,15 @@ void __init setup_kuep(bool disabled)
mtspr(SPRN_MI_AP, MI_APG_KUEP);
}
#endif
#ifdef CONFIG_PPC_KUAP
void __init setup_kuap(bool disabled)
{
pr_info("Activating Kernel Userspace Access Protection\n");
if (disabled)
pr_warn("KUAP cannot be disabled yet on 8xx when compiled in\n");
mtspr(SPRN_MD_AP, MD_APG_KUAP);
}
#endif
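
For context, nothing in this hunk calls setup_kuap(); the generic KUP framework added earlier in the series is expected to invoke it at boot, with the disabled flag driven by a command-line parameter. A rough sketch of that wiring, assumed rather than taken from this diff (parse_nosmap() and setup_kup() below are assumptions about the framework, not part of this patch):

/* Sketch of the assumed generic caller; lives in common mm setup code. */
static bool disable_kuap = !IS_ENABLED(CONFIG_PPC_KUAP);

static int __init parse_nosmap(char *p)
{
        disable_kuap = true;
        return 0;
}
early_param("nosmap", parse_nosmap);

void __init setup_kup(void)
{
        setup_kuap(disable_kuap);  /* ends up in the 8xx implementation above */
}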

@@ -35,6 +35,7 @@ config PPC_8xx
select FSL_SOC
select SYS_SUPPORTS_HUGETLBFS
select PPC_HAVE_KUEP
select PPC_HAVE_KUAP
config 40x
bool "AMCC 40x"