powerpc: Implement UACCESS validation
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
chleroy committed Jun 21, 2023
1 parent ab4a08a commit 2686bde
Showing 9 changed files with 165 additions and 24 deletions.
2 changes: 2 additions & 0 deletions arch/powerpc/Kconfig
@@ -160,6 +160,7 @@ config PPC
select ARCH_KEEP_MEMBLOCK
select ARCH_MIGHT_HAVE_PC_PARPORT
select ARCH_MIGHT_HAVE_PC_SERIO
select ARCH_OBJTOOL_SKIP_ASM
select ARCH_OPTIONAL_KERNEL_RWX if ARCH_HAS_STRICT_KERNEL_RWX
select ARCH_OPTIONAL_KERNEL_RWX_DEFAULT
select ARCH_SPLIT_ARG64 if PPC32
@@ -258,6 +259,7 @@ config PPC
select HAVE_OPTPROBES
select HAVE_OBJTOOL if PPC32 || MPROFILE_KERNEL
select HAVE_OBJTOOL_MCOUNT if HAVE_OBJTOOL
select HAVE_UACCESS_VALIDATION if HAVE_OBJTOOL
select HAVE_PERF_EVENTS
select HAVE_PERF_EVENTS_NMI if PPC64
select HAVE_PERF_REGS
30 changes: 22 additions & 8 deletions arch/powerpc/include/asm/book3s/64/kup.h
@@ -322,20 +322,34 @@ static __always_inline unsigned long get_kuap(void)
return mfspr(SPRN_AMR);
}

static __always_inline void set_kuap(unsigned long value)
static __always_inline void set_kuap(unsigned long value, bool begin)
{
if (!mmu_has_feature(MMU_FTR_BOOK3S_KUAP))
return;

BUILD_BUG_ON(!__builtin_constant_p(begin));
/*
* ISA v3.0B says we need a CSI (Context Synchronising Instruction) both
* before and after the move to AMR. See table 6 on page 1134.
*/
isync();
mtspr(SPRN_AMR, value);
if (begin)
mtspr_uaccess_begin(SPRN_AMR, value);
else
mtspr_uaccess_end(SPRN_AMR, value);
isync();
}

static __always_inline void set_kuap_begin(unsigned long value)
{
set_kuap(value, true);
}

static __always_inline void set_kuap_end(unsigned long value)
{
set_kuap(value, false);
}
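Note that begin has to be a compile-time constant (hence the BUILD_BUG_ON on __builtin_constant_p()): after inlining and constant folding, each call site collapses to exactly one of mtspr_uaccess_begin() or mtspr_uaccess_end(), so the annotation objtool sees is fixed at build time. A runtime-variable flag would leave objtool unable to tell whether a given AMR write opens or closes the user-access window.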

static __always_inline bool __bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
{
/*
@@ -368,11 +382,11 @@ static __always_inline void allow_user_access(void __user *to, const void __user
thread_amr = current_thread_amr();

if (dir == KUAP_READ)
set_kuap(thread_amr | AMR_KUAP_BLOCK_WRITE);
set_kuap_begin(thread_amr | AMR_KUAP_BLOCK_WRITE);
else if (dir == KUAP_WRITE)
set_kuap(thread_amr | AMR_KUAP_BLOCK_READ);
set_kuap_begin(thread_amr | AMR_KUAP_BLOCK_READ);
else if (dir == KUAP_READ_WRITE)
set_kuap(thread_amr);
set_kuap_begin(thread_amr);
else
BUILD_BUG();
}
@@ -394,7 +408,7 @@ static __always_inline void allow_user_access(void __user *to, const void __user

static __always_inline void prevent_user_access(unsigned long dir)
{
set_kuap(AMR_KUAP_BLOCKED);
set_kuap_end(AMR_KUAP_BLOCKED);
if (static_branch_unlikely(&uaccess_flush_key))
do_uaccess_flush();
}
@@ -403,7 +417,7 @@ static __always_inline unsigned long prevent_user_access_return(void)
{
unsigned long flags = get_kuap();

set_kuap(AMR_KUAP_BLOCKED);
set_kuap_end(AMR_KUAP_BLOCKED);
if (static_branch_unlikely(&uaccess_flush_key))
do_uaccess_flush();

@@ -412,7 +426,7 @@ static __always_inline unsigned long prevent_user_access_return(void)

static __always_inline void restore_user_access(unsigned long flags)
{
set_kuap(flags);
set_kuap_begin(flags);
if (static_branch_unlikely(&uaccess_flush_key) && flags == AMR_KUAP_BLOCKED)
do_uaccess_flush();
}
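With this split, the AMR write that opens a user-access window carries a begin annotation and the write that closes it an end annotation, which is what lets objtool verify that a window opened on one path is closed again on every path out of the function. A rough caller-side sketch of the pairing being validated (simplified and modeled on powerpc's raw_copy_to_user(); the helpers come from asm/kup.h and asm/uaccess.h, not from this diff):

	static inline unsigned long copy_to_user_sketch(void __user *to,
							const void *from,
							unsigned long n)
	{
		unsigned long left;

		allow_write_to_user(to, n);	/* reaches set_kuap_begin() on book3s/64 */
		left = __copy_tofrom_user(to, (__force const void __user *)from, n);
		prevent_write_to_user(to, n);	/* reaches set_kuap_end() */

		return left;
	}

Everything between the two calls runs with the AMR opened for user writes, so any function called there must be on objtool's uaccess-safe list (see the tools/objtool/check.c change below).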
12 changes: 12 additions & 0 deletions arch/powerpc/include/asm/kup.h
@@ -10,6 +10,8 @@
#include <linux/types.h>

static __always_inline bool kuap_is_disabled(void);
static __always_inline void mtspr_uaccess_begin(int rn, unsigned long val);
static __always_inline void mtspr_uaccess_end(int rn, unsigned long val);
#endif

#ifdef CONFIG_PPC_BOOK3S_64
@@ -224,6 +226,16 @@ static __always_inline void prevent_current_write_to_user(void)
prevent_user_access(KUAP_WRITE);
}

static __always_inline void mtspr_uaccess_begin(int rn, unsigned long val)
{
asm(ASM_UACCESS_BEGIN "mtspr %0, %1\n\t" : : "i"(rn), "r"(val) : "memory");
}

static __always_inline void mtspr_uaccess_end(int rn, unsigned long val)
{
asm(ASM_UACCESS_END "mtspr %0, %1\n\t" : : "i"(rn), "r"(val) : "memory");
}

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_POWERPC_KUAP_H_ */
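ASM_UACCESS_BEGIN and ASM_UACCESS_END are not defined in this commit. Judging by objtool's existing annotation mechanisms, they presumably emit a local label plus an entry in a discard section that objtool reads back while checking the object file; the sketch below is purely hypothetical and the section names are invented for illustration:

	/* Hypothetical illustration -- not the definitions used by this series. */
	#define ASM_UACCESS_BEGIN \
		"999:\n\t" \
		".pushsection .discard.uaccess_begin\n\t" \
		".long 999b - .\n\t" \
		".popsection\n\t"

	#define ASM_UACCESS_END \
		"998:\n\t" \
		".pushsection .discard.uaccess_end\n\t" \
		".long 998b - .\n\t" \
		".popsection\n\t"

objtool would then treat the mtspr that follows a begin label as the point where user access is opened and track that state across branches, much as it already tracks stac/clac on x86.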
8 changes: 4 additions & 4 deletions arch/powerpc/include/asm/nohash/32/kup-8xx.h
@@ -42,12 +42,12 @@ static __always_inline unsigned long __kuap_get_and_assert_locked(void)
static __always_inline void __allow_user_access(void __user *to, const void __user *from,
unsigned long size, unsigned long dir)
{
mtspr(SPRN_MD_AP, MD_APG_INIT);
mtspr_uaccess_begin(SPRN_MD_AP, MD_APG_INIT);
}

static __always_inline void __prevent_user_access(unsigned long dir)
{
mtspr(SPRN_MD_AP, MD_APG_KUAP);
mtspr_uaccess_end(SPRN_MD_AP, MD_APG_KUAP);
}

static __always_inline unsigned long __prevent_user_access_return(void)
@@ -56,14 +56,14 @@ static __always_inline unsigned long __prevent_user_access_return(void)

flags = mfspr(SPRN_MD_AP);

mtspr(SPRN_MD_AP, MD_APG_KUAP);
mtspr_uaccess_end(SPRN_MD_AP, MD_APG_KUAP);

return flags;
}

static __always_inline void __restore_user_access(unsigned long flags)
{
mtspr(SPRN_MD_AP, flags);
mtspr_uaccess_begin(SPRN_MD_AP, flags);
}

static __always_inline bool
8 changes: 4 additions & 4 deletions arch/powerpc/include/asm/nohash/kup-booke.h
@@ -64,21 +64,21 @@ static __always_inline unsigned long __kuap_get_and_assert_locked(void)
static __always_inline void __allow_user_access(void __user *to, const void __user *from,
unsigned long size, unsigned long dir)
{
mtspr(SPRN_PID, current->thread.pid);
mtspr_uaccess_begin(SPRN_PID, current->thread.pid);
isync();
}

static __always_inline void __prevent_user_access(unsigned long dir)
{
mtspr(SPRN_PID, 0);
mtspr_uaccess_end(SPRN_PID, 0);
isync();
}

static __always_inline unsigned long __prevent_user_access_return(void)
{
unsigned long flags = mfspr(SPRN_PID);

mtspr(SPRN_PID, 0);
mtspr_uaccess_end(SPRN_PID, 0);
isync();

return flags;
@@ -87,7 +87,7 @@ static __always_inline unsigned long __prevent_user_access_return(void)
static __always_inline void __restore_user_access(unsigned long flags)
{
if (flags) {
mtspr(SPRN_PID, current->thread.pid);
mtspr_uaccess_begin(SPRN_PID, current->thread.pid);
isync();
}
}
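Both nohash flavours follow the same convention: the SPR write that grants user access (MD_AP on 8xx, PID on booke) gets the begin annotation, the write that revokes it gets the end annotation, and __restore_user_access() counts as a begin because it may reopen a window that was closed earlier. A simplified sketch of that save/close/restore pattern (illustrative only, not code from this commit):

	unsigned long flags;

	flags = __prevent_user_access_return();	/* close the window, remember the prior state */
	/* ... code that must not run with user access open ... */
	__restore_user_access(flags);		/* reopen only if it was open before */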
1 change: 0 additions & 1 deletion arch/powerpc/platforms/Kconfig.cputype
@@ -505,7 +505,6 @@ config PPC_KUAP
config PPC_KUAP_BOOTTIME
bool "Allow disabling Kernel Userspace Access Protection at boottime"
depends on PPC_KUAP
default y
help
Allow the user to disable Kernel Userspace Access Protection (KUAP)
at boot time using the 'nosmap' kernel parameter.
81 changes: 76 additions & 5 deletions tools/objtool/arch/powerpc/decode.c
@@ -43,24 +43,93 @@ int arch_decode_instruction(struct objtool_file *file, const struct section *sec
unsigned long offset, unsigned int maxlen,
struct instruction *insn)
{
unsigned int opcode;
unsigned int opcode, xop;
unsigned int rs, ra, rb, bo, bi, to, uimm, simm, lk, aa;
enum insn_type typ;
unsigned long imm;
u32 ins;

if (file->elf->ehdr.e_flags & EF_PPC_RELOCATABLE_LIB) {
struct reloc *reloc;

reloc = find_reloc_by_dest_range(file->elf, insn->sec, insn->offset, 4);

if (reloc && !strncmp(reloc->sym->sec->name, ".got2", 5)) {
insn->type = INSN_OTHER;
insn->ignore = true;
insn->len = 4;

return 0;
}
}

ins = bswap_if_needed(file->elf, *(u32 *)(sec->data->d_buf + offset));
opcode = ins >> 26;
typ = INSN_OTHER;
imm = 0;
xop = (ins >> 1) & 0x3ff;
rs = bo = to = (ins >> 21) & 0x1f;
ra = bi = (ins >> 16) & 0x1f;
rb = (ins >> 11) & 0x1f;
uimm = simm = (ins >> 0) & 0xffff;
aa = ins & 2;
lk = ins & 1;

switch (opcode) {
case 3:
if (to == 31 && ra == 0 && simm == 0) /* twi 31, r0, 0 */
typ = INSN_BUG;
else
typ = INSN_OTHER;
break;
case 16: /* bc[l][a] */
if (lk) /* bcl[a] */
typ = INSN_OTHER;
else /* bc[a] */
typ = INSN_JUMP_CONDITIONAL;

imm = ins & 0xfffc;
if (imm & 0x8000)
imm -= 0x10000;
insn->immediate = imm | aa;
break;
case 18: /* b[l][a] */
if ((ins & 3) == 1) /* bl */
if (lk) /* bl[a] */
typ = INSN_CALL;
else /* b[a] */
typ = INSN_JUMP_UNCONDITIONAL;

imm = ins & 0x3fffffc;
if (imm & 0x2000000)
imm -= 0x4000000;
insn->immediate = imm | aa;
break;
case 19:
if (xop == 16 && bo == 20 && bi == 0) /* blr */
typ = INSN_RETURN;
else if (xop == 16) /* bclr */
typ = INSN_RETURN_CONDITIONAL;
else if (xop == 50) /* rfi */
typ = INSN_JUMP_DYNAMIC;
else if (xop == 528 && bo == 20 && bi == 0 && !lk) /* bctr */
typ = INSN_JUMP_DYNAMIC;
else if (xop == 528 && bo == 20 && bi == 0 && lk) /* bctrl */
typ = INSN_CALL_DYNAMIC;
else
typ = INSN_OTHER;
break;
case 24:
if (rs == 0 && ra == 0 && uimm == 0)
typ = INSN_NOP;
else
typ = INSN_OTHER;
break;
case 31:
if (xop == 4 && to == 31 && ra == 0 && rb == 0) /* trap */
typ = INSN_BUG;
else
typ = INSN_OTHER;
break;
default:
typ = INSN_OTHER;
break;
}

@@ -70,13 +139,15 @@ int arch_decode_instruction(struct objtool_file *file, const struct section *sec
insn->len = 4;

insn->type = typ;
insn->immediate = imm;

return 0;
}
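The branch decoding above packs a lot into a few masks: the LI/BD displacement fields are word-aligned, the low two bits of the word are the AA (absolute) and LK (link) flags, and the displacement's sign bit has to be extended by hand. A small standalone example of the same arithmetic, using an assumed instruction word rather than anything taken from this diff:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t ins = 0x4bfffff0;		/* assumed example: "b .-16" */
		unsigned int opcode = ins >> 26;	/* 18: I-form branch */
		unsigned int aa = ins & 2;		/* absolute-address bit */
		unsigned int lk = ins & 1;		/* link bit; 1 would make it "bl" */
		long imm = ins & 0x3fffffc;		/* LI field, low two bits masked off */

		if (imm & 0x2000000)			/* manual sign extension of the 26-bit field */
			imm -= 0x4000000;

		/* prints: opcode=18 aa=0 lk=0 offset=-16 */
		printf("opcode=%u aa=%u lk=%u offset=%ld\n", opcode, aa, lk, imm);
		return 0;
	}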

unsigned long arch_jump_destination(struct instruction *insn)
{
if (insn->immediate & 2)
return insn->immediate & ~2;

return insn->offset + insn->immediate;
}
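The AA handling is the reason for the imm | aa above: when AA is set the displacement is an absolute address rather than a PC-relative offset, and since real targets are word-aligned, bit 1 of insn->immediate is free to carry that flag. For example, ba 0x100 encodes as 0x48000102; the decoder produces imm = 0x100 and aa = 2, so insn->immediate becomes 0x102, and arch_jump_destination() strips bit 1 and returns 0x100 directly instead of adding the offset to insn->offset.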

44 changes: 42 additions & 2 deletions tools/objtool/arch/powerpc/special.c
@@ -13,7 +13,47 @@ bool arch_support_alt_relocation(struct special_alt *special_alt,
}

struct reloc *arch_find_switch_table(struct objtool_file *file,
struct instruction *insn)
struct instruction *insn, bool *is_rel)
{
exit(-1);
struct reloc *text_reloc, *rodata_reloc;
struct section *table_sec;
unsigned long table_offset;

/* look for a relocation which references .rodata */
text_reloc = find_reloc_by_dest_range(file->elf, insn->sec,
insn->offset, insn->len);
if (!text_reloc || text_reloc->sym->type != STT_SECTION ||
!text_reloc->sym->sec->rodata)
return NULL;

table_offset = text_reloc->addend;
table_sec = text_reloc->sym->sec;

/*
* Make sure the .rodata address isn't associated with a
* symbol. GCC jump tables are anonymous data.
*
* Also support C jump tables which are in the same format as
* switch jump tables. For objtool to recognize them, they
* need to be placed in the C_JUMP_TABLE_SECTION section. They
* have symbols associated with them.
*/
if (find_symbol_containing(table_sec, table_offset)) {
*is_rel = false;
if (strcmp(table_sec->name, C_JUMP_TABLE_SECTION))
return NULL;
} else {
*is_rel = true;
}

/*
* Each table entry has a rela associated with it. The rela
* should reference text in the same function as the original
* instruction.
*/
rodata_reloc = find_reloc_by_dest(file->elf, table_sec, table_offset);
if (!rodata_reloc)
return NULL;

return rodata_reloc;
}
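GCC lowers dense switch statements into anonymous arrays of code addresses placed in .rodata, and each entry of such a table carries a relocation back into the function, which is exactly the chain this helper follows: indirect-branch instruction -> .rodata table -> per-entry relocations. A trivial example of C that typically produces such a table (whether a table is actually emitted depends on the compiler, options and target):

	int classify(int c)
	{
		switch (c) {
		case 0:  return 10;
		case 1:  return 11;
		case 2:  return 12;
		case 3:  return 13;
		case 4:  return 14;
		case 5:  return 15;
		default: return -1;
		}
	}

Anonymous tables (no symbol at the table address) are treated as compiler-generated and reported with *is_rel set; tables that do have a symbol are only accepted when they live in C_JUMP_TABLE_SECTION.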
3 changes: 3 additions & 0 deletions tools/objtool/check.c
@@ -236,6 +236,7 @@ static bool __dead_end_function(struct objtool_file *file, struct symbol *func,
"x86_64_start_reservations",
"xen_cpu_bringup_again",
"xen_start_kernel",
"longjmp",
};
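Adding longjmp to this list tells objtool's dead-end analysis that a call to longjmp() never returns to its caller, presumably to cover powerpc's own setjmp/longjmp helpers used by xmon and the crash/kexec code.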

if (!func)
@@ -1335,6 +1336,8 @@ static const char *uaccess_safe_builtin[] = {
"rep_stos_alternative",
"rep_movs_alternative",
"__copy_user_nocache",
"__copy_tofrom_user",
"__arch_clear_user",
NULL
};
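__copy_tofrom_user and __arch_clear_user are powerpc's low-level user copy and clear primitives, i.e. the routines that legitimately run with KUAP open, so they join the whitelist of calls objtool tolerates inside a uaccess region; any other call made there is flagged. An illustrative sketch of the kind of pattern the check is meant to catch (both the function and some_debug_helper() are hypothetical):

	static inline unsigned long bad_copy_to_user(void __user *to, const void *from,
						     unsigned long n)
	{
		unsigned long left;

		allow_write_to_user(to, n);
		some_debug_helper();		/* called with user access open and not on the
						   safe list, so objtool would warn about it */
		left = __copy_tofrom_user(to, (__force const void __user *)from, n);
		prevent_write_to_user(to, n);

		return left;
	}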
