arm64: dump: Add checking for writable and executable pages
Page mappings with full RWX permissions are a security risk. x86
has an option to walk the page tables and dump any bad pages.
(See e1a5832 ("x86/mm: Warn on W^X mappings")). Add a similar
implementation for arm64.

Reviewed-by: Kees Cook <[email protected]>
Reviewed-by: Mark Rutland <[email protected]>
Tested-by: Mark Rutland <[email protected]>
Signed-off-by: Laura Abbott <[email protected]>
Reviewed-by: Ard Biesheuvel <[email protected]>
[[email protected]: folded fix for KASan out of bounds from Mark Rutland]
Signed-off-by: Catalin Marinas <[email protected]>
labbott authored and ctmarinas committed Nov 7, 2016
1 parent cfd69e9 commit 1404d6f
Showing 4 changed files with 94 additions and 0 deletions.
29 changes: 29 additions & 0 deletions arch/arm64/Kconfig.debug
@@ -42,6 +42,35 @@ config ARM64_RANDOMIZE_TEXT_OFFSET
	  of TEXT_OFFSET and platforms must not require a specific
	  value.

config DEBUG_WX
	bool "Warn on W+X mappings at boot"
	select ARM64_PTDUMP_CORE
	---help---
	  Generate a warning if any W+X mappings are found at boot.

	  This is useful for discovering cases where the kernel is leaving
	  W+X mappings after applying NX, as such mappings are a security risk.
	  This check also includes UXN, which should be set on all kernel
	  mappings.

	  Look for a message in dmesg output like this:

	    arm64/mm: Checked W+X mappings: passed, no W+X pages found.

	  or like this, if the check failed:

	    arm64/mm: Checked W+X mappings: FAILED, <N> W+X pages found.

	  Note that even if the check fails, your kernel is possibly
	  still fine: W+X mappings are not a security hole in themselves,
	  but they make it easier to exploit other unfixed kernel bugs.

	  There is no runtime or memory usage effect of this option
	  once the kernel has booted up - it's a one time check.

	  If in doubt, say "Y".

config DEBUG_SET_MODULE_RONX
	bool "Set loadable kernel module data as NX and text as RO"
	depends on MODULES
8 changes: 8 additions & 0 deletions arch/arm64/include/asm/ptdump.h
@@ -42,5 +42,13 @@ static inline int ptdump_debugfs_register(struct ptdump_info *info,
	return 0;
}
#endif
void ptdump_check_wx(void);
#endif /* CONFIG_ARM64_PTDUMP_CORE */

#ifdef CONFIG_DEBUG_WX
#define debug_checkwx() ptdump_check_wx()
#else
#define debug_checkwx() do { } while (0)
#endif

#endif /* __ASM_PTDUMP_H */
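
The debug_checkwx() wrapper above uses the usual compile-time stub pattern: callers invoke it unconditionally, and when CONFIG_DEBUG_WX is not set the macro expands to an empty do { } while (0), so the call disappears entirely. Below is a minimal standalone sketch of the same pattern in plain userspace C rather than kernel code; the DEBUG_WX define and check_wx() function are illustrative stand-ins, not kernel symbols.

#include <stdio.h>

/* #define DEBUG_WX 1    -- uncomment to enable the check */

#ifdef DEBUG_WX
/* Real implementation, only compiled in when the option is enabled. */
static void check_wx(void)
{
	printf("checking W+X mappings\n");
}
#define debug_checkwx()	check_wx()
#else
/* Stub: expands to nothing, so the call site needs no #ifdef. */
#define debug_checkwx()	do { } while (0)
#endif

int main(void)
{
	/* Always safe to call; compiles away when DEBUG_WX is off. */
	debug_checkwx();
	return 0;
}

This is the same trick that lets mark_rodata_ro() in mmu.c (see the last hunk below) call debug_checkwx() without any #ifdef at the call site.
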
54 changes: 54 additions & 0 deletions arch/arm64/mm/dump.c
@@ -74,6 +74,9 @@ struct pg_state {
	unsigned long start_address;
	unsigned level;
	u64 current_prot;
	bool check_wx;
	unsigned long wx_pages;
	unsigned long uxn_pages;
};

struct prot_bits {
@@ -202,6 +205,35 @@ static void dump_prot(struct pg_state *st, const struct prot_bits *bits,
	}
}

static void note_prot_uxn(struct pg_state *st, unsigned long addr)
{
	if (!st->check_wx)
		return;

	if ((st->current_prot & PTE_UXN) == PTE_UXN)
		return;

	WARN_ONCE(1, "arm64/mm: Found non-UXN mapping at address %p/%pS\n",
		  (void *)st->start_address, (void *)st->start_address);

	st->uxn_pages += (addr - st->start_address) / PAGE_SIZE;
}

static void note_prot_wx(struct pg_state *st, unsigned long addr)
{
	if (!st->check_wx)
		return;
	if ((st->current_prot & PTE_RDONLY) == PTE_RDONLY)
		return;
	if ((st->current_prot & PTE_PXN) == PTE_PXN)
		return;

	WARN_ONCE(1, "arm64/mm: Found insecure W+X mapping at address %p/%pS\n",
		  (void *)st->start_address, (void *)st->start_address);

	st->wx_pages += (addr - st->start_address) / PAGE_SIZE;
}

static void note_page(struct pg_state *st, unsigned long addr, unsigned level,
				u64 val)
{
@@ -219,6 +251,8 @@ static void note_page(struct pg_state *st, unsigned long addr, unsigned level,
		unsigned long delta;

		if (st->current_prot) {
			note_prot_uxn(st, addr);
			note_prot_wx(st, addr);
			pt_dump_seq_printf(st->seq, "0x%016lx-0x%016lx ",
					   st->start_address, addr);

@@ -344,6 +378,26 @@ static struct ptdump_info kernel_ptdump_info = {
	.base_addr = VA_START,
};

void ptdump_check_wx(void)
{
	struct pg_state st = {
		.seq = NULL,
		.marker = (struct addr_marker[]) {
			{ 0, NULL},
			{ -1, NULL},
		},
		.check_wx = true,
	};

	walk_pgd(&st, &init_mm, 0);
	note_page(&st, 0, 0, 0);
	if (st.wx_pages || st.uxn_pages)
		pr_warn("Checked W+X mappings: FAILED, %lu W+X pages found, %lu non-UXN pages found\n",
			st.wx_pages, st.uxn_pages);
	else
		pr_info("Checked W+X mappings: passed, no W+X pages found\n");
}

static int ptdump_init(void)
{
	ptdump_initialize();
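
The flagging logic in note_prot_wx()/note_prot_uxn() reduces to two predicates over the accumulated protection bits: a range is reported as W+X when it is neither read-only (PTE_RDONLY clear) nor privileged-execute-never (PTE_PXN clear), and any kernel range without PTE_UXN is reported as well, since all kernel mappings should be user-execute-never. The following is a standalone userspace sketch of those predicates with made-up bit positions; the real encodings live in arch/arm64/include/asm/pgtable-hwdef.h.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative placeholders only -- not the real arm64 PTE encodings. */
#define FAKE_PTE_RDONLY	(1ULL << 7)
#define FAKE_PTE_PXN	(1ULL << 53)
#define FAKE_PTE_UXN	(1ULL << 54)

/* W+X: writable (not read-only) and kernel-executable (PXN clear). */
static bool is_wx(uint64_t prot)
{
	return !(prot & FAKE_PTE_RDONLY) && !(prot & FAKE_PTE_PXN);
}

/* Kernel mappings are expected to carry UXN (user-execute-never). */
static bool missing_uxn(uint64_t prot)
{
	return !(prot & FAKE_PTE_UXN);
}

int main(void)
{
	uint64_t rwx = FAKE_PTE_UXN;                    /* writable + kernel-executable */
	uint64_t rox = FAKE_PTE_RDONLY | FAKE_PTE_UXN;  /* read-only, kernel-executable */

	printf("rwx: wx=%d missing_uxn=%d\n", is_wx(rwx), missing_uxn(rwx));
	printf("rox: wx=%d missing_uxn=%d\n", is_wx(rox), missing_uxn(rox));
	return 0;
}

ptdump_check_wx() then just counts the pages in each flagged range ((addr - start_address) / PAGE_SIZE) and prints the pass/fail summary quoted in the Kconfig help text above.
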
3 changes: 3 additions & 0 deletions arch/arm64/mm/mmu.c
@@ -40,6 +40,7 @@
#include <asm/tlb.h>
#include <asm/memblock.h>
#include <asm/mmu_context.h>
#include <asm/ptdump.h>

u64 idmap_t0sz = TCR_T0SZ(VA_BITS);

@@ -438,6 +439,8 @@ void mark_rodata_ro(void)

	/* flush the TLBs after updating live kernel mappings */
	flush_tlb_all();

	debug_checkwx();
}

static void __init map_kernel_segment(pgd_t *pgd, void *va_start, void *va_end,
