Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Cve 2017 5754 #2048

Merged
merged 13 commits into from
Jan 10, 2018
Merged
6 changes: 6 additions & 0 deletions core/arch/arm/arm.mk
Original file line number Diff line number Diff line change
Expand Up @@ -49,6 +49,12 @@ ifeq ($(CFG_CORE_LARGE_PHYS_ADDR),y)
$(call force,CFG_WITH_LPAE,y)
endif

# Unmaps all kernel mode code except the code needed to take exceptions
# from user space and restore kernel mode mapping again. This gives more
# strict control over what is accessible while in user mode.
# Addresses CVE-2017-5754 (aka Meltdown) known to affect Arm Cortex-A75
CFG_CORE_UNMAP_CORE_AT_EL0 ?= y

ifeq ($(CFG_ARM32_core),y)
# Configuration directive related to ARMv7 optee boot arguments.
# CFG_PAGEABLE_ADDR: if defined, forces pageable data physical address.
Expand Down
7 changes: 7 additions & 0 deletions core/arch/arm/include/kernel/thread.h
Original file line number Diff line number Diff line change
Expand Up @@ -517,6 +517,13 @@ void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
vaddr_t thread_get_saved_thread_sp(void);
#endif /*ARM64*/

/*
* Provides addresses and size of kernel code that must be mapped while in
* user mode.
*/
void thread_get_user_kcode(struct mobj **mobj, size_t *offset,
vaddr_t *va, size_t *sz);

/*
* Returns the start address (bottom) of the stack for the current thread,
* zero if there is no current thread.
Expand Down
1 change: 0 additions & 1 deletion core/arch/arm/include/kernel/user_ta.h
Original file line number Diff line number Diff line change
Expand Up @@ -55,7 +55,6 @@ struct user_ta_ctx {
struct mobj *mobj_code; /* secure world memory */
struct mobj *mobj_stack; /* stack */
uint32_t load_addr; /* elf load addr (from TAs address space) */
uint32_t context; /* Context ID of the process */
struct tee_mmu_info *mmu; /* Saved MMU information (ddr only) */
void *ta_time_offs; /* Time reference used by the TA */
struct tee_pager_area_head *areas;
Expand Down
19 changes: 18 additions & 1 deletion core/arch/arm/include/mm/core_mmu.h
Original file line number Diff line number Diff line change
Expand Up @@ -28,13 +28,16 @@
#ifndef CORE_MMU_H
#define CORE_MMU_H

#ifndef ASM
#include <assert.h>
#include <compiler.h>
#include <kernel/user_ta.h>
#include <mm/tee_mmu_types.h>
#include <platform_config.h>
#include <types_ext.h>
#include <util.h>
#endif

#include <platform_config.h>

/* A small page is the smallest unit of memory that can be mapped */
#define SMALL_PAGE_SHIFT 12
Expand Down Expand Up @@ -74,6 +77,17 @@
#define CFG_TEE_RAM_VA_SIZE CORE_MMU_PGDIR_SIZE
#endif

/*
* CORE_MMU_L1_TBL_OFFSET is used when switching to/from reduced kernel
* mapping. The actual value depends on internals in core_mmu_lpae.c and
core_mmu_v7.c, which we'd rather not expose here. There's a compile-time
* assertion to check that these magic numbers are correct.
*/
#ifdef CFG_WITH_LPAE
#define CORE_MMU_L1_TBL_OFFSET (CFG_TEE_CORE_NB_CORE * 4 * 8)
#else
#define CORE_MMU_L1_TBL_OFFSET (4096 * 4)
#endif
/*
* TEE_RAM_VA_START: The start virtual address of the TEE RAM
* TEE_TEXT_VA_START: The start virtual address of the OP-TEE text
Expand All @@ -90,6 +104,7 @@
#define STACK_ALIGNMENT (sizeof(long) * 2)
#endif

#ifndef ASM
/*
* Memory area type:
* MEM_AREA_END: Reserved, marks the end of a table of mapping areas.
Expand Down Expand Up @@ -556,4 +571,6 @@ void core_mmu_set_discovered_nsec_ddr(struct core_mmu_phys_mem *start,
struct mobj **core_sdp_mem_create_mobjs(void);
#endif

#endif /*ASM*/

#endif /* CORE_MMU_H */
1 change: 1 addition & 0 deletions core/arch/arm/include/mm/mobj.h
Original file line number Diff line number Diff line change
Expand Up @@ -56,6 +56,7 @@ struct mobj_ops {

extern struct mobj mobj_virt;
extern struct mobj *mobj_sec_ddr;
extern struct mobj *mobj_tee_ram;

static inline void *mobj_get_va(struct mobj *mobj, size_t offset)
{
Expand Down
1 change: 1 addition & 0 deletions core/arch/arm/kernel/asm-defines.c
Original file line number Diff line number Diff line change
Expand Up @@ -68,6 +68,7 @@ DEFINES
DEFINE(THREAD_SMC_ARGS_SIZE, sizeof(struct thread_smc_args));

DEFINE(THREAD_SVC_REG_X0, offsetof(struct thread_svc_regs, x0));
DEFINE(THREAD_SVC_REG_X2, offsetof(struct thread_svc_regs, x2));
DEFINE(THREAD_SVC_REG_X5, offsetof(struct thread_svc_regs, x5));
DEFINE(THREAD_SVC_REG_X6, offsetof(struct thread_svc_regs, x6));
DEFINE(THREAD_SVC_REG_X30, offsetof(struct thread_svc_regs, x30));
Expand Down
37 changes: 37 additions & 0 deletions core/arch/arm/kernel/thread.c
Original file line number Diff line number Diff line change
Expand Up @@ -147,6 +147,11 @@ thread_pm_handler_t thread_cpu_resume_handler_ptr;
thread_pm_handler_t thread_system_off_handler_ptr;
thread_pm_handler_t thread_system_reset_handler_ptr;

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
static vaddr_t thread_user_kcode_va;
long thread_user_kcode_offset;
static size_t thread_user_kcode_size;
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/

static unsigned int thread_global_lock = SPINLOCK_UNLOCK;
static bool thread_prealloc_rpc_cache;
Expand Down Expand Up @@ -883,6 +888,25 @@ static void init_thread_stacks(void)
}
#endif /*CFG_WITH_PAGER*/

/*
 * Records the kernel code region that must stay mapped while executing
 * in user mode: the page-aligned area containing the exception vector,
 * its size, and its offset relative to the user VA range.
 */
static void init_user_kcode(void)
{
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	vaddr_t vect_va = (vaddr_t)thread_vect_table;
	vaddr_t user_va_base = 0;

	thread_user_kcode_va = ROUNDDOWN(vect_va, CORE_MMU_USER_CODE_SIZE);
	/*
	 * The maximum size of the exception vector and associated code is
	 * something slightly larger than 2 KiB, so in the worst case it
	 * straddles a page boundary and spans two pages.
	 */
	thread_user_kcode_size = 2 * CORE_MMU_USER_CODE_SIZE;

	core_mmu_get_user_va_range(&user_va_base, NULL);
	thread_user_kcode_offset = thread_user_kcode_va - user_va_base;
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
}

void thread_init_primary(const struct thread_handlers *handlers)
{
init_handlers(handlers);
Expand All @@ -892,6 +916,8 @@ void thread_init_primary(const struct thread_handlers *handlers)

init_thread_stacks();
pgt_init();

init_user_kcode();
}

static void init_sec_mon(size_t pos __maybe_unused)
Expand Down Expand Up @@ -1144,6 +1170,17 @@ uint32_t thread_enter_user_mode(unsigned long a0, unsigned long a1,
spsr, exit_status0, exit_status1);
}

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
/*
 * Reports the kernel code that must be mapped while in user mode:
 * the backing mobj (TEE RAM), the offset of the code within that mobj,
 * the virtual address where it is mapped in the user range, and its size.
 */
void thread_get_user_kcode(struct mobj **mobj, size_t *offset,
			   vaddr_t *va, size_t *sz)
{
	*mobj = mobj_tee_ram;
	*sz = thread_user_kcode_size;
	/* Offset of the kcode area relative to the start of TEE RAM */
	*offset = thread_user_kcode_va - CFG_TEE_RAM_START;
	core_mmu_get_user_va_range(va, NULL);
}
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/

void thread_add_mutex(struct mutex *m)
{
struct thread_core_local *l = thread_get_core_local();
Expand Down
Loading