Skip to content

Commit

Permalink
x86/uaccess: Provide untagged_addr() and remove tags before address check
Browse files Browse the repository at this point in the history

untagged_addr() is a helper used by the core-mm to strip tag bits and
get the address to the canonical shape based on rules of the current
thread. It only handles userspace addresses.

The untagging mask is stored in per-CPU variable and set on context
switching to the task.

The tags must not be included into check whether it's okay to access the
userspace address. Strip tags in access_ok().

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Signed-off-by: Dave Hansen <[email protected]>
Acked-by: Peter Zijlstra (Intel) <[email protected]>
Tested-by: Alexander Potapenko <[email protected]>
Link: https://lore.kernel.org/all/20230312112612.31869-7-kirill.shutemov%40linux.intel.com
  • Loading branch information
kiryl authored and hansendc committed Mar 16, 2023
1 parent 428e106 commit 74c228d
Show file tree
Hide file tree
Showing 6 changed files with 69 additions and 2 deletions.
3 changes: 3 additions & 0 deletions arch/x86/include/asm/mmu.h
Original file line number Diff line number Diff line change
Expand Up @@ -45,6 +45,9 @@ typedef struct {
#ifdef CONFIG_ADDRESS_MASKING
/* Active LAM mode: X86_CR3_LAM_U48 or X86_CR3_LAM_U57 or 0 (disabled) */
unsigned long lam_cr3_mask;

/* Significant bits of the virtual address. Excludes tag bits. */
u64 untag_mask;
#endif

struct mutex lock;
Expand Down
11 changes: 11 additions & 0 deletions arch/x86/include/asm/mmu_context.h
Original file line number Diff line number Diff line change
Expand Up @@ -101,6 +101,12 @@ static inline unsigned long mm_lam_cr3_mask(struct mm_struct *mm)
static inline void dup_lam(struct mm_struct *oldmm, struct mm_struct *mm)
{
	/* A new mm inherits the parent's LAM setup: untag mask and CR3 bits */
	mm->context.untag_mask = oldmm->context.untag_mask;
	mm->context.lam_cr3_mask = oldmm->context.lam_cr3_mask;
}

static inline void mm_reset_untag_mask(struct mm_struct *mm)
{
	/* All address bits significant: tag stripping is a no-op until LAM is enabled */
	mm->context.untag_mask = ~0UL;
}

#else
Expand All @@ -113,6 +119,10 @@ static inline unsigned long mm_lam_cr3_mask(struct mm_struct *mm)
/* Stub for !CONFIG_ADDRESS_MASKING: no LAM state exists to duplicate */
static inline void dup_lam(struct mm_struct *oldmm, struct mm_struct *mm)
{
}

/* Stub for !CONFIG_ADDRESS_MASKING: there is no untag mask to reset */
static inline void mm_reset_untag_mask(struct mm_struct *mm)
{
}
#endif

#define enter_lazy_tlb enter_lazy_tlb
Expand All @@ -139,6 +149,7 @@ static inline int init_new_context(struct task_struct *tsk,
mm->context.execute_only_pkey = -1;
}
#endif
mm_reset_untag_mask(mm);
init_new_context_ldt(mm);
return 0;
}
Expand Down
10 changes: 10 additions & 0 deletions arch/x86/include/asm/tlbflush.h
Original file line number Diff line number Diff line change
Expand Up @@ -54,6 +54,15 @@ static inline void cr4_clear_bits(unsigned long mask)
local_irq_restore(flags);
}

#ifdef CONFIG_ADDRESS_MASKING
DECLARE_PER_CPU(u64, tlbstate_untag_mask);

/*
 * Untag mask of the task currently running on this CPU.  The per-CPU
 * copy is written by set_tlbstate_lam_mode() when switching to the mm,
 * so it can be read without taking any mm lock.
 */
static inline u64 current_untag_mask(void)
{
return this_cpu_read(tlbstate_untag_mask);
}
#endif

#ifndef MODULE
/*
* 6 because 6 should be plenty and struct tlb_state will fit in two cache
Expand Down Expand Up @@ -380,6 +389,7 @@ static inline void set_tlbstate_lam_mode(struct mm_struct *mm)
{
this_cpu_write(cpu_tlbstate.lam,
mm->context.lam_cr3_mask >> X86_CR3_LAM_U57_BIT);
this_cpu_write(tlbstate_untag_mask, mm->context.untag_mask);
}

#else
Expand Down
39 changes: 37 additions & 2 deletions arch/x86/include/asm/uaccess.h
Original file line number Diff line number Diff line change
Expand Up @@ -7,11 +7,13 @@
#include <linux/compiler.h>
#include <linux/instrumented.h>
#include <linux/kasan-checks.h>
#include <linux/mm_types.h>
#include <linux/string.h>
#include <asm/asm.h>
#include <asm/page.h>
#include <asm/smap.h>
#include <asm/extable.h>
#include <asm/tlbflush.h>

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
static inline bool pagefault_disabled(void);
Expand All @@ -21,6 +23,39 @@ static inline bool pagefault_disabled(void);
# define WARN_ON_IN_IRQ()
#endif

#ifdef CONFIG_ADDRESS_MASKING
/*
* Mask out tag bits from the address.
*
* Magic with the 'sign' allows to untag userspace pointer without any branches
* while leaving kernel addresses intact.
*/
/*
 * Strip tag bits from @addr according to @mask, branch-free.
 *
 * @addr: pointer value to untag (user or kernel)
 * @mask: significant-address-bits mask (tag bits clear)
 *
 * Returns @addr & @mask for userspace addresses (bit 63 clear) and
 * @addr unchanged for kernel addresses (bit 63 set).
 */
static inline unsigned long __untagged_addr(unsigned long addr,
					    unsigned long mask)
{
	/*
	 * Arithmetic shift of the sign bit: 0 for user addresses,
	 * all-ones for kernel addresses, so "mask | sign" turns the AND
	 * into a no-op for kernel pointers.  The shift must be done on a
	 * *signed* value: the previous "addr >> 63" was a logical shift
	 * of an unsigned quantity, producing 0/1 and clobbering the high
	 * bits of kernel addresses.  (Right shift of a negative value is
	 * implementation-defined in ISO C; GCC/Clang, which the kernel
	 * requires, define it as arithmetic.)
	 */
	long sign = (long)addr >> 63;

	addr &= mask | sign;
	return addr;
}

/*
 * Strip tag bits from a userspace pointer owned by the *current* task,
 * using the per-CPU copy of the task's untag mask.  Kernel pointers are
 * left intact (see __untagged_addr()).
 */
#define untagged_addr(addr) ({ \
u64 __addr = (__force u64)(addr); \
__addr = __untagged_addr(__addr, current_untag_mask()); \
(__force __typeof__(addr))__addr; \
})

/*
 * Strip tag bits from a userspace pointer belonging to another task's
 * @mm.  Reads the mask from the mm itself, so the caller must hold
 * mmap_lock to keep it stable — enforced by mmap_assert_locked().
 */
#define untagged_addr_remote(mm, addr) ({ \
u64 __addr = (__force u64)(addr); \
mmap_assert_locked(mm); \
__addr = __untagged_addr(__addr, (mm)->context.untag_mask); \
(__force __typeof__(addr))__addr; \
})

#else
/* !CONFIG_ADDRESS_MASKING: pointers carry no tag bits, nothing to strip */
#define untagged_addr(addr) (addr)
#endif

/**
* access_ok - Checks if a user space pointer is valid
* @addr: User space pointer to start of block to check
Expand All @@ -38,10 +73,10 @@ static inline bool pagefault_disabled(void);
* Return: true (nonzero) if the memory block may be valid, false (zero)
* if it is definitely invalid.
*/
#define access_ok(addr, size) \
#define access_ok(addr, size) \
({ \
WARN_ON_IN_IRQ(); \
likely(__access_ok(addr, size)); \
likely(__access_ok(untagged_addr(addr), size)); \
})

#include <asm-generic/access_ok.h>
Expand Down
3 changes: 3 additions & 0 deletions arch/x86/kernel/process.c
Original file line number Diff line number Diff line change
Expand Up @@ -48,6 +48,7 @@
#include <asm/frame.h>
#include <asm/unwind.h>
#include <asm/tdx.h>
#include <asm/mmu_context.h>

#include "process.h"

Expand Down Expand Up @@ -368,6 +369,8 @@ void arch_setup_new_exec(void)
task_clear_spec_ssb_noexec(current);
speculation_ctrl_update(read_thread_flags());
}

mm_reset_untag_mask(current->mm);
}

#ifdef CONFIG_X86_IOPL_IOPERM
Expand Down
5 changes: 5 additions & 0 deletions arch/x86/mm/init.c
Original file line number Diff line number Diff line change
Expand Up @@ -1048,6 +1048,11 @@ __visible DEFINE_PER_CPU_ALIGNED(struct tlb_state, cpu_tlbstate) = {
.cr4 = ~0UL, /* fail hard if we screw up cr4 shadow initialization */
};

#ifdef CONFIG_ADDRESS_MASKING
DEFINE_PER_CPU(u64, tlbstate_untag_mask);
EXPORT_PER_CPU_SYMBOL(tlbstate_untag_mask);
#endif

void update_cache_mode_entry(unsigned entry, enum page_cache_mode cache)
{
/* entry 0 MUST be WB (hardwired to speed up translations) */
Expand Down

0 comments on commit 74c228d

Please sign in to comment.