@@ -227,8 +227,8 @@ static inline unsigned long kaslr_offset(void)
#define __untagged_addr(addr) \
((__force __typeof__(addr))sign_extend64((__force u64)(addr), 55))
-#define untagged_addr(addr) ({ \
- u64 __addr = (__force u64)(addr); \
+#define untagged_addr(mm, addr) ({ \
+ u64 __addr = (__force u64)(addr); \
__addr &= __untagged_addr(__addr); \
(__force __typeof__(addr))__addr; \
})
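For context: __untagged_addr() relies on sign_extend64() treating bit 55 as a sign bit, so AND-ing an address with its own sign extension clears the tag byte of user pointers (bit 55 clear) while leaving kernel pointers (bit 55 set) untouched. A minimal standalone sketch of the bit trick, with sign_extend64() open-coded and made-up example addresses (not part of this patch):

	#include <stdio.h>
	#include <stdint.h>
	#include <inttypes.h>

	/* Open-coded equivalent of the kernel's sign_extend64(). */
	static inline int64_t sign_extend64(uint64_t value, int index)
	{
		int shift = 63 - index;
		return (int64_t)(value << shift) >> shift;
	}

	int main(void)
	{
		uint64_t user = 0x2a007fffdeadb000ULL; /* tag 0x2a, bit 55 clear */
		uint64_t kern = 0xffff800012345678ULL; /* bit 55 set */

		/* user: sign extension is zero in bits 56-63, tag is stripped */
		printf("%016" PRIx64 "\n", user & sign_extend64(user, 55));
		/* kern: sign extension is all-ones, address is unchanged */
		printf("%016" PRIx64 "\n", kern & sign_extend64(kern, 55));
		return 0;
	}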
@@ -18,7 +18,7 @@ static inline void __user *arch_untagged_si_addr(void __user *addr,
if (sig == SIGTRAP && si_code == TRAP_BRKPT)
return addr;
- return untagged_addr(addr);
+ return untagged_addr(current->mm, addr);
}
#define arch_untagged_si_addr arch_untagged_si_addr
@@ -44,7 +44,7 @@ static inline int access_ok(const void __user *addr, unsigned long size)
*/
if (IS_ENABLED(CONFIG_ARM64_TAGGED_ADDR_ABI) &&
(current->flags & PF_KTHREAD || test_thread_flag(TIF_TAGGED_ADDR)))
- addr = untagged_addr(addr);
+ addr = untagged_addr(current->mm, addr);
return likely(__access_ok(addr, size));
}
@@ -715,7 +715,7 @@ static u64 get_distance_from_watchpoint(unsigned long addr, u64 val,
u64 wp_low, wp_high;
u32 lens, lene;
- addr = untagged_addr(addr);
+ addr = untagged_addr(current->mm, addr);
lens = __ffs(ctrl->len);
lene = __fls(ctrl->len);
@@ -477,7 +477,7 @@ void arm64_notify_segfault(unsigned long addr)
int code;
mmap_read_lock(current->mm);
- if (find_vma(current->mm, untagged_addr(addr)) == NULL)
+ if (find_vma(current->mm, untagged_addr(current->mm, addr)) == NULL)
code = SEGV_MAPERR;
else
code = SEGV_ACCERR;
@@ -551,7 +551,7 @@ static void user_cache_maint_handler(unsigned long esr, struct pt_regs *regs)
int ret = 0;
tagged_address = pt_regs_read_reg(regs, rt);
- address = untagged_addr(tagged_address);
+ address = untagged_addr(current->mm, tagged_address);
switch (crm) {
case ESR_ELx_SYS64_ISS_CRM_DC_CVAU: /* DC CVAU, gets promoted */
@@ -454,7 +454,7 @@ static void set_thread_esr(unsigned long address, unsigned long esr)
static void do_bad_area(unsigned long far, unsigned long esr,
struct pt_regs *regs)
{
- unsigned long addr = untagged_addr(far);
+ unsigned long addr = untagged_addr(current->mm, far);
/*
* If we are in kernel mode at this point, we have no context to
@@ -524,7 +524,7 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
vm_fault_t fault;
unsigned long vm_flags;
unsigned int mm_flags = FAULT_FLAG_DEFAULT;
- unsigned long addr = untagged_addr(far);
+ unsigned long addr = untagged_addr(mm, far);
if (kprobe_page_fault(regs, esr))
return 0;
@@ -679,7 +679,7 @@ static int __kprobes do_translation_fault(unsigned long far,
unsigned long esr,
struct pt_regs *regs)
{
- unsigned long addr = untagged_addr(far);
+ unsigned long addr = untagged_addr(current->mm, far);
if (is_ttbr0_addr(addr))
return do_page_fault(far, esr, regs);
@@ -726,7 +726,7 @@ static int do_sea(unsigned long far, unsigned long esr, struct pt_regs *regs)
* UNKNOWN for synchronous external aborts. Mask them out now
* so that userspace doesn't see them.
*/
- siaddr = untagged_addr(far);
+ siaddr = untagged_addr(current->mm, far);
}
arm64_notify_die(inf->name, regs, inf->sig, inf->code, siaddr, esr);
@@ -816,7 +816,7 @@ static const struct fault_info fault_info[] = {
void do_mem_abort(unsigned long far, unsigned long esr, struct pt_regs *regs)
{
const struct fault_info *inf = esr_to_fault_info(esr);
- unsigned long addr = untagged_addr(far);
+ unsigned long addr = untagged_addr(current->mm, far);
if (!inf->fn(far, esr, regs))
return;
@@ -1052,7 +1052,7 @@ static inline unsigned long __untagged_addr(unsigned long start)
return start;
}
-#define untagged_addr(addr) \
+#define untagged_addr(mm, addr) \
((__typeof__(addr))(__untagged_addr((unsigned long)(addr))))
static inline bool pte_access_permitted(pte_t pte, bool write)
@@ -8,8 +8,10 @@
#include <linux/compiler.h>
#include <linux/string.h>
+#include <linux/mm_types.h>
#include <asm/asi.h>
#include <asm/spitfire.h>
+#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm-generic/access_ok.h>
@@ -1659,7 +1659,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
if (flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
if (!offset || !*offset)
return -EINVAL;
- user_addr = untagged_addr(*offset);
+ user_addr = untagged_addr(current->mm, *offset);
} else if (flags & (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) {
bo_type = ttm_bo_type_sg;
@@ -382,7 +382,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
uint32_t handle;
int r;
- args->addr = untagged_addr(args->addr);
+ args->addr = untagged_addr(current->mm, args->addr);
if (offset_in_page(args->addr | args->size))
return -EINVAL;
@@ -371,7 +371,7 @@ int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data,
uint32_t handle;
int r;
- args->addr = untagged_addr(args->addr);
+ args->addr = untagged_addr(current->mm, args->addr);
if (offset_in_page(args->addr | args->size))
return -EINVAL;
@@ -379,7 +379,7 @@ static struct ib_umem *mlx4_get_umem_mr(struct ib_device *device, u64 start,
* again
*/
if (!ib_access_writable(access_flags)) {
- unsigned long untagged_start = untagged_addr(start);
+ unsigned long untagged_start = untagged_addr(current->mm, start);
struct vm_area_struct *vma;
mmap_read_lock(current->mm);
@@ -47,7 +47,7 @@ int get_vaddr_frames(unsigned long start, unsigned int nr_frames,
if (WARN_ON_ONCE(nr_frames > vec->nr_allocated))
nr_frames = vec->nr_allocated;
- start = untagged_addr(start);
+ start = untagged_addr(mm, start);
ret = pin_user_pages_fast(start, nr_frames,
FOLL_FORCE | FOLL_WRITE | FOLL_LONGTERM,
@@ -157,8 +157,8 @@ static void videobuf_dma_contig_user_put(struct videobuf_dma_contig_memory *mem)
static int videobuf_dma_contig_user_get(struct videobuf_dma_contig_memory *mem,
struct videobuf_buffer *vb)
{
- unsigned long untagged_baddr = untagged_addr(vb->baddr);
struct mm_struct *mm = current->mm;
+ unsigned long untagged_baddr = untagged_addr(mm, vb->baddr);
struct vm_area_struct *vma;
unsigned long prev_pfn, this_pfn;
unsigned long pages_done, user_address;
@@ -794,7 +794,7 @@ static int alloc_user_pages(struct hmm_buffer_object *bo,
* and map to user space
*/
- userptr = untagged_addr(userptr);
+ userptr = untagged_addr(current->mm, userptr);
if (vma->vm_flags & (VM_IO | VM_PFNMAP)) {
page_nr = pin_user_pages((unsigned long)userptr, bo->pgnr,
@@ -262,7 +262,7 @@ register_shm_helper(struct tee_context *ctx, unsigned long addr,
shm->flags = flags;
shm->ctx = ctx;
shm->id = id;
- addr = untagged_addr(addr);
+ addr = untagged_addr(current->mm, addr);
start = rounddown(addr, PAGE_SIZE);
shm->offset = addr - start;
shm->size = length;
@@ -573,7 +573,7 @@ static int vaddr_get_pfns(struct mm_struct *mm, unsigned long vaddr,
goto done;
}
- vaddr = untagged_addr(vaddr);
+ vaddr = untagged_addr(mm, vaddr);
retry:
vma = vma_lookup(mm, vaddr);
@@ -1685,7 +1685,7 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
/* watch out for wraparound */
start_vaddr = end_vaddr;
if (svpfn <= (ULONG_MAX >> PAGE_SHIFT))
- start_vaddr = untagged_addr(svpfn << PAGE_SHIFT);
+ start_vaddr = untagged_addr(mm, svpfn << PAGE_SHIFT);
/* Ensure the address is inside the task */
if (start_vaddr > mm->task_size)
@@ -95,17 +95,6 @@ extern int mmap_rnd_compat_bits __read_mostly;
#include <asm/page.h>
#include <asm/processor.h>
-/*
- * Architectures that support memory tagging (assigning tags to memory regions,
- * embedding these tags into addresses that point to these memory regions, and
- * checking that the memory and the pointer tags match on memory accesses)
- * redefine this macro to strip tags from pointers.
- * It's defined as noop for architectures that don't support memory tagging.
- */
-#ifndef untagged_addr
-#define untagged_addr(addr) (addr)
-#endif
-
#ifndef __pa_symbol
#define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x), 0))
#endif
@@ -10,6 +10,21 @@
#include <asm/uaccess.h>
+/*
+ * Architectures that support memory tagging (assigning tags to memory regions,
+ * embedding these tags into addresses that point to these memory regions, and
+ * checking that the memory and the pointer tags match on memory accesses)
+ * redefine this macro to strip tags from pointers.
+ *
+ * Passing down mm_struct makes it possible to define untagging rules
+ * on a per-process basis.
+ *
+ * It's defined as a no-op for architectures that don't support memory tagging.
+ */
+#ifndef untagged_addr
+#define untagged_addr(mm, addr) (addr)
+#endif
+
/*
* Architectures should provide two primitives (raw_copy_{to,from}_user())
* and get rid of their private instances of copy_{to,from}_user() and
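To illustrate why mm is now threaded through: an architecture can key its untagging rule off state stored in the mm. A hedged sketch of what such a per-mm definition can look like, modeled on the x86 LAM follow-up to this series; the context.untag_mask field and this exact form are assumptions, not part of this patch:

	#define untagged_addr(mm, addr)	({				\
		u64 __addr = (__force u64)(addr);			\
		s64 __sign = (s64)__addr >> 63;				\
		/* kernel pointers: __sign is all-ones, mask is a no-op */ \
		__addr &= (mm)->context.untag_mask | __sign;		\
		(__force __typeof__(addr))__addr;			\
	})

With this shape, callers that already hold an mm (get_user_pages(), do_madvise(), KVM's kvm->mm) untag against the right process even when current->mm differs.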
@@ -121,7 +121,7 @@ long strncpy_from_user(char *dst, const char __user *src, long count)
return 0;
max_addr = TASK_SIZE_MAX;
- src_addr = (unsigned long)untagged_addr(src);
+ src_addr = (unsigned long)untagged_addr(current->mm, src);
if (likely(src_addr < max_addr)) {
unsigned long max = max_addr - src_addr;
long retval;
@@ -97,7 +97,7 @@ long strnlen_user(const char __user *str, long count)
return 0;
max_addr = TASK_SIZE_MAX;
- src_addr = (unsigned long)untagged_addr(str);
+ src_addr = (unsigned long)untagged_addr(current->mm, str);
if (likely(src_addr < max_addr)) {
unsigned long max = max_addr - src_addr;
long retval;
@@ -1168,7 +1168,7 @@ static long __get_user_pages(struct mm_struct *mm,
if (!nr_pages)
return 0;
- start = untagged_addr(start);
+ start = untagged_addr(mm, start);
VM_BUG_ON(!!pages != !!(gup_flags & (FOLL_GET | FOLL_PIN)));
@@ -1342,7 +1342,7 @@ int fixup_user_fault(struct mm_struct *mm,
struct vm_area_struct *vma;
vm_fault_t ret;
- address = untagged_addr(address);
+ address = untagged_addr(mm, address);
if (unlocked)
fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
@@ -3027,7 +3027,7 @@ static int internal_get_user_pages_fast(unsigned long start,
if (!(gup_flags & FOLL_FAST_ONLY))
might_lock_read(&current->mm->mmap_lock);
- start = untagged_addr(start) & PAGE_MASK;
+ start = untagged_addr(current->mm, start) & PAGE_MASK;
len = nr_pages << PAGE_SHIFT;
if (check_add_overflow(start, len, &end))
return 0;
@@ -1382,7 +1382,7 @@ int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, int beh
size_t len;
struct blk_plug plug;
- start = untagged_addr(start);
+ start = untagged_addr(mm, start);
if (!madvise_behavior_valid(behavior))
return -EINVAL;
@@ -1467,7 +1467,7 @@ static long kernel_mbind(unsigned long start, unsigned long len,
int lmode = mode;
int err;
- start = untagged_addr(start);
+ start = untagged_addr(current->mm, start);
err = sanitize_mpol_flags(&lmode, &mode_flags);
if (err)
return err;
@@ -1491,7 +1491,7 @@ SYSCALL_DEFINE4(set_mempolicy_home_node, unsigned long, start, unsigned long, le
int err = -ENOENT;
VMA_ITERATOR(vmi, mm, start);
- start = untagged_addr(start);
+ start = untagged_addr(mm, start);
if (start & ~PAGE_MASK)
return -EINVAL;
/*
@@ -1692,7 +1692,7 @@ static int kernel_get_mempolicy(int __user *policy,
if (nmask != NULL && maxnode < nr_node_ids)
return -EINVAL;
- addr = untagged_addr(addr);
+ addr = untagged_addr(current->mm, addr);
err = do_get_mempolicy(&pval, &nodes, addr, flags);
@@ -1795,7 +1795,7 @@ static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
goto out_flush;
if (get_user(node, nodes + i))
goto out_flush;
- addr = (unsigned long)untagged_addr(p);
+ addr = (unsigned long)untagged_addr(mm, p);
err = -ENODEV;
if (node < 0 || node >= MAX_NUMNODES)
@@ -236,7 +236,7 @@ SYSCALL_DEFINE3(mincore, unsigned long, start, size_t, len,
unsigned long pages;
unsigned char *tmp;
- start = untagged_addr(start);
+ start = untagged_addr(current->mm, start);
/* Check the start address: needs to be page-aligned.. */
if (start & ~PAGE_MASK)
@@ -570,7 +570,7 @@ static __must_check int do_mlock(unsigned long start, size_t len, vm_flags_t fla
unsigned long lock_limit;
int error = -ENOMEM;
- start = untagged_addr(start);
+ start = untagged_addr(current->mm, start);
if (!can_do_mlock())
return -EPERM;
@@ -633,7 +633,7 @@ SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
{
int ret;
- start = untagged_addr(start);
+ start = untagged_addr(current->mm, start);
len = PAGE_ALIGN(len + (offset_in_page(start)));
start &= PAGE_MASK;
@@ -2796,7 +2796,7 @@ EXPORT_SYMBOL(vm_munmap);
SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
{
- addr = untagged_addr(addr);
+ addr = untagged_addr(current->mm, addr);
return __vm_munmap(addr, len, true);
}
@@ -680,7 +680,7 @@ static int do_mprotect_pkey(unsigned long start, size_t len,
struct mmu_gather tlb;
MA_STATE(mas, &current->mm->mm_mt, 0, 0);
- start = untagged_addr(start);
+ start = untagged_addr(current->mm, start);
prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP);
if (grows == (PROT_GROWSDOWN|PROT_GROWSUP)) /* can't be both */
@@ -909,7 +909,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
*
* See Documentation/arm64/tagged-address-abi.rst for more information.
*/
- addr = untagged_addr(addr);
+ addr = untagged_addr(mm, addr);
if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE | MREMAP_DONTUNMAP))
return ret;
@@ -37,7 +37,7 @@ SYSCALL_DEFINE3(msync, unsigned long, start, size_t, len, int, flags)
int unmapped_error = 0;
int error = -EINVAL;
- start = untagged_addr(start);
+ start = untagged_addr(mm, start);
if (flags & ~(MS_ASYNC | MS_INVALIDATE | MS_SYNC))
goto out;
@@ -1945,7 +1945,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
return -EINVAL;
/* We can read the guest memory with __xxx_user() later on. */
if ((mem->userspace_addr & (PAGE_SIZE - 1)) ||
- (mem->userspace_addr != untagged_addr(mem->userspace_addr)) ||
+ (mem->userspace_addr != untagged_addr(kvm->mm, mem->userspace_addr)) ||
!access_ok((void __user *)(unsigned long)mem->userspace_addr,
mem->memory_size))
return -EINVAL;