@@ -158,7 +158,7 @@ static inline int
read_int(struct task_struct *task, unsigned long addr, int * data)
{
int copied = access_process_vm(task, addr, data, sizeof(int),
- FOLL_FORCE);
+ FOLL_PTRACE);
return (copied == sizeof(int)) ? 0 : -EIO;
}
@@ -166,7 +166,7 @@ static inline int
write_int(struct task_struct *task, unsigned long addr, int data)
{
int copied = access_process_vm(task, addr, &data, sizeof(int),
- FOLL_FORCE | FOLL_WRITE);
+ FOLL_PTRACE | FOLL_WRITE);
return (copied == sizeof(int)) ? 0 : -EIO;
}
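
Both helpers collapse a short copy from access_process_vm() into -EIO, so a caller can chain a read and a write without re-checking byte counts; a minimal sketch (the helper name and call site are hypothetical, not part of this patch):

/* Hypothetical caller: read a word from the tracee, bump it, write it
 * back. Any short copy surfaces as -EIO from the helpers above. */
static int bump_remote_int(struct task_struct *task, unsigned long addr)
{
	int val;
	int ret = read_int(task, addr, &val);

	if (ret)
		return ret;
	return write_int(task, addr, val + 1);
}
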
@@ -284,7 +284,7 @@ long arch_ptrace(struct task_struct *child, long request,
case PTRACE_PEEKTEXT: /* read word at location addr. */
case PTRACE_PEEKDATA:
copied = ptrace_access_vm(child, addr, &tmp, sizeof(tmp),
- FOLL_FORCE);
+ FOLL_PTRACE);
ret = -EIO;
if (copied != sizeof(tmp))
break;
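
From userspace this path is reached via PTRACE_PEEKDATA; a hedged, self-contained sketch of a tracer exercising it (the parent reads one word out of a stopped child):

#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/wait.h>
#include <unistd.h>

static long word = 0x42;	/* same address in parent and child */

int main(void)
{
	pid_t pid = fork();

	if (pid == 0) {				/* tracee */
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		raise(SIGSTOP);
		_exit(0);
	}
	waitpid(pid, NULL, 0);			/* wait for the stop */
	errno = 0;
	long val = ptrace(PTRACE_PEEKDATA, pid, &word, NULL);
	if (val == -1 && errno)
		perror("PTRACE_PEEKDATA");	/* kernel's -EIO shows up here */
	else
		printf("peeked 0x%lx\n", val);
	kill(pid, SIGKILL);
	return 0;
}
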
@@ -525,7 +525,7 @@ int mte_ptrace_copy_tags(struct task_struct *child, long request,
int ret;
struct iovec kiov;
struct iovec __user *uiov = (void __user *)data;
- unsigned int gup_flags = FOLL_FORCE;
+ unsigned int gup_flags = FOLL_PTRACE;
if (!system_supports_mte())
return -EIO;
@@ -452,7 +452,7 @@ ia64_peek (struct task_struct *child, struct switch_stack *child_stack,
return 0;
}
}
- copied = access_process_vm(child, addr, &ret, sizeof(ret), FOLL_FORCE);
+ copied = access_process_vm(child, addr, &ret, sizeof(ret), FOLL_PTRACE);
if (copied != sizeof(ret))
return -EIO;
*val = ret;
@@ -489,7 +489,7 @@ ia64_poke (struct task_struct *child, struct switch_stack *child_stack,
}
}
} else if (access_process_vm(child, addr, &val, sizeof(val),
- FOLL_FORCE | FOLL_WRITE)
+ FOLL_PTRACE | FOLL_WRITE)
!= sizeof(val))
return -EIO;
return 0;
@@ -544,7 +544,7 @@ ia64_sync_user_rbs (struct task_struct *child, struct switch_stack *sw,
if (ret < 0)
return ret;
if (access_process_vm(child, addr, &val, sizeof(val),
- FOLL_FORCE | FOLL_WRITE)
+ FOLL_PTRACE | FOLL_WRITE)
!= sizeof(val))
return -EIO;
}
@@ -561,7 +561,7 @@ ia64_sync_kernel_rbs (struct task_struct *child, struct switch_stack *sw,
/* now copy word for word from user rbs to kernel rbs: */
for (addr = user_rbs_start; addr < user_rbs_end; addr += 8) {
if (access_process_vm(child, addr, &val, sizeof(val),
- FOLL_FORCE)
+ FOLL_PTRACE)
!= sizeof(val))
return -EIO;
@@ -1105,7 +1105,7 @@ arch_ptrace (struct task_struct *child, long request,
case PTRACE_PEEKDATA:
/* read word at location addr */
if (ptrace_access_vm(child, addr, &data, sizeof(data),
- FOLL_FORCE)
+ FOLL_PTRACE)
!= sizeof(data))
return -EIO;
/* ensure return value is not mistaken for error code */
@@ -71,7 +71,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
break;
copied = ptrace_access_vm(child, (u64)addrOthers, &tmp,
- sizeof(tmp), FOLL_FORCE);
+ sizeof(tmp), FOLL_PTRACE);
if (copied != sizeof(tmp))
break;
ret = put_user(tmp, (u32 __user *) (unsigned long) data);
@@ -185,7 +185,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
ret = 0;
if (ptrace_access_vm(child, (u64)addrOthers, &data,
sizeof(data),
- FOLL_FORCE | FOLL_WRITE) == sizeof(data))
+ FOLL_PTRACE | FOLL_WRITE) == sizeof(data))
break;
ret = -EIO;
break;
@@ -271,7 +271,7 @@ int mips_dsemul(struct pt_regs *regs, mips_instruction ir,
/* Write the frame to user memory */
fr_uaddr = (unsigned long)&dsemul_page()[fr_idx];
ret = access_process_vm(current, fr_uaddr, &fr, sizeof(fr),
- FOLL_FORCE | FOLL_WRITE);
+ FOLL_PTRACE | FOLL_WRITE);
if (unlikely(ret != sizeof(fr))) {
MIPS_FPU_EMU_INC_STATS(errors);
free_emuframe(fr_idx, current->mm);
@@ -65,7 +65,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
break;
copied = ptrace_access_vm(child, (u64)addrOthers, &tmp,
- sizeof(tmp), FOLL_FORCE);
+ sizeof(tmp), FOLL_PTRACE);
if (copied != sizeof(tmp))
break;
ret = put_user(tmp, (u32 __user *)data);
@@ -169,7 +169,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
ret = 0;
if (ptrace_access_vm(child, (u64)addrOthers, &tmp,
sizeof(tmp),
- FOLL_FORCE | FOLL_WRITE) == sizeof(tmp))
+ FOLL_PTRACE | FOLL_WRITE) == sizeof(tmp))
break;
ret = -EIO;
break;
@@ -56,7 +56,7 @@ static int regwindow32_get(struct task_struct *target,
return -EFAULT;
} else {
if (access_process_vm(target, reg_window, uregs, size,
- FOLL_FORCE) != size)
+ FOLL_PTRACE) != size)
return -EFAULT;
}
return 0;
@@ -74,7 +74,7 @@ static int regwindow32_set(struct task_struct *target,
return -EFAULT;
} else {
if (access_process_vm(target, reg_window, uregs, size,
- FOLL_FORCE | FOLL_WRITE) != size)
+ FOLL_PTRACE | FOLL_WRITE) != size)
return -EFAULT;
}
return 0;
@@ -165,7 +165,7 @@ static int get_from_target(struct task_struct *target, unsigned long uaddr,
return -EFAULT;
} else {
int len2 = access_process_vm(target, uaddr, kbuf, len,
- FOLL_FORCE);
+ FOLL_PTRACE);
if (len2 != len)
return -EFAULT;
}
@@ -180,7 +180,7 @@ static int set_to_target(struct task_struct *target, unsigned long uaddr,
return -EFAULT;
} else {
int len2 = access_process_vm(target, uaddr, kbuf, len,
- FOLL_FORCE | FOLL_WRITE);
+ FOLL_PTRACE | FOLL_WRITE);
if (len2 != len)
return -EFAULT;
}
@@ -592,7 +592,7 @@ static int genregs32_set(struct task_struct *target,
&reg_window[pos],

(void *) k,
sizeof(*k),
- FOLL_FORCE | FOLL_WRITE)
+ FOLL_PTRACE | FOLL_WRITE)
!= sizeof(*k))
return -EFAULT;
k++;
@@ -622,7 +622,7 @@ static int genregs32_set(struct task_struct *target,
(unsigned long)
&reg_window[pos],
&reg, sizeof(reg),
- FOLL_FORCE | FOLL_WRITE)
+ FOLL_PTRACE | FOLL_WRITE)
!= sizeof(reg))
return -EFAULT;
pos++;
@@ -60,7 +60,7 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
unsigned long addr = convert_ip_to_linear(child, regs);
copied = access_process_vm(child, addr, opcode, sizeof(opcode),
- FOLL_FORCE);
+ FOLL_PTRACE);
for (i = 0; i < copied; i++) {
switch (opcode[i]) {
/* popf and iret */
@@ -38,7 +38,7 @@ int is_syscall(unsigned long addr)
* in case of singlestepping, if copy_from_user failed.
*/
n = access_process_vm(current, addr, &instr, sizeof(instr),
- FOLL_FORCE);
+ FOLL_PTRACE);
if (n != sizeof(instr)) {
printk(KERN_ERR "is_syscall : failed to read "
"instruction from 0x%lx\n", addr);
@@ -203,7 +203,7 @@ int is_syscall(unsigned long addr)
* in case of singlestepping, if copy_from_user failed.
*/
n = access_process_vm(current, addr, &instr, sizeof(instr),
- FOLL_FORCE);
+ FOLL_PTRACE);
if (n != sizeof(instr)) {
printk("is_syscall : failed to read instruction from "
"0x%lx\n", addr);
@@ -199,7 +199,7 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
{
struct page *page;
int ret;
- unsigned int gup_flags = FOLL_FORCE;
+ unsigned int gup_flags = FOLL_PTRACE;
#ifdef CONFIG_STACK_GROWSUP
if (write) {
@@ -854,7 +854,7 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
if (!mmget_not_zero(mm))
goto free;
- flags = FOLL_FORCE | (write ? FOLL_WRITE : 0);
+ flags = FOLL_PTRACE | (write ? FOLL_WRITE : 0);
while (count > 0) {
size_t this_len = min_t(size_t, count, PAGE_SIZE);
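
The same flag now gates the /proc/<pid>/mem read/write path; for reference, a hedged userspace sketch that goes through mem_rw() by pread()ing the process's own memory:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	static const char secret[] = "hello";
	char buf[sizeof(secret)] = "";
	int fd = open("/proc/self/mem", O_RDONLY);

	if (fd < 0 || pread(fd, buf, sizeof(buf),
			    (off_t)(uintptr_t)secret) < 0) {
		perror("/proc/self/mem");
		return 1;
	}
	printf("read back: %s\n", buf);
	close(fd);
	return 0;
}
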
@@ -2999,7 +2999,7 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
#define FOLL_TOUCH 0x02 /* mark page accessed */
#define FOLL_GET 0x04 /* do get_page on page */
#define FOLL_DUMP 0x08 /* give error on hole if it would be zero */
-#define FOLL_FORCE 0x10 /* get_user_pages read/write w/o permission */
+#define FOLL_PTRACE 0x10 /* get_user_pages read/write w/o permission */
#define FOLL_NOWAIT 0x20 /* if a disk transfer is needed, start the IO
* and return without waiting upon it */
#define FOLL_NOFAULT 0x80 /* do not fault in pages */
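
Callers treat these as plain bit flags, so after the rename a typical composition mirrors the mem_rw() hunk above; a hedged fragment for an optionally-writing remote access:

/* Fragment: build gup_flags for a remote access that must make
 * progress even on protection-restricted (e.g. read-only COW) VMAs. */
unsigned int gup_flags = FOLL_PTRACE;

if (write)
	gup_flags |= FOLL_WRITE;
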
@@ -3151,12 +3151,12 @@ static inline bool gup_must_unshare(struct vm_area_struct *vma,
static inline bool gup_can_follow_protnone(unsigned int flags)
{
/*
- * FOLL_FORCE has to be able to make progress even if the VMA is
- * inaccessible. Further, FOLL_FORCE access usually does not represent
+ * FOLL_PTRACE has to be able to make progress even if the VMA is
+ * inaccessible. Further, FOLL_PTRACE access usually does not represent
* application behaviour and we should avoid triggering NUMA hinting
* faults.
*/
- return flags & FOLL_FORCE;
+ return flags & FOLL_PTRACE;
}
typedef int (*pte_fn_t)(pte_t *pte, unsigned long addr, void *data);
@@ -466,7 +466,7 @@ int uprobe_write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm,
struct vm_area_struct *vma;
int ret, is_register, ref_ctr_updated = 0;
bool orig_page_huge = false;
- unsigned int gup_flags = FOLL_FORCE;
+ unsigned int gup_flags = FOLL_PTRACE;
is_register = is_swbp_insn(&opcode);
uprobe = container_of(auprobe, struct uprobe, arch);
@@ -2028,7 +2028,7 @@ static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
* but we treat this as a 'remote' access since it is
* essentially a kernel access to the memory.
*/
- result = get_user_pages_remote(mm, vaddr, 1, FOLL_FORCE, &page,
+ result = get_user_pages_remote(mm, vaddr, 1, FOLL_PTRACE, &page,
NULL, NULL);
if (result < 0)
return result;
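
The call above also shows the full get_user_pages_remote() signature in this tree (mm, start, nr_pages, gup_flags, pages, vmas, locked); a hedged kernel-side sketch of the usual pin/map/copy/put pattern around it (the helper is hypothetical, and it assumes the word does not cross a page boundary):

static int read_remote_word(struct mm_struct *mm, unsigned long vaddr,
			    unsigned long *out)
{
	struct page *page;
	void *kaddr;
	long got = get_user_pages_remote(mm, vaddr & PAGE_MASK, 1,
					 FOLL_PTRACE, &page, NULL, NULL);

	if (got < 1)
		return got < 0 ? got : -EFAULT;
	kaddr = kmap_local_page(page);
	memcpy(out, kaddr + offset_in_page(vaddr), sizeof(*out));
	kunmap_local(kaddr);
	put_page(page);
	return 0;
}
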
@@ -632,7 +632,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
int this_len, retval;
this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
- retval = ptrace_access_vm(tsk, src, buf, this_len, FOLL_FORCE);
+ retval = ptrace_access_vm(tsk, src, buf, this_len, FOLL_PTRACE);
if (!retval) {
if (copied)
@@ -661,7 +661,7 @@ int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long ds
if (copy_from_user(buf, src, this_len))
return -EFAULT;
retval = ptrace_access_vm(tsk, dst, buf, this_len,
- FOLL_FORCE | FOLL_WRITE);
+ FOLL_PTRACE | FOLL_WRITE);
if (!retval) {
if (copied)
break;
@@ -1309,7 +1309,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
unsigned long tmp;
int copied;
- copied = ptrace_access_vm(tsk, addr, &tmp, sizeof(tmp), FOLL_FORCE);
+ copied = ptrace_access_vm(tsk, addr, &tmp, sizeof(tmp), FOLL_PTRACE);
if (copied != sizeof(tmp))
return -EIO;
return put_user(tmp, (unsigned long __user *)data);
@@ -1321,7 +1321,7 @@ int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
int copied;
copied = ptrace_access_vm(tsk, addr, &data, sizeof(data),
- FOLL_FORCE | FOLL_WRITE);
+ FOLL_PTRACE | FOLL_WRITE);
return (copied == sizeof(data)) ? 0 : -EIO;
}
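
And the write side from userspace, mirroring generic_ptrace_pokedata(); a hedged fragment (the wrapper name is hypothetical):

#include <errno.h>
#include <sys/ptrace.h>
#include <sys/types.h>

/* Write one word into a stopped tracee; the kernel side is the
 * generic_ptrace_pokedata() hunk above. */
static int poke_word(pid_t pid, void *addr, long value)
{
	errno = 0;
	if (ptrace(PTRACE_POKEDATA, pid, addr, (void *)value) == -1 && errno)
		return -1;	/* e.g. kernel returned -EIO */
	return 0;
}
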
@@ -1339,7 +1339,7 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
case PTRACE_PEEKTEXT:
case PTRACE_PEEKDATA:
ret = ptrace_access_vm(child, addr, &word, sizeof(word),
- FOLL_FORCE);
+ FOLL_PTRACE);
if (ret != sizeof(word))
ret = -EIO;
else
@@ -1349,7 +1349,7 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
case PTRACE_POKETEXT:
case PTRACE_POKEDATA:
ret = ptrace_access_vm(child, addr, &data, sizeof(data),
- FOLL_FORCE | FOLL_WRITE);
+ FOLL_PTRACE | FOLL_WRITE);
ret = (ret != sizeof(data) ? -EIO : 0);
break;
@@ -482,7 +482,7 @@ static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
return -EEXIST;
}
-/* FOLL_FORCE can write to even unwritable PTEs in COW mappings. */
+/* FOLL_PTRACE can write to even unwritable PTEs in COW mappings. */
static inline bool can_follow_write_pte(pte_t pte, struct page *page,
struct vm_area_struct *vma,
unsigned int flags)
@@ -491,11 +491,11 @@ static inline bool can_follow_write_pte(pte_t pte, struct page *page,
if (pte_write(pte))
return true;
- /* Maybe FOLL_FORCE is set to override it? */
- if (!(flags & FOLL_FORCE))
+ /* Maybe FOLL_PTRACE is set to override it? */
+ if (!(flags & FOLL_PTRACE))
return false;
- /* But FOLL_FORCE has no effect on shared mappings */
+ /* But FOLL_PTRACE has no effect on shared mappings */
if (vma->vm_flags & (VM_MAYSHARE | VM_SHARED))
return false;
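
The behaviour this preserves is visible from userspace: a write through /proc/<pid>/mem succeeds on a read-only private mapping because this path breaks COW instead of faulting. A hedged demonstration:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	char *p = mmap(NULL, 4096, PROT_READ,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	int fd = open("/proc/self/mem", O_WRONLY);

	if (p == MAP_FAILED || fd < 0)
		return 1;
	/* A direct store to *p would SIGSEGV; this goes through GUP
	 * with FOLL_PTRACE | FOLL_WRITE and COWs the page instead. */
	if (pwrite(fd, "!", 1, (off_t)(uintptr_t)p) == 1)
		printf("wrote through a PROT_READ mapping: %c\n", *p);
	close(fd);
	return 0;
}
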
@@ -942,7 +942,7 @@ static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
if (write) {
if (!(vm_flags & VM_WRITE)) {
- if (!(gup_flags & FOLL_FORCE))
+ if (!(gup_flags & FOLL_PTRACE))
return -EFAULT;
/*
* We used to let the write,force case do COW in a
@@ -957,7 +957,7 @@ static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
return -EFAULT;
}
} else if (!(vm_flags & VM_READ)) {
- if (!(gup_flags & FOLL_FORCE))
+ if (!(gup_flags & FOLL_PTRACE))
return -EFAULT;
/*
* Is there actually any vma we can reach here which does not
@@ -1455,7 +1455,7 @@ long populate_vma_page_range(struct vm_area_struct *vma,
* other than PROT_NONE.
*/
if (vma_is_accessible(vma))
- gup_flags |= FOLL_FORCE;
+ gup_flags |= FOLL_PTRACE;
/*
* We made sure addr is within a VMA, so the following will
@@ -1507,11 +1507,11 @@ long faultin_vma_page_range(struct vm_area_struct *vma, unsigned long start,
/*
* FOLL_TOUCH: Mark page accessed and thereby young; will also mark
* the page dirty with FOLL_WRITE -- which doesn't make a
- * difference with !FOLL_FORCE, because the page is writable
+ * difference with !FOLL_PTRACE, because the page is writable
* in the page table.
* FOLL_HWPOISON: Return -EHWPOISON instead of -EFAULT when we hit
* a poisoned page.
- * !FOLL_FORCE: Require proper access permissions.
+ * !FOLL_PTRACE: Require proper access permissions.
*/
gup_flags = FOLL_TOUCH | FOLL_HWPOISON;
if (write)
@@ -1601,11 +1601,11 @@ static long __get_user_pages_locked(struct mm_struct *mm, unsigned long start,
long i;
/* calculate required read or write permissions.
- * If FOLL_FORCE is set, we only require the "MAY" flags.
+ * If FOLL_PTRACE is set, we only require the "MAY" flags.
*/
vm_flags = (foll_flags & FOLL_WRITE) ?
(VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
- vm_flags &= (foll_flags & FOLL_FORCE) ?
+ vm_flags &= (foll_flags & FOLL_PTRACE) ?
(VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
for (i = 0; i < nr_pages; i++) {
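
Spelled out, the masking above leaves exactly one required bit per case; a hedged restatement as a standalone predicate (the function is illustrative, not part of this patch):

/* Equivalent of the two assignments above: with FOLL_PTRACE only the
 * VM_MAY* bit survives the mask, without it only the real R/W bit does. */
static bool vma_permits(unsigned long vma_flags, unsigned int foll_flags)
{
	unsigned long need = (foll_flags & FOLL_WRITE) ?
			(VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);

	need &= (foll_flags & FOLL_PTRACE) ?
			(VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
	return vma_flags & need;
}
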
@@ -1807,7 +1807,7 @@ struct page *get_dump_page(unsigned long addr)
if (mmap_read_lock_killable(mm))
return NULL;
ret = __get_user_pages_locked(mm, addr, 1, &page, NULL, &locked,
- FOLL_FORCE | FOLL_DUMP | FOLL_GET);
+ FOLL_PTRACE | FOLL_DUMP | FOLL_GET);
if (locked)
mmap_read_unlock(mm);
return (ret == 1) ? page : NULL;
@@ -2198,7 +2198,7 @@ EXPORT_SYMBOL(get_user_pages);
*
* It is functionally equivalent to get_user_pages_fast so
* get_user_pages_fast should be used instead if specific gup_flags
- * (e.g. FOLL_FORCE) are not required.
+ * (e.g. FOLL_PTRACE) are not required.
*/
long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
struct page **pages, unsigned int gup_flags)
@@ -2869,7 +2869,7 @@ static int internal_get_user_pages_fast(unsigned long start,
int ret;
if (WARN_ON_ONCE(gup_flags & ~(FOLL_WRITE | FOLL_LONGTERM |
- FOLL_FORCE | FOLL_PIN | FOLL_GET |
+ FOLL_PTRACE | FOLL_PIN | FOLL_GET |
FOLL_FAST_ONLY | FOLL_NOFAULT)))
return -EINVAL;
@@ -1371,7 +1371,7 @@ static inline bool can_change_pmd_writable(struct vm_area_struct *vma,
return pmd_dirty(pmd);
}
-/* FOLL_FORCE can write to even unwritable PMDs in COW mappings. */
+/* FOLL_PTRACE can write to even unwritable PMDs in COW mappings. */
static inline bool can_follow_write_pmd(pmd_t pmd, struct page *page,
struct vm_area_struct *vma,
unsigned int flags)
@@ -1380,11 +1380,11 @@ static inline bool can_follow_write_pmd(pmd_t pmd, struct page *page,
if (pmd_write(pmd))
return true;
- /* Maybe FOLL_FORCE is set to override it? */
- if (!(flags & FOLL_FORCE))
+ /* Maybe FOLL_PTRACE is set to override it? */
+ if (!(flags & FOLL_PTRACE))
return false;
- /* But FOLL_FORCE has no effect on shared mappings */
+ /* But FOLL_PTRACE has no effect on shared mappings */
if (vma->vm_flags & (VM_MAYSHARE | VM_SHARED))
return false;
@@ -5315,7 +5315,7 @@ static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma,
struct mmu_notifier_range range;
/*
- * hugetlb does not support FOLL_FORCE-style write faults that keep the
+ * hugetlb does not support FOLL_PTRACE-style write faults that keep the
* PTE mapped R/O such as maybe_mkwrite() would do.
*/
if (WARN_ON_ONCE(!unshare && !(vma->vm_flags & VM_WRITE)))
@@ -3383,7 +3383,7 @@ static vm_fault_t do_wp_page(struct vm_fault *vmf)
/*
* Private mapping: create an exclusive anonymous page copy if reuse
- * is impossible. We might miss VM_WRITE for FOLL_FORCE handling.
+ * is impossible. We might miss VM_WRITE for FOLL_PTRACE handling.
*/
if (folio && folio_test_anon(folio)) {
/*
@@ -5172,7 +5172,7 @@ static vm_fault_t sanitize_fault_flags(struct vm_area_struct *vma,
/* Write faults on read-only mappings are impossible ... */
if (WARN_ON_ONCE(!(vma->vm_flags & VM_MAYWRITE)))
return VM_FAULT_SIGSEGV;
- /* ... and FOLL_FORCE only applies to COW mappings. */
+ /* ... and FOLL_PTRACE only applies to COW mappings. */
if (WARN_ON_ONCE(!(vma->vm_flags & VM_WRITE) &&
!is_cow_mapping(vma->vm_flags)))
return VM_FAULT_SIGSEGV;
@@ -985,7 +985,7 @@ int get_cmdline(struct task_struct *task, char *buffer, int buflen)
if (len > buflen)
len = buflen;
- res = access_process_vm(task, arg_start, buffer, len, FOLL_FORCE);
+ res = access_process_vm(task, arg_start, buffer, len, FOLL_PTRACE);
/*
* If the nul at the end of args has been overwritten, then
@@ -1001,7 +1001,7 @@ int get_cmdline(struct task_struct *task, char *buffer, int buflen)
len = buflen - res;
res += access_process_vm(task, env_start,
buffer+res, len,
- FOLL_FORCE);
+ FOLL_PTRACE);
res = strnlen(buffer, res);
}
}
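
Consumers see the result as NUL-separated argv strings (e.g. via /proc/<pid>/cmdline, which exposes the same arg_start window); a hedged reader-side sketch:

#include <stdio.h>

int main(int argc, char **argv)
{
	char path[64], buf[4096];
	FILE *f;
	size_t n;

	snprintf(path, sizeof(path), "/proc/%s/cmdline",
		 argc > 1 ? argv[1] : "self");
	f = fopen(path, "r");
	if (!f)
		return 1;
	n = fread(buf, 1, sizeof(buf) - 1, f);
	for (size_t i = 0; i < n; i++)
		if (buf[i] == '\0')
			buf[i] = ' ';	/* make the NUL separators printable */
	buf[n] = '\0';
	printf("%s\n", buf);
	fclose(f);
	return 0;
}
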
@@ -916,7 +916,7 @@ bool tomoyo_dump_page(struct linux_binprm *bprm, unsigned long pos,
*/
mmap_read_lock(bprm->mm);
ret = get_user_pages_remote(bprm->mm, pos, 1,
- FOLL_FORCE, &page, NULL, NULL);
+ FOLL_PTRACE, &page, NULL, NULL);
mmap_read_unlock(bprm->mm);
if (ret <= 0)
return false;