@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/init.h>
+#include <linux/migrate.h>
+#include <linux/task_work.h>
#include <asm/nmi.h>
#include <asm/perf_event.h> /* TODO: Move defns like IBS_OP_ENABLE into non-perf header */
@@ -8,12 +10,30 @@
static u64 ibs_config __read_mostly;
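+
+/*
+ * One sampled memory access (linear and physical address), queued from
+ * the NMI handler and processed later from task_work context.
+ */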
+struct ibs_access_work {
+ struct callback_head work;
+ u64 laddr, paddr;
+};
+
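+/*
+ * Runs via task_work when the sampled task is about to return to user
+ * space, i.e. in a context where taking mmap_lock and migrating pages
+ * is allowed, unlike the NMI handler that queued this work.
+ */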
+static void task_ibs_access_work(struct callback_head *work)
+{
+ struct ibs_access_work *iwork = container_of(work, struct ibs_access_work, work);
+ struct task_struct *p = current;
+
+ u64 laddr = iwork->laddr;
+ u64 paddr = iwork->paddr;
+
+ kfree(iwork);
+ do_numa_access(p, laddr, paddr);
+}
+
static int ibs_overflow_handler(unsigned int cmd, struct pt_regs *regs)
{
u64 ops_ctl, ops_data3, ops_data2;
u64 remote_access;
u64 laddr = -1, paddr = -1;
struct mm_struct *mm = current->mm;
+ struct ibs_access_work *iwork;
rdmsrl(MSR_AMD64_IBSOPCTL, ops_ctl);
@@ -86,8 +106,24 @@ static int ibs_overflow_handler(unsigned int cmd, struct pt_regs *regs)
/* Is phys addr valid? */
- if (ops_data3 & MSR_AMD64_IBSOPDATA3_PADDR_VALID)
+ if (ops_data3 & MSR_AMD64_IBSOPDATA3_PADDR_VALID) {
rdmsrl(MSR_AMD64_IBSDCPHYSAD, paddr);
- else
+ } else {
count_vm_event(IBS_PADDR_INVALID);
+ goto handled;
+ }
+
+ /*
+ * TODO: kzalloc() is not safe in NMI context, even with GFP_ATOMIC;
+ * this needs a pre-allocated pool or a lockless scheme instead.
+ */
+ iwork = kzalloc(sizeof(*iwork), GFP_ATOMIC);
+ if (!iwork)
+ goto handled;
+
+ count_vm_event(IBS_USEFUL_SAMPLES);
+
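+ /*
+ * Record the sampled addresses and defer the actual NUMA handling
+ * to task context via task_work.
+ */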
+ iwork->laddr = laddr;
+ iwork->paddr = paddr;
+ init_task_work(&iwork->work, task_ibs_access_work);
+ if (task_work_add(current, &iwork->work, TWA_RESUME))
+ kfree(iwork);
handled:
return NMI_HANDLED;
@@ -216,6 +216,7 @@ void migrate_device_pages(unsigned long *src_pfns, unsigned long *dst_pfns,
unsigned long npages);
void migrate_device_finalize(unsigned long *src_pfns,
unsigned long *dst_pfns, unsigned long npages);
+void do_numa_access(struct task_struct *p, u64 laddr, u64 paddr);
#endif /* CONFIG_MIGRATION */
@@ -2420,4 +2420,5 @@ static inline void sched_core_fork(struct task_struct *p) { }
extern void sched_set_stop_task(int cpu, struct task_struct *stop);
+DECLARE_STATIC_KEY_FALSE(hw_access_hints);
#endif
@@ -159,6 +159,7 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
IBS_LADDR_INVALID,
IBS_KERNEL_ADDR,
IBS_PADDR_INVALID,
+ IBS_USEFUL_SAMPLES,
#endif
#endif
NR_VM_EVENT_ITEMS
@@ -47,6 +47,7 @@
#include <linux/psi.h>
#include <linux/ratelimit.h>
#include <linux/task_work.h>
+#include <linux/migrate.h>
#include <asm/switch_to.h>
@@ -3125,6 +3126,8 @@ void init_numa_balancing(unsigned long clone_flags, struct task_struct *p)
}
}
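+
+/*
+ * Set when NUMA balancing is driven by hardware-provided access hints
+ * (such as IBS samples) instead of periodic address-space scanning.
+ */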
+DEFINE_STATIC_KEY_FALSE(hw_access_hints);
+
/*
* Drive the periodic memory faults..
*/
@@ -3133,6 +3136,13 @@ static void task_tick_numa(struct rq *rq, struct task_struct *curr)
struct callback_head *work = &curr->numa_work;
u64 period, now;
+ /*
+ * If we are using access hints from hardware (like using
+ * IBS), don't scan the address space.
+ */
+ if (static_branch_unlikely(&hw_access_hints))
+ return;
+
/*
* We don't care about NUMA placement if we don't have memory.
*/
@@ -4668,6 +4668,98 @@ int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
return mpol_misplaced(page, vma, addr);
}
+/*
+ * Called from task_work context to act upon the page access.
+ *
+ * Physical address (provided by IBS) is used directly instead
+ * of walking the page tables to get to the PTE/page. Hence we
+ * don't check if PTE is writable for the TNF_NO_GROUP
+ * optimization, which means RO pages are considered for grouping.
+ */
+void do_numa_access(struct task_struct *p, u64 laddr, u64 paddr)
+{
+ struct mm_struct *mm = p->mm;
+ struct vm_area_struct *vma;
+ struct page *page = NULL;
+ int page_nid = NUMA_NO_NODE;
+ int last_cpupid;
+ int target_nid;
+ int flags = 0;
+
+ if (!mm)
+ return;
+
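+ /* Best effort: if mmap_lock is contended, just drop this sample. */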
+ if (!mmap_read_trylock(mm))
+ return;
+
+ vma = find_vma(mm, laddr);
+ if (!vma || laddr < vma->vm_start)
+ goto out_unlock;
+
+ if (!vma_migratable(vma) || !vma_policy_mof(vma) ||
+ is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_MIXEDMAP))
+ goto out_unlock;
+
+ if (!vma->vm_mm ||
+ (vma->vm_file && (vma->vm_flags & (VM_READ|VM_WRITE)) == (VM_READ)))
+ goto out_unlock;
+
+ if (!vma_is_accessible(vma))
+ goto out_unlock;
+
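+ /*
+ * Validate the page behind the sampled physical address: it must be
+ * online, not a device page and on the LRU before we try to migrate it.
+ */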
+ page = pfn_to_online_page(PHYS_PFN(paddr));
+ if (!page || is_zone_device_page(page))
+ goto out_unlock;
+
+ if (unlikely(!PageLRU(page)))
+ goto out_unlock;
+
+ /* TODO: handle PTE-mapped THP */
+ if (PageCompound(page))
+ goto out_unlock;
+
+ /*
+ * Flag if the page is shared between multiple address spaces. This
+ * is later used when determining whether to group tasks together
+ */
+ if (page_mapcount(page) > 1 && (vma->vm_flags & VM_SHARED))
+ flags |= TNF_SHARED;
+
+ page_nid = page_to_nid(page);
+
+ /*
+ * For memory tiering mode, cpupid of slow memory page is used
+ * to record page access time. So use default value.
+ */
+ if ((sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) &&
+ !node_is_toptier(page_nid))
+ last_cpupid = (-1 & LAST_CPUPID_MASK);
+ else
+ last_cpupid = page_cpupid_last(page);
+
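+ /*
+ * numa_migrate_prep() takes a reference on the page; it is dropped
+ * here when no migration is needed, or consumed by
+ * migrate_misplaced_page() otherwise.
+ */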
+ target_nid = numa_migrate_prep(page, vma, laddr, page_nid, &flags);
+ if (target_nid == NUMA_NO_NODE) {
+ put_page(page);
+ goto out;
+ }
+
+ /* Migrate to the requested node */
+ if (migrate_misplaced_page(page, vma, target_nid)) {
+ page_nid = target_nid;
+ flags |= TNF_MIGRATED;
+ } else {
+ flags |= TNF_MIGRATE_FAIL;
+ }
+
+out:
+ if (page_nid != NUMA_NO_NODE)
+ task_numa_fault(last_cpupid, page_nid, 1, flags);
+
+out_unlock:
+ mmap_read_unlock(mm);
+}
+
static vm_fault_t do_numa_page(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
@@ -1408,6 +1408,7 @@ const char * const vmstat_text[] = {
"ibs_invalid_laddr",
"ibs_kernel_addr",
"ibs_invalid_paddr",
+ "ibs_useful_samples",
#endif
#endif
#endif /* CONFIG_VM_EVENT_COUNTERS || CONFIG_MEMCG */