[5/5] arm64: mm: Add TLB flush trace on context switch
There is currently no way to observe how often the local TLB is flushed on the
context-switch path. Add a trace_tlb_flush() call in check_and_switch_context()
so these flushes can be counted and monitored through the existing
tlb:tlb_flush trace event.
Signed-off-by: Yunfeng Ye <yeyunfeng@huawei.com>
---
arch/arm64/mm/context.c | 8 +++++++-
1 file changed, 7 insertions(+), 1 deletion(-)
@@ -19,6 +19,8 @@
#include <asm/smp.h>
#include <asm/tlbflush.h>
+#include <trace/events/tlb.h>
+
struct asid_bitmap {
unsigned long *map;
unsigned long nr;
@@ -60,6 +62,8 @@ static DEFINE_STATIC_KEY_FALSE(asid_isolation_enable);
#define ctxid2asid(asid) ((asid) & ~ASID_MASK)
#define asid2ctxid(asid, genid) ((asid) | (genid))
+#define TLB_FLUSH_ALL (-1)
+
/* Get the ASIDBits supported by the current CPU */
static u32 get_cpu_asid_bits(void)
{
@@ -416,8 +420,10 @@ void check_and_switch_context(struct mm_struct *mm)
atomic64_set(&mm->context.id, asid);
}
- if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending))
+ if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) {
local_flush_tlb_all();
+ trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
+ }
atomic64_set(this_cpu_ptr(&active_asids), asid);
raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
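
For reference, a minimal sketch of the generic pieces the new trace call relies
on, assuming the tlb_flush_reason enum from include/linux/mm_types.h and the
existing x86 call site; exact definitions may differ between kernel versions:

/*
 * Reason codes understood by the tlb:tlb_flush trace event
 * (paraphrased from include/linux/mm_types.h).
 */
enum tlb_flush_reason {
	TLB_FLUSH_ON_TASK_SWITCH,
	TLB_REMOTE_SHOOTDOWN,
	TLB_LOCAL_SHOOTDOWN,
	TLB_LOCAL_MM_SHOOTDOWN,
	TLB_REMOTE_SEND_IPI,
	NR_TLB_FLUSH_REASONS,
};

/*
 * x86 already reports this event from its context-switch path in
 * arch/x86/mm/tlb.c, roughly as:
 *
 *	trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
 *
 * so the hunk above reuses the existing reason code rather than
 * introducing a new one.
 */

Once applied, the flushes can be observed at runtime through the tlb:tlb_flush
trace event, for example via tracefs or perf.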