[v4] arm64: pmuv3: dynamically map PERF_COUNT_HW_BRANCH_INSTRUCTIONS

Message ID 20230327122527.3913496-1-peternewman@google.com
State New
Series [v4] arm64: pmuv3: dynamically map PERF_COUNT_HW_BRANCH_INSTRUCTIONS

Commit Message

Peter Newman March 27, 2023, 12:25 p.m. UTC
  From: Stephane Eranian <eranian@google.com>

The mapping of perf_events generic hardware events to actual PMU events on
ARM PMUv3 may not always be correct. This is particularly true for the
PERF_COUNT_HW_BRANCH_INSTRUCTIONS event: although the mapping points to an
architected event, that event is not implemented on all CPUs. This can be
seen with a simple:

$ perf stat -e branches sleep 0
 Performance counter stats for 'sleep 0':

   <not supported>      branches

       0.001401081 seconds time elapsed

Yet the hardware does have an event that could be used for branches.
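One way to confirm this is to count the architected BR_RETIRED event (event
number 0x21) directly as a raw event (shown only as an illustration; which
events are implemented still varies by CPU):

$ perf stat -e r21 sleep 0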

Dynamically check for a supported hardware event which can be used for
PERF_COUNT_HW_BRANCH_INSTRUCTIONS at mapping time.

And with that:

$ perf stat -e branches sleep 0

 Performance counter stats for 'sleep 0':

           166,739      branches

       0.000832163 seconds time elapsed

Based-on: https://lore.kernel.org/all/YvunKCJHSXKz%2FkZB@FVFF77S0Q05N
Based-on-patch-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Stephane Eranian <eranian@google.com>
Signed-off-by: Peter Newman <peternewman@google.com>
---
v3->v4:
 - splice Mark's patch with Stephane's problem statement
v2->v3:
 - removed prints per Will's suggestion
 
[v3] https://lore.kernel.org/all/20220816130221.885920-1-peternewman@google.com/
[v2] https://lore.kernel.org/lkml/20220324181458.3216262-1-eranian@google.com/

 arch/arm64/kernel/perf_event.c | 27 +++++++++++++++++++++++----
 1 file changed, 23 insertions(+), 4 deletions(-)
  

Comments

Mark Rutland April 3, 2023, 7:24 a.m. UTC | #1
On Mon, Mar 27, 2023 at 02:25:27PM +0200, Peter Newman wrote:
> From: Stephane Eranian <eranian@google.com>
> 
> The mapping of perf_events generic hardware events to actual PMU events on
> ARM PMUv3 may not always be correct. This is particularly true for the
> PERF_COUNT_HW_BRANCH_INSTRUCTIONS event: although the mapping points to an
> architected event, that event is not implemented on all CPUs. This can be
> seen with a simple:
> 
> $ perf stat -e branches sleep 0
>  Performance counter stats for 'sleep 0':
> 
>    <not supported>      branches
> 
>        0.001401081 seconds time elapsed
> 
> Yet the hardware does have an event that could be used for branches.
> 
> Dynamically check for a supported hardware event which can be used for
> PERF_COUNT_HW_BRANCH_INSTRUCTIONS at mapping time.
> 
> And with that:
> 
> $ perf stat -e branches sleep 0
> 
>  Performance counter stats for 'sleep 0':
> 
>            166,739      branches
> 
>        0.000832163 seconds time elapsed
> 
> Based-on: https://lore.kernel.org/all/YvunKCJHSXKz%2FkZB@FVFF77S0Q05N
> Based-on-patch-by: Mark Rutland <mark.rutland@arm.com>
> Signed-off-by: Stephane Eranian <eranian@google.com>
> Signed-off-by: Peter Newman <peternewman@google.com>

Thanks for reworking the patch; the patch itself and commit message look good
to me.

I'd like to keep my S-o-b here for the code itself; could we please make the
tags:

  Co-Developed-by: Stephane Eranian <eranian@google.com>
  Signed-off-by: Stephane Eranian <eranian@google.com>
  Co-Developed-by: Mark Rutland <mark.rutland@arm.com>
  Signed-off-by: Mark Rutland <mark.rutland@arm.com>
  Co-Developed-by: Peter Newman <peternewman@google.com>
  Signed-off-by: Peter Newman <peternewman@google.com>
  Link: https://lore.kernel.org/all/YvunKCJHSXKz%2FkZB@FVFF77S0Q05N

These follow the conventions documented in:

  https://www.kernel.org/doc/html/latest/process/submitting-patches.html

Thanks,
Mark.

Peter Newman April 3, 2023, 8:31 a.m. UTC | #2
On Mon, Apr 3, 2023 at 9:24 AM Mark Rutland <mark.rutland@arm.com> wrote:
>
> On Mon, Mar 27, 2023 at 02:25:27PM +0200, Peter Newman wrote:
> > Based-on: https://lore.kernel.org/all/YvunKCJHSXKz%2FkZB@FVFF77S0Q05N
> > Based-on-patch-by: Mark Rutland <mark.rutland@arm.com>
> > Signed-off-by: Stephane Eranian <eranian@google.com>
> > Signed-off-by: Peter Newman <peternewman@google.com>
>
> Thanks for reworking the patch; the patch itself and commit message look good
> to me.
>
> I'd like to keep my S-o-b here for the code itself; could we please make the
> tags:
>
>   Co-Developed-by: Stephane Eranian <eranian@google.com>
>   Signed-off-by: Stephane Eranian <eranian@google.com>
>   Co-Developed-by: Mark Rutland <mark.rutland@arm.com>
>   Signed-off-by: Mark Rutland <mark.rutland@arm.com>
>   Co-Developed-by: Peter Newman <peternewman@google.com>
>   Signed-off-by: Peter Newman <peternewman@google.com>
>   Link: https://lore.kernel.org/all/YvunKCJHSXKz%2FkZB@FVFF77S0Q05N
>
> These follow the conventions documented in:
>
>   https://www.kernel.org/doc/html/latest/process/submitting-patches.html

Yes, of course. I'll send an update shortly.

Thanks!
-Peter
  

Patch

diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index dde06c0f97f3..ee63f8e719ea 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -45,7 +45,6 @@ static const unsigned armv8_pmuv3_perf_map[PERF_COUNT_HW_MAX] = {
 	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV8_PMUV3_PERFCTR_INST_RETIRED,
 	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV8_PMUV3_PERFCTR_L1D_CACHE,
 	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL,
-	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV8_PMUV3_PERFCTR_PC_WRITE_RETIRED,
 	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV8_PMUV3_PERFCTR_BR_MIS_PRED,
 	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV8_PMUV3_PERFCTR_BUS_CYCLES,
 	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= ARMV8_PMUV3_PERFCTR_STALL_FRONTEND,
@@ -1048,6 +1047,28 @@ static void armv8pmu_reset(void *info)
 	armv8pmu_pmcr_write(pmcr);
 }
 
+static int __armv8_pmuv3_map_event_id(struct arm_pmu *armpmu,
+				      struct perf_event *event)
+{
+	if (event->attr.type == PERF_TYPE_HARDWARE &&
+	    event->attr.config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS) {
+
+		if (test_bit(ARMV8_PMUV3_PERFCTR_PC_WRITE_RETIRED,
+			     armpmu->pmceid_bitmap))
+			return ARMV8_PMUV3_PERFCTR_PC_WRITE_RETIRED;
+
+		if (test_bit(ARMV8_PMUV3_PERFCTR_BR_RETIRED,
+			     armpmu->pmceid_bitmap))
+			return ARMV8_PMUV3_PERFCTR_BR_RETIRED;
+
+		return HW_OP_UNSUPPORTED;
+	}
+
+	return armpmu_map_event(event, &armv8_pmuv3_perf_map,
+				&armv8_pmuv3_perf_cache_map,
+				ARMV8_PMU_EVTYPE_EVENT);
+}
+
 static int __armv8_pmuv3_map_event(struct perf_event *event,
 				   const unsigned (*extra_event_map)
 						  [PERF_COUNT_HW_MAX],
@@ -1059,9 +1080,7 @@ static int __armv8_pmuv3_map_event(struct perf_event *event,
 	int hw_event_id;
 	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
 
-	hw_event_id = armpmu_map_event(event, &armv8_pmuv3_perf_map,
-				       &armv8_pmuv3_perf_cache_map,
-				       ARMV8_PMU_EVTYPE_EVENT);
+	hw_event_id = __armv8_pmuv3_map_event_id(armpmu, event);
 
 	/*
 	 * CHAIN events only work when paired with an adjacent counter, and it
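
For background on why the test_bit() checks in __armv8_pmuv3_map_event_id()
work: PMUv3 advertises which architected ("common") events are implemented
through the PMCEID0_EL0/PMCEID1_EL0 registers, and the arm_pmu driver records
those bits in armpmu->pmceid_bitmap when it probes the PMU. The fragment below
is only a simplified sketch of that data flow (the real logic lives in the
driver's PMU probe path and also covers the registers' high halves, which
describe the 0x4000-range events); the function name is illustrative, not an
existing kernel symbol:

/*
 * Simplified sketch: record which architected events 0x00-0x3f the PMU
 * implements, one bit per event number.  This is the bitmap that
 * test_bit(..., armpmu->pmceid_bitmap) consults in the mapping code above.
 */
static void example_probe_pmceid(struct arm_pmu *armpmu)
{
	u32 pmceid[2];

	/* Low halves only: PMCEID0 covers events 0x00-0x1f, PMCEID1 0x20-0x3f. */
	pmceid[0] = lower_32_bits(read_sysreg(pmceid0_el0));
	pmceid[1] = lower_32_bits(read_sysreg(pmceid1_el0));

	bitmap_from_arr32(armpmu->pmceid_bitmap, pmceid, 64);
}

On a CPU that implements BR_RETIRED (0x0021) but not PC_WRITE_RETIRED (0x000C),
bit 0x0C is clear and bit 0x21 is set in this bitmap, so the mapping falls back
to BR_RETIRED instead of returning HW_OP_UNSUPPORTED.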