[V3] perf/x86/intel/ds: Flush the PEBS buffer in PEBS enable

Message ID 20230410181309.827175-1-kan.liang@linux.intel.com

Commit Message

Liang, Kan April 10, 2023, 6:13 p.m. UTC
  From: Kan Liang <kan.liang@linux.intel.com>

Several similar kernel warnings can be triggered,

  [56605.607840] CPU0 PEBS record size 0, expected 32, config 0
  cpuc->record_size=208

when the below commands are running in parallel for a while on SPR.

  while true; do perf record --no-buildid -a --intr-regs=AX -e
  cpu/event=0xd0,umask=0x81/pp -c 10003 -o /dev/null ./triad; done &

  while true; do perf record -o /tmp/out -W -d -e
  '{ld_blocks.store_forward:period=1000000,
  MEM_TRANS_RETIRED.LOAD_LATENCY:u:precise=2:ldlat=4}'
  -c 1037 ./triad; done
  *The triad program simply generates loads and stores.

The warnings are triggered when an unexpected PEBS record (with a
different config and size) is found.

A system-wide PEBS event with a large PEBS config may remain enabled
across a context switch. Some PEBS records for the system-wide event
may be generated after the old task has been scheduled out but before
the new one has been scheduled in. When the new task is scheduled in,
cpuc->pebs_record_size may be updated for the per-task PEBS events, so
the existing system-wide PEBS records have a different size from the
later PEBS records.

The PEBS buffer should be flushed right before the hardware is
reprogrammed. The new size and threshold should be updated after the old
buffer has been flushed.
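
For illustration, the reordered enable path looks roughly like this (a
simplified sketch of the hunk in the diff below, not the literal code):

  if (cpuc->pebs_data_cfg != cpuc->active_pebs_data_cfg) {
  	/* Drain old-size records while the old config is still live. */
  	intel_pmu_drain_large_pebs(cpuc);
  	/* Only then recompute the record size and DS threshold... */
  	adaptive_pebs_record_size_update();
  	pebs_update_threshold(cpuc);
  	/* ...and let the new config take effect in hardware. */
  	wrmsrl(MSR_PEBS_DATA_CFG, cpuc->pebs_data_cfg);
  	cpuc->active_pebs_data_cfg = cpuc->pebs_data_cfg;
  }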

Reported-by: Stephane Eranian <eranian@google.com>
Suggested-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
---

Changes since V2:
- Flush the buffer when the hardware is reprogrammed.
https://lore.kernel.org/lkml/1185d81f-71cc-0428-881a-db4f2cbac823@linux.intel.com/

 arch/x86/events/intel/ds.c | 39 ++++++++++++++++++++++++++------------
 1 file changed, 27 insertions(+), 12 deletions(-)
  

Comments

Peter Zijlstra April 14, 2023, 10:29 a.m. UTC | #1
On Mon, Apr 10, 2023 at 11:13:09AM -0700, kan.liang@linux.intel.com wrote:

>  arch/x86/events/intel/ds.c | 39 ++++++++++++++++++++++++++------------
>  1 file changed, 27 insertions(+), 12 deletions(-)
> 
> diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
> index 3a77f4336df7..4639d4c1e98d 100644
> --- a/arch/x86/events/intel/ds.c
> +++ b/arch/x86/events/intel/ds.c
> @@ -1257,20 +1257,18 @@ pebs_update_state(bool needed_cb, struct cpu_hw_events *cpuc,
>  	if (x86_pmu.intel_cap.pebs_baseline && add) {
>  		u64 pebs_data_cfg;
>  
> -		/* Clear pebs_data_cfg and pebs_record_size for first PEBS. */
> -		if (cpuc->n_pebs == 1) {
> +		/* Clear pebs_data_cfg for first PEBS. */
> +		if (cpuc->n_pebs == 1)
>  			cpuc->pebs_data_cfg = 0;
> -			cpuc->pebs_record_size = sizeof(struct pebs_basic);
> -		}
>  
>  		pebs_data_cfg = pebs_update_adaptive_cfg(event);
>  
> -		/* Update pebs_record_size if new event requires more data. */
> -		if (pebs_data_cfg & ~cpuc->pebs_data_cfg) {
> +		/*
> +		 * Only update the pebs_data_cfg here. The pebs_record_size
> +		 * will be updated later when the new pebs_data_cfg takes effect.
> +		 */
> +		if (pebs_data_cfg & ~cpuc->pebs_data_cfg)
>  			cpuc->pebs_data_cfg |= pebs_data_cfg;
> -			adaptive_pebs_record_size_update();
> -			update = true;
> -		}
>  	}
>  
>  	if (update)
		pebs_update_threshold(cpuc);

Now, pebs_update_threshold() will actually use
->pebs_record_size, but afaict the above now has a path through (for
example for the first event) where update is true but ->pebs_record_size
is unset/stale.

I think it all works out, but it is quite a mess and hard to follow.
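
For reference, the path in question has roughly this shape after the
patch (a simplified sketch of the quoted hunk, not the literal kernel
code):

  if (x86_pmu.intel_cap.pebs_baseline && add) {
  	/* pebs_record_size is no longer reset here for the first PEBS */
  	if (cpuc->n_pebs == 1)
  		cpuc->pebs_data_cfg = 0;
  	/* ... 'update' is no longer set in this branch ... */
  }

  if (update)	/* may have been set earlier, e.g. for the first event */
  	pebs_update_threshold(cpuc);	/* reads cpuc->pebs_record_size,
  					 * which can now be stale */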

> @@ -1331,6 +1329,13 @@ static void intel_pmu_pebs_via_pt_enable(struct perf_event *event)
>  	wrmsrl(base + idx, value);
>  }
>  
> +static inline void intel_pmu_drain_large_pebs(struct cpu_hw_events *cpuc)
> +{
> +	if (cpuc->n_pebs == cpuc->n_large_pebs &&
> +	    cpuc->n_pebs != cpuc->n_pebs_via_pt)
> +		intel_pmu_drain_pebs_buffer();
> +}

It's been a minute since I looked at this code; but why only for large
PEBS? Surely flushing is quick when the DS is actually empty, and that
stops us having to worry whether there are races where there might be a
single entry in it.
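
For context, the drain handlers bail out early when the buffer is empty
(a simplified sketch of the existing drain_pebs() handlers, not the
exact code), so an unconditional flush would indeed be cheap in the
common case:

  struct debug_store *ds = cpuc->ds;
  void *base = (void *)(unsigned long)ds->pebs_buffer_base;
  void *top  = (void *)(unsigned long)ds->pebs_index;

  if (top <= base)
  	return;	/* no records queued; the flush is a no-op */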

>  void intel_pmu_pebs_enable(struct perf_event *event)
>  {
>  	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
> @@ -1350,6 +1355,18 @@ void intel_pmu_pebs_enable(struct perf_event *event)
>  	if (x86_pmu.intel_cap.pebs_baseline) {
>  		hwc->config |= ICL_EVENTSEL_ADAPTIVE;
>  		if (cpuc->pebs_data_cfg != cpuc->active_pebs_data_cfg) {
> +			/*
> +			 * A system-wide PEBS event with the large PEBS
> +			 * config may still be enabled when switching the
> +			 * context. Some PEBS records for the system-wide
> +			 * PEBS may be generated while the old event has
> +			 * been scheduled out but the new one hasn't been
> +			 * scheduled in. It's not enough to only flush the
> +			 * buffer when a PEBS event is disabled.
> +			 */

Perhaps just:

			/*
			 * drain_pebs() assumes uniform record size;
			 * hence we need to drain when changing said
			 * size.
			 */


> +			intel_pmu_drain_large_pebs(cpuc);
> +			adaptive_pebs_record_size_update();
> +			pebs_update_threshold(cpuc);
>  			wrmsrl(MSR_PEBS_DATA_CFG, cpuc->pebs_data_cfg);
>  			cpuc->active_pebs_data_cfg = cpuc->pebs_data_cfg;
>  		}
  
Liang, Kan April 14, 2023, 2:22 p.m. UTC | #2
On 2023-04-14 6:29 a.m., Peter Zijlstra wrote:
> On Mon, Apr 10, 2023 at 11:13:09AM -0700, kan.liang@linux.intel.com wrote:
> 
>>  arch/x86/events/intel/ds.c | 39 ++++++++++++++++++++++++++------------
>>  1 file changed, 27 insertions(+), 12 deletions(-)
>>
>> diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
>> index 3a77f4336df7..4639d4c1e98d 100644
>> --- a/arch/x86/events/intel/ds.c
>> +++ b/arch/x86/events/intel/ds.c
>> @@ -1257,20 +1257,18 @@ pebs_update_state(bool needed_cb, struct cpu_hw_events *cpuc,
>>  	if (x86_pmu.intel_cap.pebs_baseline && add) {
>>  		u64 pebs_data_cfg;
>>  
>> -		/* Clear pebs_data_cfg and pebs_record_size for first PEBS. */
>> -		if (cpuc->n_pebs == 1) {
>> +		/* Clear pebs_data_cfg for first PEBS. */
>> +		if (cpuc->n_pebs == 1)
>>  			cpuc->pebs_data_cfg = 0;
>> -			cpuc->pebs_record_size = sizeof(struct pebs_basic);
>> -		}
>>  
>>  		pebs_data_cfg = pebs_update_adaptive_cfg(event);
>>  
>> -		/* Update pebs_record_size if new event requires more data. */
>> -		if (pebs_data_cfg & ~cpuc->pebs_data_cfg) {
>> +		/*
>> +		 * Only update the pebs_data_cfg here. The pebs_record_size
>> +		 * will be updated later when the new pebs_data_cfg takes effect.
>> +		 */
>> +		if (pebs_data_cfg & ~cpuc->pebs_data_cfg)
>>  			cpuc->pebs_data_cfg |= pebs_data_cfg;
>> -			adaptive_pebs_record_size_update();
>> -			update = true;
>> -		}
>>  	}
>>  
>>  	if (update)
> 		pebs_update_threshold(cpuc);
> 
> Now, pebs_update_threshold() will actually use
> ->pebs_record_size, but afaict the above now has a path through (for
> example for the first event) where update is true but ->pebs_record_size
> is unset/stale.
> 
> I think it all works out, but it is quite a mess and hard to follow.

With this patch, pebs_update_threshold() is delayed until
intel_pmu_pebs_enable() for adaptive PEBS.

I think we may reuse the pebs_data_cfg method for the fixed PEBS as
well and likewise delay the DS update to intel_pmu_pebs_enable(), so
everything will be consistent.

I will do more tests and probably send a separate cleanup patch later.


> 
>> @@ -1331,6 +1329,13 @@ static void intel_pmu_pebs_via_pt_enable(struct perf_event *event)
>>  	wrmsrl(base + idx, value);
>>  }
>>  
>> +static inline void intel_pmu_drain_large_pebs(struct cpu_hw_events *cpuc)
>> +{
>> +	if (cpuc->n_pebs == cpuc->n_large_pebs &&
>> +	    cpuc->n_pebs != cpuc->n_pebs_via_pt)
>> +		intel_pmu_drain_pebs_buffer();
>> +}
> 
> Its been a minute since I looked at this code; but why only for large
> pebs? Surely flushing is quick when the DS is actually empty and that
> stops us having to worry if there's races where there might be a single
> entry in.

AUTO_RELOAD is a separate feature. It is always enabled when a fixed
period is set. That's not the case for large PEBS, which is only
supported for a subset of sample types.

There would be some overhead in the AUTO_RELOAD + single-PEBS case,
since we have to update the event count.
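
To illustrate: with AUTO_RELOAD the counter is reset by hardware on
each record, so draining N records means folding N full periods back
into the event count. A sketch of the idea only (the real work is done
by intel_pmu_save_and_restart_reload() in ds.c; nr_records and
reload_period are illustrative names):

  /* new/prev are raw counter values; each record accounts for one period */
  local64_add(new - prev + (u64)nr_records * reload_period, &event->count);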

> 
>>  void intel_pmu_pebs_enable(struct perf_event *event)
>>  {
>>  	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
>> @@ -1350,6 +1355,18 @@ void intel_pmu_pebs_enable(struct perf_event *event)
>>  	if (x86_pmu.intel_cap.pebs_baseline) {
>>  		hwc->config |= ICL_EVENTSEL_ADAPTIVE;
>>  		if (cpuc->pebs_data_cfg != cpuc->active_pebs_data_cfg) {
>> +			/*
>> +			 * A system-wide PEBS event with the large PEBS
>> +			 * config may still be enabled when switching the
>> +			 * context. Some PEBS records for the system-wide
>> +			 * PEBS may be generated while the old event has
>> +			 * been scheduled out but the new one hasn't been
>> +			 * scheduled in. It's not enough to only flush the
>> +			 * buffer when a PEBS event is disabled.
>> +			 */
> 
> Perhaps just:
> 
> 			/*
> 			 * drain_pebs() assumes uniform record size;
> 			 * hence we need to drain when changing said
> 			 * size.
> 			 */
> 

Sure, I will update in V4.

Thanks,
Kan

> 
>> +			intel_pmu_drain_large_pebs(cpuc);
>> +			adaptive_pebs_record_size_update();
>> +			pebs_update_threshold(cpuc);
>>  			wrmsrl(MSR_PEBS_DATA_CFG, cpuc->pebs_data_cfg);
>>  			cpuc->active_pebs_data_cfg = cpuc->pebs_data_cfg;
>>  		}
  

Patch

diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index 3a77f4336df7..4639d4c1e98d 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -1257,20 +1257,18 @@  pebs_update_state(bool needed_cb, struct cpu_hw_events *cpuc,
 	if (x86_pmu.intel_cap.pebs_baseline && add) {
 		u64 pebs_data_cfg;
 
-		/* Clear pebs_data_cfg and pebs_record_size for first PEBS. */
-		if (cpuc->n_pebs == 1) {
+		/* Clear pebs_data_cfg for first PEBS. */
+		if (cpuc->n_pebs == 1)
 			cpuc->pebs_data_cfg = 0;
-			cpuc->pebs_record_size = sizeof(struct pebs_basic);
-		}
 
 		pebs_data_cfg = pebs_update_adaptive_cfg(event);
 
-		/* Update pebs_record_size if new event requires more data. */
-		if (pebs_data_cfg & ~cpuc->pebs_data_cfg) {
+		/*
+		 * Only update the pebs_data_cfg here. The pebs_record_size
+		 * will be updated later when the new pebs_data_cfg takes effect.
+		 */
+		if (pebs_data_cfg & ~cpuc->pebs_data_cfg)
 			cpuc->pebs_data_cfg |= pebs_data_cfg;
-			adaptive_pebs_record_size_update();
-			update = true;
-		}
 	}
 
 	if (update)
@@ -1331,6 +1329,13 @@  static void intel_pmu_pebs_via_pt_enable(struct perf_event *event)
 	wrmsrl(base + idx, value);
 }
 
+static inline void intel_pmu_drain_large_pebs(struct cpu_hw_events *cpuc)
+{
+	if (cpuc->n_pebs == cpuc->n_large_pebs &&
+	    cpuc->n_pebs != cpuc->n_pebs_via_pt)
+		intel_pmu_drain_pebs_buffer();
+}
+
 void intel_pmu_pebs_enable(struct perf_event *event)
 {
 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
@@ -1350,6 +1355,18 @@  void intel_pmu_pebs_enable(struct perf_event *event)
 	if (x86_pmu.intel_cap.pebs_baseline) {
 		hwc->config |= ICL_EVENTSEL_ADAPTIVE;
 		if (cpuc->pebs_data_cfg != cpuc->active_pebs_data_cfg) {
+			/*
+			 * A system-wide PEBS event with the large PEBS
+			 * config may still be enabled when switching the
+			 * context. Some PEBS records for the system-wide
+			 * PEBS may be generated while the old event has
+			 * been scheduled out but the new one hasn't been
+			 * scheduled in. It's not enough to only flush the
+			 * buffer when a PEBS event is disabled.
+			 */
+			intel_pmu_drain_large_pebs(cpuc);
+			adaptive_pebs_record_size_update();
+			pebs_update_threshold(cpuc);
 			wrmsrl(MSR_PEBS_DATA_CFG, cpuc->pebs_data_cfg);
 			cpuc->active_pebs_data_cfg = cpuc->pebs_data_cfg;
 		}
@@ -1396,9 +1413,7 @@  void intel_pmu_pebs_disable(struct perf_event *event)
 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	struct hw_perf_event *hwc = &event->hw;
 
-	if (cpuc->n_pebs == cpuc->n_large_pebs &&
-	    cpuc->n_pebs != cpuc->n_pebs_via_pt)
-		intel_pmu_drain_pebs_buffer();
+	intel_pmu_drain_large_pebs(cpuc);
 
 	cpuc->pebs_enabled &= ~(1ULL << hwc->idx);