[2/3] perf/core: Reduce PMU access to adjust sample freq

Message ID 20231120221932.213710-2-namhyung@kernel.org
State New
Series [1/3] perf/core: Update perf_adjust_freq_unthr_context()

Commit Message

Namhyung Kim Nov. 20, 2023, 10:19 p.m. UTC
  For throttled events, perf_adjust_freq_unthr_events() first starts
the event and then stops it unnecessarily.  As the event is already
stopped, it can adjust the frequency directly and then move on.

Signed-off-by: Namhyung Kim <namhyung@kernel.org>
---
 kernel/events/core.c | 13 ++++++++++---
 1 file changed, 10 insertions(+), 3 deletions(-)
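
For reference, here is a simplified sketch of the per-event flow in
perf_adjust_freq_unthr_events() after this change, assembled from the
hunks below; surrounding code is elided and the comments are editorial,
so treat it as illustrative rather than the kernel source verbatim:

	if (hwc->interrupts == MAX_INTERRUPTS) {
		/* Unthrottle: log it, but do not start the event yet. */
		hwc->interrupts = 0;
		perf_log_throttle(event, 1);

		/* A non-freq event only needs to be restarted. */
		if (!event->attr.freq || !event->attr.sample_freq) {
			delta = 0;	/* start without PERF_EF_RELOAD */
			goto next;
		}

		/* Already stopped: skip the redundant pmu->stop(). */
		if (event->hw.state & PERF_HES_STOPPED)
			goto adjust;
	} else if (!event->attr.freq || !event->attr.sample_freq)
		continue;

	/* Stop the event to get a stable count before adjusting. */
	event->pmu->stop(event, PERF_EF_UPDATE);

adjust:
	now = local64_read(&event->count);
	delta = now - hwc->freq_count_stamp;
	hwc->freq_count_stamp = now;
	/* ... period computation elided ... */
	if (delta > 0)
		perf_adjust_period(event, period, delta, false);

next:
	event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0);

Note how delta is zeroed before the goto next, so a throttled non-freq
event is restarted without PERF_EF_RELOAD, and how the single start at
next replaces the early pmu->start() the old code issued on unthrottle.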
  

Comments

Liang, Kan Nov. 21, 2023, 3:57 p.m. UTC | #1
On 2023-11-20 5:19 p.m., Namhyung Kim wrote:
> For throttled events, perf_adjust_freq_unthr_events() first starts
> the event and then stops it unnecessarily.  As the event is already
> stopped, it can adjust the frequency directly and then move on.
> 
> Signed-off-by: Namhyung Kim <namhyung@kernel.org>

Reviewed-by: Kan Liang <kan.liang@linux.intel.com>

Thanks,
Kan

> ---
>  kernel/events/core.c | 13 ++++++++++---
>  1 file changed, 10 insertions(+), 3 deletions(-)
> 
> diff --git a/kernel/events/core.c b/kernel/events/core.c
> index 53e2ad73102d..fd3449e4d081 100644
> --- a/kernel/events/core.c
> +++ b/kernel/events/core.c
> @@ -4119,10 +4119,15 @@ static void perf_adjust_freq_unthr_events(struct list_head *event_list)
>  		if (hwc->interrupts == MAX_INTERRUPTS) {
>  			hwc->interrupts = 0;
>  			perf_log_throttle(event, 1);
> -			event->pmu->start(event, 0);
> -		}
>  
> -		if (!event->attr.freq || !event->attr.sample_freq)
> +			if (!event->attr.freq || !event->attr.sample_freq) {
> +				delta = 0;
> +				goto next;
> +			}
> +
> +			if (event->hw.state & PERF_HES_STOPPED)
> +				goto adjust;
> +		} else if (!event->attr.freq || !event->attr.sample_freq)
>  			continue;
>  
>  		/*
> @@ -4130,6 +4135,7 @@ static void perf_adjust_freq_unthr_events(struct list_head *event_list)
>  		 */
>  		event->pmu->stop(event, PERF_EF_UPDATE);
>  
> +adjust:
>  		now = local64_read(&event->count);
>  		delta = now - hwc->freq_count_stamp;
>  		hwc->freq_count_stamp = now;
> @@ -4144,6 +4150,7 @@ static void perf_adjust_freq_unthr_events(struct list_head *event_list)
>  		if (delta > 0)
>  			perf_adjust_period(event, period, delta, false);
>  
> +next:
>  		event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0);
>  	}
>  }
  

Patch

diff --git a/kernel/events/core.c b/kernel/events/core.c
index 53e2ad73102d..fd3449e4d081 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -4119,10 +4119,15 @@ static void perf_adjust_freq_unthr_events(struct list_head *event_list)
 		if (hwc->interrupts == MAX_INTERRUPTS) {
 			hwc->interrupts = 0;
 			perf_log_throttle(event, 1);
-			event->pmu->start(event, 0);
-		}
 
-		if (!event->attr.freq || !event->attr.sample_freq)
+			if (!event->attr.freq || !event->attr.sample_freq) {
+				delta = 0;
+				goto next;
+			}
+
+			if (event->hw.state & PERF_HES_STOPPED)
+				goto adjust;
+		} else if (!event->attr.freq || !event->attr.sample_freq)
 			continue;
 
 		/*
@@ -4130,6 +4135,7 @@ static void perf_adjust_freq_unthr_events(struct list_head *event_list)
 		 */
 		event->pmu->stop(event, PERF_EF_UPDATE);
 
+adjust:
 		now = local64_read(&event->count);
 		delta = now - hwc->freq_count_stamp;
 		hwc->freq_count_stamp = now;
@@ -4144,6 +4150,7 @@ static void perf_adjust_freq_unthr_events(struct list_head *event_list)
 		if (delta > 0)
 			perf_adjust_period(event, period, delta, false);
 
+next:
 		event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0);
 	}
 }