[1/3] perf/core: Update perf_adjust_freq_unthr_context()

Message ID 20231120221932.213710-1-namhyung@kernel.org
State New
Series [1/3] perf/core: Update perf_adjust_freq_unthr_context()

Commit Message

Namhyung Kim Nov. 20, 2023, 10:19 p.m. UTC
  perf_adjust_freq_unthr_context() was unnecessarily disabling and
enabling the PMU for each event.  It should be done at the PMU level
instead.  Add a pmu_ctx->nr_freq counter so the check can be made per
PMU.  As the PMU context keeps separate active lists for the pinned
and flexible groups, factor out a new function to do the job.

Another minor optimization is that PMUs with PERF_PMU_CAP_NO_INTERRUPT
can be skipped even when sampling events need to be unthrottled.
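
In short, the tick-time path becomes a per-PMU loop (condensed from
the diff below):

	list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry) {
		if (!(pmu_ctx->nr_freq || unthrottle))
			continue;
		if (pmu_ctx->pmu->capabilities & PERF_PMU_CAP_NO_INTERRUPT)
			continue;

		perf_pmu_disable(pmu_ctx->pmu);
		perf_adjust_freq_unthr_events(&pmu_ctx->pinned_active);
		perf_adjust_freq_unthr_events(&pmu_ctx->flexible_active);
		perf_pmu_enable(pmu_ctx->pmu);
	}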

Signed-off-by: Namhyung Kim <namhyung@kernel.org>
---
 include/linux/perf_event.h |  1 +
 kernel/events/core.c       | 68 +++++++++++++++++++++++---------------
 2 files changed, 43 insertions(+), 26 deletions(-)
  

Comments

Ian Rogers Nov. 20, 2023, 10:41 p.m. UTC | #1
On Mon, Nov 20, 2023 at 2:19 PM Namhyung Kim <namhyung@kernel.org> wrote:
>
> It was unnecessarily disabling and enabling PMUs for each event.  It
> should be done at PMU level.  Add pmu_ctx->nr_freq counter to check it
> at each PMU.  As pmu context has separate active lists for pinned group
> and flexible group, factor out a new function to do the job.
>
> Another minor optimization is that it can skip PMUs w/ CAP_NO_INTERRUPT
> even if it needs to unthrottle sampling events.
>
> Signed-off-by: Namhyung Kim <namhyung@kernel.org>

Series:
Reviewed-by: Ian Rogers <irogers@google.com>

Thanks,
Ian

> ---
>  include/linux/perf_event.h |  1 +
>  kernel/events/core.c       | 68 +++++++++++++++++++++++---------------
>  2 files changed, 43 insertions(+), 26 deletions(-)
>
> diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
> index 0367d748fae0..3eb17dc89f5e 100644
> --- a/include/linux/perf_event.h
> +++ b/include/linux/perf_event.h
> @@ -879,6 +879,7 @@ struct perf_event_pmu_context {
>
>         unsigned int                    nr_events;
>         unsigned int                    nr_cgroups;
> +       unsigned int                    nr_freq;
>
>         atomic_t                        refcount; /* event <-> epc */
>         struct rcu_head                 rcu_head;
> diff --git a/kernel/events/core.c b/kernel/events/core.c
> index 3eb26c2c6e65..53e2ad73102d 100644
> --- a/kernel/events/core.c
> +++ b/kernel/events/core.c
> @@ -2275,8 +2275,10 @@ event_sched_out(struct perf_event *event, struct perf_event_context *ctx)
>
>         if (!is_software_event(event))
>                 cpc->active_oncpu--;
> -       if (event->attr.freq && event->attr.sample_freq)
> +       if (event->attr.freq && event->attr.sample_freq) {
>                 ctx->nr_freq--;
> +               epc->nr_freq--;
> +       }
>         if (event->attr.exclusive || !cpc->active_oncpu)
>                 cpc->exclusive = 0;
>
> @@ -2531,9 +2533,10 @@ event_sched_in(struct perf_event *event, struct perf_event_context *ctx)
>
>         if (!is_software_event(event))
>                 cpc->active_oncpu++;
> -       if (event->attr.freq && event->attr.sample_freq)
> +       if (event->attr.freq && event->attr.sample_freq) {
>                 ctx->nr_freq++;
> -
> +               epc->nr_freq++;
> +       }
>         if (event->attr.exclusive)
>                 cpc->exclusive = 1;
>
> @@ -4096,30 +4099,14 @@ static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bo
>         }
>  }
>
> -/*
> - * combine freq adjustment with unthrottling to avoid two passes over the
> - * events. At the same time, make sure, having freq events does not change
> - * the rate of unthrottling as that would introduce bias.
> - */
> -static void
> -perf_adjust_freq_unthr_context(struct perf_event_context *ctx, bool unthrottle)
> +static void perf_adjust_freq_unthr_events(struct list_head *event_list)
>  {
>         struct perf_event *event;
>         struct hw_perf_event *hwc;
>         u64 now, period = TICK_NSEC;
>         s64 delta;
>
> -       /*
> -        * only need to iterate over all events iff:
> -        * - context have events in frequency mode (needs freq adjust)
> -        * - there are events to unthrottle on this cpu
> -        */
> -       if (!(ctx->nr_freq || unthrottle))
> -               return;
> -
> -       raw_spin_lock(&ctx->lock);
> -
> -       list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
> +       list_for_each_entry(event, event_list, active_list) {
>                 if (event->state != PERF_EVENT_STATE_ACTIVE)
>                         continue;
>
> @@ -4127,8 +4114,6 @@ perf_adjust_freq_unthr_context(struct perf_event_context *ctx, bool unthrottle)
>                 if (!event_filter_match(event))
>                         continue;
>
> -               perf_pmu_disable(event->pmu);
> -
>                 hwc = &event->hw;
>
>                 if (hwc->interrupts == MAX_INTERRUPTS) {
> @@ -4138,7 +4123,7 @@ perf_adjust_freq_unthr_context(struct perf_event_context *ctx, bool unthrottle)
>                 }
>
>                 if (!event->attr.freq || !event->attr.sample_freq)
> -                       goto next;
> +                       continue;
>
>                 /*
>                  * stop the event and update event->count
> @@ -4160,8 +4145,39 @@ perf_adjust_freq_unthr_context(struct perf_event_context *ctx, bool unthrottle)
>                         perf_adjust_period(event, period, delta, false);
>
>                 event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0);
> -       next:
> -               perf_pmu_enable(event->pmu);
> +       }
> +}
> +
> +/*
> + * combine freq adjustment with unthrottling to avoid two passes over the
> + * events. At the same time, make sure, having freq events does not change
> + * the rate of unthrottling as that would introduce bias.
> + */
> +static void
> +perf_adjust_freq_unthr_context(struct perf_event_context *ctx, bool unthrottle)
> +{
> +       struct perf_event_pmu_context *pmu_ctx;
> +
> +       /*
> +        * only need to iterate over all events iff:
> +        * - context have events in frequency mode (needs freq adjust)
> +        * - there are events to unthrottle on this cpu
> +        */
> +       if (!(ctx->nr_freq || unthrottle))
> +               return;
> +
> +       raw_spin_lock(&ctx->lock);
> +
> +       list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry) {
> +               if (!(pmu_ctx->nr_freq || unthrottle))
> +                       continue;
> +               if (pmu_ctx->pmu->capabilities & PERF_PMU_CAP_NO_INTERRUPT)
> +                       continue;
> +
> +               perf_pmu_disable(pmu_ctx->pmu);
> +               perf_adjust_freq_unthr_events(&pmu_ctx->pinned_active);
> +               perf_adjust_freq_unthr_events(&pmu_ctx->flexible_active);
> +               perf_pmu_enable(pmu_ctx->pmu);
>         }
>
>         raw_spin_unlock(&ctx->lock);
> --
> 2.43.0.rc1.413.gea7ed67945-goog
>
  
Mingwei Zhang Nov. 20, 2023, 11:23 p.m. UTC | #2
On Mon, Nov 20, 2023, Ian Rogers wrote:
> On Mon, Nov 20, 2023 at 2:19 PM Namhyung Kim <namhyung@kernel.org> wrote:
> >
> > It was unnecessarily disabling and enabling PMUs for each event.  It
> > should be done at PMU level.  Add pmu_ctx->nr_freq counter to check it
> > at each PMU.  As pmu context has separate active lists for pinned group
> > and flexible group, factor out a new function to do the job.
> >
> > Another minor optimization is that it can skip PMUs w/ CAP_NO_INTERRUPT
> > even if it needs to unthrottle sampling events.
> >
> > Signed-off-by: Namhyung Kim <namhyung@kernel.org>
> 
> Series:
> Reviewed-by: Ian Rogers <irogers@google.com>
> 
> Thanks,
> Ian
> 

Can we have "Cc: stable@vger.kernel.org" for the whole series? This
series should give a great performance improvement for all VMs that run
perf sampling events without specifying a period.

The key point is that disabling/enabling the PMU in a virtualized
environment is super heavyweight and can take up to 50% of the CPU
time, i.e., when multiplexing is used in the VM, a vCPU on a pCPU can
only use 50% of the resource; the other half is entirely wasted in host
PMU code enabling/disabling the PMU.
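
To make the arithmetic concrete, here is a standalone sketch (with
made-up PMU and event counts, not kernel code) of how many PMU
disable/enable pairs, and hence intercepted global-control MSR writes
in a guest, one timer tick issues before and after this change:

	/*
	 * Standalone illustration only; the counts below are made up.
	 * Each perf_pmu_disable()/perf_pmu_enable() pair on x86 ends up
	 * toggling the global control MSR, and in a guest such writes
	 * are typically intercepted by the hypervisor.
	 */
	#include <stdio.h>

	#define NR_PMUS			2	/* PMUs with freq events on this CPU */
	#define NR_ACTIVE_EVENTS	12	/* active freq-mode sampling events  */

	int main(void)
	{
		int old_pairs = NR_ACTIVE_EVENTS;	/* before: one pair per event */
		int new_pairs = NR_PMUS;		/* after:  one pair per PMU   */

		printf("before: %2d pairs -> %2d global-ctrl MSR writes per tick\n",
		       old_pairs, 2 * old_pairs);
		printf("after:  %2d pairs -> %2d global-ctrl MSR writes per tick\n",
		       new_pairs, 2 * new_pairs);
		return 0;
	}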

Thanks.
-Mingwei

> > ---
> >  include/linux/perf_event.h |  1 +
> >  kernel/events/core.c       | 68 +++++++++++++++++++++++---------------
> >  2 files changed, 43 insertions(+), 26 deletions(-)
> >
> > diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
> > index 0367d748fae0..3eb17dc89f5e 100644
> > --- a/include/linux/perf_event.h
> > +++ b/include/linux/perf_event.h
> > @@ -879,6 +879,7 @@ struct perf_event_pmu_context {
> >
> >         unsigned int                    nr_events;
> >         unsigned int                    nr_cgroups;
> > +       unsigned int                    nr_freq;
> >
> >         atomic_t                        refcount; /* event <-> epc */
> >         struct rcu_head                 rcu_head;
> > diff --git a/kernel/events/core.c b/kernel/events/core.c
> > index 3eb26c2c6e65..53e2ad73102d 100644
> > --- a/kernel/events/core.c
> > +++ b/kernel/events/core.c
> > @@ -2275,8 +2275,10 @@ event_sched_out(struct perf_event *event, struct perf_event_context *ctx)
> >
> >         if (!is_software_event(event))
> >                 cpc->active_oncpu--;
> > -       if (event->attr.freq && event->attr.sample_freq)
> > +       if (event->attr.freq && event->attr.sample_freq) {
> >                 ctx->nr_freq--;
> > +               epc->nr_freq--;
> > +       }
> >         if (event->attr.exclusive || !cpc->active_oncpu)
> >                 cpc->exclusive = 0;
> >
> > @@ -2531,9 +2533,10 @@ event_sched_in(struct perf_event *event, struct perf_event_context *ctx)
> >
> >         if (!is_software_event(event))
> >                 cpc->active_oncpu++;
> > -       if (event->attr.freq && event->attr.sample_freq)
> > +       if (event->attr.freq && event->attr.sample_freq) {
> >                 ctx->nr_freq++;
> > -
> > +               epc->nr_freq++;
> > +       }
> >         if (event->attr.exclusive)
> >                 cpc->exclusive = 1;
> >
> > @@ -4096,30 +4099,14 @@ static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bo
> >         }
> >  }
> >
> > -/*
> > - * combine freq adjustment with unthrottling to avoid two passes over the
> > - * events. At the same time, make sure, having freq events does not change
> > - * the rate of unthrottling as that would introduce bias.
> > - */
> > -static void
> > -perf_adjust_freq_unthr_context(struct perf_event_context *ctx, bool unthrottle)
> > +static void perf_adjust_freq_unthr_events(struct list_head *event_list)
> >  {
> >         struct perf_event *event;
> >         struct hw_perf_event *hwc;
> >         u64 now, period = TICK_NSEC;
> >         s64 delta;
> >
> > -       /*
> > -        * only need to iterate over all events iff:
> > -        * - context have events in frequency mode (needs freq adjust)
> > -        * - there are events to unthrottle on this cpu
> > -        */
> > -       if (!(ctx->nr_freq || unthrottle))
> > -               return;
> > -
> > -       raw_spin_lock(&ctx->lock);
> > -
> > -       list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
> > +       list_for_each_entry(event, event_list, active_list) {
> >                 if (event->state != PERF_EVENT_STATE_ACTIVE)
> >                         continue;
> >
> > @@ -4127,8 +4114,6 @@ perf_adjust_freq_unthr_context(struct perf_event_context *ctx, bool unthrottle)
> >                 if (!event_filter_match(event))
> >                         continue;
> >
> > -               perf_pmu_disable(event->pmu);
> > -
> >                 hwc = &event->hw;
> >
> >                 if (hwc->interrupts == MAX_INTERRUPTS) {
> > @@ -4138,7 +4123,7 @@ perf_adjust_freq_unthr_context(struct perf_event_context *ctx, bool unthrottle)
> >                 }
> >
> >                 if (!event->attr.freq || !event->attr.sample_freq)
> > -                       goto next;
> > +                       continue;
> >
> >                 /*
> >                  * stop the event and update event->count
> > @@ -4160,8 +4145,39 @@ perf_adjust_freq_unthr_context(struct perf_event_context *ctx, bool unthrottle)
> >                         perf_adjust_period(event, period, delta, false);
> >
> >                 event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0);
> > -       next:
> > -               perf_pmu_enable(event->pmu);
> > +       }
> > +}
> > +
> > +/*
> > + * combine freq adjustment with unthrottling to avoid two passes over the
> > + * events. At the same time, make sure, having freq events does not change
> > + * the rate of unthrottling as that would introduce bias.
> > + */
> > +static void
> > +perf_adjust_freq_unthr_context(struct perf_event_context *ctx, bool unthrottle)
> > +{
> > +       struct perf_event_pmu_context *pmu_ctx;
> > +
> > +       /*
> > +        * only need to iterate over all events iff:
> > +        * - context have events in frequency mode (needs freq adjust)
> > +        * - there are events to unthrottle on this cpu
> > +        */
> > +       if (!(ctx->nr_freq || unthrottle))
> > +               return;
> > +
> > +       raw_spin_lock(&ctx->lock);
> > +
> > +       list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry) {
> > +               if (!(pmu_ctx->nr_freq || unthrottle))
> > +                       continue;
> > +               if (pmu_ctx->pmu->capabilities & PERF_PMU_CAP_NO_INTERRUPT)
> > +                       continue;
> > +
> > +               perf_pmu_disable(pmu_ctx->pmu);
> > +               perf_adjust_freq_unthr_events(&pmu_ctx->pinned_active);
> > +               perf_adjust_freq_unthr_events(&pmu_ctx->flexible_active);
> > +               perf_pmu_enable(pmu_ctx->pmu);
> >         }
> >
> >         raw_spin_unlock(&ctx->lock);
> > --
> > 2.43.0.rc1.413.gea7ed67945-goog
> >
  
Liang, Kan Nov. 21, 2023, 3:57 p.m. UTC | #3
On 2023-11-20 5:19 p.m., Namhyung Kim wrote:
> It was unnecessarily disabling and enabling PMUs for each event.  It
> should be done at PMU level.  Add pmu_ctx->nr_freq counter to check it
> at each PMU.  As pmu context has separate active lists for pinned group
> and flexible group, factor out a new function to do the job.
> 
> Another minor optimization is that it can skip PMUs w/ CAP_NO_INTERRUPT
> even if it needs to unthrottle sampling events.
> 
> Signed-off-by: Namhyung Kim <namhyung@kernel.org>

Reviewed-by: Kan Liang <kan.liang@linux.intel.com>

Thanks,
Kan

> ---
>  include/linux/perf_event.h |  1 +
>  kernel/events/core.c       | 68 +++++++++++++++++++++++---------------
>  2 files changed, 43 insertions(+), 26 deletions(-)
> 
> diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
> index 0367d748fae0..3eb17dc89f5e 100644
> --- a/include/linux/perf_event.h
> +++ b/include/linux/perf_event.h
> @@ -879,6 +879,7 @@ struct perf_event_pmu_context {
>  
>  	unsigned int			nr_events;
>  	unsigned int			nr_cgroups;
> +	unsigned int			nr_freq;
>  
>  	atomic_t			refcount; /* event <-> epc */
>  	struct rcu_head			rcu_head;
> diff --git a/kernel/events/core.c b/kernel/events/core.c
> index 3eb26c2c6e65..53e2ad73102d 100644
> --- a/kernel/events/core.c
> +++ b/kernel/events/core.c
> @@ -2275,8 +2275,10 @@ event_sched_out(struct perf_event *event, struct perf_event_context *ctx)
>  
>  	if (!is_software_event(event))
>  		cpc->active_oncpu--;
> -	if (event->attr.freq && event->attr.sample_freq)
> +	if (event->attr.freq && event->attr.sample_freq) {
>  		ctx->nr_freq--;
> +		epc->nr_freq--;
> +	}
>  	if (event->attr.exclusive || !cpc->active_oncpu)
>  		cpc->exclusive = 0;
>  
> @@ -2531,9 +2533,10 @@ event_sched_in(struct perf_event *event, struct perf_event_context *ctx)
>  
>  	if (!is_software_event(event))
>  		cpc->active_oncpu++;
> -	if (event->attr.freq && event->attr.sample_freq)
> +	if (event->attr.freq && event->attr.sample_freq) {
>  		ctx->nr_freq++;
> -
> +		epc->nr_freq++;
> +	}
>  	if (event->attr.exclusive)
>  		cpc->exclusive = 1;
>  
> @@ -4096,30 +4099,14 @@ static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bo
>  	}
>  }
>  
> -/*
> - * combine freq adjustment with unthrottling to avoid two passes over the
> - * events. At the same time, make sure, having freq events does not change
> - * the rate of unthrottling as that would introduce bias.
> - */
> -static void
> -perf_adjust_freq_unthr_context(struct perf_event_context *ctx, bool unthrottle)
> +static void perf_adjust_freq_unthr_events(struct list_head *event_list)
>  {
>  	struct perf_event *event;
>  	struct hw_perf_event *hwc;
>  	u64 now, period = TICK_NSEC;
>  	s64 delta;
>  
> -	/*
> -	 * only need to iterate over all events iff:
> -	 * - context have events in frequency mode (needs freq adjust)
> -	 * - there are events to unthrottle on this cpu
> -	 */
> -	if (!(ctx->nr_freq || unthrottle))
> -		return;
> -
> -	raw_spin_lock(&ctx->lock);
> -
> -	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
> +	list_for_each_entry(event, event_list, active_list) {
>  		if (event->state != PERF_EVENT_STATE_ACTIVE)
>  			continue;
>  
> @@ -4127,8 +4114,6 @@ perf_adjust_freq_unthr_context(struct perf_event_context *ctx, bool unthrottle)
>  		if (!event_filter_match(event))
>  			continue;
>  
> -		perf_pmu_disable(event->pmu);
> -
>  		hwc = &event->hw;
>  
>  		if (hwc->interrupts == MAX_INTERRUPTS) {
> @@ -4138,7 +4123,7 @@ perf_adjust_freq_unthr_context(struct perf_event_context *ctx, bool unthrottle)
>  		}
>  
>  		if (!event->attr.freq || !event->attr.sample_freq)
> -			goto next;
> +			continue;
>  
>  		/*
>  		 * stop the event and update event->count
> @@ -4160,8 +4145,39 @@ perf_adjust_freq_unthr_context(struct perf_event_context *ctx, bool unthrottle)
>  			perf_adjust_period(event, period, delta, false);
>  
>  		event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0);
> -	next:
> -		perf_pmu_enable(event->pmu);
> +	}
> +}
> +
> +/*
> + * combine freq adjustment with unthrottling to avoid two passes over the
> + * events. At the same time, make sure, having freq events does not change
> + * the rate of unthrottling as that would introduce bias.
> + */
> +static void
> +perf_adjust_freq_unthr_context(struct perf_event_context *ctx, bool unthrottle)
> +{
> +	struct perf_event_pmu_context *pmu_ctx;
> +
> +	/*
> +	 * only need to iterate over all events iff:
> +	 * - context have events in frequency mode (needs freq adjust)
> +	 * - there are events to unthrottle on this cpu
> +	 */
> +	if (!(ctx->nr_freq || unthrottle))
> +		return;
> +
> +	raw_spin_lock(&ctx->lock);
> +
> +	list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry) {
> +		if (!(pmu_ctx->nr_freq || unthrottle))
> +			continue;
> +		if (pmu_ctx->pmu->capabilities & PERF_PMU_CAP_NO_INTERRUPT)
> +			continue;
> +
> +		perf_pmu_disable(pmu_ctx->pmu);
> +		perf_adjust_freq_unthr_events(&pmu_ctx->pinned_active);
> +		perf_adjust_freq_unthr_events(&pmu_ctx->flexible_active);
> +		perf_pmu_enable(pmu_ctx->pmu);
>  	}
>  
>  	raw_spin_unlock(&ctx->lock);
  
Namhyung Kim Nov. 21, 2023, 6:21 p.m. UTC | #4
Hi Mingwei,

On Mon, Nov 20, 2023 at 3:24 PM Mingwei Zhang <mizhang@google.com> wrote:
>
> On Mon, Nov 20, 2023, Ian Rogers wrote:
> > On Mon, Nov 20, 2023 at 2:19 PM Namhyung Kim <namhyung@kernel.org> wrote:
> > >
> > > It was unnecessarily disabling and enabling PMUs for each event.  It
> > > should be done at PMU level.  Add pmu_ctx->nr_freq counter to check it
> > > at each PMU.  As pmu context has separate active lists for pinned group
> > > and flexible group, factor out a new function to do the job.
> > >
> > > Another minor optimization is that it can skip PMUs w/ CAP_NO_INTERRUPT
> > > even if it needs to unthrottle sampling events.
> > >
> > > Signed-off-by: Namhyung Kim <namhyung@kernel.org>
> >
> > Series:
> > Reviewed-by: Ian Rogers <irogers@google.com>
> >
> > Thanks,
> > Ian
> >
>
> Can we have "Cc: stable@vger.kernel.org" for the whole series? This
> series should have a great performance improvement for all VMs in which
> perf sampling events without specifying period.

I was not sure if it's ok to have this performance fix in the stable series.

Thanks,
Namhyung
  
Mingwei Zhang Nov. 21, 2023, 11:01 p.m. UTC | #5
On Tue, Nov 21, 2023, Namhyung Kim wrote:


Hi Namhyung,

> Hi Mingwei,
> 
> On Mon, Nov 20, 2023 at 3:24 PM Mingwei Zhang <mizhang@google.com> wrote:
> >
> > On Mon, Nov 20, 2023, Ian Rogers wrote:
> > > On Mon, Nov 20, 2023 at 2:19 PM Namhyung Kim <namhyung@kernel.org> wrote:
> > > >
> > > > It was unnecessarily disabling and enabling PMUs for each event.  It
> > > > should be done at PMU level.  Add pmu_ctx->nr_freq counter to check it
> > > > at each PMU.  As pmu context has separate active lists for pinned group
> > > > and flexible group, factor out a new function to do the job.
> > > >
> > > > Another minor optimization is that it can skip PMUs w/ CAP_NO_INTERRUPT
> > > > even if it needs to unthrottle sampling events.
> > > >
> > > > Signed-off-by: Namhyung Kim <namhyung@kernel.org>
> > >
> > > Series:
> > > Reviewed-by: Ian Rogers <irogers@google.com>
> > >
> > > Thanks,
> > > Ian
> > >
> >
> > Can we have "Cc: stable@vger.kernel.org" for the whole series? This
> > series should have a great performance improvement for all VMs in which
> > perf sampling events without specifying period.
> 
> I was not sure if it's ok to have this performance fix in the stable series.
> 

A critical performance bug fix is OK to add to the stable tree, per
the requirements mentioned here:

https://www.kernel.org/doc/Documentation/process/stable-kernel-rules.rst

In particular, this patch satisfies the 2nd sub-bullet of the fourth
bullet.

But let me step back. Only this patch needs the stable tag, not the
whole series. It touches 69 lines of code, which satisfies the rule of
staying within 100 lines (bullet 3).

I will give it a try and test it today or tomorrow to make sure we
satisfy bullet 2.

Once it gets in, bullet 1 will be satisfied as well.

Overall, the intention is to improve PMU performance in VMs as early
as we can, since we control neither the schedule of distro kernel
upgrades nor when end customers upgrade their running kernels. So I
presume that even adding this to the stable tree may take years before
the change reaches users. But if we don't do it, it may take far
longer (since the patch does not carry a "Fixes" tag either).

Thanks.
-Mingwei

> Thanks,
> Namhyung
  
Mingwei Zhang Nov. 24, 2023, 12:50 a.m. UTC | #6
On Tue, Nov 21, 2023, Mingwei Zhang wrote:
> On Tue, Nov 21, 2023, Namhyung Kim wrote:
> 
> 
> Hi Namhyung,
> 
> > Hi Mingwei,
> > 
> > On Mon, Nov 20, 2023 at 3:24 PM Mingwei Zhang <mizhang@google.com> wrote:
> > >
> > > On Mon, Nov 20, 2023, Ian Rogers wrote:
> > > > On Mon, Nov 20, 2023 at 2:19 PM Namhyung Kim <namhyung@kernel.org> wrote:
> > > > >
> > > > > It was unnecessarily disabling and enabling PMUs for each event.  It
> > > > > should be done at PMU level.  Add pmu_ctx->nr_freq counter to check it
> > > > > at each PMU.  As pmu context has separate active lists for pinned group
> > > > > and flexible group, factor out a new function to do the job.
> > > > >
> > > > > Another minor optimization is that it can skip PMUs w/ CAP_NO_INTERRUPT
> > > > > even if it needs to unthrottle sampling events.
> > > > >
> > > > > Signed-off-by: Namhyung Kim <namhyung@kernel.org>
> > > >
> > > > Series:
> > > > Reviewed-by: Ian Rogers <irogers@google.com>
> > > >
> > > > Thanks,
> > > > Ian
> > > >
> > >
> > > Can we have "Cc: stable@vger.kernel.org" for the whole series? This
> > > series should have a great performance improvement for all VMs in which
> > > perf sampling events without specifying period.
> > 
> > I was not sure if it's ok to have this performance fix in the stable series.
> > 
> 
> Critical performance bug fix is ok to be added to stable tree, as the
> requirements are mentioned here:
> 
> https://www.kernel.org/doc/Documentation/process/stable-kernel-rules.rst
> 
> In particular, this patch satisfies the 2nd sub-bullet of the forth
> bullet.
> 
> But let me step back. Only this patch is needed with stable tag instead
> of the whole series. This patch impact 69 lines of code. It satisfies
> the rule of within 100 lines (bullet 3).
> 
> I will give a try and test it today or tomorrow and make sure we satisfy
> bullet 2.
> 
> Once it gets in, bullet 1 will be satisfied as well.
> 
> Overall, the intention is to improve PMU performance in VM as early as
> we can since we don't control the schedule of distro kernel upgrade and
> we don't control when end customers upgrade their running kernel. So I
> presume even adding to the stable tree may take years to see the result
> change. But if we don't do it, it may take way longer (since it does not
> contain a "Fixes" tag as well).
> 
> Thanks.
> -Mingwei
> 

I have tested the code. Yes, profiling results in the VM show that it
removes perf_adjust_freq_unthr_context() as the hot spot. However, when
running perf with enough events in frequency mode to trigger
multiplexing, the overall performance overhead still reaches 60% per
CPU (this overhead is invisible to the vCPU).

At the host level, I have been monitoring MSR writes and found that
the repeated writes to 0x38f disappeared, indicating that this patch is
indeed working. On the other hand, I have noticed more frequent
overflows and PMIs.

The more frequent overflows show up as writes to MSR 0x390 and reads
of MSR 0x38e. I infer the more frequent PMIs from the much longer
execution of vmx_vmexit() shown in the flamegraph.
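
For reference, those raw addresses are the architectural global counter
MSRs; the names below follow the kernel's msr-index.h and are listed
only to make the numbers above easier to read:

	/*
	 * Architectural global counter MSRs referenced above
	 * (names as in arch/x86/include/asm/msr-index.h):
	 */
	#define MSR_CORE_PERF_GLOBAL_STATUS	0x0000038e	/* overflow status, read on PMI   */
	#define MSR_CORE_PERF_GLOBAL_CTRL	0x0000038f	/* toggled by PMU disable/enable  */
	#define MSR_CORE_PERF_GLOBAL_OVF_CTRL	0x00000390	/* written to clear overflow bits */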

Because of the above observations, it seems to me that this patch no
longer satisfies the requirements for Cc'ing "stable@vger.kernel.org".
I will double check and follow up on this one.

Thanks.
-Mingwei
  
Mingwei Zhang Dec. 2, 2023, 6:16 a.m. UTC | #7
On Mon, Nov 20, 2023, Namhyung Kim wrote:
> It was unnecessarily disabling and enabling PMUs for each event.  It
> should be done at PMU level.  Add pmu_ctx->nr_freq counter to check it
> at each PMU.  As pmu context has separate active lists for pinned group
> and flexible group, factor out a new function to do the job.
> 
> Another minor optimization is that it can skip PMUs w/ CAP_NO_INTERRUPT
> even if it needs to unthrottle sampling events.
> 
> Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Tested-by: Mingwei Zhang <mizhang@google.com>
> ---
>  include/linux/perf_event.h |  1 +
>  kernel/events/core.c       | 68 +++++++++++++++++++++++---------------
>  2 files changed, 43 insertions(+), 26 deletions(-)
> 
> diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
> index 0367d748fae0..3eb17dc89f5e 100644
> --- a/include/linux/perf_event.h
> +++ b/include/linux/perf_event.h
> @@ -879,6 +879,7 @@ struct perf_event_pmu_context {
>  
>  	unsigned int			nr_events;
>  	unsigned int			nr_cgroups;
> +	unsigned int			nr_freq;
>  
>  	atomic_t			refcount; /* event <-> epc */
>  	struct rcu_head			rcu_head;
> diff --git a/kernel/events/core.c b/kernel/events/core.c
> index 3eb26c2c6e65..53e2ad73102d 100644
> --- a/kernel/events/core.c
> +++ b/kernel/events/core.c
> @@ -2275,8 +2275,10 @@ event_sched_out(struct perf_event *event, struct perf_event_context *ctx)
>  
>  	if (!is_software_event(event))
>  		cpc->active_oncpu--;
> -	if (event->attr.freq && event->attr.sample_freq)
> +	if (event->attr.freq && event->attr.sample_freq) {
>  		ctx->nr_freq--;
> +		epc->nr_freq--;
> +	}
>  	if (event->attr.exclusive || !cpc->active_oncpu)
>  		cpc->exclusive = 0;
>  
> @@ -2531,9 +2533,10 @@ event_sched_in(struct perf_event *event, struct perf_event_context *ctx)
>  
>  	if (!is_software_event(event))
>  		cpc->active_oncpu++;
> -	if (event->attr.freq && event->attr.sample_freq)
> +	if (event->attr.freq && event->attr.sample_freq) {
>  		ctx->nr_freq++;
> -
> +		epc->nr_freq++;
> +	}
>  	if (event->attr.exclusive)
>  		cpc->exclusive = 1;
>  
> @@ -4096,30 +4099,14 @@ static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bo
>  	}
>  }
>  
> -/*
> - * combine freq adjustment with unthrottling to avoid two passes over the
> - * events. At the same time, make sure, having freq events does not change
> - * the rate of unthrottling as that would introduce bias.
> - */
> -static void
> -perf_adjust_freq_unthr_context(struct perf_event_context *ctx, bool unthrottle)
> +static void perf_adjust_freq_unthr_events(struct list_head *event_list)
>  {
>  	struct perf_event *event;
>  	struct hw_perf_event *hwc;
>  	u64 now, period = TICK_NSEC;
>  	s64 delta;
>  
> -	/*
> -	 * only need to iterate over all events iff:
> -	 * - context have events in frequency mode (needs freq adjust)
> -	 * - there are events to unthrottle on this cpu
> -	 */
> -	if (!(ctx->nr_freq || unthrottle))
> -		return;
> -
> -	raw_spin_lock(&ctx->lock);
> -
> -	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
> +	list_for_each_entry(event, event_list, active_list) {
>  		if (event->state != PERF_EVENT_STATE_ACTIVE)
>  			continue;
>  
> @@ -4127,8 +4114,6 @@ perf_adjust_freq_unthr_context(struct perf_event_context *ctx, bool unthrottle)
>  		if (!event_filter_match(event))
>  			continue;
>  
> -		perf_pmu_disable(event->pmu);
> -
>  		hwc = &event->hw;
>  
>  		if (hwc->interrupts == MAX_INTERRUPTS) {
> @@ -4138,7 +4123,7 @@ perf_adjust_freq_unthr_context(struct perf_event_context *ctx, bool unthrottle)
>  		}
>  
>  		if (!event->attr.freq || !event->attr.sample_freq)
> -			goto next;
> +			continue;
>  
>  		/*
>  		 * stop the event and update event->count
> @@ -4160,8 +4145,39 @@ perf_adjust_freq_unthr_context(struct perf_event_context *ctx, bool unthrottle)
>  			perf_adjust_period(event, period, delta, false);
>  
>  		event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0);
> -	next:
> -		perf_pmu_enable(event->pmu);
> +	}
> +}
> +
> +/*
> + * combine freq adjustment with unthrottling to avoid two passes over the
> + * events. At the same time, make sure, having freq events does not change
> + * the rate of unthrottling as that would introduce bias.
> + */
> +static void
> +perf_adjust_freq_unthr_context(struct perf_event_context *ctx, bool unthrottle)
> +{
> +	struct perf_event_pmu_context *pmu_ctx;
> +
> +	/*
> +	 * only need to iterate over all events iff:
> +	 * - context have events in frequency mode (needs freq adjust)
> +	 * - there are events to unthrottle on this cpu
> +	 */
> +	if (!(ctx->nr_freq || unthrottle))
> +		return;
> +
> +	raw_spin_lock(&ctx->lock);
> +
> +	list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry) {
> +		if (!(pmu_ctx->nr_freq || unthrottle))
> +			continue;
> +		if (pmu_ctx->pmu->capabilities & PERF_PMU_CAP_NO_INTERRUPT)
> +			continue;
> +
> +		perf_pmu_disable(pmu_ctx->pmu);
> +		perf_adjust_freq_unthr_events(&pmu_ctx->pinned_active);
> +		perf_adjust_freq_unthr_events(&pmu_ctx->flexible_active);
> +		perf_pmu_enable(pmu_ctx->pmu);
>  	}
>  
>  	raw_spin_unlock(&ctx->lock);
> -- 
> 2.43.0.rc1.413.gea7ed67945-goog
>
  

Patch

diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 0367d748fae0..3eb17dc89f5e 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -879,6 +879,7 @@  struct perf_event_pmu_context {
 
 	unsigned int			nr_events;
 	unsigned int			nr_cgroups;
+	unsigned int			nr_freq;
 
 	atomic_t			refcount; /* event <-> epc */
 	struct rcu_head			rcu_head;
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 3eb26c2c6e65..53e2ad73102d 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -2275,8 +2275,10 @@  event_sched_out(struct perf_event *event, struct perf_event_context *ctx)
 
 	if (!is_software_event(event))
 		cpc->active_oncpu--;
-	if (event->attr.freq && event->attr.sample_freq)
+	if (event->attr.freq && event->attr.sample_freq) {
 		ctx->nr_freq--;
+		epc->nr_freq--;
+	}
 	if (event->attr.exclusive || !cpc->active_oncpu)
 		cpc->exclusive = 0;
 
@@ -2531,9 +2533,10 @@  event_sched_in(struct perf_event *event, struct perf_event_context *ctx)
 
 	if (!is_software_event(event))
 		cpc->active_oncpu++;
-	if (event->attr.freq && event->attr.sample_freq)
+	if (event->attr.freq && event->attr.sample_freq) {
 		ctx->nr_freq++;
-
+		epc->nr_freq++;
+	}
 	if (event->attr.exclusive)
 		cpc->exclusive = 1;
 
@@ -4096,30 +4099,14 @@  static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bo
 	}
 }
 
-/*
- * combine freq adjustment with unthrottling to avoid two passes over the
- * events. At the same time, make sure, having freq events does not change
- * the rate of unthrottling as that would introduce bias.
- */
-static void
-perf_adjust_freq_unthr_context(struct perf_event_context *ctx, bool unthrottle)
+static void perf_adjust_freq_unthr_events(struct list_head *event_list)
 {
 	struct perf_event *event;
 	struct hw_perf_event *hwc;
 	u64 now, period = TICK_NSEC;
 	s64 delta;
 
-	/*
-	 * only need to iterate over all events iff:
-	 * - context have events in frequency mode (needs freq adjust)
-	 * - there are events to unthrottle on this cpu
-	 */
-	if (!(ctx->nr_freq || unthrottle))
-		return;
-
-	raw_spin_lock(&ctx->lock);
-
-	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
+	list_for_each_entry(event, event_list, active_list) {
 		if (event->state != PERF_EVENT_STATE_ACTIVE)
 			continue;
 
@@ -4127,8 +4114,6 @@  perf_adjust_freq_unthr_context(struct perf_event_context *ctx, bool unthrottle)
 		if (!event_filter_match(event))
 			continue;
 
-		perf_pmu_disable(event->pmu);
-
 		hwc = &event->hw;
 
 		if (hwc->interrupts == MAX_INTERRUPTS) {
@@ -4138,7 +4123,7 @@  perf_adjust_freq_unthr_context(struct perf_event_context *ctx, bool unthrottle)
 		}
 
 		if (!event->attr.freq || !event->attr.sample_freq)
-			goto next;
+			continue;
 
 		/*
 		 * stop the event and update event->count
@@ -4160,8 +4145,39 @@  perf_adjust_freq_unthr_context(struct perf_event_context *ctx, bool unthrottle)
 			perf_adjust_period(event, period, delta, false);
 
 		event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0);
-	next:
-		perf_pmu_enable(event->pmu);
+	}
+}
+
+/*
+ * combine freq adjustment with unthrottling to avoid two passes over the
+ * events. At the same time, make sure, having freq events does not change
+ * the rate of unthrottling as that would introduce bias.
+ */
+static void
+perf_adjust_freq_unthr_context(struct perf_event_context *ctx, bool unthrottle)
+{
+	struct perf_event_pmu_context *pmu_ctx;
+
+	/*
+	 * only need to iterate over all events iff:
+	 * - context have events in frequency mode (needs freq adjust)
+	 * - there are events to unthrottle on this cpu
+	 */
+	if (!(ctx->nr_freq || unthrottle))
+		return;
+
+	raw_spin_lock(&ctx->lock);
+
+	list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry) {
+		if (!(pmu_ctx->nr_freq || unthrottle))
+			continue;
+		if (pmu_ctx->pmu->capabilities & PERF_PMU_CAP_NO_INTERRUPT)
+			continue;
+
+		perf_pmu_disable(pmu_ctx->pmu);
+		perf_adjust_freq_unthr_events(&pmu_ctx->pinned_active);
+		perf_adjust_freq_unthr_events(&pmu_ctx->flexible_active);
+		perf_pmu_enable(pmu_ctx->pmu);
 	}
 
 	raw_spin_unlock(&ctx->lock);