[v3] x86/resctrl: mba_MBps: Fall back to total b/w if local b/w unavailable

Message ID 20231026200214.16017-1-tony.luck@intel.com
State New
Headers
Series [v3] x86/resctrl: mba_MBps: Fall back to total b/w if local b/w unavailable |

Commit Message

Luck, Tony Oct. 26, 2023, 8:02 p.m. UTC
  On Intel the various resource director technology (RDT) features are all
orthogonal and independently enumerated. Thus it is possible to have
a system that provides "total" memory bandwidth measurements without
providing "local" bandwidth measurements.

If local bandwidth measurement is not available, do not give up on
providing the "mba_MBps" feedback option completely, make the code fall
back to using total bandwidth.

Signed-off-by: Tony Luck <tony.luck@intel.com>
---
Change since v2:

Babu doesn't like the global variable. So here's a version without it.

Note that my preference is still the v2 version. But as I tell newbies
to Linux "Your job isn't to get YOUR patch upstream. Your job is to get
the problem fixed.".  So taking my own advice I don't really mind
whether v2 or v3 is applied.

 arch/x86/kernel/cpu/resctrl/monitor.c  | 43 ++++++++++++++++++--------
 arch/x86/kernel/cpu/resctrl/rdtgroup.c |  2 +-
 2 files changed, 31 insertions(+), 14 deletions(-)
  

Comments

Moger, Babu Oct. 26, 2023, 10:40 p.m. UTC | #1
Hi Tony,

On 10/26/2023 3:02 PM, Tony Luck wrote:
> On Intel the various resource director technology (RDT) features are all
> orthogonal and independently enumerated. Thus it is possible to have
> a system that  provides "total" memory bandwidth measurements without
> providing "local" bandwidth measurements.
>
> If local bandwidth measurement is not available, do not give up on
> providing the "mba_MBps" feedback option completely, make the code fall
> back to using total bandwidth.
>
> Signed-off-by: Tony Luck <tony.luck@intel.com>
> ---
> Change since v2:
>
> Babu doesn't like the global variable. So here's a version without it.
>
> Note that my preference is still the v2 version. But as I tell newbies
> to Linux "Your job isn't to get YOUR patch upstream. You job is to get
> the problem fixed.".  So taking my own advice I don't really mind
> whether v2 or v3 is applied.
Hmm. I like v3 better. Few minor comments below.
>
>   arch/x86/kernel/cpu/resctrl/monitor.c  | 43 ++++++++++++++++++--------
>   arch/x86/kernel/cpu/resctrl/rdtgroup.c |  2 +-
>   2 files changed, 31 insertions(+), 14 deletions(-)
>
> diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c
> index f136ac046851..29e86310677d 100644
> --- a/arch/x86/kernel/cpu/resctrl/monitor.c
> +++ b/arch/x86/kernel/cpu/resctrl/monitor.c
> @@ -418,6 +418,20 @@ static int __mon_event_count(u32 rmid, struct rmid_read *rr)
>   	return 0;
>   }
>   
> +/*
> + * For legacy compatibility use the local memory bandwidth to drive
> + * the mba_MBps feedback control loop. But on platforms that do not
> + * provide the local event fall back to use the total bandwidth event
> + * instead.
> + */
> +static enum resctrl_event_id pick_mba_mbps_event(void)
> +{
> +	if (is_mbm_local_enabled())
> +		return QOS_L3_MBM_LOCAL_EVENT_ID;
> +
> +	return QOS_L3_MBM_TOTAL_EVENT_ID;
> +}
> +
>   /*
>    * mbm_bw_count() - Update bw count from values previously read by
>    *		    __mon_event_count().
> @@ -431,9 +445,11 @@ static int __mon_event_count(u32 rmid, struct rmid_read *rr)
>    */
>   static void mbm_bw_count(u32 rmid, struct rmid_read *rr)
>   {
> -	struct mbm_state *m = &rr->d->mbm_local[rmid];
> +	enum resctrl_event_id mba_mbps_evt_id = pick_mba_mbps_event();

How about evt_id instead of mba_mbps_evt_id? It seems like quite a mouthful
for a temp variable.

>   	u64 cur_bw, bytes, cur_bytes;
> +	struct mbm_state *m;
>   
> +	m = get_mbm_state(rr->d, rmid, mba_mbps_evt_id);
>   	cur_bytes = rr->val;
>   	bytes = cur_bytes - m->prev_bw_bytes;
>   	m->prev_bw_bytes = cur_bytes;
> @@ -518,6 +534,7 @@ void mon_event_count(void *info)
>    */
>   static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm)
>   {
> +	enum resctrl_event_id mba_mbps_evt_id = pick_mba_mbps_event();
same comment as above.
>   	u32 closid, rmid, cur_msr_val, new_msr_val;
>   	struct mbm_state *pmbm_data, *cmbm_data;
>   	u32 cur_bw, delta_bw, user_bw;
> @@ -526,14 +543,14 @@ static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm)
>   	struct list_head *head;
>   	struct rdtgroup *entry;
>   
> -	if (!is_mbm_local_enabled())
> +	if (!is_mbm_enabled())
>   		return;
>   
>   	r_mba = &rdt_resources_all[RDT_RESOURCE_MBA].r_resctrl;
>   
>   	closid = rgrp->closid;
>   	rmid = rgrp->mon.rmid;
> -	pmbm_data = &dom_mbm->mbm_local[rmid];
> +	pmbm_data = get_mbm_state(dom_mbm, rmid, mba_mbps_evt_id);
>   
>   	dom_mba = get_domain_from_cpu(smp_processor_id(), r_mba);
>   	if (!dom_mba) {
> @@ -553,7 +570,7 @@ static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm)
>   	 */
>   	head = &rgrp->mon.crdtgrp_list;
>   	list_for_each_entry(entry, head, mon.crdtgrp_list) {
> -		cmbm_data = &dom_mbm->mbm_local[entry->mon.rmid];
> +		cmbm_data = get_mbm_state(dom_mbm, entry->mon.rmid, mba_mbps_evt_id);
>   		cur_bw += cmbm_data->prev_bw;
>   		delta_bw += cmbm_data->delta_bw;
>   	}
> @@ -595,7 +612,7 @@ static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm)
>   	 */
>   	pmbm_data->delta_comp = true;
>   	list_for_each_entry(entry, head, mon.crdtgrp_list) {
> -		cmbm_data = &dom_mbm->mbm_local[entry->mon.rmid];
> +		cmbm_data = get_mbm_state(dom_mbm, entry->mon.rmid, mba_mbps_evt_id);
>   		cmbm_data->delta_comp = true;
>   	}
>   }
> @@ -621,15 +638,15 @@ static void mbm_update(struct rdt_resource *r, struct rdt_domain *d, int rmid)
>   		rr.evtid = QOS_L3_MBM_LOCAL_EVENT_ID;
>   		rr.val = 0;
>   		__mon_event_count(rmid, &rr);
> -
> -		/*
> -		 * Call the MBA software controller only for the
> -		 * control groups and when user has enabled
> -		 * the software controller explicitly.
> -		 */
> -		if (is_mba_sc(NULL))
> -			mbm_bw_count(rmid, &rr);
>   	}
> +
> +	/*
> +	 * Call the MBA software controller only for the
> +	 * control groups and when user has enabled
> +	 * the software controller explicitly.
> +	 */
> +	if (is_mba_sc(NULL))
> +		mbm_bw_count(rmid, &rr);
>   }
>   
>   /*
> diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
> index 69a1de92384a..0c4f8a1b8df0 100644
> --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c
> +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
> @@ -2294,7 +2294,7 @@ static bool supports_mba_mbps(void)
>   {
>   	struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_MBA].r_resctrl;
>   
> -	return (is_mbm_local_enabled() &&
> +	return (is_mbm_enabled() &&
>   		r->alloc_capable && is_mba_linear());
>   }
>   

Otherwise looks good to me.

Thanks

Babu
  
Luck, Tony Oct. 26, 2023, 10:59 p.m. UTC | #2
>>   static void mbm_bw_count(u32 rmid, struct rmid_read *rr)
>>   {
>> -	struct mbm_state *m = &rr->d->mbm_local[rmid];
>> +	enum resctrl_event_id mba_mbps_evt_id = pick_mba_mbps_event();
>
> How about evt_id instead of mba_mbps_evt_id? It seems pretty mouthful 
> for temp variable.

Sure. The long name made sense as a global. But as you say, a temp
variable doesn't need to be so verbose. Context from being initialized
by the pick_mba_mbps_event() function also helps reader to see what
it is used for.

I'll make that change if others vote for the v3 approach over v2.

-Tony
  
Reinette Chatre Nov. 3, 2023, 9:43 p.m. UTC | #3
Hi Tony,

On 10/26/2023 1:02 PM, Tony Luck wrote:
> On Intel the various resource director technology (RDT) features are all
> orthogonal and independently enumerated. Thus it is possible to have
> a system that  provides "total" memory bandwidth measurements without
> providing "local" bandwidth measurements.

This motivation is written in support of Intel systems but from what I
can tell the changes impact Intel as well as AMD.

> 
> If local bandwidth measurement is not available, do not give up on
> providing the "mba_MBps" feedback option completely, make the code fall
> back to using total bandwidth.

It is interesting to me that the "fall back" is essentially a drop-in
replacement without any adjustments to the data/algorithm.

Can these measurements be considered equivalent? Could a user now perhaps
want to experiment by disabling local bandwidth measurement to explore if
system behaves differently when using total memory bandwidth? What
would have a user choose one over the other (apart from when user
is forced by system ability)?

> 
> Signed-off-by: Tony Luck <tony.luck@intel.com>
> ---
> Change since v2:
> 
> Babu doesn't like the global variable. So here's a version without it.
> 
> Note that my preference is still the v2 version. But as I tell newbies
> to Linux "Your job isn't to get YOUR patch upstream. You job is to get
> the problem fixed.".  So taking my own advice I don't really mind
> whether v2 or v3 is applied.
> 
>  arch/x86/kernel/cpu/resctrl/monitor.c  | 43 ++++++++++++++++++--------
>  arch/x86/kernel/cpu/resctrl/rdtgroup.c |  2 +-
>  2 files changed, 31 insertions(+), 14 deletions(-)
> 
> diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c
> index f136ac046851..29e86310677d 100644
> --- a/arch/x86/kernel/cpu/resctrl/monitor.c
> +++ b/arch/x86/kernel/cpu/resctrl/monitor.c
> @@ -418,6 +418,20 @@ static int __mon_event_count(u32 rmid, struct rmid_read *rr)
>  	return 0;
>  }
>  
> +/*
> + * For legacy compatibility use the local memory bandwidth to drive
> + * the mba_MBps feedback control loop. But on platforms that do not
> + * provide the local event fall back to use the total bandwidth event
> + * instead.
> + */
> +static enum resctrl_event_id pick_mba_mbps_event(void)
> +{
> +	if (is_mbm_local_enabled())
> +		return QOS_L3_MBM_LOCAL_EVENT_ID;
> +
> +	return QOS_L3_MBM_TOTAL_EVENT_ID;
> +}

Can there be a WARN here to catch the unlikely event that
!is_mbm_total_enabled()?
This may mean the caller (in update_mba_bw()) needs to move
to code protected by is_mbm_enabled().

One option to consider is to have a single "get_mba_mbps_state()"
call (similar to V1) that determines the eventid as above and
then calls get_mbm_state() to return a pointer to mbm_state in one
call. Starting to seem like nitpicking but I'd thought I'd mention it
since it seemed a way to have V1 solution with request to use
get_mbm_state() addressed.

> +
>  /*
>   * mbm_bw_count() - Update bw count from values previously read by
>   *		    __mon_event_count().
> @@ -431,9 +445,11 @@ static int __mon_event_count(u32 rmid, struct rmid_read *rr)
>   */
>  static void mbm_bw_count(u32 rmid, struct rmid_read *rr)
>  {
> -	struct mbm_state *m = &rr->d->mbm_local[rmid];
> +	enum resctrl_event_id mba_mbps_evt_id = pick_mba_mbps_event();
>  	u64 cur_bw, bytes, cur_bytes;
> +	struct mbm_state *m;
>  
> +	m = get_mbm_state(rr->d, rmid, mba_mbps_evt_id);
>  	cur_bytes = rr->val;
>  	bytes = cur_bytes - m->prev_bw_bytes;
>  	m->prev_bw_bytes = cur_bytes;

It should not be necessary to pick the event id again. It is available
within the struct rmid_read parameter. 

Reinette
  
Reinette Chatre Nov. 3, 2023, 9:50 p.m. UTC | #4
On 11/3/2023 2:43 PM, Reinette Chatre wrote:
> On 10/26/2023 1:02 PM, Tony Luck wrote:
>> On Intel the various resource director technology (RDT) features are all
>> orthogonal and independently enumerated. Thus it is possible to have
>> a system that  provides "total" memory bandwidth measurements without
>> providing "local" bandwidth measurements.
> 
> This motivation is written in support of Intel systems but from what I
> can tell the changes impact Intel as well as AMD.

No wait. Sorry. AMD does not enable delay_linear.

Reinette
  
Luck, Tony Nov. 7, 2023, 9:15 p.m. UTC | #5
On Fri, Nov 03, 2023 at 02:43:15PM -0700, Reinette Chatre wrote:
> Hi Tony,
> 
> On 10/26/2023 1:02 PM, Tony Luck wrote:
> > On Intel the various resource director technology (RDT) features are all
> > orthogonal and independently enumerated. Thus it is possible to have
> > a system that  provides "total" memory bandwidth measurements without
> > providing "local" bandwidth measurements.
> 
> This motivation is written in support of Intel systems but from what I
> can tell the changes impact Intel as well as AMD.

If AMD were to build a system that did this, same fixes would be needed.

> > 
> > If local bandwidth measurement is not available, do not give up on
> > providing the "mba_MBps" feedback option completely, make the code fall
> > back to using total bandwidth.
> 
> It is interesting to me that the "fall back" is essentially a drop-in
> replacement without any adjustments to the data/algorithm.

The algorithm is, by necessity, very simple. Essentially "if measured
bandwidth is above desired target, apply one step extra throttling.
Reverse when bandwidth is below desired level." I'm not sure what tweaks
are possible.

> Can these measurements be considered equivalent? Could a user now perhaps
> want to experiment by disabling local bandwidth measurement to explore if
> system behaves differently when using total memory bandwidth? What
> would have a user choose one over the other (apart from when user
> is forced by system ability)?

This may be interesting. I dug around in the e-mail archives to see if
there was any discussion on why "local" was picked as the feedback
measurement rather than "total". But I couldn't find anything.

Thinking about it now, "total" feels like a better choice. Why would
you not care about off-package memory bandwidth? In pathological cases
all the memory traffic might be going off package, but the existing
mba_MBps algorithm would *reduce* the amount of throttling, eventually
to zero.

Maybe an additional mount option "mba_MBps_total" so the user can pick
total instead of local?

> > 
> > Signed-off-by: Tony Luck <tony.luck@intel.com>
> > ---
> > Change since v2:
> > 
> > Babu doesn't like the global variable. So here's a version without it.
> > 
> > Note that my preference is still the v2 version. But as I tell newbies
> > to Linux "Your job isn't to get YOUR patch upstream. You job is to get
> > the problem fixed.".  So taking my own advice I don't really mind
> > whether v2 or v3 is applied.
> > 
> >  arch/x86/kernel/cpu/resctrl/monitor.c  | 43 ++++++++++++++++++--------
> >  arch/x86/kernel/cpu/resctrl/rdtgroup.c |  2 +-
> >  2 files changed, 31 insertions(+), 14 deletions(-)
> > 
> > diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c
> > index f136ac046851..29e86310677d 100644
> > --- a/arch/x86/kernel/cpu/resctrl/monitor.c
> > +++ b/arch/x86/kernel/cpu/resctrl/monitor.c
> > @@ -418,6 +418,20 @@ static int __mon_event_count(u32 rmid, struct rmid_read *rr)
> >  	return 0;
> >  }
> >  
> > +/*
> > + * For legacy compatibility use the local memory bandwidth to drive
> > + * the mba_MBps feedback control loop. But on platforms that do not
> > + * provide the local event fall back to use the total bandwidth event
> > + * instead.
> > + */
> > +static enum resctrl_event_id pick_mba_mbps_event(void)
> > +{
> > +	if (is_mbm_local_enabled())
> > +		return QOS_L3_MBM_LOCAL_EVENT_ID;
> > +
> > +	return QOS_L3_MBM_TOTAL_EVENT_ID;
> > +}
> 
> Can there be a WARN here to catch the unlikely event that
> !is_mbm_total_enabled()?
> This may mean the caller (in update_mba_bw()) needs to move
> to code protected by is_mbm_enabled().

All this code is under the protection of the check at mount time
done by supports_mba_mbps()

static bool supports_mba_mbps(void)
{
        struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_MBA].r_resctrl;

        return (is_mbm_enabled() &&
                r->alloc_capable && is_mba_linear());
}

Adding even more run-time checks seems overkill.

> One option to consider is to have a single "get_mba_mbps_state()"
> call (similar to V1) that determines the eventid as above and
> then calls get_mbm_state() to return a pointer to mbm_state in one
> call. Starting to seem like nitpicking but I'd thought I'd mention it
> since it seemed a way to have V1 solution with request to use
> get_mbm_state() addressed.

It doesn't sound any better than the V3 approach.

> > +
> >  /*
> >   * mbm_bw_count() - Update bw count from values previously read by
> >   *		    __mon_event_count().
> > @@ -431,9 +445,11 @@ static int __mon_event_count(u32 rmid, struct rmid_read *rr)
> >   */
> >  static void mbm_bw_count(u32 rmid, struct rmid_read *rr)
> >  {
> > -	struct mbm_state *m = &rr->d->mbm_local[rmid];
> > +	enum resctrl_event_id mba_mbps_evt_id = pick_mba_mbps_event();
> >  	u64 cur_bw, bytes, cur_bytes;
> > +	struct mbm_state *m;
> >  
> > +	m = get_mbm_state(rr->d, rmid, mba_mbps_evt_id);
> >  	cur_bytes = rr->val;
> >  	bytes = cur_bytes - m->prev_bw_bytes;
> >  	m->prev_bw_bytes = cur_bytes;
> 
> It should not be necessary to pick the event id again. It is available
> within the struct rmid_read parameter. 

So it is. I can drop the extra pick_mba_mbps_event() call here.

-Tony
  
Reinette Chatre Nov. 8, 2023, 9:49 p.m. UTC | #6
Hi Tony,

On 11/7/2023 1:15 PM, Tony Luck wrote:
> On Fri, Nov 03, 2023 at 02:43:15PM -0700, Reinette Chatre wrote:
>> On 10/26/2023 1:02 PM, Tony Luck wrote:
>>> If local bandwidth measurement is not available, do not give up on
>>> providing the "mba_MBps" feedback option completely, make the code fall
>>> back to using total bandwidth.
>>
>> It is interesting to me that the "fall back" is essentially a drop-in
>> replacement without any adjustments to the data/algorithm.
> 
> The algorithm is, by necessity, very simple. Essentially "if measured
> bandwidth is above desired target, apply one step extra throttling.
> Reverse when bandwidth is below desired level." I'm not sure what tweaks
> are possible.
> 
>> Can these measurements be considered equivalent? Could a user now perhaps
>> want to experiment by disabling local bandwidth measurement to explore if
>> system behaves differently when using total memory bandwidth? What
>> would have a user choose one over the other (apart from when user
>> is forced by system ability)?
> 
> This may be interesting. I dug around in the e-mail archives to see if
> there was any discussion on why "local" was picked as the feedback
> measurement rather that "total". But I couldn't find anything.
> 
> Thinking about it now, "total" feels like a better choice. Why would
> you not care about off-package memory bandwidth? In pathological cases
> all the memory traffic might be going off package, but the existing
> mba_MBps algorithm would *reduce* the amount of throttling, eventually
> to zero.
> 
> Maybe additional an mount option "mba_MBps_total" so the user can pick
> total instead of local?

Is this something for which a remount is required? Can it not perhaps be
changed at runtime?

> 
>>>
>>> Signed-off-by: Tony Luck <tony.luck@intel.com>
>>> ---
>>> Change since v2:
>>>
>>> Babu doesn't like the global variable. So here's a version without it.
>>>
>>> Note that my preference is still the v2 version. But as I tell newbies
>>> to Linux "Your job isn't to get YOUR patch upstream. You job is to get
>>> the problem fixed.".  So taking my own advice I don't really mind
>>> whether v2 or v3 is applied.
>>>
>>>  arch/x86/kernel/cpu/resctrl/monitor.c  | 43 ++++++++++++++++++--------
>>>  arch/x86/kernel/cpu/resctrl/rdtgroup.c |  2 +-
>>>  2 files changed, 31 insertions(+), 14 deletions(-)
>>>
>>> diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c
>>> index f136ac046851..29e86310677d 100644
>>> --- a/arch/x86/kernel/cpu/resctrl/monitor.c
>>> +++ b/arch/x86/kernel/cpu/resctrl/monitor.c
>>> @@ -418,6 +418,20 @@ static int __mon_event_count(u32 rmid, struct rmid_read *rr)
>>>  	return 0;
>>>  }
>>>  
>>> +/*
>>> + * For legacy compatibility use the local memory bandwidth to drive
>>> + * the mba_MBps feedback control loop. But on platforms that do not
>>> + * provide the local event fall back to use the total bandwidth event
>>> + * instead.
>>> + */
>>> +static enum resctrl_event_id pick_mba_mbps_event(void)
>>> +{
>>> +	if (is_mbm_local_enabled())
>>> +		return QOS_L3_MBM_LOCAL_EVENT_ID;
>>> +
>>> +	return QOS_L3_MBM_TOTAL_EVENT_ID;
>>> +}
>>
>> Can there be a WARN here to catch the unlikely event that
>> !is_mbm_total_enabled()?
>> This may mean the caller (in update_mba_bw()) needs to move
>> to code protected by is_mbm_enabled().
> 
> All this code is under the protection of the check at mount time
> done by supports_mba_mbps()
> 
> static bool supports_mba_mbps(void)
> {
>         struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_MBA].r_resctrl;
> 
>         return (is_mbm_enabled() &&
>                 r->alloc_capable && is_mba_linear());
> }
> 
> Adding even more run-time checks seems overkill.

Refactoring the code into a function but then implicitly assume and
require that the function be called in specific flows on systems with
particular environment does not sound appealing to me.

Another alternative, since only one caller of this function remains,
is to remove this function and instead open code it within update_mba_bw(),
replacing the is_mbm_enabled() call.

Reinette
  
Luck, Tony Nov. 9, 2023, 9:27 p.m. UTC | #7
>> Maybe additional an mount option "mba_MBps_total" so the user can pick
>> total instead of local?
>
> Is this something for which a remount is required? Can it not perhaps be
> changed at runtime?

In theory, yes. But I've been playing with a patch that adds a writable info/
file to allow runtime switch:

# ls -l /sys/fs/resctrl/info/MB/mba_MBps_control
-rw-r--r--. 1 root root 0 Nov  9 10:57 /sys/fs/resctrl/info/MB/mba_MBps_control
]# cat /sys/fs/resctrl/info/MB/mba_MBps_control
total

and found that it's a bit tricky to switch out the MBM event from the
state machine driving the feedback loop. I think the problem is in the
code that tries to stop the control loop from switching between two
throttling levels every second:

        if (cur_msr_val > r_mba->membw.min_bw && user_bw < cur_bw) {
                new_msr_val = cur_msr_val - r_mba->membw.bw_gran;
        } else if (cur_msr_val < MAX_MBA_BW &&
                   (user_bw > (cur_bw + delta_bw))) {
                new_msr_val = cur_msr_val + r_mba->membw.bw_gran;
        } else {
                return;
        }

The code drops down one percentage step if current bandwidth is above
the desired target. But stepping back up checks to see if "cur_bw + delta_bw"
is below the target.

Where does "delta_bw" come from? Code uses the Boolean flag "pmbm_data->delta_comp"
to request the once-per-second polling compute the change in bandwidth on the
next poll after adjusting throttling MSRs.

All of these values are in the "struct mbm_state" which is a per-event-id structure.

Picking an event at boot time works fine. Likely also fine at mount time. But
switching at run-time seems to frequently end up with a very large value in
"delta_bw" (as it compares current & previous for this event ... and it looks
like things changed from zero).  Net effect is that throttling is increased when
processes go over their target for the resctrl group, but throttling is never decreased.

The whole heuristic seems a bit fragile. It works well for test processes that have
constant memory bandwidth. But I could see it failing in scenarios like this:

1) Process is over MB limit
2) Linux increases throttling, and sets flag to compute delta_bw on next poll
3) Process blocks on some event and uses no bandwidth in next one second
4) Next poll. Linux computes delta_bw as abs(cur_bw - m->prev_bw). cur_bw is zero,
    so delta_bw is set to full value of bandwidth that process used when over budget
5) Process resumes running
6) Linux sees process using less than target, but cur_bw + delta_bw is above target,
   so Linux doesn't adjust throttling

I think the goal was to avoid relaxing throttling and letting a resctrl group go back over
target bandwidth. But that doesn't work either for groups with highly variable bandwidth
requirements.

1) Group is over budget
2) Linux increases throttling, and sets flag to compute delta_bw on next poll
3) Group forks additional processes. New bandwidth from those offsets the reduction due to throttling
4) Next poll. Linux sees bandwidth is unchanged. Sets delta_bw = 0.
5) Next poll. Groups aggregate bandwidth is fractionally below target. Because delta_bw=0, Linux
   reduces throttling.
6) Group goes over target.

-Tony
  
Reinette Chatre Nov. 15, 2023, 4:09 p.m. UTC | #8
Hi Tony,

On 11/9/2023 1:27 PM, Luck, Tony wrote:
>>> Maybe additional an mount option "mba_MBps_total" so the user can pick
>>> total instead of local?
>>
>> Is this something for which a remount is required? Can it not perhaps be
>> changed at runtime?
> 
> In theory, yes. But I've been playing with a patch that adds a writable info/
> file to allow runtime switch:
> 
> # ls -l /sys/fs/resctrl/info/MB/mba_MBps_control
> -rw-r--r--. 1 root root 0 Nov  9 10:57 /sys/fs/resctrl/info/MB/mba_MBps_control
> ]# cat /sys/fs/resctrl/info/MB/mba_MBps_control
> total
> 
> and found that it's a bit tricky to switch out the MBM event from the
> state machine driving the feedback loop. I think the problem is in the
> code that tries to stop the control loop from switching between two
> throttling levels every second:
> 
>         if (cur_msr_val > r_mba->membw.min_bw && user_bw < cur_bw) {
>                 new_msr_val = cur_msr_val - r_mba->membw.bw_gran;
>         } else if (cur_msr_val < MAX_MBA_BW &&
>                    (user_bw > (cur_bw + delta_bw))) {
>                 new_msr_val = cur_msr_val + r_mba->membw.bw_gran;
>         } else {
>                 return;
>         }
> 
> The code drops down one percentage step if current bandwidth is above
> the desired target. But stepping back up checks to see if "cur_bw + delta_bw"
> is below the target.
> 
> Where does "delta_bw" come from? Code uses the Boolean flag "pmbm_data->delta_comp"
> to request the once-per-second polling compute the change in bandwidth on the
> next poll after adjusting throttling MSRs.
> 
> All of these values are in the "struct mbm_state" which is a per-event-id structure.
> 
> Picking an event at boot time works fine. Likely also fine at mount time. But
> switching at run-time seems to frequently end up with a very large value in
> "delta_bw" (as it compares current & previous for this event ... and it looks
> like things changed from zero).  Net effect is that throttling is increased when
> processes go over their target for the resctrl group, but throttling is never decreased.

This is not clear to me. Would the state not also start from zero at boot and mount
time? From what I understand the state is also reset to zero on monitor group creation.

> 
> The whole heuristic seems a bit fragile. It works well for test processes that have
> constant memory bandwidth. But I could see it failing in scenarios like this:
> 
> 1) Process is over MB limit
> 2) Linux increases throttling, and sets flag to compute delta_bw on next poll
> 3) Process blocks on some event and uses no bandwidth in next one second
> 4) Next poll. Linux computes delta_bw as abs(cur_bw - m->prev_bw). cur_bw is zero,
>     so delta_bw is set to full value of bandwidth that process used when over budget
> 5) Process resumes running
> 6) Linux sees process using less than target, but cur_bw + delta_bw is above target,
>    so Linux doesn't adjust throttling
> 
> I think the goal was to avoid relaxing throttling and letting a resctrl group go back over
> target bandwidth. But that doesn't work either for groups with highly variable bandwidth
> requirements.
> 
> 1) Group is over budget
> 2) Linux increases throttling, and sets flag to compute delta_bw on next poll
> 3) Group forks additional processes. New bandwidth from those offsets the reduction due to throttling
> 4) Next poll. Linux sees bandwidth is unchanged. Sets delta_bw = 0.
> 5) Next poll. Groups aggregate bandwidth is fractionally below target. Because delta_bw=0, Linux
>    reduces throttling.
> 6) Group goes over target.
> 

I'll defer to you for the history about this algorithm. I am not familiar with how
broadly this feature is used but I have not heard about issues with it. It does
seem as though there is some opportunity for investigation here.

Reinette
  
Luck, Tony Nov. 15, 2023, 9:54 p.m. UTC | #9
On Wed, Nov 15, 2023 at 08:09:13AM -0800, Reinette Chatre wrote:
> Hi Tony,
> 
> On 11/9/2023 1:27 PM, Luck, Tony wrote:
> >>> Maybe additional an mount option "mba_MBps_total" so the user can pick
> >>> total instead of local?
> >>
> >> Is this something for which a remount is required? Can it not perhaps be
> >> changed at runtime?
> > 
> > In theory, yes. But I've been playing with a patch that adds a writable info/
> > file to allow runtime switch:
> > 
> > # ls -l /sys/fs/resctrl/info/MB/mba_MBps_control
> > -rw-r--r--. 1 root root 0 Nov  9 10:57 /sys/fs/resctrl/info/MB/mba_MBps_control
> > ]# cat /sys/fs/resctrl/info/MB/mba_MBps_control
> > total
> > 
> > and found that it's a bit tricky to switch out the MBM event from the
> > state machine driving the feedback loop. I think the problem is in the
> > code that tries to stop the control loop from switching between two
> > throttling levels every second:
> > 
> >         if (cur_msr_val > r_mba->membw.min_bw && user_bw < cur_bw) {
> >                 new_msr_val = cur_msr_val - r_mba->membw.bw_gran;
> >         } else if (cur_msr_val < MAX_MBA_BW &&
> >                    (user_bw > (cur_bw + delta_bw))) {
> >                 new_msr_val = cur_msr_val + r_mba->membw.bw_gran;
> >         } else {
> >                 return;
> >         }
> > 
> > The code drops down one percentage step if current bandwidth is above
> > the desired target. But stepping back up checks to see if "cur_bw + delta_bw"
> > is below the target.
> > 
> > Where does "delta_bw" come from? Code uses the Boolean flag "pmbm_data->delta_comp"
> > to request the once-per-second polling compute the change in bandwidth on the
> > next poll after adjusting throttling MSRs.
> > 
> > All of these values are in the "struct mbm_state" which is a per-event-id structure.
> > 
> > Picking an event at boot time works fine. Likely also fine at mount time. But
> > switching at run-time seems to frequently end up with a very large value in
> > "delta_bw" (as it compares current & previous for this event ... and it looks
> > like things changed from zero).  Net effect is that throttling is increased when
> > processes go over their target for the resctrl group, but throttling is never decreased.
> 
> This is not clear to me. Would the state not also start from zero at boot and mount
> time? From what I understand the state is also reset to zero on monitor group creation.

Yes. All of boot, mount, mkdir start a group in a well defined state
with no throttling applied (schemata shows bandwidth limit as 2^32
MBytes/sec). If the user sets some realistic limit, and the group
MBM measurement exceeds that limit, then the MBA MSR for the group
is dropped from 100% to 90% and the delta_comp flag set to record
the delta_bw on the next 1-second poll.

The value of delta_bw is only used when looking to reduce throttling.
To be in that state this group must have been in a state where
throttling was increased ... which would result in delta_bw being
set up.

Now look at what happens when switching from local to total for the
first time. delta_bw is zero in the structures recording total bandwidth
information. But more importantly so is prev_bw. If the code above
changes throttling value and requests an updated calculation of delta_bw,
that will be done using a value of prev_bw==0. I.e. delta_bw will be
set to the current bandwidth. That high value will likely block attempts
to reduce throttling.

Maybe when switching MBM source events the prev_bw value should be
copied from old source structures to new source structures as a rough
guide to avoid crazy actions. But that could also be wrong when
switching from total to local for a group that has poor NUMA
localization and total bandwidth is far higher than local.

> > The whole heuristic seems a bit fragile. It works well for test processes that have
> > constant memory bandwidth. But I could see it failing in scenarios like this:
> > 
> > 1) Process is over MB limit
> > 2) Linux increases throttling, and sets flag to compute delta_bw on next poll
> > 3) Process blocks on some event and uses no bandwidth in next one second
> > 4) Next poll. Linux computes delta_bw as abs(cur_bw - m->prev_bw). cur_bw is zero,
> >     so delta_bw is set to full value of bandwidth that process used when over budget
> > 5) Process resumes running
> > 6) Linux sees process using less than target, but cur_bw + delta_bw is above target,
> >    so Linux doesn't adjust throttling
> > 
> > I think the goal was to avoid relaxing throttling and letting a resctrl group go back over
> > target bandwidth. But that doesn't work either for groups with highly variable bandwidth
> > requirements.
> > 
> > 1) Group is over budget
> > 2) Linux increases throttling, and sets flag to compute delta_bw on next poll
> > 3) Group forks additional processes. New bandwidth from those offsets the reduction due to throttling
> > 4) Next poll. Linux sees bandwidth is unchanged. Sets delta_bw = 0.
> > 5) Next poll. Groups aggregate bandwidth is fractionally below target. Because delta_bw=0, Linux
> >    reduces throttling.
> > 6) Group goes over target.
> > 
> 
> I'll defer to you for the history about this algorithm. I am not familiar with how
> broadly this feature is used but I have not heard about issues with it. It does
> seem as though there is some opportunity for investigation here.

I'm sure I could construct an artificial test case to force this scenario.
But maybe:
1) It never happens in real life
2) It happens, but nobody noticed
3) People figured out the workaround (set schemata to a really big
   MBytes/sec value for a second, and then back to desired value).
4) Few people use this option

I dug again into the lore.kernel.org archives. Thomas complained
that it wasn't "calibration" (as Vikas had described it in V1) but
seems to have otherwise been OK with it as a heuristic.

https://lore.kernel.org/all/alpine.DEB.2.21.1804041037090.2056@nanos.tec.linutronix.de/


I coded up and tested the below patch as a possible replacement heuristic.
But I also wonder whether just letting the feedback loop flip throttling
up and down between throttling values above/below the target bandwidth
would really be so bad. It's just one MSR write that can be done from
the current CPU and would result in average bandwidth closer to the
user requested target.

-Tony


From 2d6d08c8ebeb62830b6d8dea36f5a8d5a53b75eb Mon Sep 17 00:00:00 2001
From: Tony Luck <tony.luck@intel.com>
Date: Mon, 13 Nov 2023 13:29:29 -0800
Subject: [PATCH] x86/resctrl: Implement new MBA_mbps throttling heuristic

The MBA_mbps feedback loop increases throttling when a group is using
more bandwidth than the target set by the user in the schemata file, and
decreases throttling when below target.

To avoid possibly stepping throttling up and down on every poll a
flag "delta_comp" is set whenever throttling is changed to indicate
that the actual change in bandwidth should be recorded on the next
poll in "delta_bw". Throttling is only reduced if the current bandwidth
plus delta_bw is below the user target.

This algorithm works well if the workload has steady bandwidth needs.
But it can go badly wrong if the workload moves to a different phase
just as the throttling level changed. E.g. if the workload becomes
essentially idle right as throttling level is increased, the value
calculated for delta_bw will be more or less the old bandwidth level.
If the workload then resumes, Linux may never reduce throttling because
current bandwidth plus delta_bw is above the target set by the user.

Implement a simpler heuristic by assuming that in the worst case the
currently measured bandwidth is being controlled by the current level of
throttling. Compute how much it may increase if throttling is relaxed to
the next higher level. If that is still below the user target, then it
is ok to reduce the amount of throttling.

Signed-off-by: Tony Luck <tony.luck@intel.com>
---
 arch/x86/kernel/cpu/resctrl/internal.h |  4 ---
 arch/x86/kernel/cpu/resctrl/monitor.c  | 42 ++++++--------------------
 2 files changed, 10 insertions(+), 36 deletions(-)

diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h
index a4f1aa15f0a2..71bbd2245cc7 100644
--- a/arch/x86/kernel/cpu/resctrl/internal.h
+++ b/arch/x86/kernel/cpu/resctrl/internal.h
@@ -296,14 +296,10 @@ struct rftype {
  * struct mbm_state - status for each MBM counter in each domain
  * @prev_bw_bytes: Previous bytes value read for bandwidth calculation
  * @prev_bw:	The most recent bandwidth in MBps
- * @delta_bw:	Difference between the current and previous bandwidth
- * @delta_comp:	Indicates whether to compute the delta_bw
  */
 struct mbm_state {
 	u64	prev_bw_bytes;
 	u32	prev_bw;
-	u32	delta_bw;
-	bool	delta_comp;
 };
 
 /**
diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c
index f136ac046851..1961823b555b 100644
--- a/arch/x86/kernel/cpu/resctrl/monitor.c
+++ b/arch/x86/kernel/cpu/resctrl/monitor.c
@@ -440,9 +440,6 @@ static void mbm_bw_count(u32 rmid, struct rmid_read *rr)
 
 	cur_bw = bytes / SZ_1M;
 
-	if (m->delta_comp)
-		m->delta_bw = abs(cur_bw - m->prev_bw);
-	m->delta_comp = false;
 	m->prev_bw = cur_bw;
 }
 
@@ -520,7 +517,7 @@ static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm)
 {
 	u32 closid, rmid, cur_msr_val, new_msr_val;
 	struct mbm_state *pmbm_data, *cmbm_data;
-	u32 cur_bw, delta_bw, user_bw;
+	u32 cur_bw, user_bw;
 	struct rdt_resource *r_mba;
 	struct rdt_domain *dom_mba;
 	struct list_head *head;
@@ -543,7 +540,6 @@ static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm)
 
 	cur_bw = pmbm_data->prev_bw;
 	user_bw = dom_mba->mbps_val[closid];
-	delta_bw = pmbm_data->delta_bw;
 
 	/* MBA resource doesn't support CDP */
 	cur_msr_val = resctrl_arch_get_config(r_mba, dom_mba, closid, CDP_NONE);
@@ -555,49 +551,31 @@ static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm)
 	list_for_each_entry(entry, head, mon.crdtgrp_list) {
 		cmbm_data = &dom_mbm->mbm_local[entry->mon.rmid];
 		cur_bw += cmbm_data->prev_bw;
-		delta_bw += cmbm_data->delta_bw;
 	}
 
 	/*
 	 * Scale up/down the bandwidth linearly for the ctrl group.  The
 	 * bandwidth step is the bandwidth granularity specified by the
 	 * hardware.
-	 *
-	 * The delta_bw is used when increasing the bandwidth so that we
-	 * dont alternately increase and decrease the control values
-	 * continuously.
-	 *
-	 * For ex: consider cur_bw = 90MBps, user_bw = 100MBps and if
-	 * bandwidth step is 20MBps(> user_bw - cur_bw), we would keep
-	 * switching between 90 and 110 continuously if we only check
-	 * cur_bw < user_bw.
+	 * Always increase throttling if current bandwidth is above the
+	 * target set by user.
+	 * But avoid thrashing up and down on every poll by checking
+	 * whether a decrease in throttling is likely to push the group
+	 * back over target. E.g. if currently throttling to 30% of bandwidth
+	 * on a system with 10% granularity steps, check whether moving to
+	 * 40% would go past the limit by multiplying current bandwidth by
+	 * "(30 + 10) / 30".
 	 */
 	if (cur_msr_val > r_mba->membw.min_bw && user_bw < cur_bw) {
 		new_msr_val = cur_msr_val - r_mba->membw.bw_gran;
 	} else if (cur_msr_val < MAX_MBA_BW &&
-		   (user_bw > (cur_bw + delta_bw))) {
+		   (user_bw > (cur_bw * (cur_msr_val + r_mba->membw.min_bw) / cur_msr_val))) {
 		new_msr_val = cur_msr_val + r_mba->membw.bw_gran;
 	} else {
 		return;
 	}
 
 	resctrl_arch_update_one(r_mba, dom_mba, closid, CDP_NONE, new_msr_val);
-
-	/*
-	 * Delta values are updated dynamically package wise for each
-	 * rdtgrp every time the throttle MSR changes value.
-	 *
-	 * This is because (1)the increase in bandwidth is not perfectly
-	 * linear and only "approximately" linear even when the hardware
-	 * says it is linear.(2)Also since MBA is a core specific
-	 * mechanism, the delta values vary based on number of cores used
-	 * by the rdtgrp.
-	 */
-	pmbm_data->delta_comp = true;
-	list_for_each_entry(entry, head, mon.crdtgrp_list) {
-		cmbm_data = &dom_mbm->mbm_local[entry->mon.rmid];
-		cmbm_data->delta_comp = true;
-	}
 }
 
 static void mbm_update(struct rdt_resource *r, struct rdt_domain *d, int rmid)
  
Reinette Chatre Nov. 16, 2023, 7:48 p.m. UTC | #10
Hi Tony,

On 11/15/2023 1:54 PM, Tony Luck wrote:
> On Wed, Nov 15, 2023 at 08:09:13AM -0800, Reinette Chatre wrote:
>> Hi Tony,
>>
>> On 11/9/2023 1:27 PM, Luck, Tony wrote:
>>>>> Maybe additional an mount option "mba_MBps_total" so the user can pick
>>>>> total instead of local?
>>>>
>>>> Is this something for which a remount is required? Can it not perhaps be
>>>> changed at runtime?
>>>
>>> In theory, yes. But I've been playing with a patch that adds a writable info/
>>> file to allow runtime switch:
>>>
>>> # ls -l /sys/fs/resctrl/info/MB/mba_MBps_control
>>> -rw-r--r--. 1 root root 0 Nov  9 10:57 /sys/fs/resctrl/info/MB/mba_MBps_control
>>> ]# cat /sys/fs/resctrl/info/MB/mba_MBps_control
>>> total
>>>
>>> and found that it's a bit tricky to switch out the MBM event from the
>>> state machine driving the feedback loop. I think the problem is in the
>>> code that tries to stop the control loop from switching between two
>>> throttling levels every second:
>>>
>>>         if (cur_msr_val > r_mba->membw.min_bw && user_bw < cur_bw) {
>>>                 new_msr_val = cur_msr_val - r_mba->membw.bw_gran;
>>>         } else if (cur_msr_val < MAX_MBA_BW &&
>>>                    (user_bw > (cur_bw + delta_bw))) {
>>>                 new_msr_val = cur_msr_val + r_mba->membw.bw_gran;
>>>         } else {
>>>                 return;
>>>         }
>>>
>>> The code drops down one percentage step if current bandwidth is above
>>> the desired target. But stepping back up checks to see if "cur_bw + delta_bw"
>>> is below the target.
>>>
>>> Where does "delta_bw" come from? Code uses the Boolean flag "pmbm_data->delta_comp"
>>> to request the once-per-second polling compute the change in bandwidth on the
>>> next poll after adjusting throttling MSRs.
>>>
>>> All of these values are in the "struct mbm_state" which is a per-event-id structure.
>>>
>>> Picking an event at boot time works fine. Likely also fine at mount time. But
>>> switching at run-time seems to frequently end up with a very large value in
>>> "delta_bw" (as it compares current & previous for this event ... and it looks
>>> like things changed from zero).  Net effect is that throttling is increased when
>>> processes go over their target for the resctrl group, but throttling is never decreased.
>>
>> This is not clear to me. Would the state not also start from zero at boot and mount
>> time? From what I understand the state is also reset to zero on monitor group creation.
> 
> Yes. All of boot, mount, mkdir start a group in a well defined state
> with no throttling applied (schemata shows bandwidth limit as 2^32
> MBytes/sec). If the user sets some realistic limit, and the group
> MBM measurement exceeds that limit, then the MBA MSR for the group
> is dropped from 100% to 90% and the delta_comp flag set to record
> the delta_bw on the next 1-second poll.
> 
> The value of delta_bw is only used when looking to reduce throttling.
> To be in that state this group must have been in a state where
> throttling was increased ... which would result in delta_bw being
> set up.
> 
> Now look at what happens when switching from local to total for the
> first time. delta_bw is zero in the structures recording total bandwidth
> information. But more importantly so is prev_bw. If the code above
> changes throttling value and requests an updated calculation of delta_bw,
> that will be done using a value of prev_bw==0. I.e. delta_bw will be
> set to the current bandwidth. That high value will likely block attempts
> to reduce throttling.

Thank you for the detailed explanation. I think there are ways in which
to make this transition smoother, for example to not compute delta_bw
if there is no history (no "prev_bw_bytes"). But that would just fix
the existing algorithm without addressing the other issues you raised
with this algorithm.

> 
> Maybe when switching MBM source events the prev_bw value should be
> copied from old source structures to new source structures as a rough
> guide to avoid crazy actions. But that could also be wrong when
> switching from total to local for a group that has poor NUMA
> localization and total bandwidth is far higher than local.
> 
>>> The whole heuristic seems a bit fragile. It works well for test processes that have
>>> constant memory bandwidth. But I could see it failing in scenarios like this:
>>>
>>> 1) Process is over MB limit
>>> 2) Linux increases throttling, and sets flag to compute delta_bw on next poll
>>> 3) Process blocks on some event and uses no bandwidth in next one second
>>> 4) Next poll. Linux computes delta_bw as abs(cur_bw - m->prev_bw). cur_bw is zero,
>>>     so delta_bw is set to full value of bandwidth that process used when over budget
>>> 5) Process resumes running
>>> 6) Linux sees process using less than target, but cur_bw + delta_bw is above target,
>>>    so Linux doesn't adjust throttling
>>>
>>> I think the goal was to avoid relaxing throttling and letting a resctrl group go back over
>>> target bandwidth. But that doesn't work either for groups with highly variable bandwidth
>>> requirements.
>>>
>>> 1) Group is over budget
>>> 2) Linux increases throttling, and sets flag to compute delta_bw on next poll
>>> 3) Group forks additional processes. New bandwidth from those offsets the reduction due to throttling
>>> 4) Next poll. Linux sees bandwidth is unchanged. Sets delta_bw = 0.
>>> 5) Next poll. Groups aggregate bandwidth is fractionally below target. Because delta_bw=0, Linux
>>>    reduces throttling.
>>> 6) Group goes over target.
>>>
>>
>> I'll defer to you for the history about this algorithm. I am not familiar with how
>> broadly this feature is used but I have not heard about issues with it. It does
>> seem as though there is some opportunity for investigation here.
> 
> I'm sure I could construct an artificial test case to force this scenario.
> But maybe:
> 1) It never happens in real life
> 2) It happens, but nobody noticed
> 3) People figured out the workaround (set schemata to a really big
>    MBytes/sec value for a second, and then back to desired value).
> 4) Few people use this option
> 
> I dug again into the lore.kernel.org archives. Thomas complained
> that it wasn't "calibration" (as Vikas had described it in V1) but
> seems to have otherwise been OK with it as a heuristic.
> 
> https://lore.kernel.org/all/alpine.DEB.2.21.1804041037090.2056@nanos.tec.linutronix.de/
> 
> 
> I coded up and tested the below patch as a possible replacement heuristic.
> But I also wonder whether just letting the feedback loop flip throttling
> up and down between throttling values above/below the target bandwidth
> would really be so bad. It's just one MSR write that can be done from
> the current CPU and would result in average bandwidth closer to the
> user requested target.

The proposed heuristic seem to assume that the bandwidth used has
a linear relationship to the throttling percentage. It seems to set
aside the reasons that motivated this "delta_bw" in the first place:

> -	 * This is because (1)the increase in bandwidth is not perfectly
> -	 * linear and only "approximately" linear even when the hardware
> -	 * says it is linear.(2)Also since MBA is a core specific
> -	 * mechanism, the delta values vary based on number of cores used
> -	 * by the rdtgrp.

From the above I understand that reducing throttling by 10% does not
imply that bandwidth consumed will increase by 10%. A new heuristic like
this may thus decide not to relax throttling expecting that doing so would
cause bandwidth to go over limit while the non-linear increase may result
in bandwidth consumed not going over limit when throttling is relaxed.

I am also curious if only using target bandwidth would be bad.

I looked through the spec and was not able to find any information
guiding to the cost of adjusting the allocation once per second
(per resource group per domain). The closest I could find was
the discussion of a need of a "fine-grained software controller" where
it is not clear if "once per second" can be considered "fine grained".

Reinette
  

Patch

diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c
index f136ac046851..29e86310677d 100644
--- a/arch/x86/kernel/cpu/resctrl/monitor.c
+++ b/arch/x86/kernel/cpu/resctrl/monitor.c
@@ -418,6 +418,20 @@  static int __mon_event_count(u32 rmid, struct rmid_read *rr)
 	return 0;
 }
 
+/*
+ * For legacy compatibility use the local memory bandwidth to drive
+ * the mba_MBps feedback control loop. But on platforms that do not
+ * provide the local event fall back to use the total bandwidth event
+ * instead.
+ */
+static enum resctrl_event_id pick_mba_mbps_event(void)
+{
+	if (is_mbm_local_enabled())
+		return QOS_L3_MBM_LOCAL_EVENT_ID;
+
+	return QOS_L3_MBM_TOTAL_EVENT_ID;
+}
+
 /*
  * mbm_bw_count() - Update bw count from values previously read by
  *		    __mon_event_count().
@@ -431,9 +445,11 @@  static int __mon_event_count(u32 rmid, struct rmid_read *rr)
  */
 static void mbm_bw_count(u32 rmid, struct rmid_read *rr)
 {
-	struct mbm_state *m = &rr->d->mbm_local[rmid];
+	enum resctrl_event_id mba_mbps_evt_id = pick_mba_mbps_event();
 	u64 cur_bw, bytes, cur_bytes;
+	struct mbm_state *m;
 
+	m = get_mbm_state(rr->d, rmid, mba_mbps_evt_id);
 	cur_bytes = rr->val;
 	bytes = cur_bytes - m->prev_bw_bytes;
 	m->prev_bw_bytes = cur_bytes;
@@ -518,6 +534,7 @@  void mon_event_count(void *info)
  */
 static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm)
 {
+	enum resctrl_event_id mba_mbps_evt_id = pick_mba_mbps_event();
 	u32 closid, rmid, cur_msr_val, new_msr_val;
 	struct mbm_state *pmbm_data, *cmbm_data;
 	u32 cur_bw, delta_bw, user_bw;
@@ -526,14 +543,14 @@  static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm)
 	struct list_head *head;
 	struct rdtgroup *entry;
 
-	if (!is_mbm_local_enabled())
+	if (!is_mbm_enabled())
 		return;
 
 	r_mba = &rdt_resources_all[RDT_RESOURCE_MBA].r_resctrl;
 
 	closid = rgrp->closid;
 	rmid = rgrp->mon.rmid;
-	pmbm_data = &dom_mbm->mbm_local[rmid];
+	pmbm_data = get_mbm_state(dom_mbm, rmid, mba_mbps_evt_id);
 
 	dom_mba = get_domain_from_cpu(smp_processor_id(), r_mba);
 	if (!dom_mba) {
@@ -553,7 +570,7 @@  static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm)
 	 */
 	head = &rgrp->mon.crdtgrp_list;
 	list_for_each_entry(entry, head, mon.crdtgrp_list) {
-		cmbm_data = &dom_mbm->mbm_local[entry->mon.rmid];
+		cmbm_data = get_mbm_state(dom_mbm, entry->mon.rmid, mba_mbps_evt_id);
 		cur_bw += cmbm_data->prev_bw;
 		delta_bw += cmbm_data->delta_bw;
 	}
@@ -595,7 +612,7 @@  static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm)
 	 */
 	pmbm_data->delta_comp = true;
 	list_for_each_entry(entry, head, mon.crdtgrp_list) {
-		cmbm_data = &dom_mbm->mbm_local[entry->mon.rmid];
+		cmbm_data = get_mbm_state(dom_mbm, entry->mon.rmid, mba_mbps_evt_id);
 		cmbm_data->delta_comp = true;
 	}
 }
@@ -621,15 +638,15 @@  static void mbm_update(struct rdt_resource *r, struct rdt_domain *d, int rmid)
 		rr.evtid = QOS_L3_MBM_LOCAL_EVENT_ID;
 		rr.val = 0;
 		__mon_event_count(rmid, &rr);
-
-		/*
-		 * Call the MBA software controller only for the
-		 * control groups and when user has enabled
-		 * the software controller explicitly.
-		 */
-		if (is_mba_sc(NULL))
-			mbm_bw_count(rmid, &rr);
 	}
+
+	/*
+	 * Call the MBA software controller only for the
+	 * control groups and when user has enabled
+	 * the software controller explicitly.
+	 */
+	if (is_mba_sc(NULL))
+		mbm_bw_count(rmid, &rr);
 }
 
 /*
diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
index 69a1de92384a..0c4f8a1b8df0 100644
--- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c
+++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
@@ -2294,7 +2294,7 @@  static bool supports_mba_mbps(void)
 {
 	struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_MBA].r_resctrl;
 
-	return (is_mbm_local_enabled() &&
+	return (is_mbm_enabled() &&
 		r->alloc_capable && is_mba_linear());
 }