[v3,1/3] mm/rmap: Recognize non-writable TLB entries during TLB batch flush
Commit Message
Functionally, no change. This is preparation for CONFIG_MIGRC, which
requires recognizing non-writable TLB entries and makes use of them to
batch more aggressively or even skip TLB flushes.
While at it, change struct tlbflush_unmap_batch's ->flush_required
(boolean) to ->nr_flush_required (int) in order to track not only
whether a flush has been requested, but also the exact number of
requests. That will be used in the CONFIG_MIGRC implementation.
Signed-off-by: Byungchul Park <byungchul@sk.com>
---
arch/x86/include/asm/tlbflush.h | 2 ++
arch/x86/mm/tlb.c | 7 ++++++
include/linux/mm_types_task.h | 4 ++--
include/linux/sched.h | 1 +
mm/internal.h | 14 ++++++++++++
mm/rmap.c | 39 ++++++++++++++++++++++++++++-----
6 files changed, 60 insertions(+), 7 deletions(-)
Comments
Below are some points you might find useful:
> +
> /*
> * Blindly accessing user memory from NMI context can be dangerous
> * if we're in the middle of switching the current user task or
> diff --git a/include/linux/mm_types_task.h b/include/linux/mm_types_task.h
> index aa44fff8bb9d..35ba9425d48d 100644
> --- a/include/linux/mm_types_task.h
> +++ b/include/linux/mm_types_task.h
> @@ -59,8 +59,8 @@ struct tlbflush_unmap_batch {
> */
> struct arch_tlbflush_unmap_batch arch;
>
> - /* True if a flush is needed. */
> - bool flush_required;
> + /* The number of flush requested. */
Number of what? Base pages, I presume.
> + int nr_flush_required;
Perhaps unsigned would be better suited?
>
> /*
> * If true then the PTE was dirty when unmapped. The entry must be
> diff --git a/include/linux/sched.h b/include/linux/sched.h
> index 77f01ac385f7..63189c023357 100644
> --- a/include/linux/sched.h
> +++ b/include/linux/sched.h
> @@ -1324,6 +1324,7 @@ struct task_struct {
> #endif
>
> struct tlbflush_unmap_batch tlb_ubc;
> + struct tlbflush_unmap_batch tlb_ubc_nowr;
tlb_ubc_nowr is - I think - less informative than tlb_ubc_ro (and a comment
would be useful).
[snip]
>
> +
> +int nr_flush_required(void)
> +{
> + return current->tlb_ubc.nr_flush_required;
> +}
> +
> +int nr_flush_required_nowr(void)
> +{
> + return current->tlb_ubc_nowr.nr_flush_required;
> +}
I haven’t gone through the users of these functions yet, as they are not included
in this patch (which is usually not great).
Anyhow, it might be a bit wasteful to have a function call for such a function. See
if it is possible to avoid that call.
> +
> /*
> * Flush TLB entries for recently unmapped pages from remote CPUs. It is
> * important if a PTE was dirty when it was unmapped that it's flushed
> @@ -615,11 +641,12 @@ void try_to_unmap_flush(void)
> {
> struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
>
> - if (!tlb_ubc->flush_required)
> + fold_ubc_nowr();
> + if (!tlb_ubc->nr_flush_required)
> return;
>
> arch_tlbbatch_flush(&tlb_ubc->arch);
> - tlb_ubc->flush_required = false;
> + tlb_ubc->nr_flush_required = 0;
> tlb_ubc->writable = false;
> }
>
> @@ -627,8 +654,9 @@ void try_to_unmap_flush(void)
> void try_to_unmap_flush_dirty(void)
> {
> struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
> + struct tlbflush_unmap_batch *tlb_ubc_nowr = &current->tlb_ubc_nowr;
>
> - if (tlb_ubc->writable)
> + if (tlb_ubc->writable || tlb_ubc_nowr->writable)
> try_to_unmap_flush();
> }
>
> @@ -645,15 +673,16 @@ void try_to_unmap_flush_dirty(void)
> static void set_tlb_ubc_flush_pending(struct mm_struct *mm, pte_t pteval,
> unsigned long uaddr)
> {
> - struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
> + struct tlbflush_unmap_batch *tlb_ubc;
> int batch;
> bool writable = pte_dirty(pteval);
>
> if (!pte_accessible(mm, pteval))
> return;
>
> + tlb_ubc = pte_write(pteval) || writable ? &current->tlb_ubc : &current->tlb_ubc_nowr;
Using the ternary operator here is a bit confusing. You can use an “if”
instead, or if your mind is set on doing it this way, at least make it
easier to read:
tlb_ubc = (pte_write(pteval) || writable) ? &current->tlb_ubc :
&current->tlb_ubc_nowr;
And of course, add a comment.
> arch_tlbbatch_add_pending(&tlb_ubc->arch, mm, uaddr);
> - tlb_ubc->flush_required = true;
> + tlb_ubc->nr_flush_required += 1;
Presumably overflow is impossible for other reasons, but something like that
worries me.
On Mon, Oct 30, 2023 at 07:52:05AM +0000, Nadav Amit wrote:
>
> Below are some points you might find useful:
Thank you!
> > +
> > /*
> > * Blindly accessing user memory from NMI context can be dangerous
> > * if we're in the middle of switching the current user task or
> > diff --git a/include/linux/mm_types_task.h b/include/linux/mm_types_task.h
> > index aa44fff8bb9d..35ba9425d48d 100644
> > --- a/include/linux/mm_types_task.h
> > +++ b/include/linux/mm_types_task.h
> > @@ -59,8 +59,8 @@ struct tlbflush_unmap_batch {
> > */
> > struct arch_tlbflush_unmap_batch arch;
> >
> > - /* True if a flush is needed. */
> > - bool flush_required;
> > + /* The number of flush requested. */
>
> Number of what? Base pages, I presume.
How many times set_tlb_ubc_flush_pending() has been called.
> > + int nr_flush_required;
>
> Perhaps unsigned would be better suited?
Will change it to unsigned.
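Something like this, perhaps - just a sketch, with the comment wording
still open:

	/*
	 * The number of times set_tlb_ubc_flush_pending() has deferred
	 * a TLB flush for this task, not the number of base pages.
	 */
	unsigned int nr_flush_required;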
> > /*
> > * If true then the PTE was dirty when unmapped. The entry must be
> > diff --git a/include/linux/sched.h b/include/linux/sched.h
> > index 77f01ac385f7..63189c023357 100644
> > --- a/include/linux/sched.h
> > +++ b/include/linux/sched.h
> > @@ -1324,6 +1324,7 @@ struct task_struct {
> > #endif
> >
> > struct tlbflush_unmap_batch tlb_ubc;
> > + struct tlbflush_unmap_batch tlb_ubc_nowr;
>
> tlb_ubc_nowr is - I think - less informative than tlb_ubc_ro (and a comment
> would be useful).
At the beginning, I named it tlb_ubc_ro, but I forgot why I changed it
to tlb_ubc_nowr. I will change it back and add a comment on it.
> > +
> > +int nr_flush_required(void)
> > +{
> > + return current->tlb_ubc.nr_flush_required;
> > +}
> > +
> > +int nr_flush_required_nowr(void)
> > +{
> > + return current->tlb_ubc_nowr.nr_flush_required;
> > +}
>
> I haven’t gone through the users of these functions yet, as they are not included
> in this patch (which is usually not great).
Right. I will move these two into another patch that uses the functions,
or add an explanation to this commit message.
> Anyhow, it might be a bit wasteful to have a function call for such a function. See
> if it is possible to avoid that call.
I will move them to mm/internal.h and mark them inline if possible.
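For example, something along these lines in mm/internal.h, under the
existing CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH guard (a sketch, not
the final code):

	static inline int nr_flush_required(void)
	{
		return current->tlb_ubc.nr_flush_required;
	}

	static inline int nr_flush_required_nowr(void)
	{
		return current->tlb_ubc_nowr.nr_flush_required;
	}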
> > +
> > /*
> > * Flush TLB entries for recently unmapped pages from remote CPUs. It is
> > * important if a PTE was dirty when it was unmapped that it's flushed
> > @@ -615,11 +641,12 @@ void try_to_unmap_flush(void)
> > {
> > struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
> >
> > - if (!tlb_ubc->flush_required)
> > + fold_ubc_nowr();
> > + if (!tlb_ubc->nr_flush_required)
> > return;
> >
> > arch_tlbbatch_flush(&tlb_ubc->arch);
> > - tlb_ubc->flush_required = false;
> > + tlb_ubc->nr_flush_required = 0;
> > tlb_ubc->writable = false;
> > }
> >
> > @@ -627,8 +654,9 @@ void try_to_unmap_flush(void)
> > void try_to_unmap_flush_dirty(void)
> > {
> > struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
> > + struct tlbflush_unmap_batch *tlb_ubc_nowr = &current->tlb_ubc_nowr;
> >
> > - if (tlb_ubc->writable)
> > + if (tlb_ubc->writable || tlb_ubc_nowr->writable)
> > try_to_unmap_flush();
> > }
> >
> > @@ -645,15 +673,16 @@ void try_to_unmap_flush_dirty(void)
> > static void set_tlb_ubc_flush_pending(struct mm_struct *mm, pte_t pteval,
> > unsigned long uaddr)
> > {
> > - struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
> > + struct tlbflush_unmap_batch *tlb_ubc;
> > int batch;
> > bool writable = pte_dirty(pteval);
> >
> > if (!pte_accessible(mm, pteval))
> > return;
> >
> > + tlb_ubc = pte_write(pteval) || writable ? &current->tlb_ubc : &current->tlb_ubc_nowr;
>
> Using the ternary operator here is a bit confusing. You can use an “if”
> instead, or if your mind is set on doing it this way, at least make it
> easier to read:
>
> tlb_ubc = (pte_write(pteval) || writable) ? &current->tlb_ubc :
> &current->tlb_ubc_nowr;
You are right. I should change it that way. Thanks.
> And of course, add a comment.
Okay. Also will add a comment.
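For instance, a sketch of one possible shape, with the comment wording
still to be settled:

	/*
	 * PTEs that are writable or were dirty need the ordinary
	 * batch; non-writable, clean PTEs go to the batch that can
	 * be flushed more aggressively or even skipped.
	 */
	if (pte_write(pteval) || writable)
		tlb_ubc = &current->tlb_ubc;
	else
		tlb_ubc = &current->tlb_ubc_nowr;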
> > arch_tlbbatch_add_pending(&tlb_ubc->arch, mm, uaddr);
> > - tlb_ubc->flush_required = true;
> > + tlb_ubc->nr_flush_required += 1;
>
> Presumably overflow is impossible for other reasons, but something like that
> worries me.
Agreed. Let me think about it more and fix it.
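One simple option might be to saturate the counter, assuming it becomes
unsigned int as discussed above (a sketch):

	if (tlb_ubc->nr_flush_required < UINT_MAX)
		tlb_ubc->nr_flush_required += 1;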
Thank you.
Byungchul
@@ -292,6 +292,8 @@ static inline void arch_flush_tlb_batched_pending(struct mm_struct *mm)
}
extern void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);
+extern void arch_tlbbatch_fold(struct arch_tlbflush_unmap_batch *bdst,
+ struct arch_tlbflush_unmap_batch *bsrc);
static inline bool pte_flags_need_flush(unsigned long oldflags,
unsigned long newflags,
@@ -1274,6 +1274,13 @@ void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
put_cpu();
}
+void arch_tlbbatch_fold(struct arch_tlbflush_unmap_batch *bdst,
+ struct arch_tlbflush_unmap_batch *bsrc)
+{
+ cpumask_or(&bdst->cpumask, &bdst->cpumask, &bsrc->cpumask);
+ cpumask_clear(&bsrc->cpumask);
+}
+
/*
* Blindly accessing user memory from NMI context can be dangerous
* if we're in the middle of switching the current user task or
@@ -59,8 +59,8 @@ struct tlbflush_unmap_batch {
*/
struct arch_tlbflush_unmap_batch arch;
- /* True if a flush is needed. */
- bool flush_required;
+ /* The number of flush requested. */
+ int nr_flush_required;
/*
* If true then the PTE was dirty when unmapped. The entry must be
@@ -1324,6 +1324,7 @@ struct task_struct {
#endif
struct tlbflush_unmap_batch tlb_ubc;
+ struct tlbflush_unmap_batch tlb_ubc_nowr;
/* Cache last used pipe for splice(): */
struct pipe_inode_info *splice_pipe;
@@ -861,6 +861,9 @@ extern struct workqueue_struct *mm_percpu_wq;
void try_to_unmap_flush(void);
void try_to_unmap_flush_dirty(void);
void flush_tlb_batched_pending(struct mm_struct *mm);
+void fold_ubc_nowr(void);
+int nr_flush_required(void);
+int nr_flush_required_nowr(void);
#else
static inline void try_to_unmap_flush(void)
{
@@ -871,6 +874,17 @@ static inline void try_to_unmap_flush_dirty(void)
static inline void flush_tlb_batched_pending(struct mm_struct *mm)
{
}
+static inline void fold_ubc_nowr(void)
+{
+}
+static inline int nr_flush_required(void)
+{
+ return 0;
+}
+static inline int nr_flush_required_nowr(void)
+{
+ return 0;
+}
#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */
extern const struct trace_print_flags pageflag_names[];
@@ -605,6 +605,32 @@ struct anon_vma *folio_lock_anon_vma_read(struct folio *folio,
}
#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
+
+void fold_ubc_nowr(void)
+{
+ struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
+ struct tlbflush_unmap_batch *tlb_ubc_nowr = &current->tlb_ubc_nowr;
+
+ if (!tlb_ubc_nowr->nr_flush_required)
+ return;
+
+ arch_tlbbatch_fold(&tlb_ubc->arch, &tlb_ubc_nowr->arch);
+ tlb_ubc->writable = tlb_ubc->writable || tlb_ubc_nowr->writable;
+ tlb_ubc->nr_flush_required += tlb_ubc_nowr->nr_flush_required;
+ tlb_ubc_nowr->nr_flush_required = 0;
+ tlb_ubc_nowr->writable = false;
+}
+
+int nr_flush_required(void)
+{
+ return current->tlb_ubc.nr_flush_required;
+}
+
+int nr_flush_required_nowr(void)
+{
+ return current->tlb_ubc_nowr.nr_flush_required;
+}
+
/*
* Flush TLB entries for recently unmapped pages from remote CPUs. It is
* important if a PTE was dirty when it was unmapped that it's flushed
@@ -615,11 +641,12 @@ void try_to_unmap_flush(void)
{
struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
- if (!tlb_ubc->flush_required)
+ fold_ubc_nowr();
+ if (!tlb_ubc->nr_flush_required)
return;
arch_tlbbatch_flush(&tlb_ubc->arch);
- tlb_ubc->flush_required = false;
+ tlb_ubc->nr_flush_required = 0;
tlb_ubc->writable = false;
}
@@ -627,8 +654,9 @@ void try_to_unmap_flush(void)
void try_to_unmap_flush_dirty(void)
{
struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
+ struct tlbflush_unmap_batch *tlb_ubc_nowr = &current->tlb_ubc_nowr;
- if (tlb_ubc->writable)
+ if (tlb_ubc->writable || tlb_ubc_nowr->writable)
try_to_unmap_flush();
}
@@ -645,15 +673,16 @@ void try_to_unmap_flush_dirty(void)
static void set_tlb_ubc_flush_pending(struct mm_struct *mm, pte_t pteval,
unsigned long uaddr)
{
- struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
+ struct tlbflush_unmap_batch *tlb_ubc;
int batch;
bool writable = pte_dirty(pteval);
if (!pte_accessible(mm, pteval))
return;
+ tlb_ubc = pte_write(pteval) || writable ? &current->tlb_ubc : &current->tlb_ubc_nowr;
arch_tlbbatch_add_pending(&tlb_ubc->arch, mm, uaddr);
- tlb_ubc->flush_required = true;
+ tlb_ubc->nr_flush_required += 1;
/*
* Ensure compiler does not re-order the setting of tlb_flush_batched