[v5,16/50] perf maps: Add remove maps function to remove a map based on callback
Commit Message
Removing maps wasn't being done under the write lock. Similar to
maps__for_each_map, iterate the entries but in this case remove the
entry based on the result of the callback. If an entry is removed
then maps_by_name also needs updating, so add the missing flush of
that cache.

In dso__load_kcore, the test of which map to save would always be false
with REFCNT_CHECKING because of a missing RC_CHK_ACCESS.

Signed-off-by: Ian Rogers <irogers@google.com>
---
tools/perf/util/maps.c | 24 ++++++++++++++++++++++++
tools/perf/util/maps.h | 6 ++----
tools/perf/util/symbol.c | 24 ++++++++++++------------
3 files changed, 38 insertions(+), 16 deletions(-)
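For readers skimming the series, a minimal usage sketch of the new API follows. Everything here except maps__remove_maps(), __map__is_kernel() and the include paths is invented for illustration; the real caller added by this patch is the dso__load_kcore() conversion in the comments and hunks below.

#include <linux/compiler.h>
#include "util/map.h"
#include "util/maps.h"

/* Callback contract: return true to have maps__remove_maps() drop the entry. */
static bool remove_non_kernel_map(struct map *map, void *data __maybe_unused)
{
	return !__map__is_kernel(map);
}

/* Drops every non-kernel map; the walk runs under the maps write lock and,
 * if anything was removed, the maps_by_name cache is flushed. */
static void drop_user_maps(struct maps *maps)
{
	maps__remove_maps(maps, remove_non_kernel_map, /*data=*/NULL);
}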
Comments
On Mon, Nov 27, 2023 at 2:10 PM Ian Rogers <irogers@google.com> wrote:
>
> Removing maps wasn't being done under the write lock. Similar to
> maps__for_each_map, iterate the entries but in this case remove the
> entry based on the result of the callback. If an entry was removed
> then maps_by_name also needs updating, so add missed flush.
>
> In dso__load_kcore, the test of map to save would always be false with
> REFCNT_CHECKING because of a missing RC_CHK_ACCESS.
>
> Signed-off-by: Ian Rogers <irogers@google.com>
Acked-by: Namhyung Kim <namhyung@kernel.org>
A nitpick below,
> ---
[SNIP]
> diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
> index 72f03b875478..30da8a405d11 100644
> --- a/tools/perf/util/symbol.c
> +++ b/tools/perf/util/symbol.c
> @@ -1239,13 +1239,23 @@ static int kcore_mapfn(u64 start, u64 len, u64 pgoff, void *data)
> return 0;
> }
>
> +static bool remove_old_maps(struct map *map, void *data)
> +{
> + const struct map *map_to_save = data;
> +
> + /*
> + * We need to preserve eBPF maps even if they are covered by kcore,
> + * because we need to access eBPF dso for source data.
> + */
> + return RC_CHK_ACCESS(map) != RC_CHK_ACCESS(map_to_save) && !__map__is_bpf_prog(map);
RC_CHK_EQUAL(map, map_to_save) ?
Thanks,
Namhyung
> [SNIP]
On Mon, Dec 4, 2023 at 3:50 PM Namhyung Kim <namhyung@kernel.org> wrote:
>
> On Mon, Nov 27, 2023 at 2:10 PM Ian Rogers <irogers@google.com> wrote:
> >
> > Removing maps wasn't being done under the write lock. Similar to
> > maps__for_each_map, iterate the entries but in this case remove the
> > entry based on the result of the callback. If an entry was removed
> > then maps_by_name also needs updating, so add missed flush.
> >
> > In dso__load_kcore, the test of map to save would always be false with
> > REFCNT_CHECKING because of a missing RC_CHK_ACCESS.
> >
> > Signed-off-by: Ian Rogers <irogers@google.com>
>
> Acked-by: Namhyung Kim <namhyung@kernel.org>
>
> A nitpick below,
>
> > ---
> [SNIP]
> > diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
> > index 72f03b875478..30da8a405d11 100644
> > --- a/tools/perf/util/symbol.c
> > +++ b/tools/perf/util/symbol.c
> > @@ -1239,13 +1239,23 @@ static int kcore_mapfn(u64 start, u64 len, u64 pgoff, void *data)
> > return 0;
> > }
> >
> > +static bool remove_old_maps(struct map *map, void *data)
> > +{
> > + const struct map *map_to_save = data;
> > +
> > + /*
> > + * We need to preserve eBPF maps even if they are covered by kcore,
> > + * because we need to access eBPF dso for source data.
> > + */
> > + return RC_CHK_ACCESS(map) != RC_CHK_ACCESS(map_to_save) && !__map__is_bpf_prog(map);
>
> RC_CHK_EQUAL(map, map_to_save) ?
Done in v6.
Thanks,
Ian
> Thanks,
> Namhyung
>
>
> > [SNIP]
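For context on the RC_CHK nitpick resolved above: with REFCNT_CHECKING enabled a struct map pointer is a checking wrapper, so comparing two wrapper pointers directly is always false even when they wrap the same map; RC_CHK_ACCESS() reaches the underlying object and RC_CHK_EQUAL() performs that comparison in one step. The v6 callback is not shown in this thread, but given the exchange above it presumably ends up along these lines:

static bool remove_old_maps(struct map *map, void *data)
{
	const struct map *map_to_save = data;

	/*
	 * We need to preserve eBPF maps even if they are covered by kcore,
	 * because we need to access eBPF dso for source data.
	 */
	return !RC_CHK_EQUAL(map, map_to_save) && !__map__is_bpf_prog(map);
}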
diff --git a/tools/perf/util/maps.c b/tools/perf/util/maps.c
--- a/tools/perf/util/maps.c
+++ b/tools/perf/util/maps.c
@@ -13,6 +13,10 @@
#define maps__for_each_entry(maps, map) \
for (map = maps__first(maps); map; map = map_rb_node__next(map))
+#define maps__for_each_entry_safe(maps, map, next) \
+ for (map = maps__first(maps), next = map_rb_node__next(map); map; \
+ map = next, next = map_rb_node__next(map))
+
static void maps__init(struct maps *maps, struct machine *machine)
{
refcount_set(maps__refcnt(maps), 1);
@@ -214,6 +218,26 @@ int maps__for_each_map(struct maps *maps, int (*cb)(struct map *map, void *data)
return ret;
}
+void maps__remove_maps(struct maps *maps, bool (*cb)(struct map *map, void *data), void *data)
+{
+ struct map_rb_node *pos, *next;
+ unsigned int start_nr_maps;
+
+ down_write(maps__lock(maps));
+
+ start_nr_maps = maps__nr_maps(maps);
+ maps__for_each_entry_safe(maps, pos, next) {
+ if (cb(pos->map, data)) {
+ __maps__remove(maps, pos);
+ --RC_CHK_ACCESS(maps)->nr_maps;
+ }
+ }
+ if (maps__maps_by_name(maps) && start_nr_maps != maps__nr_maps(maps))
+ __maps__free_maps_by_name(maps);
+
+ up_write(maps__lock(maps));
+}
+
struct symbol *maps__find_symbol(struct maps *maps, u64 addr, struct map **mapp)
{
struct map *map = maps__find(maps, addr);
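A note on the maps_by_name handling above: maps keeps a lazily built array of its entries sorted by name next to the primary tree, so once any entry is removed that cache must be thrown away or later name lookups would walk stale pointers. The following is a simplified, perf-independent sketch of the same pattern; the toy_maps structure and all names in it are invented for illustration, and locking is omitted:

#include <stdbool.h>
#include <stddef.h>
#include <stdlib.h>

/* Toy container: primary array of owned strings plus a lazily built,
 * name-sorted index that borrows the same pointers. */
struct toy_maps {
	char **entries;
	size_t nr;
	char **by_name;	/* cached sorted view, or NULL if not built */
};

/* Remove every entry the callback selects; if anything was removed,
 * invalidate the by-name cache (cf. __maps__free_maps_by_name() above). */
static void toy_maps__remove_if(struct toy_maps *m,
				bool (*cb)(const char *name, void *data),
				void *data)
{
	size_t i = 0, start_nr = m->nr;

	while (i < m->nr) {
		if (cb(m->entries[i], data)) {
			free(m->entries[i]);
			m->entries[i] = m->entries[--m->nr];
		} else {
			i++;
		}
	}
	if (m->by_name && start_nr != m->nr) {
		free(m->by_name);	/* frees the index, not the strings */
		m->by_name = NULL;	/* rebuilt lazily on next name lookup */
	}
}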
diff --git a/tools/perf/util/maps.h b/tools/perf/util/maps.h
--- a/tools/perf/util/maps.h
+++ b/tools/perf/util/maps.h
@@ -36,10 +36,6 @@ struct map_rb_node *map_rb_node__next(struct map_rb_node *node);
struct map_rb_node *maps__find_node(struct maps *maps, struct map *map);
struct map *maps__find(struct maps *maps, u64 addr);
-#define maps__for_each_entry_safe(maps, map, next) \
- for (map = maps__first(maps), next = map_rb_node__next(map); map; \
- map = next, next = map_rb_node__next(map))
-
DECLARE_RC_STRUCT(maps) {
struct rb_root entries;
struct rw_semaphore lock;
@@ -80,6 +76,8 @@ static inline void __maps__zput(struct maps **map)
/* Iterate over map calling cb for each entry. */
int maps__for_each_map(struct maps *maps, int (*cb)(struct map *map, void *data), void *data);
+/* Iterate over map removing an entry if cb returns true. */
+void maps__remove_maps(struct maps *maps, bool (*cb)(struct map *map, void *data), void *data);
static inline struct rb_root *maps__entries(struct maps *maps)
{
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 72f03b875478..30da8a405d11 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -1239,13 +1239,23 @@ static int kcore_mapfn(u64 start, u64 len, u64 pgoff, void *data)
return 0;
}
+static bool remove_old_maps(struct map *map, void *data)
+{
+ const struct map *map_to_save = data;
+
+ /*
+ * We need to preserve eBPF maps even if they are covered by kcore,
+ * because we need to access eBPF dso for source data.
+ */
+ return RC_CHK_ACCESS(map) != RC_CHK_ACCESS(map_to_save) && !__map__is_bpf_prog(map);
+}
+
static int dso__load_kcore(struct dso *dso, struct map *map,
const char *kallsyms_filename)
{
struct maps *kmaps = map__kmaps(map);
struct kcore_mapfn_data md;
struct map *replacement_map = NULL;
- struct map_rb_node *old_node, *next;
struct machine *machine;
bool is_64_bit;
int err, fd;
@@ -1292,17 +1302,7 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
}
/* Remove old maps */
- maps__for_each_entry_safe(kmaps, old_node, next) {
- struct map *old_map = old_node->map;
-
- /*
- * We need to preserve eBPF maps even if they are
- * covered by kcore, because we need to access
- * eBPF dso for source data.
- */
- if (old_map != map && !__map__is_bpf_prog(old_map))
- maps__remove(kmaps, old_map);
- }
+ maps__remove_maps(kmaps, remove_old_maps, map);
machine->trampolines_mapped = false;
/* Find the kernel map using the '_stext' symbol */