@@ -46,9 +46,9 @@ static int test__thread_maps_share(struct test_suite *test __maybe_unused, int s
TEST_ASSERT_EQUAL("wrong refcnt", refcount_read(maps__refcnt(maps)), 4);
/* test the maps pointer is shared */
- TEST_ASSERT_VAL("maps don't match", RC_CHK_EQUAL(maps, thread__maps(t1)));
- TEST_ASSERT_VAL("maps don't match", RC_CHK_EQUAL(maps, thread__maps(t2)));
- TEST_ASSERT_VAL("maps don't match", RC_CHK_EQUAL(maps, thread__maps(t3)));
+ TEST_ASSERT_VAL("maps don't match", maps__equal(maps, thread__maps(t1)));
+ TEST_ASSERT_VAL("maps don't match", maps__equal(maps, thread__maps(t2)));
+ TEST_ASSERT_VAL("maps don't match", maps__equal(maps, thread__maps(t3)));
/*
* Verify the other leader was created by previous call.
@@ -73,7 +73,7 @@ static int test__thread_maps_share(struct test_suite *test __maybe_unused, int s
other_maps = thread__maps(other);
TEST_ASSERT_EQUAL("wrong refcnt", refcount_read(maps__refcnt(other_maps)), 2);
- TEST_ASSERT_VAL("maps don't match", RC_CHK_EQUAL(other_maps, thread__maps(other_leader)));
+ TEST_ASSERT_VAL("maps don't match", maps__equal(other_maps, thread__maps(other_leader)));
/* release thread group */
thread__put(t3);
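
For context on the refcount arithmetic these assertions encode: every holder of the shared maps object owns one reference, so the leader plus t1-t3 account for the expected count of 4, and the second leader/thread pair for 2. A minimal sketch, assuming perf's maps.h and tools' linux/refcount.h; refcnt_demo is a hypothetical function, not part of the patch:

```c
#include <assert.h>

/* Hypothetical demo: taking an extra handle bumps the shared refcount,
 * which is what the TEST_ASSERT_EQUAL() calls above are counting. */
void refcnt_demo(struct thread *leader)
{
	struct maps *m = maps__get(thread__maps(leader));	/* +1 */

	assert(refcount_read(maps__refcnt(m)) >= 2);		/* leader's ref + ours */
	maps__put(m);						/* -1 */
}
```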
@@ -1157,7 +1157,7 @@ int fill_callchain_info(struct addr_location *al, struct callchain_cursor_node *
if (al->map == NULL)
goto out;
}
- if (RC_CHK_EQUAL(al->maps, machine__kernel_maps(machine))) {
+ if (maps__equal(al->maps, machine__kernel_maps(machine))) {
if (machine__is_host(machine)) {
al->cpumode = PERF_RECORD_MISC_KERNEL;
al->level = 'k';
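
The switch to maps__equal() here is behavior-preserving: the question is still "are these the machine-wide kernel maps?". A condensed sketch of the cpumode/level decision this branch feeds, assuming perf's machine.h and maps.h; callchain_level is a hypothetical helper, not part of the patch:

```c
/* Hypothetical condensation of fill_callchain_info()'s four branches:
 * kernel vs. user is decided by whether the sample's maps are the
 * machine's kernel maps, now asked via maps__equal(). */
static char callchain_level(const struct addr_location *al, struct machine *machine)
{
	if (maps__equal(al->maps, machine__kernel_maps(machine)))
		return machine__is_host(machine) ? 'k' : 'g';	/* host/guest kernel */

	return machine__is_host(machine) ? '.' : 'u';		/* host/guest user */
}
```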
@@ -6,9 +6,63 @@
#include "dso.h"
#include "map.h"
#include "maps.h"
+#include "rwsem.h"
#include "thread.h"
#include "ui/ui.h"
#include "unwind.h"
+#include <internal/rc_check.h>
+
+/*
+ * Locking/sorting note:
+ *
+ * Sorting is done under the write lock; iteration and binary searching
+ * happen under the read lock and require the maps to be sorted. There is
+ * a race between sorting releasing the write lock and iteration/searching
+ * acquiring the read lock, in which another thread could insert a map and
+ * break the sort order. In practice inserting maps should be rare, so the
+ * race shouldn't lead to livelock. Removing maps doesn't break the sort
+ * order.
+ */
+
+DECLARE_RC_STRUCT(maps) {
+ struct rw_semaphore lock;
+ /**
+ * @maps_by_address: array of maps sorted by their starting address if
+ * maps_by_address_sorted is true.
+ */
+ struct map **maps_by_address;
+ /**
+ * @maps_by_name: optional array of maps sorted by their dso name if
+ * maps_by_name_sorted is true.
+ */
+ struct map **maps_by_name;
+ struct machine *machine;
+#ifdef HAVE_LIBUNWIND_SUPPORT
+ void *addr_space;
+ const struct unwind_libunwind_ops *unwind_libunwind_ops;
+#endif
+ refcount_t refcnt;
+ /**
+	 * @nr_maps: number of entries in maps_by_address (and in
+	 * maps_by_name, when allocated) that hold maps.
+ */
+ unsigned int nr_maps;
+ /**
+ * @nr_maps_allocated: number of entries in maps_by_address and possibly
+ * maps_by_name.
+ */
+ unsigned int nr_maps_allocated;
+ /**
+	 * @last_search_by_name_idx: index of the last by-name search hit,
+	 * cached as repeated searches for the same dso name are common.
+ */
+ unsigned int last_search_by_name_idx;
+ /** @maps_by_address_sorted: is maps_by_address sorted. */
+ bool maps_by_address_sorted;
+ /** @maps_by_name_sorted: is maps_by_name sorted. */
+ bool maps_by_name_sorted;
+	/** @ends_broken: do the maps contain a map whose end is unset or out of order? */
+ bool ends_broken;
+};
static void check_invariants(const struct maps *maps __maybe_unused)
{
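
With the definition now private to maps.c, code outside the file iterates through the callback API, which takes the read lock (sorting first when required) on the caller's behalf, per the locking note above. A minimal sketch using only the public maps.h interface; count_cb and count_maps are hypothetical:

```c
/* Hypothetical counter on top of the public iterator; the read lock
 * and sortedness handling happen inside maps__for_each_map(). */
static int count_cb(struct map *map __maybe_unused, void *data)
{
	(*(unsigned int *)data)++;
	return 0;	/* a non-zero return would stop the iteration */
}

static unsigned int count_maps(struct maps *maps)
{
	unsigned int n = 0;

	maps__for_each_map(maps, count_cb, &n);
	return n;
}
```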
@@ -103,6 +157,43 @@ static void maps__set_maps_by_name_sorted(struct maps *maps, bool value)
RC_CHK_ACCESS(maps)->maps_by_name_sorted = value;
}
+struct machine *maps__machine(const struct maps *maps)
+{
+ return RC_CHK_ACCESS(maps)->machine;
+}
+
+unsigned int maps__nr_maps(const struct maps *maps)
+{
+ return RC_CHK_ACCESS(maps)->nr_maps;
+}
+
+refcount_t *maps__refcnt(struct maps *maps)
+{
+ return &RC_CHK_ACCESS(maps)->refcnt;
+}
+
+#ifdef HAVE_LIBUNWIND_SUPPORT
+void *maps__addr_space(const struct maps *maps)
+{
+ return RC_CHK_ACCESS(maps)->addr_space;
+}
+
+void maps__set_addr_space(struct maps *maps, void *addr_space)
+{
+ RC_CHK_ACCESS(maps)->addr_space = addr_space;
+}
+
+const struct unwind_libunwind_ops *maps__unwind_libunwind_ops(const struct maps *maps)
+{
+ return RC_CHK_ACCESS(maps)->unwind_libunwind_ops;
+}
+
+void maps__set_unwind_libunwind_ops(struct maps *maps, const struct unwind_libunwind_ops *ops)
+{
+ RC_CHK_ACCESS(maps)->unwind_libunwind_ops = ops;
+}
+#endif
+
static struct rw_semaphore *maps__lock(struct maps *maps)
{
/*
@@ -440,6 +531,11 @@ bool maps__empty(struct maps *maps)
return maps__nr_maps(maps) == 0;
}
+bool maps__equal(struct maps *a, struct maps *b)
+{
+ return RC_CHK_EQUAL(a, b);
+}
+
int maps__for_each_map(struct maps *maps, int (*cb)(struct map *map, void *data), void *data)
{
bool done = false;
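
Worth noting: maps__equal() is identity, not structural, comparison. RC_CHK_EQUAL() compares the underlying unwrapped pointers, so two maps holding identical mappings are not "equal" unless they share one object, which is exactly what the thread-sharing test asserts. A hedged illustration, assuming perf's maps__get()/maps__put(); maps__equal_example is hypothetical:

```c
#include <assert.h>

/* Hypothetical illustration: a second handle to the same object still
 * compares equal, because the unwrapped pointers match. */
void maps__equal_example(struct maps *a)
{
	struct maps *b = maps__get(a);	/* same object, refcount bumped */

	assert(maps__equal(a, b));
	maps__put(b);
}
```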
@@ -3,80 +3,15 @@
#define __PERF_MAPS_H
#include <linux/refcount.h>
-#include <linux/rbtree.h>
#include <stdio.h>
#include <stdbool.h>
#include <linux/types.h>
-#include "rwsem.h"
-#include <internal/rc_check.h>
struct ref_reloc_sym;
struct machine;
struct map;
struct maps;
-struct map_list_node {
- struct list_head node;
- struct map *map;
-};
-
-static inline struct map_list_node *map_list_node__new(void)
-{
- return malloc(sizeof(struct map_list_node));
-}
-
-/*
- * Locking/sorting note:
- *
- * Sorting is done with the write lock, iteration and binary searching happens
- * under the read lock requiring being sorted. There is a race between sorting
- * releasing the write lock and acquiring the read lock for iteration/searching
- * where another thread could insert and break the sorting of the maps. In
- * practice inserting maps should be rare meaning that the race shouldn't lead
- * to live lock. Removal of maps doesn't break being sorted.
- */
-
-DECLARE_RC_STRUCT(maps) {
- struct rw_semaphore lock;
- /**
- * @maps_by_address: array of maps sorted by their starting address if
- * maps_by_address_sorted is true.
- */
- struct map **maps_by_address;
- /**
- * @maps_by_name: optional array of maps sorted by their dso name if
- * maps_by_name_sorted is true.
- */
- struct map **maps_by_name;
- struct machine *machine;
-#ifdef HAVE_LIBUNWIND_SUPPORT
- void *addr_space;
- const struct unwind_libunwind_ops *unwind_libunwind_ops;
-#endif
- refcount_t refcnt;
- /**
- * @nr_maps: number of maps_by_address, and possibly maps_by_name,
- * entries that contain maps.
- */
- unsigned int nr_maps;
- /**
- * @nr_maps_allocated: number of entries in maps_by_address and possibly
- * maps_by_name.
- */
- unsigned int nr_maps_allocated;
- /**
- * @last_search_by_name_idx: cache of last found by name entry's index
- * as frequent searches for the same dso name are common.
- */
- unsigned int last_search_by_name_idx;
- /** @maps_by_address_sorted: is maps_by_address sorted. */
- bool maps_by_address_sorted;
- /** @maps_by_name_sorted: is maps_by_name sorted. */
- bool maps_by_name_sorted;
- /** @ends_broken: does the map contain a map where end values are unset/unsorted? */
- bool ends_broken;
-};
-
#define KMAP_NAME_LEN 256
struct kmap {
@@ -100,36 +35,22 @@ static inline void __maps__zput(struct maps **map)
#define maps__zput(map) __maps__zput(&map)
+bool maps__equal(struct maps *a, struct maps *b);
+
/* Iterate over map calling cb for each entry. */
int maps__for_each_map(struct maps *maps, int (*cb)(struct map *map, void *data), void *data);
/* Iterate over map removing an entry if cb returns true. */
void maps__remove_maps(struct maps *maps, bool (*cb)(struct map *map, void *data), void *data);
-static inline struct machine *maps__machine(struct maps *maps)
-{
- return RC_CHK_ACCESS(maps)->machine;
-}
-
-static inline unsigned int maps__nr_maps(const struct maps *maps)
-{
- return RC_CHK_ACCESS(maps)->nr_maps;
-}
-
-static inline refcount_t *maps__refcnt(struct maps *maps)
-{
- return &RC_CHK_ACCESS(maps)->refcnt;
-}
+struct machine *maps__machine(const struct maps *maps);
+unsigned int maps__nr_maps(const struct maps *maps);
+refcount_t *maps__refcnt(struct maps *maps);
#ifdef HAVE_LIBUNWIND_SUPPORT
-static inline void *maps__addr_space(struct maps *maps)
-{
- return RC_CHK_ACCESS(maps)->addr_space;
-}
-
-static inline const struct unwind_libunwind_ops *maps__unwind_libunwind_ops(const struct maps *maps)
-{
- return RC_CHK_ACCESS(maps)->unwind_libunwind_ops;
-}
+void *maps__addr_space(const struct maps *maps);
+void maps__set_addr_space(struct maps *maps, void *addr_space);
+const struct unwind_libunwind_ops *maps__unwind_libunwind_ops(const struct maps *maps);
+void maps__set_unwind_libunwind_ops(struct maps *maps, const struct unwind_libunwind_ops *ops);
#endif
size_t maps__fprintf(struct maps *maps, FILE *fp);
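
Net effect on the header: rwsem.h and internal/rc_check.h drop out of maps.h's include graph, and consumers now compile against a fully opaque struct maps. A minimal sketch of such a consumer; maps__count is a hypothetical function, not part of the patch:

```c
#include "maps.h"

/* Hypothetical consumer: no RC_CHK_ACCESS(), no struct layout, and no
 * rwsem.h needed -- only the accessor declarations above. */
unsigned int maps__count(struct maps *maps)
{
	return maps__empty(maps) ? 0 : maps__nr_maps(maps);
}
```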
@@ -63,6 +63,16 @@ struct symbol_conf symbol_conf = {
.res_sample = 0,
};
+struct map_list_node {
+ struct list_head node;
+ struct map *map;
+};
+
+static struct map_list_node *map_list_node__new(void)
+{
+ return malloc(sizeof(struct map_list_node));
+}
+
static enum dso_binary_type binary_type_symtab[] = {
DSO_BINARY_TYPE__KALLSYMS,
DSO_BINARY_TYPE__GUEST_KALLSYMS,
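
map_list_node keeps its semantics across the move, a thin list_head wrapper for queueing maps; it is simply file-local to symbol.c, its remaining user, and no longer inline. A hedged sketch of the usage pattern, assuming tools' linux/list.h and perf's map.h; queue_map is hypothetical:

```c
/* Hypothetical sketch of the pattern map_list_node serves: queue maps
 * on a list_head for later processing. */
static int queue_map(struct list_head *pending, struct map *map)
{
	struct map_list_node *mln = map_list_node__new();

	if (!mln)
		return -ENOMEM;

	mln->map = map__get(map);	/* the node owns a reference */
	list_add_tail(&mln->node, pending);
	return 0;
}
```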
@@ -383,7 +383,7 @@ static int thread__clone_maps(struct thread *thread, struct thread *parent, bool
if (thread__pid(thread) == thread__pid(parent))
return thread__prepare_access(thread);
- if (RC_CHK_EQUAL(thread__maps(thread), thread__maps(parent))) {
+ if (maps__equal(thread__maps(thread), thread__maps(parent))) {
pr_debug("broken map groups on thread %d/%d parent %d/%d\n",
thread__pid(thread), thread__tid(thread),
thread__pid(parent), thread__tid(parent));
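
This call site is the semantically interesting one: threads of the same process are expected to share one maps object, while a forked child that still shares its parent's maps here indicates broken map groups. A hypothetical predicate built on the same test, assuming perf's thread.h:

```c
/* Hypothetical mirror of the hunk above: same pid => sharing expected;
 * different pids => sharing is a bug. */
static bool thread__maps_share_ok(struct thread *t, struct thread *parent)
{
	if (thread__pid(t) == thread__pid(parent))
		return true;

	return !maps__equal(thread__maps(t), thread__maps(parent));
}
```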
@@ -706,7 +706,7 @@ static int _unwind__prepare_access(struct maps *maps)
{
void *addr_space = unw_create_addr_space(&accessors, 0);
- RC_CHK_ACCESS(maps)->addr_space = addr_space;
+ maps__set_addr_space(maps, addr_space);
if (!addr_space) {
pr_err("unwind: Can't create unwind address space.\n");
return -ENOMEM;
@@ -12,11 +12,6 @@ struct unwind_libunwind_ops __weak *local_unwind_libunwind_ops;
struct unwind_libunwind_ops __weak *x86_32_unwind_libunwind_ops;
struct unwind_libunwind_ops __weak *arm64_unwind_libunwind_ops;
-static void unwind__register_ops(struct maps *maps, struct unwind_libunwind_ops *ops)
-{
- RC_CHK_ACCESS(maps)->unwind_libunwind_ops = ops;
-}
-
int unwind__prepare_access(struct maps *maps, struct map *map, bool *initialized)
{
const char *arch;
@@ -60,7 +55,7 @@ int unwind__prepare_access(struct maps *maps, struct map *map, bool *initialized
return 0;
}
out_register:
- unwind__register_ops(maps, ops);
+ maps__set_unwind_libunwind_ops(maps, ops);
err = maps__unwind_libunwind_ops(maps)->prepare_access(maps);
if (initialized)