@@ -1,3 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_DEPT) += dept.o
+obj-$(CONFIG_DEPT) += dept_proc.o
@@ -74,6 +74,7 @@
#include <linux/dept.h>
#include <linux/utsname.h>
#include <linux/kernel.h>
+#include "dept_internal.h"
static int dept_stop;
static int dept_per_cpu_ready;
@@ -260,20 +261,13 @@ static bool valid_key(struct dept_key *k)
* have been freed will be placed.
*/
-enum object_t {
-#define OBJECT(id, nr) OBJECT_##id,
- #include "dept_object.h"
-#undef OBJECT
- OBJECT_NR,
-};
-
#define OBJECT(id, nr) \
static struct dept_##id spool_##id[nr]; \
static DEFINE_PER_CPU(struct llist_head, lpool_##id);
#include "dept_object.h"
#undef OBJECT
-static struct dept_pool pool[OBJECT_NR] = {
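+/* Non-static: also referenced by dept_proc.c via dept_internal.h. */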
+struct dept_pool dept_pool[OBJECT_NR] = {
#define OBJECT(id, nr) { \
.name = #id, \
.obj_sz = sizeof(struct dept_##id), \
@@ -303,7 +297,7 @@ static void *from_pool(enum object_t t)
if (DEPT_WARN_ON(!irqs_disabled()))
return NULL;
- p = &pool[t];
+ p = &dept_pool[t];
/*
* Try local pool first.
@@ -338,7 +332,7 @@ static void *from_pool(enum object_t t)
static void to_pool(void *o, enum object_t t)
{
- struct dept_pool *p = &pool[t];
+ struct dept_pool *p = &dept_pool[t];
struct llist_head *h;
preempt_disable();
@@ -2092,7 +2086,7 @@ void dept_map_copy(struct dept_map *to, struct dept_map *from)
clean_classes_cache(&to->map_key);
}
-static LIST_HEAD(classes);
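+/* Non-static: dept_proc.c iterates this list via dept_internal.h. */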
+LIST_HEAD(dept_classes);
static bool within(const void *addr, void *start, unsigned long size)
{
@@ -2124,7 +2118,7 @@ void dept_free_range(void *start, unsigned int sz)
while (unlikely(!dept_lock()))
cpu_relax();
- list_for_each_entry_safe(c, n, &classes, all_node) {
+ list_for_each_entry_safe(c, n, &dept_classes, all_node) {
if (!within((void *)c->key, start, sz) &&
!within(c->name, start, sz))
continue;
@@ -2200,7 +2194,7 @@ static struct dept_class *check_new_class(struct dept_key *local,
c->sub_id = sub_id;
c->key = (unsigned long)(k->base + sub_id);
hash_add_class(c);
- list_add(&c->all_node, &classes);
+ list_add(&c->all_node, &dept_classes);
unlock:
dept_unlock();
caching:
@@ -2915,8 +2909,8 @@ static void migrate_per_cpu_pool(void)
struct llist_head *from;
struct llist_head *to;
- from = &pool[i].boot_pool;
- to = per_cpu_ptr(pool[i].lpool, boot_cpu);
+ from = &dept_pool[i].boot_pool;
+ to = per_cpu_ptr(dept_pool[i].lpool, boot_cpu);
move_llist(to, from);
}
}
new file mode 100644
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Dept(DEPendency Tracker) - runtime dependency tracker internal header
+ *
+ * Started by Byungchul Park <max.byungchul.park@gmail.com>:
+ *
+ * Copyright (c) 2020 LG Electronics, Inc., Byungchul Park
+ */
+
+#ifndef __DEPT_INTERNAL_H
+#define __DEPT_INTERNAL_H
+
+#ifdef CONFIG_DEPT
+
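+/*
+ * Expand dept_object.h to generate one enum constant per object type,
+ * with OBJECT_NR giving the total number of types.
+ */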
+enum object_t {
+#define OBJECT(id, nr) OBJECT_##id,
+ #include "dept_object.h"
+#undef OBJECT
+ OBJECT_NR,
+};
+
+extern struct list_head dept_classes;
+extern struct dept_pool dept_pool[];
+
+#endif /* CONFIG_DEPT */
+#endif /* __DEPT_INTERNAL_H */
new file mode 100644
@@ -0,0 +1,95 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Procfs knobs for Dept(DEPendency Tracker)
+ *
+ * Started by Byungchul Park <max.byungchul.park@gmail.com>:
+ *
+ * Copyright (C) 2021 LG Electronics, Inc., Byungchul Park
+ */
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/dept.h>
+#include "dept_internal.h"
+
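+/*
+ * seq_file iterator over the global list of dependency classes,
+ * dept_classes, defined in dept.c.
+ */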
+static void *l_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ /*
+ * XXX: Serialize list traversal if needed. The following might
+	 * give wrong information under contention.
+ */
+ return seq_list_next(v, &dept_classes, pos);
+}
+
+static void *l_start(struct seq_file *m, loff_t *pos)
+{
+ /*
+ * XXX: Serialize list traversal if needed. The following might
+	 * give wrong information under contention.
+ */
+ return seq_list_start_head(&dept_classes, *pos);
+}
+
+static void l_stop(struct seq_file *m, void *v)
+{
+}
+
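+/*
+ * Print one class per entry: its key, an optional "<sched>" marker and
+ * its name, followed by one "->" line per class it has a dependency on.
+ */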
+static int l_show(struct seq_file *m, void *v)
+{
+ struct dept_class *fc = list_entry(v, struct dept_class, all_node);
+ struct dept_dep *d;
+ const char *prefix;
+
+ if (v == &dept_classes) {
+ seq_puts(m, "All classes:\n\n");
+ return 0;
+ }
+
+ prefix = fc->sched_map ? "<sched> " : "";
+ seq_printf(m, "[%p] %s%s\n", (void *)fc->key, prefix, fc->name);
+
+ /*
+ * XXX: Serialize list traversal if needed. The following might
+	 * give wrong information under contention.
+ */
+ list_for_each_entry(d, &fc->dep_head, dep_node) {
+ struct dept_class *tc = d->wait->class;
+
+ prefix = tc->sched_map ? "<sched> " : "";
+ seq_printf(m, " -> [%p] %s%s\n", (void *)tc->key, prefix, tc->name);
+ }
+ seq_puts(m, "\n");
+
+ return 0;
+}
+
+static const struct seq_operations dept_deps_ops = {
+ .start = l_start,
+ .next = l_next,
+ .stop = l_stop,
+ .show = l_show,
+};
+
+static int dept_stats_show(struct seq_file *m, void *v)
+{
+ int r;
+
+ seq_puts(m, "Availability in the static pools:\n\n");
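+	/*
+	 * Expand dept_object.h once per object type: print the number of
+	 * objects still available in each static pool against its capacity.
+	 */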
+#define OBJECT(id, nr) \
+ r = atomic_read(&dept_pool[OBJECT_##id].obj_nr); \
+ if (r < 0) \
+ r = 0; \
+ seq_printf(m, "%s\t%d/%d(%d%%)\n", #id, r, nr, (r * 100) / (nr));
+ #include "dept_object.h"
+#undef OBJECT
+
+ return 0;
+}
+
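+/*
+ * Expose /proc/dept_deps (per-class dependency list) and
+ * /proc/dept_stats (static pool availability), readable by the owner
+ * (root) only.
+ */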
+static int __init dept_proc_init(void)
+{
+ proc_create_seq("dept_deps", S_IRUSR, NULL, &dept_deps_ops);
+ proc_create_single("dept_stats", S_IRUSR, NULL, dept_stats_show);
+ return 0;
+}
+
+__initcall(dept_proc_init);