@@ -270,6 +270,11 @@ struct dept_wait {
* whether this wait is for commit in scheduler
*/
bool sched_sleep;
+
+ /*
+ * whether a timeout is set
+ */
+ bool timeout;
};
};
};
@@ -458,6 +463,7 @@ struct dept_task {
bool stage_sched_map;
const char *stage_w_fn;
unsigned long stage_ip;
+ bool stage_timeout;
/*
* the number of missing ecxts
@@ -496,6 +502,7 @@ struct dept_task {
.stage_sched_map = false, \
.stage_w_fn = NULL, \
.stage_ip = 0UL, \
+ .stage_timeout = false, \
.missing_ecxt = 0, \
.hardirqs_enabled = false, \
.softirqs_enabled = false, \
@@ -513,8 +520,8 @@ extern void dept_map_init(struct dept_map *m, struct dept_key *k, int sub_u, con
extern void dept_map_reinit(struct dept_map *m, struct dept_key *k, int sub_u, const char *n);
extern void dept_map_copy(struct dept_map *to, struct dept_map *from);
-extern void dept_wait(struct dept_map *m, unsigned long w_f, unsigned long ip, const char *w_fn, int sub_l);
-extern void dept_stage_wait(struct dept_map *m, struct dept_key *k, unsigned long ip, const char *w_fn);
+extern void dept_wait(struct dept_map *m, unsigned long w_f, unsigned long ip, const char *w_fn, int sub_l, long timeout);
+extern void dept_stage_wait(struct dept_map *m, struct dept_key *k, unsigned long ip, const char *w_fn, long timeout);
extern void dept_request_event_wait_commit(void);
extern void dept_clean_stage(void);
extern void dept_stage_event(struct task_struct *t, unsigned long ip);
@@ -566,8 +573,8 @@ struct dept_task { };
#define dept_map_reinit(m, k, su, n) do { (void)(n); (void)(k); } while (0)
#define dept_map_copy(t, f) do { } while (0)
-#define dept_wait(m, w_f, ip, w_fn, sl) do { (void)(w_fn); } while (0)
-#define dept_stage_wait(m, k, ip, w_fn) do { (void)(k); (void)(w_fn); } while (0)
+#define dept_wait(m, w_f, ip, w_fn, sl, t) do { (void)(w_fn); } while (0)
+#define dept_stage_wait(m, k, ip, w_fn, t) do { (void)(k); (void)(w_fn); } while (0)
#define dept_request_event_wait_commit() do { } while (0)
#define dept_clean_stage() do { } while (0)
#define dept_stage_event(t, ip) do { } while (0)
@@ -27,7 +27,7 @@
else if (t) \
dept_ecxt_enter(m, LDT_EVT_L, i, "trylock", "unlock", sl);\
else { \
- dept_wait(m, LDT_EVT_L, i, "lock", sl); \
+ dept_wait(m, LDT_EVT_L, i, "lock", sl, -1L); \
dept_ecxt_enter(m, LDT_EVT_L, i, "lock", "unlock", sl);\
} \
} while (0)
@@ -39,7 +39,7 @@
else if (t) \
dept_ecxt_enter(m, LDT_EVT_R, i, "read_trylock", "read_unlock", sl);\
else { \
- dept_wait(m, q ? LDT_EVT_RW : LDT_EVT_W, i, "read_lock", sl);\
+ dept_wait(m, q ? LDT_EVT_RW : LDT_EVT_W, i, "read_lock", sl, -1L);\
dept_ecxt_enter(m, LDT_EVT_R, i, "read_lock", "read_unlock", sl);\
} \
} while (0)
@@ -51,7 +51,7 @@
else if (t) \
dept_ecxt_enter(m, LDT_EVT_W, i, "write_trylock", "write_unlock", sl);\
else { \
- dept_wait(m, LDT_EVT_RW, i, "write_lock", sl); \
+ dept_wait(m, LDT_EVT_RW, i, "write_lock", sl, -1L);\
dept_ecxt_enter(m, LDT_EVT_W, i, "write_lock", "write_unlock", sl);\
} \
} while (0)
@@ -22,11 +22,12 @@
#define sdt_map_init_key(m, k) dept_map_init(m, k, 0, #m)
-#define sdt_wait(m) \
+#define sdt_wait_timeout(m, t) \
do { \
dept_request_event(m); \
- dept_wait(m, 1UL, _THIS_IP_, __func__, 0); \
+ dept_wait(m, 1UL, _THIS_IP_, __func__, 0, t); \
} while (0)
+#define sdt_wait(m) sdt_wait_timeout(m, -1L)
/*
* sdt_might_sleep() and its family will be committed in __schedule()
@@ -37,12 +38,13 @@
/*
* Use the code location as the class key if an explicit map is not used.
*/
-#define sdt_might_sleep_start(m) \
+#define sdt_might_sleep_start_timeout(m, t) \
do { \
struct dept_map *__m = m; \
static struct dept_key __key; \
- dept_stage_wait(__m, __m ? NULL : &__key, _THIS_IP_, __func__);\
+ dept_stage_wait(__m, __m ? NULL : &__key, _THIS_IP_, __func__, t);\
} while (0)
+#define sdt_might_sleep_start(m) sdt_might_sleep_start_timeout(m, -1L)
#define sdt_might_sleep_end() dept_clean_stage()
@@ -52,7 +54,9 @@
#else /* !CONFIG_DEPT */
#define sdt_map_init(m) do { } while (0)
#define sdt_map_init_key(m, k) do { (void)(k); } while (0)
+#define sdt_wait_timeout(m, t) do { } while (0)
#define sdt_wait(m) do { } while (0)
+#define sdt_might_sleep_start_timeout(m, t) do { } while (0)
#define sdt_might_sleep_start(m) do { } while (0)
#define sdt_might_sleep_end() do { } while (0)
#define sdt_ecxt_enter(m) do { } while (0)
@@ -740,6 +740,8 @@ static void print_diagram(struct dept_dep *d)
if (!irqf) {
print_spc(spc, "[S] %s(%s:%d)\n", c_fn, fc_n, fc->sub_id);
print_spc(spc, "[W] %s(%s:%d)\n", w_fn, tc_n, tc->sub_id);
+ if (w->timeout)
+ print_spc(spc, "--------------- >8 timeout ---------------\n");
print_spc(spc, "[E] %s(%s:%d)\n", e_fn, fc_n, fc->sub_id);
}
}
@@ -793,6 +795,24 @@ static void print_dep(struct dept_dep *d)
static void save_current_stack(int skip);
+static bool is_timeout_wait_circle(struct dept_class *c)
+{
+ struct dept_class *fc = c->bfs_parent;
+ struct dept_class *tc = c;
+
+ do {
+ struct dept_dep *d = lookup_dep(fc, tc);
+
+ if (d->wait->timeout)
+ return true;
+
+ tc = fc;
+ fc = fc->bfs_parent;
+ } while (tc != c);
+
+ return false;
+}
+
/*
* Print all classes in a circle.
*/
@@ -815,10 +835,14 @@ static void print_circle(struct dept_class *c)
pr_warn("summary\n");
pr_warn("---------------------------------------------------\n");
- if (fc == tc)
+ if (is_timeout_wait_circle(c)) {
+ pr_warn("NOT A DEADLOCK BUT A CIRCULAR DEPENDENCY\n");
+ pr_warn("CHECK IF THE TIMEOUT IS INTENDED\n\n");
+ } else if (fc == tc) {
pr_warn("*** AA DEADLOCK ***\n\n");
- else
+ } else {
pr_warn("*** DEADLOCK ***\n\n");
+ }
i = 0;
do {
@@ -1564,7 +1588,8 @@ static void add_dep(struct dept_ecxt *e, struct dept_wait *w)
static atomic_t wgen = ATOMIC_INIT(1);
static void add_wait(struct dept_class *c, unsigned long ip,
- const char *w_fn, int sub_l, bool sched_sleep)
+ const char *w_fn, int sub_l, bool sched_sleep,
+ bool timeout)
{
struct dept_task *dt = dept_task();
struct dept_wait *w;
@@ -1584,6 +1609,7 @@ static void add_wait(struct dept_class *c, unsigned long ip,
w->wait_fn = w_fn;
w->wait_stack = get_current_stack();
w->sched_sleep = sched_sleep;
+ w->timeout = timeout;
cxt = cur_cxt();
if (cxt == DEPT_CXT_HIRQ || cxt == DEPT_CXT_SIRQ)
@@ -2338,7 +2364,7 @@ static struct dept_class *check_new_class(struct dept_key *local,
*/
static void __dept_wait(struct dept_map *m, unsigned long w_f,
unsigned long ip, const char *w_fn, int sub_l,
- bool sched_sleep, bool sched_map)
+ bool sched_sleep, bool sched_map, bool timeout)
{
int e;
@@ -2361,7 +2387,7 @@ static void __dept_wait(struct dept_map *m, unsigned long w_f,
if (!c)
continue;
- add_wait(c, ip, w_fn, sub_l, sched_sleep);
+ add_wait(c, ip, w_fn, sub_l, sched_sleep, timeout);
}
}
@@ -2403,14 +2429,23 @@ static void __dept_event(struct dept_map *m, unsigned long e_f,
}
void dept_wait(struct dept_map *m, unsigned long w_f,
- unsigned long ip, const char *w_fn, int sub_l)
+ unsigned long ip, const char *w_fn, int sub_l,
+ long timeoutval)
{
struct dept_task *dt = dept_task();
unsigned long flags;
+ bool timeout;
if (unlikely(!dept_working()))
return;
+ timeout = timeoutval > 0 && timeoutval < MAX_SCHEDULE_TIMEOUT;
+
+#if !defined(CONFIG_DEPT_AGGRESSIVE_TIMEOUT_WAIT)
+ if (timeout)
+ return;
+#endif
+
if (dt->recursive)
return;
@@ -2419,21 +2454,30 @@ void dept_wait(struct dept_map *m, unsigned long w_f,
flags = dept_enter();
- __dept_wait(m, w_f, ip, w_fn, sub_l, false, false);
+ __dept_wait(m, w_f, ip, w_fn, sub_l, false, false, timeout);
dept_exit(flags);
}
EXPORT_SYMBOL_GPL(dept_wait);
void dept_stage_wait(struct dept_map *m, struct dept_key *k,
- unsigned long ip, const char *w_fn)
+ unsigned long ip, const char *w_fn,
+ long timeoutval)
{
struct dept_task *dt = dept_task();
unsigned long flags;
+ bool timeout;
if (unlikely(!dept_working()))
return;
+ timeout = timeoutval > 0 && timeoutval < MAX_SCHEDULE_TIMEOUT;
+
+#if !defined(CONFIG_DEPT_AGGRESSIVE_TIMEOUT_WAIT)
+ if (timeout)
+ return;
+#endif
+
if (m && m->nocheck)
return;
@@ -2481,6 +2525,7 @@ void dept_stage_wait(struct dept_map *m, struct dept_key *k,
dt->stage_w_fn = w_fn;
dt->stage_ip = ip;
+ dt->stage_timeout = timeout;
unlock:
arch_spin_unlock(&stage_spin);
@@ -2506,6 +2551,7 @@ void dept_clean_stage(void)
dt->stage_sched_map = false;
dt->stage_w_fn = NULL;
dt->stage_ip = 0UL;
+ dt->stage_timeout = false;
arch_spin_unlock(&stage_spin);
dept_exit_recursive(flags);
@@ -2523,6 +2569,7 @@ void dept_request_event_wait_commit(void)
unsigned long ip;
const char *w_fn;
bool sched_map;
+ bool timeout;
if (unlikely(!dept_working()))
return;
@@ -2545,6 +2592,7 @@ void dept_request_event_wait_commit(void)
w_fn = dt->stage_w_fn;
ip = dt->stage_ip;
sched_map = dt->stage_sched_map;
+ timeout = dt->stage_timeout;
/*
* Avoid zero wgen.
@@ -2552,7 +2600,7 @@ void dept_request_event_wait_commit(void)
wg = atomic_inc_return(&wgen) ?: atomic_inc_return(&wgen);
WRITE_ONCE(dt->stage_m.wgen, wg);
- __dept_wait(&dt->stage_m, 1UL, ip, w_fn, 0, true, sched_map);
+ __dept_wait(&dt->stage_m, 1UL, ip, w_fn, 0, true, sched_map, timeout);
exit:
dept_exit(flags);
}
@@ -1234,6 +1234,16 @@ config DEPT
noting, to mitigate the impact by the false positives, multi
reporting has been supported.
+config DEPT_AGGRESSIVE_TIMEOUT_WAIT
+ bool "Aggressively track even timeout waits"
+ depends on DEPT
+ default n
+ help
+ A timeout wait does not contribute to a deadlock by itself.
+ However, reporting a circular dependency can still be helpful
+ when a timeout is used precisely to avoid a deadlock. Say N if
+ you'd like to avoid verbose reports.
+
config LOCK_DEBUGGING_SUPPORT
bool
depends on TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
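
As a rough usage sketch of the new annotation (hypothetical caller: struct
my_obj, my_wait_timeout() and the dmap field are made-up names, and the
dept_map is assumed to have been initialized with sdt_map_init() elsewhere),
a sleep bounded by schedule_timeout() might be annotated as follows:

struct my_obj {
	struct dept_map		dmap;
	/* ... */
};

static long my_wait_timeout(struct my_obj *obj, long timeout)
{
	/*
	 * 0 < timeout < MAX_SCHEDULE_TIMEOUT marks this as a timeout
	 * wait, which is tracked only with
	 * CONFIG_DEPT_AGGRESSIVE_TIMEOUT_WAIT=y; passing -1L keeps the
	 * plain wait semantics, like sdt_might_sleep_start().
	 */
	sdt_might_sleep_start_timeout(&obj->dmap, timeout);

	set_current_state(TASK_UNINTERRUPTIBLE);
	timeout = schedule_timeout(timeout);

	sdt_might_sleep_end();
	return timeout;
}

The staged wait is committed from __schedule() via
dept_request_event_wait_commit(), so the annotation above only records the
wait site and its timeout flag before going to sleep.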