@@ -12,6 +12,7 @@
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/notifier.h>
+#include <linux/sched/debug_atomic_sleep.h> // for might_sleep()
struct device;
struct clk;
@@ -184,7 +184,7 @@ struct gpio_desc *devm_fwnode_gpiod_get_index(struct device *dev,
#else /* CONFIG_GPIOLIB */
#include <linux/err.h>
-#include <linux/kernel.h>
+#include <linux/sched/debug_atomic_sleep.h> // for might_sleep()
#include <asm/bug.h>
@@ -57,112 +57,6 @@
struct completion;
struct user;
-#ifdef CONFIG_PREEMPT_VOLUNTARY_BUILD
-
-extern int __cond_resched(void);
-# define might_resched() __cond_resched()
-
-#elif defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
-
-extern int __cond_resched(void);
-
-DECLARE_STATIC_CALL(might_resched, __cond_resched);
-
-static __always_inline void might_resched(void)
-{
- static_call_mod(might_resched)();
-}
-
-#elif defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
-
-extern int dynamic_might_resched(void);
-# define might_resched() dynamic_might_resched()
-
-#else
-
-# define might_resched() do { } while (0)
-
-#endif /* CONFIG_PREEMPT_* */
-
-#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
-extern void __might_resched(const char *file, int line, unsigned int offsets);
-extern void __might_sleep(const char *file, int line);
-extern void __cant_sleep(const char *file, int line, int preempt_offset);
-extern void __cant_migrate(const char *file, int line);
-
-/**
- * might_sleep - annotation for functions that can sleep
- *
- * this macro will print a stack trace if it is executed in an atomic
- * context (spinlock, irq-handler, ...). Additional sections where blocking is
- * not allowed can be annotated with non_block_start() and non_block_end()
- * pairs.
- *
- * This is a useful debugging help to be able to catch problems early and not
- * be bitten later when the calling function happens to sleep when it is not
- * supposed to.
- */
-# define might_sleep() \
- do { __might_sleep(__FILE__, __LINE__); might_resched(); } while (0)
-/**
- * cant_sleep - annotation for functions that cannot sleep
- *
- * this macro will print a stack trace if it is executed with preemption enabled
- */
-# define cant_sleep() \
- do { __cant_sleep(__FILE__, __LINE__, 0); } while (0)
-# define sched_annotate_sleep() (current->task_state_change = 0)
-
-/**
- * cant_migrate - annotation for functions that cannot migrate
- *
- * Will print a stack trace if executed in code which is migratable
- */
-# define cant_migrate() \
- do { \
- if (IS_ENABLED(CONFIG_SMP)) \
- __cant_migrate(__FILE__, __LINE__); \
- } while (0)
-
-/**
- * non_block_start - annotate the start of section where sleeping is prohibited
- *
- * This is on behalf of the oom reaper, specifically when it is calling the mmu
- * notifiers. The problem is that if the notifier were to block on, for example,
- * mutex_lock() and if the process which holds that mutex were to perform a
- * sleeping memory allocation, the oom reaper is now blocked on completion of
- * that memory allocation. Other blocking calls like wait_event() pose similar
- * issues.
- */
-# define non_block_start() (current->non_block_count++)
-/**
- * non_block_end - annotate the end of section where sleeping is prohibited
- *
- * Closes a section opened by non_block_start().
- */
-# define non_block_end() WARN_ON(current->non_block_count-- == 0)
-#else
- static inline void __might_resched(const char *file, int line,
- unsigned int offsets) { }
-static inline void __might_sleep(const char *file, int line) { }
-# define might_sleep() do { might_resched(); } while (0)
-# define cant_sleep() do { } while (0)
-# define cant_migrate() do { } while (0)
-# define sched_annotate_sleep() do { } while (0)
-# define non_block_start() do { } while (0)
-# define non_block_end() do { } while (0)
-#endif
-
-#define might_sleep_if(cond) do { if (cond) might_sleep(); } while (0)
-
-#if defined(CONFIG_MMU) && \
- (defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP))
-#define might_fault() __might_fault(__FILE__, __LINE__)
-void __might_fault(const char *file, int line);
-#else
-static inline void might_fault(void) { }
-#endif
-
void do_exit(long error_code) __noreturn;
extern int get_option(char **str, int *pint);
@@ -6,6 +6,7 @@
#include <linux/spinlock.h>
#include <linux/mm_types.h>
#include <linux/mmap_lock.h>
+#include <linux/sched/debug_atomic_sleep.h> // for might_sleep()
#include <linux/srcu.h>
#include <linux/interval_tree.h>
@@ -402,6 +402,8 @@ struct pwm_device *devm_fwnode_pwm_get(struct device *dev,
struct fwnode_handle *fwnode,
const char *con_id);
#else
+#include <linux/sched/debug_atomic_sleep.h> // for might_sleep()
+
static inline bool pwm_might_sleep(struct pwm_device *pwm)
{
return true;
@@ -12,6 +12,7 @@
#ifndef __LINUX_TINY_H
#define __LINUX_TINY_H
+#include <linux/sched/debug_atomic_sleep.h> // for might_sleep()
#include <asm/param.h> /* for HZ */
struct rcu_gp_oldstate {
@@ -31,6 +31,7 @@
#include <linux/refcount_types.h>
#include <linux/resource.h>
#include <linux/latencytop.h>
+#include <linux/sched/debug_atomic_sleep.h> // for __might_resched()
#include <linux/sched/prio.h>
#include <linux/sched/types.h>
#include <linux/signal_types.h>
new file mode 100644
@@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_SCHED_DEBUG_ATOMIC_SLEEP_H
+#define _LINUX_SCHED_DEBUG_ATOMIC_SLEEP_H
+
+#ifdef CONFIG_PREEMPT_VOLUNTARY_BUILD
+
+extern int __cond_resched(void);
+# define might_resched() __cond_resched()
+
+#elif defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
+
+#include <linux/static_call_types.h>
+
+extern int __cond_resched(void);
+
+DECLARE_STATIC_CALL(might_resched, __cond_resched);
+
+static __always_inline void might_resched(void)
+{
+ static_call_mod(might_resched)();
+}
+
+#elif defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
+
+extern int dynamic_might_resched(void);
+# define might_resched() dynamic_might_resched()
+
+#else
+
+# define might_resched() do { } while (0)
+
+#endif /* CONFIG_PREEMPT_* */
+
+#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
+extern void __might_resched(const char *file, int line, unsigned int offsets);
+extern void __might_sleep(const char *file, int line);
+extern void __cant_sleep(const char *file, int line, int preempt_offset);
+extern void __cant_migrate(const char *file, int line);
+
+/**
+ * might_sleep - annotation for functions that can sleep
+ *
+ * this macro will print a stack trace if it is executed in an atomic
+ * context (spinlock, irq-handler, ...). Additional sections where blocking is
+ * not allowed can be annotated with non_block_start() and non_block_end()
+ * pairs.
+ *
+ * This is a useful debugging help to be able to catch problems early and not
+ * be bitten later when the calling function happens to sleep when it is not
+ * supposed to.
+ */
+# define might_sleep() \
+ do { __might_sleep(__FILE__, __LINE__); might_resched(); } while (0)
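+
+/*
+ * Illustrative example (hypothetical foo_update_config() and struct foo,
+ * not a real API): a helper that may block on a mutex annotates itself so
+ * atomic-context callers are caught immediately under
+ * CONFIG_DEBUG_ATOMIC_SLEEP, even on paths that never reach the lock:
+ *
+ *	int foo_update_config(struct foo *f)
+ *	{
+ *		might_sleep();
+ *		mutex_lock(&f->lock);
+ *		f->cfg_dirty = false;
+ *		mutex_unlock(&f->lock);
+ *		return 0;
+ *	}
+ */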
+/**
+ * cant_sleep - annotation for functions that cannot sleep
+ *
+ * this macro will print a stack trace if it is executed with preemption enabled
+ */
+# define cant_sleep() \
+ do { __cant_sleep(__FILE__, __LINE__, 0); } while (0)
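+
+/*
+ * Illustrative example (hypothetical foo_poke_doorbell() and struct
+ * foo_pcpu, not a real API): a fast path that is only correct when
+ * preemption is disabled can assert that requirement on its callers:
+ *
+ *	static void foo_poke_doorbell(struct foo_pcpu *p)
+ *	{
+ *		cant_sleep();
+ *		writel(1, p->doorbell);
+ *	}
+ */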
+# define sched_annotate_sleep() (current->task_state_change = 0)
+
+/**
+ * cant_migrate - annotation for functions that cannot migrate
+ *
+ * Will print a stack trace if executed in code which is migratable
+ */
+# define cant_migrate() \
+ do { \
+ if (IS_ENABLED(CONFIG_SMP)) \
+ __cant_migrate(__FILE__, __LINE__); \
+ } while (0)
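+
+/*
+ * Illustrative example (hypothetical per-CPU variable foo_pending, not a
+ * real symbol): a helper that relies on staying on the current CPU, e.g.
+ * after migrate_disable() or inside a preempt-disabled region, can check
+ * that assumption:
+ *
+ *	static void foo_clear_this_cpu(void)
+ *	{
+ *		cant_migrate();
+ *		__this_cpu_write(foo_pending, 0);
+ *	}
+ */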
+
+/**
+ * non_block_start - annotate the start of section where sleeping is prohibited
+ *
+ * This is on behalf of the oom reaper, specifically when it is calling the mmu
+ * notifiers. The problem is that if the notifier were to block on, for example,
+ * mutex_lock() and if the process which holds that mutex were to perform a
+ * sleeping memory allocation, the oom reaper is now blocked on completion of
+ * that memory allocation. Other blocking calls like wait_event() pose similar
+ * issues.
+ */
+# define non_block_start() (current->non_block_count++)
+/**
+ * non_block_end - annotate the end of section where sleeping is prohibited
+ *
+ * Closes a section opened by non_block_start().
+ */
+# define non_block_end() WARN_ON(current->non_block_count-- == 0)
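+
+/*
+ * Illustrative example (hypothetical foo_invoke_notifiers(), not a real
+ * API): the two annotations are always used as a pair around the region in
+ * which blocking is forbidden:
+ *
+ *	non_block_start();
+ *	ret = foo_invoke_notifiers(mm);		// must not block
+ *	non_block_end();
+ */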
+#else
+ static inline void __might_resched(const char *file, int line,
+ unsigned int offsets) { }
+static inline void __might_sleep(const char *file, int line) { }
+# define might_sleep() do { might_resched(); } while (0)
+# define cant_sleep() do { } while (0)
+# define cant_migrate() do { } while (0)
+# define sched_annotate_sleep() do { } while (0)
+# define non_block_start() do { } while (0)
+# define non_block_end() do { } while (0)
+#endif
+
+#define might_sleep_if(cond) do { if (cond) might_sleep(); } while (0)
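+
+/*
+ * Illustrative example (hypothetical foo_alloc()/foo_alloc_from_pool(), not
+ * a real API): the conditional form is typically used when the ability to
+ * sleep depends on a caller-supplied flag, e.g. whether a gfp mask allows
+ * blocking reclaim:
+ *
+ *	void *foo_alloc(size_t size, gfp_t gfp)
+ *	{
+ *		might_sleep_if(gfpflags_allow_blocking(gfp));
+ *		return foo_alloc_from_pool(size, gfp);
+ *	}
+ */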
+
+#if defined(CONFIG_MMU) && \
+ (defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP))
+#define might_fault() __might_fault(__FILE__, __LINE__)
+void __might_fault(const char *file, int line);
+#else
+static inline void might_fault(void) { }
+#endif
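+
+/*
+ * Illustrative example (hypothetical foo_read_user_flag(), not a real API):
+ * might_fault() marks paths that touch user memory and may therefore fault
+ * and sleep while the fault is serviced:
+ *
+ *	static int foo_read_user_flag(u32 __user *uptr, u32 *val)
+ *	{
+ *		might_fault();
+ *		return get_user(*val, uptr);
+ *	}
+ */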
+
+#endif /* _LINUX_SCHED_DEBUG_ATOMIC_SLEEP_H */
@@ -2,9 +2,9 @@
#ifndef _LINUX_SCHED_MM_H
#define _LINUX_SCHED_MM_H
-#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/sched.h>
+#include <linux/sched/debug_atomic_sleep.h> // for might_sleep_if()
#include <linux/mm_types.h>
#include <linux/gfp.h>
#include <linux/sync_core.h>
@@ -6,6 +6,7 @@
#include <linux/instrumented.h>
#include <linux/minmax.h>
#include <linux/sched.h>
+#include <linux/sched/debug_atomic_sleep.h> // for might_fault()
#include <linux/thread_info.h>
#include <asm/uaccess.h>
@@ -5,6 +5,7 @@
* Linux wait queue related types and methods
*/
#include <linux/list.h>
+#include <linux/sched/debug_atomic_sleep.h> // for might_sleep()
#include <linux/stddef.h>
#include <linux/spinlock.h>
@@ -6,6 +6,7 @@
* Linux wait-bit related types and methods:
*/
#include <linux/wait.h>
+#include <linux/sched/debug_atomic_sleep.h> // for might_sleep()
struct wait_bit_key {
void *flags;
@@ -12,6 +12,7 @@
#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/refcount.h>
+#include <linux/sched/debug_atomic_sleep.h> // for might_sleep()
#include <linux/slab.h>
#include <linux/spinlock.h>
@@ -16,6 +16,7 @@
#include <linux/overflow.h>
#include <linux/rbtree.h>
#include <linux/refcount.h>
+#include <linux/sched/debug_atomic_sleep.h> // for might_sleep_if()
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>