@@ -220,7 +220,7 @@ static __always_inline void call_do_softirq(const void *sp)
}
#endif
-DEFINE_STATIC_CALL_RET0(ppc_get_irq, *ppc_md.get_irq);
+DEFINE_STATIC_CALL_NULL(ppc_get_irq, *ppc_md.get_irq);
static void __do_irq(struct pt_regs *regs, unsigned long oldsp)
{
@@ -330,7 +330,7 @@ static inline bool amd_is_pair_event_code(struct hw_perf_event *hwc)
}
}
-DEFINE_STATIC_CALL_RET0(amd_pmu_branch_hw_config, *x86_pmu.hw_config);
+DEFINE_STATIC_CALL_NULL(amd_pmu_branch_hw_config, *x86_pmu.hw_config);
static int amd_core_hw_config(struct perf_event *event)
{
@@ -96,7 +96,7 @@ DEFINE_STATIC_CALL_NULL(x86_pmu_filter, *x86_pmu.filter);
 * This one is magic: it will get called even when PMU init fails (because
* there is no PMU), in which case it should simply return NULL.
*/
-DEFINE_STATIC_CALL_RET0(x86_pmu_guest_get_msrs, *x86_pmu.guest_get_msrs);
+DEFINE_STATIC_CALL_NULL(x86_pmu_guest_get_msrs, *x86_pmu.guest_get_msrs);
u64 __read_mostly hw_cache_event_ids
[PERF_COUNT_HW_CACHE_MAX]
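/*
 * Illustrative sketch, not part of the patch: why the default target of
 * x86_pmu_guest_get_msrs must return 0.  If PMU init fails, nothing ever
 * calls static_call_update() on it, yet KVM-side callers may still invoke
 * it.  With the consolidated DEFINE_STATIC_CALL_NULL() above (which, per
 * the static_call.h hunks below, defaults to __static_call_return0), the
 * untrained call yields 0, i.e. a NULL pointer.  The wrapper name and its
 * signature are assumptions made for illustration only.
 */
static struct perf_guest_switch_msr *sketch_guest_get_msrs(int *nr, void *data)
{
	/* Returns NULL until x86_pmu init installs a real implementation. */
	return static_call(x86_pmu_guest_get_msrs)(nr, data);
}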
@@ -17,7 +17,6 @@
* DECLARE_STATIC_CALL(name, func);
* DEFINE_STATIC_CALL(name, func);
* DEFINE_STATIC_CALL_NULL(name, typename);
- * DEFINE_STATIC_CALL_RET0(name, typename);
*
* __static_call_return0;
*
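/*
 * Illustrative sketch, not part of the patch: minimal use of the API
 * summarized in the comment above.  The hook name, typedef and
 * implementation are invented; only the macros listed here plus
 * static_call() and static_call_update() are relied on.
 */
typedef int (sketch_hook_fn)(int arg);

DECLARE_STATIC_CALL(sketch_hook, sketch_hook_fn);	/* normally in a header */
DEFINE_STATIC_CALL_NULL(sketch_hook, sketch_hook_fn);

static int sketch_hook_impl(int arg)
{
	return arg + 1;
}

static int sketch_use_hook(void)
{
	int a = static_call(sketch_hook)(41);	/* 0: still the NULL/RET0 default */

	static_call_update(sketch_hook, sketch_hook_impl);
	return a + static_call(sketch_hook)(41);	/* 42 once retargeted */
}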
@@ -171,8 +170,6 @@ extern int static_call_text_reserved(void *start, void *end);
}; \
ARCH_DEFINE_STATIC_CALL_RET0_TRAMP(name)
-#define DEFINE_STATIC_CALL_RET0 DEFINE_STATIC_CALL_NULL
-
#define EXPORT_STATIC_CALL(name) \
EXPORT_SYMBOL(STATIC_CALL_KEY(name)); \
EXPORT_SYMBOL(STATIC_CALL_TRAMP(name))
@@ -206,8 +203,6 @@ static inline int static_call_init(void) { return 0; }
}; \
ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name)
-#define DEFINE_STATIC_CALL_RET0 DEFINE_STATIC_CALL_NULL
-
static inline
void __static_call_update(struct static_call_key *key, void *tramp, void *func)
{
@@ -251,8 +246,6 @@ static inline int static_call_init(void) { return 0; }
#define DEFINE_STATIC_CALL_NULL(name, _func) \
__DEFINE_STATIC_CALL(name, _func, __static_call_return0)
-#define DEFINE_STATIC_CALL_RET0 DEFINE_STATIC_CALL_NULL
-
static inline
void __static_call_update(struct static_call_key *key, void *tramp, void *func)
{
@@ -6757,9 +6757,9 @@ static void perf_pending_task(struct callback_head *head)
#ifdef CONFIG_GUEST_PERF_EVENTS
struct perf_guest_info_callbacks __rcu *perf_guest_cbs;
-DEFINE_STATIC_CALL_RET0(__perf_guest_state, *perf_guest_cbs->state);
-DEFINE_STATIC_CALL_RET0(__perf_guest_get_ip, *perf_guest_cbs->get_ip);
-DEFINE_STATIC_CALL_RET0(__perf_guest_handle_intel_pt_intr, *perf_guest_cbs->handle_intel_pt_intr);
+DEFINE_STATIC_CALL_NULL(__perf_guest_state, *perf_guest_cbs->state);
+DEFINE_STATIC_CALL_NULL(__perf_guest_get_ip, *perf_guest_cbs->get_ip);
+DEFINE_STATIC_CALL_NULL(__perf_guest_handle_intel_pt_intr, *perf_guest_cbs->handle_intel_pt_intr);
void perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
{
@@ -13766,4 +13766,4 @@ struct cgroup_subsys perf_event_cgrp_subsys = {
};
#endif /* CONFIG_CGROUP_PERF */
-DEFINE_STATIC_CALL_RET0(perf_snapshot_branch_stack, perf_snapshot_branch_stack_t);
+DEFINE_STATIC_CALL_NULL(perf_snapshot_branch_stack, perf_snapshot_branch_stack_t);
@@ -8492,12 +8492,12 @@ EXPORT_SYMBOL(__cond_resched);
#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
#define cond_resched_dynamic_enabled __cond_resched
#define cond_resched_dynamic_disabled ((void *)&__static_call_return0)
-DEFINE_STATIC_CALL_RET0(cond_resched, __cond_resched);
+DEFINE_STATIC_CALL_NULL(cond_resched, __cond_resched);
EXPORT_STATIC_CALL_TRAMP(cond_resched);
#define might_resched_dynamic_enabled __cond_resched
#define might_resched_dynamic_disabled ((void *)&__static_call_return0)
-DEFINE_STATIC_CALL_RET0(might_resched, __cond_resched);
+DEFINE_STATIC_CALL_NULL(might_resched, __cond_resched);
EXPORT_STATIC_CALL_TRAMP(might_resched);
#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
static DEFINE_STATIC_KEY_FALSE(sk_dynamic_cond_resched);
@@ -8598,7 +8598,7 @@ EXPORT_SYMBOL(__cond_resched_rwlock_write);
*
* NONE:
* cond_resched <- __cond_resched
- * might_resched <- RET0
+ * might_resched <- NULL
* preempt_schedule <- NOP
* preempt_schedule_notrace <- NOP
* irqentry_exit_cond_resched <- NOP
@@ -8611,8 +8611,8 @@ EXPORT_SYMBOL(__cond_resched_rwlock_write);
* irqentry_exit_cond_resched <- NOP
*
* FULL:
- * cond_resched <- RET0
- * might_resched <- RET0
+ * cond_resched <- NULL
+ * might_resched <- NULL
* preempt_schedule <- preempt_schedule
* preempt_schedule_notrace <- preempt_schedule_notrace
* irqentry_exit_cond_resched <- irqentry_exit_cond_resched
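/*
 * Illustrative sketch, not part of the patch: how the "FULL" row of the
 * table above could be applied with the interfaces visible in this file.
 * The kernel's real mode-switching code lives elsewhere in sched/core.c;
 * this only shows the effect on the two calls touched by this hunk,
 * assuming CONFIG_HAVE_PREEMPT_DYNAMIC_CALL.
 */
static void sketch_apply_full_preemption(void)
{
	/* Both collapse to __static_call_return0, i.e. "return 0". */
	static_call_update(cond_resched, cond_resched_dynamic_disabled);
	static_call_update(might_resched, might_resched_dynamic_disabled);
}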