@@ -1181,7 +1181,7 @@ static void put_ctx(struct perf_event_context *ctx)
if (ctx->parent_ctx)
put_ctx(ctx->parent_ctx);
if (ctx->task && ctx->task != TASK_TOMBSTONE)
- put_task_struct(ctx->task);
+ put_task_struct_atomic_safe(ctx->task);
call_rcu(&ctx->rcu_head, free_ctx);
}
}
@@ -13019,7 +13019,7 @@ static void perf_event_exit_task_context(struct task_struct *child)
RCU_INIT_POINTER(child->perf_event_ctxp, NULL);
put_ctx(child_ctx); /* cannot be last */
WRITE_ONCE(child_ctx->task, TASK_TOMBSTONE);
- put_task_struct(current); /* cannot be last */
+ put_task_struct_atomic_safe(current); /* cannot be last */
clone_ctx = unclone_ctx(child_ctx);
raw_spin_unlock_irq(&child_ctx->lock);
@@ -13124,7 +13124,7 @@ void perf_event_free_task(struct task_struct *task)
*/
RCU_INIT_POINTER(task->perf_event_ctxp, NULL);
WRITE_ONCE(ctx->task, TASK_TOMBSTONE);
- put_task_struct(task); /* cannot be last */
+ put_task_struct_atomic_safe(task); /* cannot be last */
raw_spin_unlock_irq(&ctx->lock);
@@ -509,7 +509,7 @@ static __always_inline void rt_mutex_wake_up_q(struct rt_wake_q_head *wqh)
{
if (IS_ENABLED(CONFIG_PREEMPT_RT) && wqh->rtlock_task) {
wake_up_state(wqh->rtlock_task, TASK_RTLOCK_WAIT);
- put_task_struct(wqh->rtlock_task);
+ put_task_struct_atomic_safe(wqh->rtlock_task);
wqh->rtlock_task = NULL;
}
@@ -649,7 +649,7 @@ static int __sched rt_mutex_adjust_prio_chain(struct task_struct *task,
"task: %s (%d)\n", max_lock_depth,
top_task->comm, task_pid_nr(top_task));
}
- put_task_struct(task);
+ put_task_struct_atomic_safe(task);
return -EDEADLK;
}
@@ -817,7 +817,7 @@ static int __sched rt_mutex_adjust_prio_chain(struct task_struct *task,
* No requeue[7] here. Just release @task [8]
*/
raw_spin_unlock(&task->pi_lock);
- put_task_struct(task);
+ put_task_struct_atomic_safe(task);
/*
* [9] check_exit_conditions_3 protected by lock->wait_lock.
@@ -886,7 +886,7 @@ static int __sched rt_mutex_adjust_prio_chain(struct task_struct *task,
/* [8] Release the task */
raw_spin_unlock(&task->pi_lock);
- put_task_struct(task);
+ put_task_struct_atomic_safe(task);
/*
* [9] check_exit_conditions_3 protected by lock->wait_lock.
@@ -990,7 +990,7 @@ static int __sched rt_mutex_adjust_prio_chain(struct task_struct *task,
out_unlock_pi:
raw_spin_unlock_irq(&task->pi_lock);
out_put_task:
- put_task_struct(task);
+ put_task_struct_atomic_safe(task);
return ret;
}
@@ -1007,7 +1007,7 @@ void wake_up_q(struct wake_q_head *head)
* the queueing in wake_q_add() so as not to miss wakeups.
*/
wake_up_process(task);
- put_task_struct(task);
+ put_task_struct_atomic_safe(task);
}
}
@@ -2528,7 +2528,7 @@ int push_cpu_stop(void *arg)
raw_spin_rq_unlock(rq);
raw_spin_unlock_irq(&p->pi_lock);
- put_task_struct(p);
+ put_task_struct_atomic_safe(p);
return 0;
}
@@ -9316,7 +9316,7 @@ static int __balance_push_cpu_stop(void *arg)
rq_unlock(rq, &rf);
raw_spin_unlock_irq(&p->pi_lock);
- put_task_struct(p);
+ put_task_struct_atomic_safe(p);
return 0;
}
@@ -327,7 +327,7 @@ static void dl_change_utilization(struct task_struct *p, u64 new_bw)
* so we are still safe.
*/
if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
- put_task_struct(p);
+ put_task_struct_atomic_safe(p);
}
__sub_rq_bw(p->dl.dl_bw, &rq->dl);
__add_rq_bw(new_bw, &rq->dl);
@@ -467,7 +467,7 @@ static void task_contending(struct sched_dl_entity *dl_se, int flags)
* so we are still safe.
*/
if (hrtimer_try_to_cancel(&dl_se->inactive_timer) == 1)
- put_task_struct(dl_task_of(dl_se));
+ put_task_struct_atomic_safe(dl_task_of(dl_se));
} else {
/*
* Since "dl_non_contending" is not set, the
@@ -1207,7 +1207,7 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
* This can free the task_struct, including this hrtimer, do not touch
* anything related to that after this.
*/
- put_task_struct(p);
+ put_task_struct_atomic_safe(p);
return HRTIMER_NORESTART;
}
@@ -1442,7 +1442,7 @@ static enum hrtimer_restart inactive_task_timer(struct hrtimer *timer)
dl_se->dl_non_contending = 0;
unlock:
task_rq_unlock(rq, p, &rf);
- put_task_struct(p);
+ put_task_struct_atomic_safe(p);
return HRTIMER_NORESTART;
}
@@ -1899,7 +1899,7 @@ static void migrate_task_rq_dl(struct task_struct *p, int new_cpu __maybe_unused
* so we are still safe.
*/
if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
- put_task_struct(p);
+ put_task_struct_atomic_safe(p);
}
sub_rq_bw(&p->dl, &rq->dl);
rq_unlock(rq, &rf);
@@ -2351,7 +2351,7 @@ static int push_dl_task(struct rq *rq)
/* No more tasks */
goto out;
- put_task_struct(next_task);
+ put_task_struct_atomic_safe(next_task);
next_task = task;
goto retry;
}
@@ -2366,7 +2366,7 @@ static int push_dl_task(struct rq *rq)
double_unlock_balance(rq, later_rq);
out:
- put_task_struct(next_task);
+ put_task_struct_atomic_safe(next_task);
return ret;
}
@@ -2633,7 +2633,7 @@ static void switched_from_dl(struct rq *rq, struct task_struct *p)
static void switched_to_dl(struct rq *rq, struct task_struct *p)
{
if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
- put_task_struct(p);
+ put_task_struct_atomic_safe(p);
/* If p is not queued we will update its parameters at next wakeup. */
if (!task_on_rq_queued(p)) {
@@ -2150,7 +2150,7 @@ static int push_rt_task(struct rq *rq, bool pull)
/*
* Something has shifted, try again.
*/
- put_task_struct(next_task);
+ put_task_struct_atomic_safe(next_task);
next_task = task;
goto retry;
}
@@ -2163,7 +2163,7 @@ static int push_rt_task(struct rq *rq, bool pull)
double_unlock_balance(rq, lowest_rq);
out:
- put_task_struct(next_task);
+ put_task_struct_atomic_safe(next_task);
return ret;
}
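
All of the hunks above switch callers that run in atomic context (under raw spinlocks, hrtimer callbacks, or stop-machine work) from put_task_struct() to a helper named put_task_struct_atomic_safe(), whose definition is not included in this excerpt. Purely as an illustration, the sketch below shows one way such a helper could be structured, assuming the underlying problem is that the final __put_task_struct() can acquire sleeping locks and therefore must not run in atomic context on PREEMPT_RT. The callback name sketch_delayed_put_task_struct is hypothetical; put_task_struct(), __put_task_struct(), refcount_dec_and_test(), and call_rcu() are existing kernel interfaces.

	#include <linux/rcupdate.h>
	#include <linux/sched/task.h>

	/* Hypothetical sketch, not part of the patch above. */
	static void sketch_delayed_put_task_struct(struct rcu_head *rhp)
	{
		struct task_struct *t = container_of(rhp, struct task_struct, rcu);

		/*
		 * Runs from RCU callback context; on PREEMPT_RT this is
		 * preemptible, so the sleeping locks that __put_task_struct()
		 * may take are safe here.
		 */
		__put_task_struct(t);
	}

	static inline void put_task_struct_atomic_safe(struct task_struct *t)
	{
		if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
			/* Non-RT kernels keep the plain, immediate put. */
			put_task_struct(t);
			return;
		}

		/*
		 * Drop the reference here and defer only the final free, so
		 * the RCU callback cannot race with remaining users of the
		 * task.
		 */
		if (refcount_dec_and_test(&t->usage))
			call_rcu(&t->rcu, sketch_delayed_put_task_struct);
	}

In such a scheme, gating on IS_ENABLED(CONFIG_PREEMPT_RT) would keep the common non-RT path equivalent to put_task_struct(), and decrementing the refcount before queueing the callback would avoid posting an RCU callback on every put rather than only on the last one.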