// SPDX-License-Identifier: GPL-2.0
#include <linux/spinlock.h>
#include <linux/task_work.h>
#include <linux/tracehook.h>

static struct callback_head work_exited; /* all we need is ->next == NULL */

/**
 * task_work_add - ask the @task to execute @work->func()
 * @task: the task which should run the callback
 * @work: the callback to run
 * @notify: how to notify the targeted task
 *
 * Queue @work for task_work_run() below and notify the @task if @notify
 * is @TWA_RESUME or @TWA_SIGNAL. @TWA_SIGNAL works like signals, in that
 * it will interrupt the targeted task and run the task_work. @TWA_RESUME
 * work is run only when the task exits the kernel and returns to user mode,
 * or before entering guest mode. Fails if the @task is exiting/exited and
 * thus it can't process this @work. Otherwise @work->func() will be called
 * when the @task goes through one of the aforementioned transitions, or
 * exits.
 *
 * If the targeted task is exiting, then an error is returned and the work
 * item is not queued. It's up to the caller to arrange for an alternative
 * mechanism in that case.
 *
 * Note: there is no ordering guarantee on works queued here. The task_work
 * list is LIFO.
 *
 * RETURNS:
 * 0 if the work was queued, or -ESRCH if the @task is exiting/exited.
 */
int task_work_add(struct task_struct *task, struct callback_head *work,
		  enum task_work_notify_mode notify)
{
	struct callback_head *head;

	/* record the work call stack in order to print it in KASAN reports */
	kasan_record_aux_stack(work);

	/*
	 * Lock-free LIFO push. A head of &work_exited means the task has
	 * already run its final task_work_run() and accepts no more work.
	 */
	do {
		head = READ_ONCE(task->task_works);
		if (unlikely(head == &work_exited))
			return -ESRCH;
		work->next = head;
	} while (cmpxchg(&task->task_works, head, work) != head);

	switch (notify) {
	case TWA_NONE:
		break;
	case TWA_RESUME:
		set_notify_resume(task);
		break;
	case TWA_SIGNAL:
		set_notify_signal(task);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

	return 0;
}
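
/*
 * Example (illustrative sketch, not part of this file): a caller typically
 * embeds the callback_head in its own structure, recovers it with
 * container_of() in the callback, and frees the work itself when
 * task_work_add() fails because the task is exiting. The my_work names are
 * hypothetical; the sketch also assumes <linux/slab.h> for kzalloc()/kfree().
 */
#if 0
struct my_work {
	struct callback_head cb;
	int payload;
};

static void my_work_func(struct callback_head *cb)
{
	/* runs in the context of the targeted task */
	struct my_work *mw = container_of(cb, struct my_work, cb);

	pr_info("task_work payload=%d\n", mw->payload);
	kfree(mw);
}

static int my_queue_work(struct task_struct *task, int payload)
{
	struct my_work *mw = kzalloc(sizeof(*mw), GFP_KERNEL);

	if (!mw)
		return -ENOMEM;
	mw->payload = payload;
	init_task_work(&mw->cb, my_work_func);
	if (task_work_add(task, &mw->cb, TWA_RESUME)) {
		kfree(mw);	/* @task is exiting, the work was not queued */
		return -ESRCH;
	}
	return 0;
}
#endif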

/**
 * task_work_cancel_match - cancel a pending work added by task_work_add()
 * @task: the task which should execute the work
 * @match: match function; the first queued work for which it returns true
 *         is cancelled
 * @data: opaque data passed to @match
 *
 * RETURNS:
 * The found work or NULL if not found.
 */
struct callback_head *
task_work_cancel_match(struct task_struct *task,
		       bool (*match)(struct callback_head *, void *data),
		       void *data)
{
	struct callback_head **pprev = &task->task_works;
	struct callback_head *work;
	unsigned long flags;

	if (likely(!task->task_works))
		return NULL;
	/*
	 * If cmpxchg() fails we continue without updating pprev.
	 * Either we raced with task_work_add() which added the
	 * new entry before this work, and we will find it again, or
	 * we raced with task_work_run() and *pprev is NULL/exited.
	 */
	raw_spin_lock_irqsave(&task->pi_lock, flags);
	while ((work = READ_ONCE(*pprev))) {
		if (!match(work, data))
			pprev = &work->next;
		else if (cmpxchg(pprev, work, work->next) == work)
			break;
	}
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);

	return work;
}

static bool task_work_func_match(struct callback_head *cb, void *data)
{
	return cb->func == data;
}
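
/*
 * Example (illustrative sketch): besides matching on ->func as above, a
 * caller can use task_work_cancel_match() to cancel one specific queued
 * instance by matching on the callback_head pointer itself. match_exact_cb()
 * is a hypothetical helper, not a kernel API.
 */
#if 0
static bool match_exact_cb(struct callback_head *cb, void *data)
{
	return cb == data;
}

static struct callback_head *
cancel_exact(struct task_struct *task, struct callback_head *cb)
{
	return task_work_cancel_match(task, match_exact_cb, cb);
}
#endif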

/**
 * task_work_cancel - cancel a pending work added by task_work_add()
 * @task: the task which should execute the work
 * @func: identifies the work to remove
 *
 * Find the last queued pending work with ->func == @func and remove
 * it from the queue.
 *
 * RETURNS:
 * The found work or NULL if not found.
 */
struct callback_head *
task_work_cancel(struct task_struct *task, task_work_func_t func)
{
	return task_work_cancel_match(task, task_work_func_match, func);
}
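
/*
 * Example (illustrative sketch, continuing the hypothetical my_work sketch
 * above): a non-NULL return from task_work_cancel() means the callback will
 * never run, so the caller owns the embedded structure again and must free
 * it. A NULL return means the work already ran or was never queued.
 */
#if 0
static void my_cancel_work(struct task_struct *task)
{
	struct callback_head *cb = task_work_cancel(task, my_work_func);

	if (cb)
		kfree(container_of(cb, struct my_work, cb));
}
#endif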

/**
 * task_work_run - execute the works added by task_work_add()
 *
 * Flush the pending works. Should be used by the core kernel code.
 * Called before the task returns to user mode or stops, or when
 * it exits. In the latter case task_work_add() can no longer add
 * new work after task_work_run() returns.
 */
void task_work_run(void)
{
	struct task_struct *task = current;
	struct callback_head *work, *head, *next;

	for (;;) {
		/*
		 * work->func() can do task_work_add(), do not set
		 * work_exited unless the list is empty.
		 */
		do {
			head = NULL;
			work = READ_ONCE(task->task_works);
			if (!work) {
				if (task->flags & PF_EXITING)
					head = &work_exited;
				else
					break;
			}
		} while (cmpxchg(&task->task_works, work, head) != work);

		if (!work)
			break;
		/*
		 * Synchronize with task_work_cancel(): it cannot remove
		 * the first entry == work since its cmpxchg(task_works)
		 * must fail. But it can remove another entry from the
		 * ->next list.
		 */
		raw_spin_lock_irq(&task->pi_lock);
		raw_spin_unlock_irq(&task->pi_lock);

		do {
			next = work->next;
			work->func(work);
			work = next;
			cond_resched();
		} while (work);
	}
}
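
/*
 * Illustrative sketch (not part of this file): the core kernel invokes
 * task_work_run() from the exit path via exit_task_work() and from the
 * return-to-user-mode path once TIF_NOTIFY_RESUME (set by
 * set_notify_resume()) is observed. The hook below is a simplified,
 * hypothetical rendering of that return path, not the real entry code.
 */
#if 0
static void example_exit_to_user_mode_work(void)
{
	if (test_and_clear_thread_flag(TIF_NOTIFY_RESUME))
		task_work_run();	/* flush pending works before user mode */
}
#endif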