workqueue: use %current instead of worker->task in worker_maybe_bind_and_lock()
worker_maybe_bind_and_lock() uses both @worker->task and @current at
the same time. As worker_maybe_bind_and_lock() can only be called by
the current worker task, they are always the same.
Update worker_maybe_bind_and_lock() to use %current consistently.
This doesn't introduce any functional change.
tj: Massaged the description.
Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
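Note: the equivalence holds because a worker's task pointer is published at kthread creation time, before the thread is allowed to run, and worker_maybe_bind_and_lock() only ever executes in that worker's own context. A minimal standalone sketch of the pattern (the demo_* names are hypothetical, loosely mirroring create_worker(); not the workqueue code itself):

#include <linux/bug.h>
#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/sched.h>

/* hypothetical stand-in for struct worker */
struct demo_worker {
	struct task_struct *task;	/* set at creation, like worker->task */
};

static int demo_worker_fn(void *arg)
{
	struct demo_worker *w = arg;

	/*
	 * The thread function runs in the worker's own context, so the
	 * task pointer stashed at creation is always this thread.
	 */
	WARN_ON_ONCE(w->task != current);
	return 0;
}

static int demo_start_worker(struct demo_worker *w)
{
	w->task = kthread_create(demo_worker_fn, w, "demo_worker");
	if (IS_ERR(w->task))
		return PTR_ERR(w->task);
	/* w->task is published before the thread is woken for the first time */
	wake_up_process(w->task);
	return 0;
}

Since the stashed pointer can never differ from %current inside the thread function, dropping the local @task variable loses nothing.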
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 81f2457..f456433 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1512,7 +1512,7 @@
* flushed from cpu callbacks while cpu is going down, they are
* guaranteed to execute on the cpu.
*
- * This function is to be used by rogue workers and rescuers to bind
+ * This function is to be used by unbound workers and rescuers to bind
* themselves to the target cpu and may race with cpu going down or
* coming online. kthread_bind() can't be used because it may put the
* worker to already dead cpu and set_cpus_allowed_ptr() can't be used
@@ -1537,7 +1537,6 @@
__acquires(&pool->lock)
{
struct worker_pool *pool = worker->pool;
- struct task_struct *task = worker->task;

while (true) {
/*
@@ -1547,12 +1546,12 @@
* against POOL_DISASSOCIATED.
*/
if (!(pool->flags & POOL_DISASSOCIATED))
- set_cpus_allowed_ptr(task, get_cpu_mask(pool->cpu));
+ set_cpus_allowed_ptr(current, get_cpu_mask(pool->cpu));

spin_lock_irq(&pool->lock);
if (pool->flags & POOL_DISASSOCIATED)
return false;
- if (task_cpu(task) == pool->cpu &&
+ if (task_cpu(current) == pool->cpu &&
cpumask_equal(&current->cpus_allowed,
get_cpu_mask(pool->cpu)))
return true;
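For reference, the bind loop after this patch reads roughly as below. This is a sketch stitched together from the hunks above; the comment is paraphrased, and the unlock-and-retry tail (spin_unlock_irq(), cpu_relax(), cond_resched()) is not visible in the diff and is an assumption about the surrounding code:

static bool worker_maybe_bind_and_lock(struct worker *worker)
__acquires(&pool->lock)
{
	struct worker_pool *pool = worker->pool;

	while (true) {
		/*
		 * Migration may race with cpu hotunplug and fail or
		 * silently not move the task; verify the result under
		 * pool->lock against POOL_DISASSOCIATED below.
		 */
		if (!(pool->flags & POOL_DISASSOCIATED))
			set_cpus_allowed_ptr(current, get_cpu_mask(pool->cpu));

		spin_lock_irq(&pool->lock);
		if (pool->flags & POOL_DISASSOCIATED)
			return false;
		if (task_cpu(current) == pool->cpu &&
		    cpumask_equal(&current->cpus_allowed,
				  get_cpu_mask(pool->cpu)))
			return true;
		spin_unlock_irq(&pool->lock);

		/* raced with cpu hotplug; back off and retry */
		cpu_relax();
		cond_resched();
	}
}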