// SPDX-License-Identifier: GPL-2.0
/*
 * padata.c - generic interface to process data streams in parallel
 *
 * See Documentation/padata.txt for API documentation.
 *
 * Copyright (C) 2008, 2009 secunet Security Networks AG
 * Copyright (C) 2008, 2009 Steffen Klassert <steffen.klassert@secunet.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/export.h>
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/padata.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/rcupdate.h>
#include <linux/module.h>

#define MAX_OBJ_NUM 1000

static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index)
{
	int cpu, target_cpu;

	target_cpu = cpumask_first(pd->cpumask.pcpu);
	for (cpu = 0; cpu < cpu_index; cpu++)
		target_cpu = cpumask_next(target_cpu, pd->cpumask.pcpu);

	return target_cpu;
}

static int padata_cpu_hash(struct parallel_data *pd)
{
	unsigned int seq_nr;
	int cpu_index;

	/*
	 * Hash the sequence numbers to the cpus by taking
	 * seq_nr modulo the number of cpus in use.
	 */
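	/*
	 * Illustrative example (numbers made up): with cpumask.pcpu covering
	 * cpus {1, 3, 5, 7}, cpumask_weight() is 4, so seq_nr = 6 gives
	 * cpu_index = 6 % 4 = 2, which padata_index_to_cpu() maps to cpu 5,
	 * the third set bit of the mask.
	 */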

	seq_nr = atomic_inc_return(&pd->seq_nr);
	cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu);

	return padata_index_to_cpu(pd, cpu_index);
}

static void padata_parallel_worker(struct work_struct *parallel_work)
{
	struct padata_parallel_queue *pqueue;
	LIST_HEAD(local_list);

	local_bh_disable();
	pqueue = container_of(parallel_work,
			      struct padata_parallel_queue, work);

	spin_lock(&pqueue->parallel.lock);
	list_replace_init(&pqueue->parallel.list, &local_list);
	spin_unlock(&pqueue->parallel.lock);

	while (!list_empty(&local_list)) {
		struct padata_priv *padata;

		padata = list_entry(local_list.next,
				    struct padata_priv, list);

		list_del_init(&padata->list);

		padata->parallel(padata);
	}

	local_bh_enable();
}

/**
 * padata_do_parallel - padata parallelization function
 *
 * @pinst: padata instance
 * @padata: object to be parallelized
 * @cb_cpu: cpu the serialization callback function will run on,
 *          must be in the serial cpumask of padata (i.e. cpumask.cbcpu).
 *
 * The parallelization callback function will run with BHs off.
 * Note: Every object which is parallelized by padata_do_parallel
 * must be seen by padata_do_serial.
 */
int padata_do_parallel(struct padata_instance *pinst,
		       struct padata_priv *padata, int cb_cpu)
{
	int target_cpu, err;
	struct padata_parallel_queue *queue;
	struct parallel_data *pd;

	rcu_read_lock_bh();

	pd = rcu_dereference_bh(pinst->pd);

	err = -EINVAL;
	if (!(pinst->flags & PADATA_INIT) || pinst->flags & PADATA_INVALID)
		goto out;

	if (!cpumask_test_cpu(cb_cpu, pd->cpumask.cbcpu))
		goto out;

	err = -EBUSY;
	if ((pinst->flags & PADATA_RESET))
		goto out;

	if (atomic_read(&pd->refcnt) >= MAX_OBJ_NUM)
		goto out;

	err = 0;
	atomic_inc(&pd->refcnt);
	padata->pd = pd;
	padata->cb_cpu = cb_cpu;

	target_cpu = padata_cpu_hash(pd);
	padata->cpu = target_cpu;
	queue = per_cpu_ptr(pd->pqueue, target_cpu);

	spin_lock(&queue->parallel.lock);
	list_add_tail(&padata->list, &queue->parallel.list);
	spin_unlock(&queue->parallel.lock);

	queue_work_on(target_cpu, pinst->wq, &queue->work);

out:
	rcu_read_unlock_bh();

	return err;
}
EXPORT_SYMBOL(padata_do_parallel);

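/*
 * Illustrative usage sketch for the submission side (kept under #if 0, not
 * compiled).  A padata user embeds a struct padata_priv in its own request,
 * fills in the parallel/serial callbacks and hands the object to
 * padata_do_parallel().  All my_* names are hypothetical and not part of
 * padata; the callback bodies are sketched below, after padata_do_serial().
 */
#if 0
struct my_request {
	struct padata_priv padata;	/* must stay valid until ->serial() has run */
	void *data;
};

static void my_parallel(struct padata_priv *padata);
static void my_serial(struct padata_priv *padata);

static int my_submit(struct padata_instance *my_pinst, struct my_request *req,
		     int cb_cpu)
{
	req->padata.parallel = my_parallel;	/* runs on one of the pcpu cpus */
	req->padata.serial = my_serial;		/* runs in order on cb_cpu */

	/*
	 * cb_cpu must be in the instance's cbcpu mask; -EBUSY means too many
	 * objects are currently in flight and the caller should retry later.
	 */
	return padata_do_parallel(my_pinst, &req->padata, cb_cpu);
}
#endif
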
/*
 * padata_get_next - Get the next object that needs serialization.
 *
 * Return values are:
 *
 * A pointer to the control struct of the next object that needs
 * serialization, if present in one of the percpu reorder queues.
 *
 * -EINPROGRESS, if the next object that needs serialization will
 *  be parallel processed by another cpu and is not yet present in
 *  the cpu's reorder queue.
 *
 * -ENODATA, if this cpu has to do the parallel processing for
 *  the next object.
 */
static struct padata_priv *padata_get_next(struct parallel_data *pd)
{
	int cpu, num_cpus;
	unsigned int next_nr, next_index;
	struct padata_parallel_queue *next_queue;
	struct padata_priv *padata;
	struct padata_list *reorder;

	num_cpus = cpumask_weight(pd->cpumask.pcpu);

	/*
	 * Calculate the percpu reorder queue and the sequence
	 * number of the next object.
	 */
	next_nr = pd->processed;
	next_index = next_nr % num_cpus;
	cpu = padata_index_to_cpu(pd, next_index);
	next_queue = per_cpu_ptr(pd->pqueue, cpu);

	reorder = &next_queue->reorder;

	spin_lock(&reorder->lock);
	if (!list_empty(&reorder->list)) {
		padata = list_entry(reorder->list.next,
				    struct padata_priv, list);

		list_del_init(&padata->list);
		atomic_dec(&pd->reorder_objects);

		pd->processed++;

		spin_unlock(&reorder->lock);
		goto out;
	}
	spin_unlock(&reorder->lock);

	if (__this_cpu_read(pd->pqueue->cpu_index) == next_queue->cpu_index) {
		padata = ERR_PTR(-ENODATA);
		goto out;
	}

	padata = ERR_PTR(-EINPROGRESS);
out:
	return padata;
}

static void padata_reorder(struct parallel_data *pd)
{
	int cb_cpu;
	struct padata_priv *padata;
	struct padata_serial_queue *squeue;
	struct padata_instance *pinst = pd->pinst;

	/*
	 * We need to ensure that only one cpu can work on dequeueing of
	 * the reorder queue at a time. Calculating in which percpu reorder
	 * queue the next object will arrive takes some time. A spinlock
	 * would be highly contended. Also it is not clear in which order
	 * the objects arrive at the reorder queues. So a cpu could wait to
	 * get the lock just to notice that there is nothing to do at the
	 * moment. Therefore we use a trylock and let the holder of the lock
	 * care for all the objects enqueued during the hold time of the lock.
	 */
	if (!spin_trylock_bh(&pd->lock))
		return;

	while (1) {
		padata = padata_get_next(pd);

		/*
		 * If the next object that needs serialization is parallel
		 * processed by another cpu and is still on its way to the
		 * cpu's reorder queue, nothing to do for now.
		 */
		if (PTR_ERR(padata) == -EINPROGRESS)
			break;

		/*
		 * This cpu has to do the parallel processing of the next
		 * object. It's waiting in the cpu's parallelization queue,
		 * so exit immediately.
		 */
		if (PTR_ERR(padata) == -ENODATA) {
			del_timer(&pd->timer);
			spin_unlock_bh(&pd->lock);
			return;
		}

		cb_cpu = padata->cb_cpu;
		squeue = per_cpu_ptr(pd->squeue, cb_cpu);

		spin_lock(&squeue->serial.lock);
		list_add_tail(&padata->list, &squeue->serial.list);
		spin_unlock(&squeue->serial.lock);

		queue_work_on(cb_cpu, pinst->wq, &squeue->work);
	}

	spin_unlock_bh(&pd->lock);

	/*
	 * The next object that needs serialization might have arrived at
	 * the reorder queues in the meantime; we will be called again
	 * from the timer function if no one else cares for it.
	 *
	 * Ensure reorder_objects is read after pd->lock is dropped so we see
	 * an increment from another task in padata_do_serial.  Pairs with
	 * smp_mb__after_atomic in padata_do_serial.
	 */
	smp_mb();
	if (atomic_read(&pd->reorder_objects)
			&& !(pinst->flags & PADATA_RESET))
		mod_timer(&pd->timer, jiffies + HZ);
	else
		del_timer(&pd->timer);

	return;
}

static void invoke_padata_reorder(struct work_struct *work)
{
	struct padata_parallel_queue *pqueue;
	struct parallel_data *pd;

	local_bh_disable();
	pqueue = container_of(work, struct padata_parallel_queue, reorder_work);
	pd = pqueue->pd;
	padata_reorder(pd);
	local_bh_enable();
}

static void padata_reorder_timer(struct timer_list *t)
{
	struct parallel_data *pd = from_timer(pd, t, timer);
	unsigned int weight;
	int target_cpu, cpu;

	cpu = get_cpu();

	/* We don't lock pd here so as not to interfere with the parallel
	 * processing padata_reorder() calls on other CPUs. We just need any
	 * CPU out of the cpumask.pcpu set. It would be nice if it were the
	 * right one, but it doesn't matter if we end up on the next one by
	 * using an outdated pd->processed value.
	 */
	weight = cpumask_weight(pd->cpumask.pcpu);
	target_cpu = padata_index_to_cpu(pd, pd->processed % weight);

	/* ensure to call the reorder callback on the correct CPU */
	if (cpu != target_cpu) {
		struct padata_parallel_queue *pqueue;
		struct padata_instance *pinst;

		/* The timer function is serialized wrt itself -- no locking
		 * needed.
		 */
		pinst = pd->pinst;
		pqueue = per_cpu_ptr(pd->pqueue, target_cpu);
		queue_work_on(target_cpu, pinst->wq, &pqueue->reorder_work);
	} else {
		padata_reorder(pd);
	}

	put_cpu();
}

static void padata_serial_worker(struct work_struct *serial_work)
{
	struct padata_serial_queue *squeue;
	struct parallel_data *pd;
	LIST_HEAD(local_list);

	local_bh_disable();
	squeue = container_of(serial_work, struct padata_serial_queue, work);
	pd = squeue->pd;

	spin_lock(&squeue->serial.lock);
	list_replace_init(&squeue->serial.list, &local_list);
	spin_unlock(&squeue->serial.lock);

	while (!list_empty(&local_list)) {
		struct padata_priv *padata;

		padata = list_entry(local_list.next,
				    struct padata_priv, list);

		list_del_init(&padata->list);

		padata->serial(padata);
		atomic_dec(&pd->refcnt);
	}
	local_bh_enable();
}

/**
 * padata_do_serial - padata serialization function
 *
 * @padata: object to be serialized.
 *
 * padata_do_serial must be called for every parallelized object.
 * The serialization callback function will run with BHs off.
 */
void padata_do_serial(struct padata_priv *padata)
{
	int cpu;
	struct padata_parallel_queue *pqueue;
	struct parallel_data *pd;
	int reorder_via_wq = 0;

	pd = padata->pd;

	cpu = get_cpu();

	/* We need to run on the same CPU padata_do_parallel(.., padata, ..)
	 * was called on -- or, at least, enqueue the padata object into the
	 * correct per-cpu queue.
	 */
	if (cpu != padata->cpu) {
		reorder_via_wq = 1;
		cpu = padata->cpu;
	}

	pqueue = per_cpu_ptr(pd->pqueue, cpu);

	spin_lock(&pqueue->reorder.lock);
	atomic_inc(&pd->reorder_objects);
	list_add_tail(&padata->list, &pqueue->reorder.list);
	spin_unlock(&pqueue->reorder.lock);

	/*
	 * Ensure the atomic_inc of reorder_objects above is ordered correctly
	 * with the trylock of pd->lock in padata_reorder.  Pairs with smp_mb
	 * in padata_reorder.
	 */
	smp_mb__after_atomic();

	put_cpu();

	/* If we're running on the wrong CPU, call padata_reorder() via a
	 * kernel worker.
	 */
	if (reorder_via_wq)
		queue_work_on(cpu, pd->pinst->wq, &pqueue->reorder_work);
	else
		padata_reorder(pd);
}
EXPORT_SYMBOL(padata_do_serial);

static int padata_setup_cpumasks(struct parallel_data *pd,
				 const struct cpumask *pcpumask,
				 const struct cpumask *cbcpumask)
{
	if (!alloc_cpumask_var(&pd->cpumask.pcpu, GFP_KERNEL))
		return -ENOMEM;

	cpumask_and(pd->cpumask.pcpu, pcpumask, cpu_online_mask);
	if (!alloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL)) {
		free_cpumask_var(pd->cpumask.pcpu);
		return -ENOMEM;
	}

	cpumask_and(pd->cpumask.cbcpu, cbcpumask, cpu_online_mask);
	return 0;
}

static void __padata_list_init(struct padata_list *pd_list)
{
	INIT_LIST_HEAD(&pd_list->list);
	spin_lock_init(&pd_list->lock);
}

/* Initialize all percpu queues used by serial workers */
static void padata_init_squeues(struct parallel_data *pd)
{
	int cpu;
	struct padata_serial_queue *squeue;

	for_each_cpu(cpu, pd->cpumask.cbcpu) {
		squeue = per_cpu_ptr(pd->squeue, cpu);
		squeue->pd = pd;
		__padata_list_init(&squeue->serial);
		INIT_WORK(&squeue->work, padata_serial_worker);
	}
}

/* Initialize all percpu queues used by parallel workers */
static void padata_init_pqueues(struct parallel_data *pd)
{
	int cpu_index, cpu;
	struct padata_parallel_queue *pqueue;

	cpu_index = 0;
	for_each_possible_cpu(cpu) {
		pqueue = per_cpu_ptr(pd->pqueue, cpu);

		if (!cpumask_test_cpu(cpu, pd->cpumask.pcpu)) {
			pqueue->cpu_index = -1;
			continue;
		}

		pqueue->pd = pd;
		pqueue->cpu_index = cpu_index;
		cpu_index++;

		__padata_list_init(&pqueue->reorder);
		__padata_list_init(&pqueue->parallel);
		INIT_WORK(&pqueue->work, padata_parallel_worker);
		INIT_WORK(&pqueue->reorder_work, invoke_padata_reorder);
		atomic_set(&pqueue->num_obj, 0);
	}
}

/* Allocate and initialize the internal cpumask dependent resources. */
static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
					     const struct cpumask *pcpumask,
					     const struct cpumask *cbcpumask)
{
	struct parallel_data *pd;

	pd = kzalloc(sizeof(struct parallel_data), GFP_KERNEL);
	if (!pd)
		goto err;

	pd->pqueue = alloc_percpu(struct padata_parallel_queue);
	if (!pd->pqueue)
		goto err_free_pd;

	pd->squeue = alloc_percpu(struct padata_serial_queue);
	if (!pd->squeue)
		goto err_free_pqueue;
	if (padata_setup_cpumasks(pd, pcpumask, cbcpumask) < 0)
		goto err_free_squeue;

	padata_init_pqueues(pd);
	padata_init_squeues(pd);
	timer_setup(&pd->timer, padata_reorder_timer, 0);
	atomic_set(&pd->seq_nr, -1);
	atomic_set(&pd->reorder_objects, 0);
	atomic_set(&pd->refcnt, 0);
	pd->pinst = pinst;
	spin_lock_init(&pd->lock);

	return pd;

err_free_squeue:
	free_percpu(pd->squeue);
err_free_pqueue:
	free_percpu(pd->pqueue);
err_free_pd:
	kfree(pd);
err:
	return NULL;
}

static void padata_free_pd(struct parallel_data *pd)
{
	free_cpumask_var(pd->cpumask.pcpu);
	free_cpumask_var(pd->cpumask.cbcpu);
	free_percpu(pd->pqueue);
	free_percpu(pd->squeue);
	kfree(pd);
}

/* Flush all objects out of the padata queues. */
static void padata_flush_queues(struct parallel_data *pd)
{
	int cpu;
	struct padata_parallel_queue *pqueue;
	struct padata_serial_queue *squeue;

	for_each_cpu(cpu, pd->cpumask.pcpu) {
		pqueue = per_cpu_ptr(pd->pqueue, cpu);
		flush_work(&pqueue->work);
	}

	del_timer_sync(&pd->timer);

	if (atomic_read(&pd->reorder_objects))
		padata_reorder(pd);

	for_each_cpu(cpu, pd->cpumask.cbcpu) {
		squeue = per_cpu_ptr(pd->squeue, cpu);
		flush_work(&squeue->work);
	}

	BUG_ON(atomic_read(&pd->refcnt) != 0);
}

static void __padata_start(struct padata_instance *pinst)
{
	pinst->flags |= PADATA_INIT;
}

static void __padata_stop(struct padata_instance *pinst)
{
	if (!(pinst->flags & PADATA_INIT))
		return;

	pinst->flags &= ~PADATA_INIT;

	synchronize_rcu();

	get_online_cpus();
	padata_flush_queues(pinst->pd);
	put_online_cpus();
}

/* Replace the internal control structure with a new one. */
static void padata_replace(struct padata_instance *pinst,
			   struct parallel_data *pd_new)
{
	struct parallel_data *pd_old = pinst->pd;
	int notification_mask = 0;

	pinst->flags |= PADATA_RESET;

	rcu_assign_pointer(pinst->pd, pd_new);

	synchronize_rcu();

	if (!cpumask_equal(pd_old->cpumask.pcpu, pd_new->cpumask.pcpu))
		notification_mask |= PADATA_CPU_PARALLEL;
	if (!cpumask_equal(pd_old->cpumask.cbcpu, pd_new->cpumask.cbcpu))
		notification_mask |= PADATA_CPU_SERIAL;

	padata_flush_queues(pd_old);
	padata_free_pd(pd_old);

	if (notification_mask)
		blocking_notifier_call_chain(&pinst->cpumask_change_notifier,
					     notification_mask,
					     &pd_new->cpumask);

	pinst->flags &= ~PADATA_RESET;
}

/**
 * padata_register_cpumask_notifier - Registers a notifier that will be called
 *                                    if either the pcpu or the cbcpu cpumask,
 *                                    or both, change.
 *
 * @pinst: A pointer to the padata instance
 * @nblock: A pointer to the notifier block.
 */
int padata_register_cpumask_notifier(struct padata_instance *pinst,
				     struct notifier_block *nblock)
{
	return blocking_notifier_chain_register(&pinst->cpumask_change_notifier,
						nblock);
}
EXPORT_SYMBOL(padata_register_cpumask_notifier);

/**
 * padata_unregister_cpumask_notifier - Unregisters a cpumask notifier
 *                                      registered earlier using
 *                                      padata_register_cpumask_notifier
 *
 * @pinst: A pointer to the padata instance.
 * @nblock: A pointer to the notifier block.
 */
int padata_unregister_cpumask_notifier(struct padata_instance *pinst,
				       struct notifier_block *nblock)
{
	return blocking_notifier_chain_unregister(
		&pinst->cpumask_change_notifier,
		nblock);
}
EXPORT_SYMBOL(padata_unregister_cpumask_notifier);


/* If cpumask contains no active cpu, we mark the instance as invalid. */
static bool padata_validate_cpumask(struct padata_instance *pinst,
				    const struct cpumask *cpumask)
{
	if (!cpumask_intersects(cpumask, cpu_online_mask)) {
		pinst->flags |= PADATA_INVALID;
		return false;
	}

	pinst->flags &= ~PADATA_INVALID;
	return true;
}

static int __padata_set_cpumasks(struct padata_instance *pinst,
				 cpumask_var_t pcpumask,
				 cpumask_var_t cbcpumask)
{
	int valid;
	struct parallel_data *pd;

	valid = padata_validate_cpumask(pinst, pcpumask);
	if (!valid) {
		__padata_stop(pinst);
		goto out_replace;
	}

	valid = padata_validate_cpumask(pinst, cbcpumask);
	if (!valid)
		__padata_stop(pinst);

out_replace:
	pd = padata_alloc_pd(pinst, pcpumask, cbcpumask);
	if (!pd)
		return -ENOMEM;

	cpumask_copy(pinst->cpumask.pcpu, pcpumask);
	cpumask_copy(pinst->cpumask.cbcpu, cbcpumask);

	padata_replace(pinst, pd);

	if (valid)
		__padata_start(pinst);

	return 0;
}

/**
 * padata_set_cpumask - Set the cpumask selected by @cpumask_type to the
 *                      value of @cpumask.
 *
 * @pinst: padata instance
 * @cpumask_type: PADATA_CPU_SERIAL or PADATA_CPU_PARALLEL, selecting the
 *                serial or the parallel cpumask respectively.
 * @cpumask: the cpumask to use
 */
int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type,
		       cpumask_var_t cpumask)
{
	struct cpumask *serial_mask, *parallel_mask;
	int err = -EINVAL;

	mutex_lock(&pinst->lock);
	get_online_cpus();

	switch (cpumask_type) {
	case PADATA_CPU_PARALLEL:
		serial_mask = pinst->cpumask.cbcpu;
		parallel_mask = cpumask;
		break;
	case PADATA_CPU_SERIAL:
		parallel_mask = pinst->cpumask.pcpu;
		serial_mask = cpumask;
		break;
	default:
		goto out;
	}

	err = __padata_set_cpumasks(pinst, parallel_mask, serial_mask);

out:
	put_online_cpus();
	mutex_unlock(&pinst->lock);

	return err;
}
EXPORT_SYMBOL(padata_set_cpumask);

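/*
 * Illustrative sketch (kept under #if 0, not compiled): restricting the
 * parallel workers of a hypothetical instance my_pinst to cpus 0-3 at
 * runtime.  padata_set_cpumask() handles cpu hotplug locking and rebuilds
 * the internal parallel_data itself, so the caller only supplies the new
 * mask; the mask is copied and can be freed afterwards.
 */
#if 0
static int my_restrict_parallel_cpus(struct padata_instance *my_pinst)
{
	cpumask_var_t mask;
	int cpu, err;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_clear(mask);
	for (cpu = 0; cpu < 4; cpu++)
		cpumask_set_cpu(cpu, mask);

	err = padata_set_cpumask(my_pinst, PADATA_CPU_PARALLEL, mask);

	free_cpumask_var(mask);
	return err;
}
#endif
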
/**
 * padata_start - start the parallel processing
 *
 * @pinst: padata instance to start
 */
int padata_start(struct padata_instance *pinst)
{
	int err = 0;

	mutex_lock(&pinst->lock);

	if (pinst->flags & PADATA_INVALID)
		err = -EINVAL;

	__padata_start(pinst);

	mutex_unlock(&pinst->lock);

	return err;
}
EXPORT_SYMBOL(padata_start);

/**
 * padata_stop - stop the parallel processing
 *
 * @pinst: padata instance to stop
 */
void padata_stop(struct padata_instance *pinst)
{
	mutex_lock(&pinst->lock);
	__padata_stop(pinst);
	mutex_unlock(&pinst->lock);
}
EXPORT_SYMBOL(padata_stop);

#ifdef CONFIG_HOTPLUG_CPU

static int __padata_add_cpu(struct padata_instance *pinst, int cpu)
{
	struct parallel_data *pd;

	if (cpumask_test_cpu(cpu, cpu_online_mask)) {
		pd = padata_alloc_pd(pinst, pinst->cpumask.pcpu,
				     pinst->cpumask.cbcpu);
		if (!pd)
			return -ENOMEM;

		padata_replace(pinst, pd);

		if (padata_validate_cpumask(pinst, pinst->cpumask.pcpu) &&
		    padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
			__padata_start(pinst);
	}

	return 0;
}

static int __padata_remove_cpu(struct padata_instance *pinst, int cpu)
{
	struct parallel_data *pd = NULL;

	if (cpumask_test_cpu(cpu, cpu_online_mask)) {

		if (!padata_validate_cpumask(pinst, pinst->cpumask.pcpu) ||
		    !padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
			__padata_stop(pinst);

		pd = padata_alloc_pd(pinst, pinst->cpumask.pcpu,
				     pinst->cpumask.cbcpu);
		if (!pd)
			return -ENOMEM;

		padata_replace(pinst, pd);

		cpumask_clear_cpu(cpu, pd->cpumask.cbcpu);
		cpumask_clear_cpu(cpu, pd->cpumask.pcpu);
	}

	return 0;
}

/**
 * padata_remove_cpu - remove a cpu from one or both (serial and parallel)
 *                     padata cpumasks.
 *
 * @pinst: padata instance
 * @cpu: cpu to remove
 * @mask: bitmask specifying from which cpumask @cpu should be removed
 *        The @mask may be any combination of the following flags:
 *          PADATA_CPU_SERIAL   - serial cpumask
 *          PADATA_CPU_PARALLEL - parallel cpumask
 */
int padata_remove_cpu(struct padata_instance *pinst, int cpu, int mask)
{
	int err;

	if (!(mask & (PADATA_CPU_SERIAL | PADATA_CPU_PARALLEL)))
		return -EINVAL;

	mutex_lock(&pinst->lock);

	get_online_cpus();
	if (mask & PADATA_CPU_SERIAL)
		cpumask_clear_cpu(cpu, pinst->cpumask.cbcpu);
	if (mask & PADATA_CPU_PARALLEL)
		cpumask_clear_cpu(cpu, pinst->cpumask.pcpu);

	err = __padata_remove_cpu(pinst, cpu);
	put_online_cpus();

	mutex_unlock(&pinst->lock);

	return err;
}
EXPORT_SYMBOL(padata_remove_cpu);

static inline int pinst_has_cpu(struct padata_instance *pinst, int cpu)
{
	return cpumask_test_cpu(cpu, pinst->cpumask.pcpu) ||
		cpumask_test_cpu(cpu, pinst->cpumask.cbcpu);
}

static int padata_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct padata_instance *pinst;
	int ret;

	pinst = hlist_entry_safe(node, struct padata_instance, node);
	if (!pinst_has_cpu(pinst, cpu))
		return 0;

	mutex_lock(&pinst->lock);
	ret = __padata_add_cpu(pinst, cpu);
	mutex_unlock(&pinst->lock);
	return ret;
}

static int padata_cpu_prep_down(unsigned int cpu, struct hlist_node *node)
{
	struct padata_instance *pinst;
	int ret;

	pinst = hlist_entry_safe(node, struct padata_instance, node);
	if (!pinst_has_cpu(pinst, cpu))
		return 0;

	mutex_lock(&pinst->lock);
	ret = __padata_remove_cpu(pinst, cpu);
	mutex_unlock(&pinst->lock);
	return ret;
}

static enum cpuhp_state hp_online;
#endif

static void __padata_free(struct padata_instance *pinst)
{
#ifdef CONFIG_HOTPLUG_CPU
	cpuhp_state_remove_instance_nocalls(hp_online, &pinst->node);
#endif

	padata_stop(pinst);
	padata_free_pd(pinst->pd);
	free_cpumask_var(pinst->cpumask.pcpu);
	free_cpumask_var(pinst->cpumask.cbcpu);
	kfree(pinst);
}

#define kobj2pinst(_kobj)					\
	container_of(_kobj, struct padata_instance, kobj)
#define attr2pentry(_attr)					\
	container_of(_attr, struct padata_sysfs_entry, attr)

static void padata_sysfs_release(struct kobject *kobj)
{
	struct padata_instance *pinst = kobj2pinst(kobj);
	__padata_free(pinst);
}

struct padata_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct padata_instance *, struct attribute *, char *);
	ssize_t (*store)(struct padata_instance *, struct attribute *,
			 const char *, size_t);
};

static ssize_t show_cpumask(struct padata_instance *pinst,
			    struct attribute *attr, char *buf)
{
	struct cpumask *cpumask;
	ssize_t len;

	mutex_lock(&pinst->lock);
	if (!strcmp(attr->name, "serial_cpumask"))
		cpumask = pinst->cpumask.cbcpu;
	else
		cpumask = pinst->cpumask.pcpu;

	len = snprintf(buf, PAGE_SIZE, "%*pb\n",
		       nr_cpu_ids, cpumask_bits(cpumask));
	mutex_unlock(&pinst->lock);
	return len < PAGE_SIZE ? len : -EINVAL;
}

static ssize_t store_cpumask(struct padata_instance *pinst,
			     struct attribute *attr,
			     const char *buf, size_t count)
{
	cpumask_var_t new_cpumask;
	ssize_t ret;
	int mask_type;

	if (!alloc_cpumask_var(&new_cpumask, GFP_KERNEL))
		return -ENOMEM;

	ret = bitmap_parse(buf, count, cpumask_bits(new_cpumask),
			   nr_cpumask_bits);
	if (ret < 0)
		goto out;

	mask_type = !strcmp(attr->name, "serial_cpumask") ?
		PADATA_CPU_SERIAL : PADATA_CPU_PARALLEL;
	ret = padata_set_cpumask(pinst, mask_type, new_cpumask);
	if (!ret)
		ret = count;

out:
	free_cpumask_var(new_cpumask);
	return ret;
}

#define PADATA_ATTR_RW(_name, _show_name, _store_name)		\
	static struct padata_sysfs_entry _name##_attr =	\
		__ATTR(_name, 0644, _show_name, _store_name)
#define PADATA_ATTR_RO(_name, _show_name)		\
	static struct padata_sysfs_entry _name##_attr = \
		__ATTR(_name, 0400, _show_name, NULL)

PADATA_ATTR_RW(serial_cpumask, show_cpumask, store_cpumask);
PADATA_ATTR_RW(parallel_cpumask, show_cpumask, store_cpumask);

/*
 * Padata sysfs provides the following objects:
 * serial_cpumask   [RW] - cpumask for serial workers
 * parallel_cpumask [RW] - cpumask for parallel workers
 */
static struct attribute *padata_default_attrs[] = {
	&serial_cpumask_attr.attr,
	&parallel_cpumask_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(padata_default);

static ssize_t padata_sysfs_show(struct kobject *kobj,
				 struct attribute *attr, char *buf)
{
	struct padata_instance *pinst;
	struct padata_sysfs_entry *pentry;
	ssize_t ret = -EIO;

	pinst = kobj2pinst(kobj);
	pentry = attr2pentry(attr);
	if (pentry->show)
		ret = pentry->show(pinst, attr, buf);

	return ret;
}

static ssize_t padata_sysfs_store(struct kobject *kobj, struct attribute *attr,
				  const char *buf, size_t count)
{
	struct padata_instance *pinst;
	struct padata_sysfs_entry *pentry;
	ssize_t ret = -EIO;

	pinst = kobj2pinst(kobj);
	pentry = attr2pentry(attr);
	if (pentry->store)
		ret = pentry->store(pinst, attr, buf, count);

	return ret;
}

static const struct sysfs_ops padata_sysfs_ops = {
	.show = padata_sysfs_show,
	.store = padata_sysfs_store,
};

static struct kobj_type padata_attr_type = {
	.sysfs_ops = &padata_sysfs_ops,
	.default_groups = padata_default_groups,
	.release = padata_sysfs_release,
};

/**
 * padata_alloc - allocate and initialize a padata instance and specify
 *                cpumasks for serial and parallel workers.
 *
 * @wq: workqueue to use for the allocated padata instance
 * @pcpumask: cpumask that will be used for padata parallelization
 * @cbcpumask: cpumask that will be used for padata serialization
 *
 * Must be called from a cpus_read_lock() protected region
 */
static struct padata_instance *padata_alloc(struct workqueue_struct *wq,
					    const struct cpumask *pcpumask,
					    const struct cpumask *cbcpumask)
{
	struct padata_instance *pinst;
	struct parallel_data *pd = NULL;

	pinst = kzalloc(sizeof(struct padata_instance), GFP_KERNEL);
	if (!pinst)
		goto err;

	if (!alloc_cpumask_var(&pinst->cpumask.pcpu, GFP_KERNEL))
		goto err_free_inst;
	if (!alloc_cpumask_var(&pinst->cpumask.cbcpu, GFP_KERNEL)) {
		free_cpumask_var(pinst->cpumask.pcpu);
		goto err_free_inst;
	}
	if (!padata_validate_cpumask(pinst, pcpumask) ||
	    !padata_validate_cpumask(pinst, cbcpumask))
		goto err_free_masks;

	pd = padata_alloc_pd(pinst, pcpumask, cbcpumask);
	if (!pd)
		goto err_free_masks;

	rcu_assign_pointer(pinst->pd, pd);

	pinst->wq = wq;

	cpumask_copy(pinst->cpumask.pcpu, pcpumask);
	cpumask_copy(pinst->cpumask.cbcpu, cbcpumask);

	pinst->flags = 0;

	BLOCKING_INIT_NOTIFIER_HEAD(&pinst->cpumask_change_notifier);
	kobject_init(&pinst->kobj, &padata_attr_type);
	mutex_init(&pinst->lock);

#ifdef CONFIG_HOTPLUG_CPU
	cpuhp_state_add_instance_nocalls_cpuslocked(hp_online, &pinst->node);
#endif
	return pinst;

err_free_masks:
	free_cpumask_var(pinst->cpumask.pcpu);
	free_cpumask_var(pinst->cpumask.cbcpu);
err_free_inst:
	kfree(pinst);
err:
	return NULL;
}

/**
 * padata_alloc_possible - Allocate and initialize padata instance.
 *                         Use the cpu_possible_mask for serial and
 *                         parallel workers.
 *
 * @wq: workqueue to use for the allocated padata instance
 *
 * Must be called from a cpus_read_lock() protected region
 */
struct padata_instance *padata_alloc_possible(struct workqueue_struct *wq)
{
	lockdep_assert_cpus_held();
	return padata_alloc(wq, cpu_possible_mask, cpu_possible_mask);
}
EXPORT_SYMBOL(padata_alloc_possible);

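/*
 * Illustrative lifecycle sketch (kept under #if 0, not compiled): allocating,
 * starting, stopping and freeing an instance.  The workqueue name and the
 * my_* identifiers are hypothetical; padata only needs a workqueue it can
 * queue per-cpu work on.  padata_alloc_possible() must run with the cpu
 * hotplug lock held, which the cpus_read_lock()/unlock() pair provides.
 */
#if 0
static struct padata_instance *my_setup_padata(struct workqueue_struct **wqp)
{
	struct workqueue_struct *my_wq;
	struct padata_instance *pinst;

	my_wq = alloc_workqueue("my_padata_wq",
				WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1);
	if (!my_wq)
		return NULL;

	cpus_read_lock();
	pinst = padata_alloc_possible(my_wq);
	cpus_read_unlock();
	if (!pinst) {
		destroy_workqueue(my_wq);
		return NULL;
	}

	padata_start(pinst);
	*wqp = my_wq;
	return pinst;
}

static void my_teardown_padata(struct padata_instance *pinst,
			       struct workqueue_struct *my_wq)
{
	padata_stop(pinst);
	padata_free(pinst);
	destroy_workqueue(my_wq);
}
#endif
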
1093/**
Steffen Klassert16295be2010-01-06 19:47:10 +11001094 * padata_free - free a padata instance
1095 *
 * @pinst: padata instance to free
 */
void padata_free(struct padata_instance *pinst)
{
	kobject_put(&pinst->kobj);
}
EXPORT_SYMBOL(padata_free);

#ifdef CONFIG_HOTPLUG_CPU

static __init int padata_driver_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "padata:online",
				      padata_cpu_online,
				      padata_cpu_prep_down);
	if (ret < 0)
		return ret;
	hp_online = ret;
	return 0;
}
module_init(padata_driver_init);

static __exit void padata_driver_exit(void)
{
	cpuhp_remove_multi_state(hp_online);
}
module_exit(padata_driver_exit);
#endif