// SPDX-License-Identifier: GPL-2.0
/*
 * padata.c - generic interface to process data streams in parallel
 *
 * See Documentation/core-api/padata.rst for more information.
 *
 * Copyright (C) 2008, 2009 secunet Security Networks AG
 * Copyright (C) 2008, 2009 Steffen Klassert <steffen.klassert@secunet.com>
 *
 * Copyright (c) 2020 Oracle and/or its affiliates.
 * Author: Daniel Jordan <daniel.m.jordan@oracle.com>
 */

#include <linux/completion.h>
#include <linux/export.h>
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/padata.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/rcupdate.h>

#define	PADATA_WORK_ONSTACK	1	/* Work's memory is on stack */

struct padata_work {
	struct work_struct	pw_work;
	struct list_head	pw_list;  /* padata_free_works linkage */
	void			*pw_data;
};

static DEFINE_SPINLOCK(padata_works_lock);
static struct padata_work *padata_works;
static LIST_HEAD(padata_free_works);

struct padata_mt_job_state {
	spinlock_t		lock;
	struct completion	completion;
	struct padata_mt_job	*job;
	int			nworks;
	int			nworks_fini;
	unsigned long		chunk_size;
};

static void padata_free_pd(struct parallel_data *pd);
static void __init padata_mt_helper(struct work_struct *work);

static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index)
{
	int cpu, target_cpu;

	target_cpu = cpumask_first(pd->cpumask.pcpu);
	for (cpu = 0; cpu < cpu_index; cpu++)
		target_cpu = cpumask_next(target_cpu, pd->cpumask.pcpu);

	return target_cpu;
}

static int padata_cpu_hash(struct parallel_data *pd, unsigned int seq_nr)
{
	/*
	 * Hash the sequence numbers to the cpus by taking
	 * seq_nr mod. number of cpus in use.
	 */
	int cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu);

	return padata_index_to_cpu(pd, cpu_index);
}

static struct padata_work *padata_work_alloc(void)
{
	struct padata_work *pw;

	lockdep_assert_held(&padata_works_lock);

	if (list_empty(&padata_free_works))
		return NULL;	/* No more work items allowed to be queued. */

	pw = list_first_entry(&padata_free_works, struct padata_work, pw_list);
	list_del(&pw->pw_list);
	return pw;
}

static void padata_work_init(struct padata_work *pw, work_func_t work_fn,
			     void *data, int flags)
{
	if (flags & PADATA_WORK_ONSTACK)
		INIT_WORK_ONSTACK(&pw->pw_work, work_fn);
	else
		INIT_WORK(&pw->pw_work, work_fn);
	pw->pw_data = data;
}

static int __init padata_work_alloc_mt(int nworks, void *data,
				       struct list_head *head)
{
	int i;

	spin_lock(&padata_works_lock);
	/* Start at 1 because the current task participates in the job. */
	for (i = 1; i < nworks; ++i) {
		struct padata_work *pw = padata_work_alloc();

		if (!pw)
			break;
		padata_work_init(pw, padata_mt_helper, data, 0);
		list_add(&pw->pw_list, head);
	}
	spin_unlock(&padata_works_lock);

	return i;
}

static void padata_work_free(struct padata_work *pw)
{
	lockdep_assert_held(&padata_works_lock);
	list_add(&pw->pw_list, &padata_free_works);
}

static void __init padata_works_free(struct list_head *works)
{
	struct padata_work *cur, *next;

	if (list_empty(works))
		return;

	spin_lock(&padata_works_lock);
	list_for_each_entry_safe(cur, next, works, pw_list) {
		list_del(&cur->pw_list);
		padata_work_free(cur);
	}
	spin_unlock(&padata_works_lock);
}

static void padata_parallel_worker(struct work_struct *parallel_work)
{
	struct padata_work *pw = container_of(parallel_work, struct padata_work,
					      pw_work);
	struct padata_priv *padata = pw->pw_data;

	local_bh_disable();
	padata->parallel(padata);
	spin_lock(&padata_works_lock);
	padata_work_free(pw);
	spin_unlock(&padata_works_lock);
	local_bh_enable();
}

/**
 * padata_do_parallel - padata parallelization function
 *
 * @ps: padata shell
 * @padata: object to be parallelized
 * @cb_cpu: pointer to the CPU that the serialization callback function should
 *          run on. If it's not in the serial cpumask of @pinst
 *          (i.e. cpumask.cbcpu), this function selects a fallback CPU and if
 *          none found, returns -EINVAL.
 *
 * The parallelization callback function will run with BHs off.
 * Note: Every object which is parallelized by padata_do_parallel
 * must be seen by padata_do_serial.
 *
 * Return: 0 on success or else negative error code.
 */
int padata_do_parallel(struct padata_shell *ps,
		       struct padata_priv *padata, int *cb_cpu)
{
	struct padata_instance *pinst = ps->pinst;
	int i, cpu, cpu_index, err;
	struct parallel_data *pd;
	struct padata_work *pw;

	rcu_read_lock_bh();

	pd = rcu_dereference_bh(ps->pd);

	err = -EINVAL;
	if (!(pinst->flags & PADATA_INIT) || pinst->flags & PADATA_INVALID)
		goto out;

	if (!cpumask_test_cpu(*cb_cpu, pd->cpumask.cbcpu)) {
		if (!cpumask_weight(pd->cpumask.cbcpu))
			goto out;

		/* Select an alternate fallback CPU and notify the caller. */
		cpu_index = *cb_cpu % cpumask_weight(pd->cpumask.cbcpu);

		cpu = cpumask_first(pd->cpumask.cbcpu);
		for (i = 0; i < cpu_index; i++)
			cpu = cpumask_next(cpu, pd->cpumask.cbcpu);

		*cb_cpu = cpu;
	}

	err = -EBUSY;
	if ((pinst->flags & PADATA_RESET))
		goto out;

	refcount_inc(&pd->refcnt);
	padata->pd = pd;
	padata->cb_cpu = *cb_cpu;

	spin_lock(&padata_works_lock);
	padata->seq_nr = ++pd->seq_nr;
	pw = padata_work_alloc();
	spin_unlock(&padata_works_lock);

	rcu_read_unlock_bh();

	if (pw) {
		padata_work_init(pw, padata_parallel_worker, padata, 0);
		queue_work(pinst->parallel_wq, &pw->pw_work);
	} else {
		/* Maximum works limit exceeded, run in the current task. */
		padata->parallel(padata);
	}

	return 0;
out:
	rcu_read_unlock_bh();

	return err;
}
EXPORT_SYMBOL(padata_do_parallel);

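/*
 * Illustrative sketch (not part of this file): how a hypothetical user of the
 * parallel API might submit a job. The structure, callback and variable names
 * below are made up for the example; only the padata_* calls are real.
 *
 *	struct my_request {
 *		struct padata_priv	padata;	// must be embedded
 *		...				// caller's own data
 *	};
 *
 *	static void my_parallel(struct padata_priv *padata)
 *	{
 *		struct my_request *req = container_of(padata,
 *						      struct my_request, padata);
 *		// ... do the CPU-intensive part, then hand the object back:
 *		padata_do_serial(padata);
 *	}
 *
 *	// submission, with the callbacks set up beforehand:
 *	req->padata.parallel = my_parallel;
 *	req->padata.serial = my_serial;
 *	err = padata_do_parallel(ps, &req->padata, &cb_cpu);
 *	if (err == -EBUSY)
 *		// the instance is being reset; the caller may retry later
 */
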
/*
 * padata_find_next - Find the next object that needs serialization.
 *
 * Return:
 * * A pointer to the control struct of the next object that needs
 *   serialization, if present in one of the percpu reorder queues.
 * * NULL, if the next object that needs serialization will
 *   be parallel processed by another cpu and is not yet present in
 *   the cpu's reorder queue.
 */
static struct padata_priv *padata_find_next(struct parallel_data *pd,
					    bool remove_object)
{
	struct padata_priv *padata;
	struct padata_list *reorder;
	int cpu = pd->cpu;

	reorder = per_cpu_ptr(pd->reorder_list, cpu);

	spin_lock(&reorder->lock);
	if (list_empty(&reorder->list)) {
		spin_unlock(&reorder->lock);
		return NULL;
	}

	padata = list_entry(reorder->list.next, struct padata_priv, list);

	/*
	 * Checks the rare case where two or more parallel jobs have hashed to
	 * the same CPU and one of the later ones finishes first.
	 */
	if (padata->seq_nr != pd->processed) {
		spin_unlock(&reorder->lock);
		return NULL;
	}

	if (remove_object) {
		list_del_init(&padata->list);
		++pd->processed;
		pd->cpu = cpumask_next_wrap(cpu, pd->cpumask.pcpu, -1, false);
	}

	spin_unlock(&reorder->lock);
	return padata;
}

static void padata_reorder(struct parallel_data *pd)
{
	struct padata_instance *pinst = pd->ps->pinst;
	int cb_cpu;
	struct padata_priv *padata;
	struct padata_serial_queue *squeue;
	struct padata_list *reorder;

	/*
	 * We need to ensure that only one cpu can work on dequeueing of
	 * the reorder queue at a time. Calculating in which percpu reorder
	 * queue the next object will arrive takes some time. A spinlock
	 * would be highly contended. Also it is not clear in which order
	 * the objects arrive to the reorder queues. So a cpu could wait to
	 * get the lock just to notice that there is nothing to do at the
	 * moment. Therefore we use a trylock and let the holder of the lock
	 * care for all the objects enqueued during the holdtime of the lock.
	 */
	if (!spin_trylock_bh(&pd->lock))
		return;

	while (1) {
		padata = padata_find_next(pd, true);

		/*
		 * If the next object that needs serialization is parallel
		 * processed by another cpu and is still on its way to the
		 * cpu's reorder queue, nothing to do for now.
		 */
		if (!padata)
			break;

		cb_cpu = padata->cb_cpu;
		squeue = per_cpu_ptr(pd->squeue, cb_cpu);

		spin_lock(&squeue->serial.lock);
		list_add_tail(&padata->list, &squeue->serial.list);
		spin_unlock(&squeue->serial.lock);

		queue_work_on(cb_cpu, pinst->serial_wq, &squeue->work);
	}

	spin_unlock_bh(&pd->lock);

	/*
	 * The next object that needs serialization might have arrived to
	 * the reorder queues in the meantime.
	 *
	 * Ensure reorder queue is read after pd->lock is dropped so we see
	 * new objects from another task in padata_do_serial. Pairs with
	 * smp_mb in padata_do_serial.
	 */
	smp_mb();

	reorder = per_cpu_ptr(pd->reorder_list, pd->cpu);
	if (!list_empty(&reorder->list) && padata_find_next(pd, false))
		queue_work(pinst->serial_wq, &pd->reorder_work);
}

static void invoke_padata_reorder(struct work_struct *work)
{
	struct parallel_data *pd;

	local_bh_disable();
	pd = container_of(work, struct parallel_data, reorder_work);
	padata_reorder(pd);
	local_bh_enable();
}

static void padata_serial_worker(struct work_struct *serial_work)
{
	struct padata_serial_queue *squeue;
	struct parallel_data *pd;
	LIST_HEAD(local_list);
	int cnt;

	local_bh_disable();
	squeue = container_of(serial_work, struct padata_serial_queue, work);
	pd = squeue->pd;

	spin_lock(&squeue->serial.lock);
	list_replace_init(&squeue->serial.list, &local_list);
	spin_unlock(&squeue->serial.lock);

	cnt = 0;

	while (!list_empty(&local_list)) {
		struct padata_priv *padata;

		padata = list_entry(local_list.next,
				    struct padata_priv, list);

		list_del_init(&padata->list);

		padata->serial(padata);
		cnt++;
	}
	local_bh_enable();

	if (refcount_sub_and_test(cnt, &pd->refcnt))
		padata_free_pd(pd);
}

/**
 * padata_do_serial - padata serialization function
 *
 * @padata: object to be serialized.
 *
 * padata_do_serial must be called for every parallelized object.
 * The serialization callback function will run with BHs off.
 */
void padata_do_serial(struct padata_priv *padata)
{
	struct parallel_data *pd = padata->pd;
	int hashed_cpu = padata_cpu_hash(pd, padata->seq_nr);
	struct padata_list *reorder = per_cpu_ptr(pd->reorder_list, hashed_cpu);
	struct padata_priv *cur;

	spin_lock(&reorder->lock);
	/* Sort in ascending order of sequence number. */
	list_for_each_entry_reverse(cur, &reorder->list, list)
		if (cur->seq_nr < padata->seq_nr)
			break;
	list_add(&padata->list, &cur->list);
	spin_unlock(&reorder->lock);

	/*
	 * Ensure the addition to the reorder list is ordered correctly
	 * with the trylock of pd->lock in padata_reorder. Pairs with smp_mb
	 * in padata_reorder.
	 */
	smp_mb();

	padata_reorder(pd);
}
EXPORT_SYMBOL(padata_do_serial);

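/*
 * Illustrative sketch (hypothetical names): the usual pattern is that the
 * parallel callback, or the asynchronous completion it triggers, ends with
 * padata_do_serial(), after which the serial callback runs in submission
 * order:
 *
 *	static void my_serial(struct padata_priv *padata)
 *	{
 *		struct my_request *req = container_of(padata,
 *						      struct my_request, padata);
 *		// runs with BHs off, in the same order the requests were
 *		// submitted with padata_do_parallel()
 *		complete(&req->done);
 *	}
 */
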
static int padata_setup_cpumasks(struct padata_instance *pinst)
{
	struct workqueue_attrs *attrs;
	int err;

	attrs = alloc_workqueue_attrs();
	if (!attrs)
		return -ENOMEM;

	/* Restrict parallel_wq workers to pd->cpumask.pcpu. */
	cpumask_copy(attrs->cpumask, pinst->cpumask.pcpu);
	err = apply_workqueue_attrs(pinst->parallel_wq, attrs);
	free_workqueue_attrs(attrs);

	return err;
}

static void __init padata_mt_helper(struct work_struct *w)
{
	struct padata_work *pw = container_of(w, struct padata_work, pw_work);
	struct padata_mt_job_state *ps = pw->pw_data;
	struct padata_mt_job *job = ps->job;
	bool done;

	spin_lock(&ps->lock);

	while (job->size > 0) {
		unsigned long start, size, end;

		start = job->start;
		/* So end is chunk size aligned if enough work remains. */
		size = roundup(start + 1, ps->chunk_size) - start;
		size = min(size, job->size);
		end = start + size;

		job->start = end;
		job->size -= size;

		spin_unlock(&ps->lock);
		job->thread_fn(start, end, job->fn_arg);
		spin_lock(&ps->lock);
	}

	++ps->nworks_fini;
	done = (ps->nworks_fini == ps->nworks);
	spin_unlock(&ps->lock);

	if (done)
		complete(&ps->completion);
}

/**
 * padata_do_multithreaded - run a multithreaded job
 * @job: Description of the job.
 *
 * See the definition of struct padata_mt_job for more details.
 */
void __init padata_do_multithreaded(struct padata_mt_job *job)
{
	/* In case threads finish at different times. */
	static const unsigned long load_balance_factor = 4;
	struct padata_work my_work, *pw;
	struct padata_mt_job_state ps;
	LIST_HEAD(works);
	int nworks;

	if (job->size == 0)
		return;

	/* Ensure at least one thread when size < min_chunk. */
	nworks = max(job->size / job->min_chunk, 1ul);
	nworks = min(nworks, job->max_threads);

	if (nworks == 1) {
		/* Single thread, no coordination needed, cut to the chase. */
		job->thread_fn(job->start, job->start + job->size, job->fn_arg);
		return;
	}

	spin_lock_init(&ps.lock);
	init_completion(&ps.completion);
	ps.job = job;
	ps.nworks = padata_work_alloc_mt(nworks, &ps, &works);
	ps.nworks_fini = 0;

	/*
	 * Chunk size is the amount of work a helper does per call to the
	 * thread function. Load balance large jobs between threads by
	 * increasing the number of chunks, guarantee at least the minimum
	 * chunk size from the caller, and honor the caller's alignment.
	 */
	ps.chunk_size = job->size / (ps.nworks * load_balance_factor);
	ps.chunk_size = max(ps.chunk_size, job->min_chunk);
	ps.chunk_size = roundup(ps.chunk_size, job->align);

	list_for_each_entry(pw, &works, pw_list)
		queue_work(system_unbound_wq, &pw->pw_work);

	/* Use the current thread, which saves starting a workqueue worker. */
	padata_work_init(&my_work, padata_mt_helper, &ps, PADATA_WORK_ONSTACK);
	padata_mt_helper(&my_work.pw_work);

	/* Wait for all the helpers to finish. */
	wait_for_completion(&ps.completion);

	destroy_work_on_stack(&my_work.pw_work);
	padata_works_free(&works);
}

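/*
 * Illustrative sketch (hypothetical names): a boot-time caller describes the
 * job and lets padata split it across CPUs. Only struct padata_mt_job and
 * padata_do_multithreaded() are real; the thread function and its argument
 * are made up for the example.
 *
 *	static void __init init_range(unsigned long start, unsigned long end,
 *				      void *arg)
 *	{
 *		struct my_ctx *ctx = arg;
 *
 *		// initialize items [start, end) using ctx
 *	}
 *
 *	struct padata_mt_job job = {
 *		.thread_fn	= init_range,
 *		.fn_arg		= ctx,
 *		.start		= first_item,
 *		.size		= nr_items,
 *		.align		= 1,		// no alignment requirement
 *		.min_chunk	= 1024,		// don't split below this
 *		.max_threads	= num_online_cpus(),
 *	};
 *
 *	padata_do_multithreaded(&job);	// returns when the whole range is done
 */
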
static void __padata_list_init(struct padata_list *pd_list)
{
	INIT_LIST_HEAD(&pd_list->list);
	spin_lock_init(&pd_list->lock);
}

/* Initialize all percpu queues used by serial workers */
static void padata_init_squeues(struct parallel_data *pd)
{
	int cpu;
	struct padata_serial_queue *squeue;

	for_each_cpu(cpu, pd->cpumask.cbcpu) {
		squeue = per_cpu_ptr(pd->squeue, cpu);
		squeue->pd = pd;
		__padata_list_init(&squeue->serial);
		INIT_WORK(&squeue->work, padata_serial_worker);
	}
}

/* Initialize per-CPU reorder lists */
static void padata_init_reorder_list(struct parallel_data *pd)
{
	int cpu;
	struct padata_list *list;

	for_each_cpu(cpu, pd->cpumask.pcpu) {
		list = per_cpu_ptr(pd->reorder_list, cpu);
		__padata_list_init(list);
	}
}

/* Allocate and initialize the internal cpumask dependent resources. */
static struct parallel_data *padata_alloc_pd(struct padata_shell *ps)
{
	struct padata_instance *pinst = ps->pinst;
	struct parallel_data *pd;

	pd = kzalloc(sizeof(struct parallel_data), GFP_KERNEL);
	if (!pd)
		goto err;

	pd->reorder_list = alloc_percpu(struct padata_list);
	if (!pd->reorder_list)
		goto err_free_pd;

	pd->squeue = alloc_percpu(struct padata_serial_queue);
	if (!pd->squeue)
		goto err_free_reorder_list;

	pd->ps = ps;

	if (!alloc_cpumask_var(&pd->cpumask.pcpu, GFP_KERNEL))
		goto err_free_squeue;
	if (!alloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL))
		goto err_free_pcpu;

	cpumask_and(pd->cpumask.pcpu, pinst->cpumask.pcpu, cpu_online_mask);
	cpumask_and(pd->cpumask.cbcpu, pinst->cpumask.cbcpu, cpu_online_mask);

	padata_init_reorder_list(pd);
	padata_init_squeues(pd);
	pd->seq_nr = -1;
	refcount_set(&pd->refcnt, 1);
	spin_lock_init(&pd->lock);
	pd->cpu = cpumask_first(pd->cpumask.pcpu);
	INIT_WORK(&pd->reorder_work, invoke_padata_reorder);

	return pd;

err_free_pcpu:
	free_cpumask_var(pd->cpumask.pcpu);
err_free_squeue:
	free_percpu(pd->squeue);
err_free_reorder_list:
	free_percpu(pd->reorder_list);
err_free_pd:
	kfree(pd);
err:
	return NULL;
}

static void padata_free_pd(struct parallel_data *pd)
{
	free_cpumask_var(pd->cpumask.pcpu);
	free_cpumask_var(pd->cpumask.cbcpu);
	free_percpu(pd->reorder_list);
	free_percpu(pd->squeue);
	kfree(pd);
}

static void __padata_start(struct padata_instance *pinst)
{
	pinst->flags |= PADATA_INIT;
}

static void __padata_stop(struct padata_instance *pinst)
{
	if (!(pinst->flags & PADATA_INIT))
		return;

	pinst->flags &= ~PADATA_INIT;

	synchronize_rcu();
}

/* Replace the internal control structure with a new one. */
static int padata_replace_one(struct padata_shell *ps)
{
	struct parallel_data *pd_new;

	pd_new = padata_alloc_pd(ps);
	if (!pd_new)
		return -ENOMEM;

	ps->opd = rcu_dereference_protected(ps->pd, 1);
	rcu_assign_pointer(ps->pd, pd_new);

	return 0;
}

static int padata_replace(struct padata_instance *pinst)
{
	struct padata_shell *ps;
	int err = 0;

	pinst->flags |= PADATA_RESET;

	list_for_each_entry(ps, &pinst->pslist, list) {
		err = padata_replace_one(ps);
		if (err)
			break;
	}

	synchronize_rcu();

	list_for_each_entry_continue_reverse(ps, &pinst->pslist, list)
		if (refcount_dec_and_test(&ps->opd->refcnt))
			padata_free_pd(ps->opd);

	pinst->flags &= ~PADATA_RESET;

	return err;
}

/* If cpumask contains no active cpu, we mark the instance as invalid. */
static bool padata_validate_cpumask(struct padata_instance *pinst,
				    const struct cpumask *cpumask)
{
	if (!cpumask_intersects(cpumask, cpu_online_mask)) {
		pinst->flags |= PADATA_INVALID;
		return false;
	}

	pinst->flags &= ~PADATA_INVALID;
	return true;
}

static int __padata_set_cpumasks(struct padata_instance *pinst,
				 cpumask_var_t pcpumask,
				 cpumask_var_t cbcpumask)
{
	int valid;
	int err;

	valid = padata_validate_cpumask(pinst, pcpumask);
	if (!valid) {
		__padata_stop(pinst);
		goto out_replace;
	}

	valid = padata_validate_cpumask(pinst, cbcpumask);
	if (!valid)
		__padata_stop(pinst);

out_replace:
	cpumask_copy(pinst->cpumask.pcpu, pcpumask);
	cpumask_copy(pinst->cpumask.cbcpu, cbcpumask);

	err = padata_setup_cpumasks(pinst) ?: padata_replace(pinst);

	if (valid)
		__padata_start(pinst);

	return err;
}

/**
 * padata_set_cpumask - Set the cpumask selected by @cpumask_type to the
 *                      value of @cpumask.
 * @pinst: padata instance
 * @cpumask_type: PADATA_CPU_PARALLEL or PADATA_CPU_SERIAL, selecting the
 *                parallel or the serial cpumask respectively.
 * @cpumask: the cpumask to use
 *
 * Return: 0 on success or negative error code
 */
int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type,
		       cpumask_var_t cpumask)
{
	struct cpumask *serial_mask, *parallel_mask;
	int err = -EINVAL;

	cpus_read_lock();
	mutex_lock(&pinst->lock);

	switch (cpumask_type) {
	case PADATA_CPU_PARALLEL:
		serial_mask = pinst->cpumask.cbcpu;
		parallel_mask = cpumask;
		break;
	case PADATA_CPU_SERIAL:
		parallel_mask = pinst->cpumask.pcpu;
		serial_mask = cpumask;
		break;
	default:
		goto out;
	}

	err = __padata_set_cpumasks(pinst, parallel_mask, serial_mask);

out:
	mutex_unlock(&pinst->lock);
	cpus_read_unlock();

	return err;
}
EXPORT_SYMBOL(padata_set_cpumask);

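/*
 * Illustrative sketch (hypothetical mask/instance variables): restricting the
 * parallel workers of an instance to CPUs 0-3.
 *
 *	cpumask_var_t mask;
 *
 *	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
 *		return -ENOMEM;
 *	cpumask_clear(mask);
 *	cpumask_set_cpu(0, mask);
 *	cpumask_set_cpu(1, mask);
 *	cpumask_set_cpu(2, mask);
 *	cpumask_set_cpu(3, mask);
 *	err = padata_set_cpumask(pinst, PADATA_CPU_PARALLEL, mask);
 *	free_cpumask_var(mask);
 */
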
#ifdef CONFIG_HOTPLUG_CPU

static int __padata_add_cpu(struct padata_instance *pinst, int cpu)
{
	int err = 0;

	if (cpumask_test_cpu(cpu, cpu_online_mask)) {
		err = padata_replace(pinst);

		if (padata_validate_cpumask(pinst, pinst->cpumask.pcpu) &&
		    padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
			__padata_start(pinst);
	}

	return err;
}

static int __padata_remove_cpu(struct padata_instance *pinst, int cpu)
{
	int err = 0;

	if (!cpumask_test_cpu(cpu, cpu_online_mask)) {
		if (!padata_validate_cpumask(pinst, pinst->cpumask.pcpu) ||
		    !padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
			__padata_stop(pinst);

		err = padata_replace(pinst);
	}

	return err;
}

static inline int pinst_has_cpu(struct padata_instance *pinst, int cpu)
{
	return cpumask_test_cpu(cpu, pinst->cpumask.pcpu) ||
		cpumask_test_cpu(cpu, pinst->cpumask.cbcpu);
}

static int padata_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct padata_instance *pinst;
	int ret;

	pinst = hlist_entry_safe(node, struct padata_instance, cpu_online_node);
	if (!pinst_has_cpu(pinst, cpu))
		return 0;

	mutex_lock(&pinst->lock);
	ret = __padata_add_cpu(pinst, cpu);
	mutex_unlock(&pinst->lock);
	return ret;
}

static int padata_cpu_dead(unsigned int cpu, struct hlist_node *node)
{
	struct padata_instance *pinst;
	int ret;

	pinst = hlist_entry_safe(node, struct padata_instance, cpu_dead_node);
	if (!pinst_has_cpu(pinst, cpu))
		return 0;

	mutex_lock(&pinst->lock);
	ret = __padata_remove_cpu(pinst, cpu);
	mutex_unlock(&pinst->lock);
	return ret;
}

static enum cpuhp_state hp_online;
#endif

static void __padata_free(struct padata_instance *pinst)
{
#ifdef CONFIG_HOTPLUG_CPU
	cpuhp_state_remove_instance_nocalls(CPUHP_PADATA_DEAD,
					    &pinst->cpu_dead_node);
	cpuhp_state_remove_instance_nocalls(hp_online, &pinst->cpu_online_node);
#endif

	WARN_ON(!list_empty(&pinst->pslist));

	free_cpumask_var(pinst->cpumask.pcpu);
	free_cpumask_var(pinst->cpumask.cbcpu);
	destroy_workqueue(pinst->serial_wq);
	destroy_workqueue(pinst->parallel_wq);
	kfree(pinst);
}

#define kobj2pinst(_kobj)					\
	container_of(_kobj, struct padata_instance, kobj)
#define attr2pentry(_attr)					\
	container_of(_attr, struct padata_sysfs_entry, attr)

static void padata_sysfs_release(struct kobject *kobj)
{
	struct padata_instance *pinst = kobj2pinst(kobj);
	__padata_free(pinst);
}

struct padata_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct padata_instance *, struct attribute *, char *);
	ssize_t (*store)(struct padata_instance *, struct attribute *,
			 const char *, size_t);
};

static ssize_t show_cpumask(struct padata_instance *pinst,
			    struct attribute *attr, char *buf)
{
	struct cpumask *cpumask;
	ssize_t len;

	mutex_lock(&pinst->lock);
	if (!strcmp(attr->name, "serial_cpumask"))
		cpumask = pinst->cpumask.cbcpu;
	else
		cpumask = pinst->cpumask.pcpu;

	len = snprintf(buf, PAGE_SIZE, "%*pb\n",
		       nr_cpu_ids, cpumask_bits(cpumask));
	mutex_unlock(&pinst->lock);
	return len < PAGE_SIZE ? len : -EINVAL;
}

static ssize_t store_cpumask(struct padata_instance *pinst,
			     struct attribute *attr,
			     const char *buf, size_t count)
{
	cpumask_var_t new_cpumask;
	ssize_t ret;
	int mask_type;

	if (!alloc_cpumask_var(&new_cpumask, GFP_KERNEL))
		return -ENOMEM;

	ret = bitmap_parse(buf, count, cpumask_bits(new_cpumask),
			   nr_cpumask_bits);
	if (ret < 0)
		goto out;

	mask_type = !strcmp(attr->name, "serial_cpumask") ?
		PADATA_CPU_SERIAL : PADATA_CPU_PARALLEL;
	ret = padata_set_cpumask(pinst, mask_type, new_cpumask);
	if (!ret)
		ret = count;

out:
	free_cpumask_var(new_cpumask);
	return ret;
}

#define PADATA_ATTR_RW(_name, _show_name, _store_name)		\
	static struct padata_sysfs_entry _name##_attr =	\
		__ATTR(_name, 0644, _show_name, _store_name)
#define PADATA_ATTR_RO(_name, _show_name)		\
	static struct padata_sysfs_entry _name##_attr = \
		__ATTR(_name, 0400, _show_name, NULL)

PADATA_ATTR_RW(serial_cpumask, show_cpumask, store_cpumask);
PADATA_ATTR_RW(parallel_cpumask, show_cpumask, store_cpumask);

/*
 * Padata sysfs provides the following objects:
 * serial_cpumask   [RW] - cpumask for serial workers
 * parallel_cpumask [RW] - cpumask for parallel workers
 */
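/*
 * Illustrative only: the instance kobject is registered by the padata user,
 * so the exact sysfs path depends on that user. For the pcrypt user, for
 * example, the files appear under /sys/kernel/pcrypt/<instance>/ and can be
 * written like:
 *
 *	echo f > /sys/kernel/pcrypt/pencrypt/parallel_cpumask
 */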
static struct attribute *padata_default_attrs[] = {
	&serial_cpumask_attr.attr,
	&parallel_cpumask_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(padata_default);

static ssize_t padata_sysfs_show(struct kobject *kobj,
				 struct attribute *attr, char *buf)
{
	struct padata_instance *pinst;
	struct padata_sysfs_entry *pentry;
	ssize_t ret = -EIO;

	pinst = kobj2pinst(kobj);
	pentry = attr2pentry(attr);
	if (pentry->show)
		ret = pentry->show(pinst, attr, buf);

	return ret;
}

static ssize_t padata_sysfs_store(struct kobject *kobj, struct attribute *attr,
				  const char *buf, size_t count)
{
	struct padata_instance *pinst;
	struct padata_sysfs_entry *pentry;
	ssize_t ret = -EIO;

	pinst = kobj2pinst(kobj);
	pentry = attr2pentry(attr);
	if (pentry->store)
		ret = pentry->store(pinst, attr, buf, count);

	return ret;
}

static const struct sysfs_ops padata_sysfs_ops = {
	.show = padata_sysfs_show,
	.store = padata_sysfs_store,
};

static struct kobj_type padata_attr_type = {
	.sysfs_ops = &padata_sysfs_ops,
	.default_groups = padata_default_groups,
	.release = padata_sysfs_release,
};

/**
 * padata_alloc - allocate and initialize a padata instance
 * @name: used to identify the instance
 *
 * Return: new instance on success, NULL on error
 */
struct padata_instance *padata_alloc(const char *name)
{
	struct padata_instance *pinst;

	pinst = kzalloc(sizeof(struct padata_instance), GFP_KERNEL);
	if (!pinst)
		goto err;

	pinst->parallel_wq = alloc_workqueue("%s_parallel", WQ_UNBOUND, 0,
					     name);
	if (!pinst->parallel_wq)
		goto err_free_inst;

	cpus_read_lock();

	pinst->serial_wq = alloc_workqueue("%s_serial", WQ_MEM_RECLAIM |
					   WQ_CPU_INTENSIVE, 1, name);
	if (!pinst->serial_wq)
		goto err_put_cpus;

	if (!alloc_cpumask_var(&pinst->cpumask.pcpu, GFP_KERNEL))
		goto err_free_serial_wq;
	if (!alloc_cpumask_var(&pinst->cpumask.cbcpu, GFP_KERNEL)) {
		free_cpumask_var(pinst->cpumask.pcpu);
		goto err_free_serial_wq;
	}

	INIT_LIST_HEAD(&pinst->pslist);

	cpumask_copy(pinst->cpumask.pcpu, cpu_possible_mask);
	cpumask_copy(pinst->cpumask.cbcpu, cpu_possible_mask);

	if (padata_setup_cpumasks(pinst))
		goto err_free_masks;

	__padata_start(pinst);

	kobject_init(&pinst->kobj, &padata_attr_type);
	mutex_init(&pinst->lock);

#ifdef CONFIG_HOTPLUG_CPU
	cpuhp_state_add_instance_nocalls_cpuslocked(hp_online,
						    &pinst->cpu_online_node);
	cpuhp_state_add_instance_nocalls_cpuslocked(CPUHP_PADATA_DEAD,
						    &pinst->cpu_dead_node);
#endif

	cpus_read_unlock();

	return pinst;

err_free_masks:
	free_cpumask_var(pinst->cpumask.pcpu);
	free_cpumask_var(pinst->cpumask.cbcpu);
err_free_serial_wq:
	destroy_workqueue(pinst->serial_wq);
err_put_cpus:
	cpus_read_unlock();
	destroy_workqueue(pinst->parallel_wq);
err_free_inst:
	kfree(pinst);
err:
	return NULL;
}
EXPORT_SYMBOL(padata_alloc);

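/*
 * Illustrative sketch (hypothetical names): typical lifecycle of an instance
 * and its shell, as used by a padata client such as pcrypt.
 *
 *	struct padata_instance *pinst;
 *	struct padata_shell *ps;
 *
 *	pinst = padata_alloc("my_inst");
 *	if (!pinst)
 *		return -ENOMEM;
 *	ps = padata_alloc_shell(pinst);
 *	if (!ps) {
 *		padata_free(pinst);
 *		return -ENOMEM;
 *	}
 *
 *	// submit objects through the shell with padata_do_parallel(ps, ...)
 *
 *	padata_free_shell(ps);
 *	padata_free(pinst);
 */
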
/**
 * padata_free - free a padata instance
 *
 * @pinst: padata instance to free
 */
void padata_free(struct padata_instance *pinst)
{
	kobject_put(&pinst->kobj);
}
EXPORT_SYMBOL(padata_free);

/**
 * padata_alloc_shell - Allocate and initialize padata shell.
 *
 * @pinst: Parent padata_instance object.
 *
 * Return: new shell on success, NULL on error
 */
struct padata_shell *padata_alloc_shell(struct padata_instance *pinst)
{
	struct parallel_data *pd;
	struct padata_shell *ps;

	ps = kzalloc(sizeof(*ps), GFP_KERNEL);
	if (!ps)
		goto out;

	ps->pinst = pinst;

	cpus_read_lock();
	pd = padata_alloc_pd(ps);
	cpus_read_unlock();

	if (!pd)
		goto out_free_ps;

	mutex_lock(&pinst->lock);
	RCU_INIT_POINTER(ps->pd, pd);
	list_add(&ps->list, &pinst->pslist);
	mutex_unlock(&pinst->lock);

	return ps;

out_free_ps:
	kfree(ps);
out:
	return NULL;
}
EXPORT_SYMBOL(padata_alloc_shell);

/**
 * padata_free_shell - free a padata shell
 *
 * @ps: padata shell to free
 */
void padata_free_shell(struct padata_shell *ps)
{
	if (!ps)
		return;

	mutex_lock(&ps->pinst->lock);
	list_del(&ps->list);
	padata_free_pd(rcu_dereference_protected(ps->pd, 1));
	mutex_unlock(&ps->pinst->lock);

	kfree(ps);
}
EXPORT_SYMBOL(padata_free_shell);

void __init padata_init(void)
{
	unsigned int i, possible_cpus;
#ifdef CONFIG_HOTPLUG_CPU
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "padata:online",
				      padata_cpu_online, NULL);
	if (ret < 0)
		goto err;
	hp_online = ret;

	ret = cpuhp_setup_state_multi(CPUHP_PADATA_DEAD, "padata:dead",
				      NULL, padata_cpu_dead);
	if (ret < 0)
		goto remove_online_state;
#endif

	possible_cpus = num_possible_cpus();
	padata_works = kmalloc_array(possible_cpus, sizeof(struct padata_work),
				     GFP_KERNEL);
	if (!padata_works)
		goto remove_dead_state;

	for (i = 0; i < possible_cpus; ++i)
		list_add(&padata_works[i].pw_list, &padata_free_works);

	return;

remove_dead_state:
#ifdef CONFIG_HOTPLUG_CPU
	cpuhp_remove_multi_state(CPUHP_PADATA_DEAD);
remove_online_state:
	cpuhp_remove_multi_state(hp_online);
err:
#endif
	pr_warn("padata: initialization failed\n");
}