// SPDX-License-Identifier: GPL-2.0+
/*
 * caam - Freescale FSL CAAM support for hw_random
 *
 * Copyright 2011 Freescale Semiconductor, Inc.
 * Copyright 2018-2019 NXP
 *
 * Based on caamalg.c crypto API driver.
 *
 * Relationship between job descriptors and the shared descriptor:
 *
 * ---------------                     --------------
 * | JobDesc #0  |-------------------->| ShareDesc  |
 * | *(buffer 0) |       |------------>| (generate) |
 * ---------------       |             | (move)     |
 *                       |             | (store)    |
 * ---------------       |             --------------
 * | JobDesc #1  |-------|
 * | *(buffer 1) |
 * ---------------
 *
 * A job descriptor looks like this:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * ---------------------
 *
 * The SharedDesc never changes, and each job descriptor points to one of two
 * buffers for each device, from which the data will be copied into the
 * requested destination.
 */

#include <linux/hw_random.h>
#include <linux/completion.h>
#include <linux/atomic.h>

#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"

/*
 * Maximum buffer size: maximum number of random, cache-aligned bytes that
 * will be generated and moved to seq out ptr (extlen not allowed)
 */
#define RN_BUF_SIZE			(0xffff / L1_CACHE_BYTES * \
					 L1_CACHE_BYTES)

/* length of descriptors */
#define DESC_JOB_O_LEN			(CAAM_CMD_SZ * 2 + CAAM_PTR_SZ * 2)
#define DESC_RNG_LEN			(3 * CAAM_CMD_SZ)

/* Buffer, its dma address, the job descriptor used to fill it and its state */
struct buf_data {
	u8 buf[RN_BUF_SIZE] ____cacheline_aligned;
	dma_addr_t addr;
	struct completion filled;
	u32 hw_desc[DESC_JOB_O_LEN];
#define BUF_NOT_EMPTY 0
#define BUF_EMPTY 1
#define BUF_PENDING 2 /* Empty, but with job pending -- don't submit another */
	atomic_t empty;
};

/* rng per-device context */
struct caam_rng_ctx {
	struct device *jrdev;
	dma_addr_t sh_desc_dma;
	u32 sh_desc[DESC_RNG_LEN];
	unsigned int cur_buf_idx;
	int current_buf;
	struct buf_data bufs[2];
};

static struct caam_rng_ctx *rng_ctx;

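/*
 * DMA unmap helpers: rng_unmap_buf() releases the streaming mapping of one
 * output buffer; rng_unmap_ctx() additionally unmaps the shared descriptor
 * and both buffers of a context.
 */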
static inline void rng_unmap_buf(struct device *jrdev, struct buf_data *bd)
{
	if (bd->addr)
		dma_unmap_single(jrdev, bd->addr, RN_BUF_SIZE,
				 DMA_FROM_DEVICE);
}

static inline void rng_unmap_ctx(struct caam_rng_ctx *ctx)
{
	struct device *jrdev = ctx->jrdev;

	if (ctx->sh_desc_dma)
		dma_unmap_single(jrdev, ctx->sh_desc_dma,
				 desc_bytes(ctx->sh_desc), DMA_TO_DEVICE);
	rng_unmap_buf(jrdev, &ctx->bufs[0]);
	rng_unmap_buf(jrdev, &ctx->bufs[1]);
}

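/*
 * Job ring completion callback: report any CAAM status error, mark the
 * buffer as filled, wake up any waiter and sync the DMA buffer for CPU
 * access before the random bytes are copied out.
 */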
static void rng_done(struct device *jrdev, u32 *desc, u32 err, void *context)
{
	struct buf_data *bd;

	bd = container_of(desc, struct buf_data, hw_desc[0]);

	if (err)
		caam_jr_strstatus(jrdev, err);

	atomic_set(&bd->empty, BUF_NOT_EMPTY);
	complete(&bd->filled);

	/* Buffer refilled, invalidate cache */
	dma_sync_single_for_cpu(jrdev, bd->addr, RN_BUF_SIZE, DMA_FROM_DEVICE);

	print_hex_dump_debug("rng refreshed buf@: ", DUMP_PREFIX_ADDRESS, 16, 4,
			     bd->buf, RN_BUF_SIZE, 1);
}

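/*
 * Enqueue the job descriptor that refills one of the two buffers.
 * to_current selects whether the target is the buffer currently being read
 * (true) or the other one; on enqueue failure the completion is signalled
 * immediately so callers never block on a job that was never submitted.
 */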
static inline int submit_job(struct caam_rng_ctx *ctx, int to_current)
{
	struct buf_data *bd = &ctx->bufs[!(to_current ^ ctx->current_buf)];
	struct device *jrdev = ctx->jrdev;
	u32 *desc = bd->hw_desc;
	int err;

	dev_dbg(jrdev, "submitting job %d\n", !(to_current ^ ctx->current_buf));
	init_completion(&bd->filled);
	err = caam_jr_enqueue(jrdev, desc, rng_done, ctx);
	if (err)
		complete(&bd->filled); /* don't wait on failed job */
	else
		atomic_inc(&bd->empty); /* note if pending */

	return err;
}

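/*
 * hwrng .read callback: copy up to max bytes out of the current buffer.
 * If the buffer is empty, a refill job is submitted and, when wait is set,
 * the caller sleeps until it completes. When the current buffer is drained,
 * a refill is queued for it, reading switches to the other buffer and the
 * remainder is fetched by a single non-waiting recursive call.
 */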
static int caam_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
	struct caam_rng_ctx *ctx = rng_ctx;
	struct buf_data *bd = &ctx->bufs[ctx->current_buf];
	int next_buf_idx, copied_idx;
	int err;

	if (atomic_read(&bd->empty)) {
		/* try to submit job if there wasn't one */
		if (atomic_read(&bd->empty) == BUF_EMPTY) {
			err = submit_job(ctx, 1);
			/* if can't submit job, can't even wait */
			if (err)
				return 0;
		}
		/* no immediate data, so exit if not waiting */
		if (!wait)
			return 0;

		/* waiting for pending job */
		if (atomic_read(&bd->empty))
			wait_for_completion(&bd->filled);
	}

	next_buf_idx = ctx->cur_buf_idx + max;
	dev_dbg(ctx->jrdev, "%s: start reading at buffer %d, idx %d\n",
		__func__, ctx->current_buf, ctx->cur_buf_idx);

	/* if enough data in current buffer */
	if (next_buf_idx < RN_BUF_SIZE) {
		memcpy(data, bd->buf + ctx->cur_buf_idx, max);
		ctx->cur_buf_idx = next_buf_idx;
		return max;
	}

	/* else, copy what's left... */
	copied_idx = RN_BUF_SIZE - ctx->cur_buf_idx;
	memcpy(data, bd->buf + ctx->cur_buf_idx, copied_idx);
	ctx->cur_buf_idx = 0;
	atomic_set(&bd->empty, BUF_EMPTY);

	/* ...refill... */
	submit_job(ctx, 1);

	/* and use next buffer */
	ctx->current_buf = !ctx->current_buf;
	dev_dbg(ctx->jrdev, "switched to buffer %d\n", ctx->current_buf);

	/* since there already is some data read, don't wait */
	return copied_idx + caam_read(rng, data + copied_idx,
				      max - copied_idx, false);
}

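/*
 * Build the shared descriptor: a single RNG operation followed by a
 * SEQ FIFO STORE of RN_BUF_SIZE bytes, then map it for the CAAM to fetch.
 */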
static inline int rng_create_sh_desc(struct caam_rng_ctx *ctx)
{
	struct device *jrdev = ctx->jrdev;
	u32 *desc = ctx->sh_desc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Generate random bytes */
	append_operation(desc, OP_ALG_ALGSEL_RNG | OP_TYPE_CLASS1_ALG);

	/* Store bytes */
	append_seq_fifo_store(desc, RN_BUF_SIZE, FIFOST_TYPE_RNGSTORE);

	ctx->sh_desc_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
					  DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}

	print_hex_dump_debug("rng shdesc@: ", DUMP_PREFIX_ADDRESS, 16, 4,
			     desc, desc_bytes(desc), 1);

	return 0;
}

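/*
 * Build the job descriptor for one buffer: a header pointing at the shared
 * descriptor plus a SEQ OUT PTR to the DMA-mapped output buffer.
 */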
static inline int rng_create_job_desc(struct caam_rng_ctx *ctx, int buf_id)
{
	struct device *jrdev = ctx->jrdev;
	struct buf_data *bd = &ctx->bufs[buf_id];
	u32 *desc = bd->hw_desc;
	int sh_len = desc_len(ctx->sh_desc);

	init_job_desc_shared(desc, ctx->sh_desc_dma, sh_len, HDR_SHARE_DEFER |
			     HDR_REVERSE);

	bd->addr = dma_map_single(jrdev, bd->buf, RN_BUF_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, bd->addr)) {
		dev_err(jrdev, "unable to map dst\n");
		return -ENOMEM;
	}

	append_seq_out_ptr_intlen(desc, bd->addr, RN_BUF_SIZE, 0);

	print_hex_dump_debug("rng job desc@: ", DUMP_PREFIX_ADDRESS, 16, 4,
			     desc, desc_bytes(desc), 1);

	return 0;
}

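/*
 * hwrng .cleanup callback: wait for any in-flight refill jobs, then undo
 * the DMA mappings for the shared descriptor and both buffers.
 */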
static void caam_cleanup(struct hwrng *rng)
{
	int i;
	struct buf_data *bd;

	for (i = 0; i < 2; i++) {
		bd = &rng_ctx->bufs[i];
		if (atomic_read(&bd->empty) == BUF_PENDING)
			wait_for_completion(&bd->filled);
	}

	rng_unmap_ctx(rng_ctx);
}

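/*
 * Prepare one buffer for use: build its job descriptor, then submit an
 * initial fill job and wait for it so the buffer starts out full.
 */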
static int caam_init_buf(struct caam_rng_ctx *ctx, int buf_id)
{
	struct buf_data *bd = &ctx->bufs[buf_id];
	int err;

	err = rng_create_job_desc(ctx, buf_id);
	if (err)
		return err;

	atomic_set(&bd->empty, BUF_EMPTY);
	submit_job(ctx, buf_id == ctx->current_buf);
	wait_for_completion(&bd->filled);

	return 0;
}

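/*
 * Initialize a context on the given job ring device: create the shared
 * descriptor and prefill both buffers.
 */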
static int caam_init_rng(struct caam_rng_ctx *ctx, struct device *jrdev)
{
	int err;

	ctx->jrdev = jrdev;

	err = rng_create_sh_desc(ctx);
	if (err)
		return err;

	ctx->current_buf = 0;
	ctx->cur_buf_idx = 0;

	err = caam_init_buf(ctx, 0);
	if (err)
		return err;

	return caam_init_buf(ctx, 1);
}

static struct hwrng caam_rng = {
	.name		= "rng-caam",
	.cleanup	= caam_cleanup,
	.read		= caam_read,
};

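/* Undo caam_rng_init(): release the job ring, unregister and free the context */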
void caam_rng_exit(void)
{
	caam_jr_free(rng_ctx->jrdev);
	hwrng_unregister(&caam_rng);
	kfree(rng_ctx);
}

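/*
 * Controller-driver entry point: detect an instantiated RNG block (the
 * presence register differs before and after Era 10), grab a job ring,
 * allocate a DMA-able context and register with the hw_random core.
 */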
int caam_rng_init(struct device *ctrldev)
{
	struct device *dev;
	u32 rng_inst;
	struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
	int err;

	/* Check for an instantiated RNG before registration */
	if (priv->era < 10)
		rng_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
			    CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT;
	else
		rng_inst = rd_reg32(&priv->ctrl->vreg.rng) & CHA_VER_NUM_MASK;

	if (!rng_inst)
		return 0;

	dev = caam_jr_alloc();
	if (IS_ERR(dev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(dev);
	}
	rng_ctx = kmalloc(sizeof(*rng_ctx), GFP_DMA | GFP_KERNEL);
	if (!rng_ctx) {
		err = -ENOMEM;
		goto free_caam_alloc;
	}
	err = caam_init_rng(rng_ctx, dev);
	if (err)
		goto free_rng_ctx;

	dev_info(dev, "registering rng-caam\n");
	return hwrng_register(&caam_rng);

free_rng_ctx:
	kfree(rng_ctx);
free_caam_alloc:
	caam_jr_free(dev);
	return err;
}