// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
// Copyright (c) 2018, Linaro Limited

#include <linux/completion.h>
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/rpmsg.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <uapi/misc/fastrpc.h>

#define ADSP_DOMAIN_ID (0)
#define MDSP_DOMAIN_ID (1)
#define SDSP_DOMAIN_ID (2)
#define CDSP_DOMAIN_ID (3)
#define FASTRPC_DEV_MAX		4 /* adsp, mdsp, sdsp, cdsp */
#define FASTRPC_MAX_SESSIONS	9 /* 8 compute, 1 cpz */
#define FASTRPC_ALIGN		128
#define FASTRPC_MAX_FDLIST	16
#define FASTRPC_MAX_CRCLIST	64
#define FASTRPC_PHYS(p)		((p) & 0xffffffff)
#define FASTRPC_CTX_MAX		(256)
#define FASTRPC_INIT_HANDLE	1
#define FASTRPC_CTXID_MASK	(0xFF0)
#define INIT_FILELEN_MAX	(2 * 1024 * 1024)
#define INIT_MEMLEN_MAX		(8 * 1024 * 1024)
#define FASTRPC_DEVICE_NAME	"fastrpc"

/* Retrieves number of input buffers from the scalars parameter */
#define REMOTE_SCALARS_INBUFS(sc)	(((sc) >> 16) & 0x0ff)

/* Retrieves number of output buffers from the scalars parameter */
#define REMOTE_SCALARS_OUTBUFS(sc)	(((sc) >> 8) & 0x0ff)

/* Retrieves number of input handles from the scalars parameter */
#define REMOTE_SCALARS_INHANDLES(sc)	(((sc) >> 4) & 0x0f)

/* Retrieves number of output handles from the scalars parameter */
#define REMOTE_SCALARS_OUTHANDLES(sc)	((sc) & 0x0f)

#define REMOTE_SCALARS_LENGTH(sc)	(REMOTE_SCALARS_INBUFS(sc) +   \
					 REMOTE_SCALARS_OUTBUFS(sc) +  \
					 REMOTE_SCALARS_INHANDLES(sc) + \
					 REMOTE_SCALARS_OUTHANDLES(sc))
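
/*
 * Layout of the scalars (sc) word built by FASTRPC_BUILD_SCALARS() below:
 * bits 31:29 attributes, 28:24 method id, 23:16 input buffers,
 * 15:8 output buffers, 7:4 input handles, 3:0 output handles.
 */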
#define FASTRPC_BUILD_SCALARS(attr, method, in, out, oin, oout)  \
				(((attr & 0x07) << 29) |	\
				((method & 0x1f) << 24) |	\
				((in & 0xff) << 16) |		\
				((out & 0xff) << 8) |		\
				((oin & 0x0f) << 4) |		\
				(oout & 0x0f))

#define FASTRPC_SCALARS(method, in, out) \
		FASTRPC_BUILD_SCALARS(0, method, in, out, 0, 0)

#define FASTRPC_CREATE_PROCESS_NARGS	6
/* Remote Method id table */
#define FASTRPC_RMID_INIT_ATTACH	0
#define FASTRPC_RMID_INIT_RELEASE	1
#define FASTRPC_RMID_INIT_CREATE	6
#define FASTRPC_RMID_INIT_CREATE_ATTR	7
#define FASTRPC_RMID_INIT_CREATE_STATIC	8

#define miscdev_to_cctx(d) container_of(d, struct fastrpc_channel_ctx, miscdev)

static const char *domains[FASTRPC_DEV_MAX] = { "adsp", "mdsp",
						"sdsp", "cdsp"};

struct fastrpc_phy_page {
	u64 addr;		/* physical address */
	u64 size;		/* size of contiguous region */
};

struct fastrpc_invoke_buf {
	u32 num;		/* number of contiguous regions */
	u32 pgidx;		/* index to start of contiguous region */
};

struct fastrpc_remote_arg {
	u64 pv;
	u64 len;
};

struct fastrpc_msg {
	int pid;		/* process group id */
	int tid;		/* thread id */
	u64 ctx;		/* invoke caller context */
	u32 handle;		/* handle to invoke */
	u32 sc;			/* scalars structure describing the data */
	u64 addr;		/* physical address */
	u64 size;		/* size of contiguous region */
};

struct fastrpc_invoke_rsp {
	u64 ctx;		/* invoke caller context */
	int retval;		/* invoke return value */
};

struct fastrpc_buf {
	struct fastrpc_user *fl;
	struct device *dev;
	void *virt;
	u64 phys;
	u64 size;
};

struct fastrpc_map {
	struct list_head node;
	struct fastrpc_user *fl;
	int fd;
	struct dma_buf *buf;
	struct sg_table *table;
	struct dma_buf_attachment *attach;
	u64 phys;
	u64 size;
	void *va;
	u64 len;
	struct kref refcount;
};

struct fastrpc_invoke_ctx {
	int nscalars;
	int nbufs;
	int retval;
	int pid;
	int tgid;
	u32 sc;
	u32 *crc;
	u64 ctxid;
	u64 msg_sz;
	struct kref refcount;
	struct list_head node; /* list of ctxs */
	struct completion work;
	struct fastrpc_msg msg;
	struct fastrpc_user *fl;
	struct fastrpc_remote_arg *rpra;
	struct fastrpc_map **maps;
	struct fastrpc_buf *buf;
	struct fastrpc_invoke_args *args;
	struct fastrpc_channel_ctx *cctx;
};

struct fastrpc_session_ctx {
	struct device *dev;
	int sid;
	bool used;
	bool valid;
};

struct fastrpc_channel_ctx {
	int domain_id;
	int sesscount;
	struct rpmsg_device *rpdev;
	struct fastrpc_session_ctx session[FASTRPC_MAX_SESSIONS];
	spinlock_t lock;
	struct idr ctx_idr;
	struct list_head users;
	struct miscdevice miscdev;
};

struct fastrpc_user {
	struct list_head user;
	struct list_head maps;
	struct list_head pending;

	struct fastrpc_channel_ctx *cctx;
	struct fastrpc_session_ctx *sctx;
	struct fastrpc_buf *init_mem;

	int tgid;
	int pd;
	/* Lock for lists */
	spinlock_t lock;
	/* lock for allocations */
	struct mutex mutex;
};

static void fastrpc_free_map(struct kref *ref)
{
	struct fastrpc_map *map;

	map = container_of(ref, struct fastrpc_map, refcount);

	if (map->table) {
		dma_buf_unmap_attachment(map->attach, map->table,
					 DMA_BIDIRECTIONAL);
		dma_buf_detach(map->buf, map->attach);
		dma_buf_put(map->buf);
	}

	kfree(map);
}

static void fastrpc_map_put(struct fastrpc_map *map)
{
	if (map)
		kref_put(&map->refcount, fastrpc_free_map);
}

static void fastrpc_map_get(struct fastrpc_map *map)
{
	if (map)
		kref_get(&map->refcount);
}

static int fastrpc_map_find(struct fastrpc_user *fl, int fd,
			    struct fastrpc_map **ppmap)
{
	struct fastrpc_map *map = NULL;

	mutex_lock(&fl->mutex);
	list_for_each_entry(map, &fl->maps, node) {
		if (map->fd == fd) {
			fastrpc_map_get(map);
			*ppmap = map;
			mutex_unlock(&fl->mutex);
			return 0;
		}
	}
	mutex_unlock(&fl->mutex);

	return -ENOENT;
}

static void fastrpc_buf_free(struct fastrpc_buf *buf)
{
	dma_free_coherent(buf->dev, buf->size, buf->virt,
			  FASTRPC_PHYS(buf->phys));
	kfree(buf);
}

static int fastrpc_buf_alloc(struct fastrpc_user *fl, struct device *dev,
			     u64 size, struct fastrpc_buf **obuf)
{
	struct fastrpc_buf *buf;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	buf->fl = fl;
	buf->virt = NULL;
	buf->phys = 0;
	buf->size = size;
	buf->dev = dev;

	buf->virt = dma_alloc_coherent(dev, buf->size, (dma_addr_t *)&buf->phys,
				       GFP_KERNEL);
	if (!buf->virt) {
		kfree(buf);
		return -ENOMEM;
	}

	if (fl->sctx && fl->sctx->sid)
		buf->phys += ((u64)fl->sctx->sid << 32);

	*obuf = buf;

	return 0;
}

static void fastrpc_context_free(struct kref *ref)
{
	struct fastrpc_invoke_ctx *ctx;
	struct fastrpc_channel_ctx *cctx;
	int i;

	ctx = container_of(ref, struct fastrpc_invoke_ctx, refcount);
	cctx = ctx->cctx;

	for (i = 0; i < ctx->nscalars; i++)
		fastrpc_map_put(ctx->maps[i]);

	if (ctx->buf)
		fastrpc_buf_free(ctx->buf);

	spin_lock(&cctx->lock);
	idr_remove(&cctx->ctx_idr, ctx->ctxid >> 4);
	spin_unlock(&cctx->lock);

	kfree(ctx->maps);
	kfree(ctx);
}

static void fastrpc_context_get(struct fastrpc_invoke_ctx *ctx)
{
	kref_get(&ctx->refcount);
}

static void fastrpc_context_put(struct fastrpc_invoke_ctx *ctx)
{
	kref_put(&ctx->refcount, fastrpc_context_free);
}

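/*
 * Allocate an invoke context for one remote call: reserve a context id in
 * the channel idr (shifted left by 4 so the low bits can carry the pd),
 * keep a pointer to the caller's argument array and queue the context on
 * the user's pending list until the DSP response arrives.
 */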
static struct fastrpc_invoke_ctx *fastrpc_context_alloc(
			struct fastrpc_user *user, u32 kernel, u32 sc,
			struct fastrpc_invoke_args *args)
{
	struct fastrpc_channel_ctx *cctx = user->cctx;
	struct fastrpc_invoke_ctx *ctx = NULL;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&ctx->node);
	ctx->fl = user;
	ctx->nscalars = REMOTE_SCALARS_LENGTH(sc);
	ctx->nbufs = REMOTE_SCALARS_INBUFS(sc) +
		     REMOTE_SCALARS_OUTBUFS(sc);

	if (ctx->nscalars) {
		ctx->maps = kcalloc(ctx->nscalars,
				    sizeof(*ctx->maps), GFP_KERNEL);
		if (!ctx->maps) {
			kfree(ctx);
			return ERR_PTR(-ENOMEM);
		}
		ctx->args = args;
	}

	ctx->sc = sc;
	ctx->retval = -1;
	ctx->pid = current->pid;
	ctx->tgid = user->tgid;
	ctx->cctx = cctx;
	init_completion(&ctx->work);

	spin_lock(&user->lock);
	list_add_tail(&ctx->node, &user->pending);
	spin_unlock(&user->lock);

	spin_lock(&cctx->lock);
	ret = idr_alloc_cyclic(&cctx->ctx_idr, ctx, 1,
			       FASTRPC_CTX_MAX, GFP_ATOMIC);
	if (ret < 0) {
		spin_unlock(&cctx->lock);
		goto err_idr;
	}
	ctx->ctxid = ret << 4;
	spin_unlock(&cctx->lock);

	kref_init(&ctx->refcount);

	return ctx;
err_idr:
	spin_lock(&user->lock);
	list_del(&ctx->node);
	spin_unlock(&user->lock);
	kfree(ctx->maps);
	kfree(ctx);

	return ERR_PTR(ret);
}

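/*
 * Look up an existing mapping for @fd or create a new one: take a
 * reference on the dma-buf, attach and map it for the session device and
 * encode the session id in the upper 32 bits of the device address.
 */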
static int fastrpc_map_create(struct fastrpc_user *fl, int fd,
			      u64 len, struct fastrpc_map **ppmap)
{
	struct fastrpc_session_ctx *sess = fl->sctx;
	struct fastrpc_map *map = NULL;
	int err = 0;

	if (!fastrpc_map_find(fl, fd, ppmap))
		return 0;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (!map)
		return -ENOMEM;

	INIT_LIST_HEAD(&map->node);
	map->fl = fl;
	map->fd = fd;
	map->buf = dma_buf_get(fd);
	if (IS_ERR(map->buf)) {
		err = PTR_ERR(map->buf);
		goto get_err;
	}

	map->attach = dma_buf_attach(map->buf, sess->dev);
	if (IS_ERR(map->attach)) {
		dev_err(sess->dev, "Failed to attach dmabuf\n");
		err = PTR_ERR(map->attach);
		goto attach_err;
	}

	map->table = dma_buf_map_attachment(map->attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(map->table)) {
		err = PTR_ERR(map->table);
		goto map_err;
	}

	map->phys = sg_dma_address(map->table->sgl);
	map->phys += ((u64)fl->sctx->sid << 32);
	map->size = len;
	map->va = sg_virt(map->table->sgl);
	map->len = len;
	kref_init(&map->refcount);

	spin_lock(&fl->lock);
	list_add_tail(&map->node, &fl->maps);
	spin_unlock(&fl->lock);
	*ppmap = map;

	return 0;

map_err:
	dma_buf_detach(map->buf, map->attach);
attach_err:
	dma_buf_put(map->buf);
get_err:
	kfree(map);

	return err;
}

/*
 * Fastrpc payload buffer with metadata looks like:
 *
 * >>>>>>>>  START of METADATA  <<<<<<<<<
 * +---------------------------------+
 * |           Arguments             |
 * | type:(struct fastrpc_remote_arg)|
 * |             (0 - N)             |
 * +---------------------------------+
 * |       Invoke Buffer list        |
 * | type:(struct fastrpc_invoke_buf)|
 * |             (0 - N)             |
 * +---------------------------------+
 * |         Page info list          |
 * | type:(struct fastrpc_phy_page)  |
 * |             (0 - N)             |
 * +---------------------------------+
 * |         Optional info           |
 * |(can be specific to SoC/Firmware)|
 * +---------------------------------+
 * >>>>>>>>   END of METADATA   <<<<<<<<<
 * +---------------------------------+
 * |          Inline ARGS            |
 * |             (0-N)               |
 * +---------------------------------+
 */

static int fastrpc_get_meta_size(struct fastrpc_invoke_ctx *ctx)
{
	int size = 0;

	size = (sizeof(struct fastrpc_remote_arg) +
		sizeof(struct fastrpc_invoke_buf) +
		sizeof(struct fastrpc_phy_page)) * ctx->nscalars +
		sizeof(u64) * FASTRPC_MAX_FDLIST +
		sizeof(u32) * FASTRPC_MAX_CRCLIST;

	return size;
}

static u64 fastrpc_get_payload_size(struct fastrpc_invoke_ctx *ctx, int metalen)
{
	u64 size = 0;
	int i;

	size = ALIGN(metalen, FASTRPC_ALIGN);
	for (i = 0; i < ctx->nscalars; i++) {
		if (ctx->args[i].fd == 0 || ctx->args[i].fd == -1) {
			size = ALIGN(size, FASTRPC_ALIGN);
			size += ctx->args[i].length;
		}
	}

	return size;
}

static int fastrpc_create_maps(struct fastrpc_invoke_ctx *ctx)
{
	struct device *dev = ctx->fl->sctx->dev;
	int i, err;

	for (i = 0; i < ctx->nscalars; ++i) {
		/* Make sure reserved field is set to 0 */
		if (ctx->args[i].reserved)
			return -EINVAL;

		if (ctx->args[i].fd == 0 || ctx->args[i].fd == -1 ||
		    ctx->args[i].length == 0)
			continue;

		err = fastrpc_map_create(ctx->fl, ctx->args[i].fd,
					 ctx->args[i].length, &ctx->maps[i]);
		if (err) {
			dev_err(dev, "Error Creating map %d\n", err);
			return -EINVAL;
		}
	}

	return 0;
}

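/*
 * Marshal the invocation: map any dma-buf backed arguments, allocate one
 * coherent payload buffer sized by fastrpc_get_meta_size() plus
 * fastrpc_get_payload_size(), fill in the remote-arg/buffer/page metadata
 * and copy the inline input buffers into it.
 */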
static int fastrpc_get_args(u32 kernel, struct fastrpc_invoke_ctx *ctx)
{
	struct device *dev = ctx->fl->sctx->dev;
	struct fastrpc_remote_arg *rpra;
	struct fastrpc_invoke_buf *list;
	struct fastrpc_phy_page *pages;
	int inbufs, i, err = 0;
	u64 rlen, pkt_size;
	uintptr_t args;
	int metalen;

	inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);
	metalen = fastrpc_get_meta_size(ctx);
	pkt_size = fastrpc_get_payload_size(ctx, metalen);

	err = fastrpc_create_maps(ctx);
	if (err)
		return err;

	ctx->msg_sz = pkt_size;

	err = fastrpc_buf_alloc(ctx->fl, dev, pkt_size, &ctx->buf);
	if (err)
		return err;

	rpra = ctx->buf->virt;
	list = ctx->buf->virt + ctx->nscalars * sizeof(*rpra);
	pages = ctx->buf->virt + ctx->nscalars * (sizeof(*list) +
		sizeof(*rpra));
	args = (uintptr_t)ctx->buf->virt + metalen;
	rlen = pkt_size - metalen;
	ctx->rpra = rpra;

	for (i = 0; i < ctx->nbufs; ++i) {
		u64 len = ctx->args[i].length;

		rpra[i].pv = 0;
		rpra[i].len = len;
		list[i].num = len ? 1 : 0;
		list[i].pgidx = i;

		if (!len)
			continue;

		pages[i].size = roundup(len, PAGE_SIZE);

		if (ctx->maps[i]) {
			rpra[i].pv = (u64) ctx->args[i].ptr;
			pages[i].addr = ctx->maps[i]->phys;
		} else {
			rlen -= ALIGN(args, FASTRPC_ALIGN) - args;
			args = ALIGN(args, FASTRPC_ALIGN);
			if (rlen < len)
				goto bail;

			rpra[i].pv = args;
			pages[i].addr = ctx->buf->phys + (pkt_size - rlen);
			pages[i].addr = pages[i].addr & PAGE_MASK;
			args = args + len;
			rlen -= len;
		}

		if (i < inbufs && !ctx->maps[i]) {
			void *dst = (void *)(uintptr_t)rpra[i].pv;
			void *src = (void *)(uintptr_t)ctx->args[i].ptr;

			if (!kernel) {
				if (copy_from_user(dst, (void __user *)src,
						   len)) {
					err = -EFAULT;
					goto bail;
				}
			} else {
				memcpy(dst, src, len);
			}
		}
	}

	for (i = ctx->nbufs; i < ctx->nscalars; ++i) {
		rpra[i].pv = (u64) ctx->args[i].ptr;
		rpra[i].len = ctx->args[i].length;
		list[i].num = ctx->args[i].length ? 1 : 0;
		list[i].pgidx = i;
		pages[i].addr = ctx->maps[i]->phys;
		pages[i].size = ctx->maps[i]->size;
	}

bail:
	if (err)
		dev_err(dev, "Error: get invoke args failed:%d\n", err);

	return err;
}

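/*
 * Copy the output-buffer contents written by the DSP back into the
 * caller's buffers: copy_to_user() for userspace callers, memcpy() for
 * in-kernel invocations.
 */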
static int fastrpc_put_args(struct fastrpc_invoke_ctx *ctx,
			    u32 kernel)
{
	struct fastrpc_remote_arg *rpra = ctx->rpra;
	int i, inbufs;

	inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);

	for (i = inbufs; i < ctx->nbufs; ++i) {
		void *src = (void *)(uintptr_t)rpra[i].pv;
		void *dst = (void *)(uintptr_t)ctx->args[i].ptr;
		u64 len = rpra[i].len;

		if (!kernel) {
			if (copy_to_user((void __user *)dst, src, len))
				return -EFAULT;
		} else {
			memcpy(dst, src, len);
		}
	}

	return 0;
}

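/*
 * Fill the fastrpc_msg for this context and hand it to the rpmsg endpoint.
 * A reference is taken on the context here; it is dropped when the DSP's
 * response is handled in fastrpc_rpmsg_callback().
 */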
static int fastrpc_invoke_send(struct fastrpc_session_ctx *sctx,
			       struct fastrpc_invoke_ctx *ctx,
			       u32 kernel, uint32_t handle)
{
	struct fastrpc_channel_ctx *cctx;
	struct fastrpc_user *fl = ctx->fl;
	struct fastrpc_msg *msg = &ctx->msg;

	cctx = fl->cctx;
	msg->pid = fl->tgid;
	msg->tid = current->pid;

	if (kernel)
		msg->pid = 0;

	msg->ctx = ctx->ctxid | fl->pd;
	msg->handle = handle;
	msg->sc = ctx->sc;
	msg->addr = ctx->buf ? ctx->buf->phys : 0;
	msg->size = roundup(ctx->msg_sz, PAGE_SIZE);
	fastrpc_context_get(ctx);

	return rpmsg_send(cctx->rpdev->ept, (void *)msg, sizeof(*msg));
}

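/*
 * Common invocation path for the ioctl interface and in-kernel users
 * (kernel != 0): marshal the arguments, send the message to the DSP,
 * wait (interruptibly) for the response completion and then unmarshal
 * the output buffers.
 */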
static int fastrpc_internal_invoke(struct fastrpc_user *fl, u32 kernel,
				   u32 handle, u32 sc,
				   struct fastrpc_invoke_args *args)
{
	struct fastrpc_invoke_ctx *ctx = NULL;
	int err = 0;

	if (!fl->sctx)
		return -EINVAL;

	ctx = fastrpc_context_alloc(fl, kernel, sc, args);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	if (ctx->nscalars) {
		err = fastrpc_get_args(kernel, ctx);
		if (err)
			goto bail;
	}

	/* Send invoke buffer to remote dsp */
	err = fastrpc_invoke_send(fl->sctx, ctx, kernel, handle);
	if (err)
		goto bail;

	/* Wait for remote dsp to respond or time out */
	err = wait_for_completion_interruptible(&ctx->work);
	if (err)
		goto bail;

	/* Check the response from remote dsp */
	err = ctx->retval;
	if (err)
		goto bail;

	if (ctx->nscalars) {
		/* populate all the output buffers with results */
		err = fastrpc_put_args(ctx, kernel);
		if (err)
			goto bail;
	}

bail:
	/* We are done with this compute context, remove it from pending list */
	spin_lock(&fl->lock);
	list_del(&ctx->node);
	spin_unlock(&fl->lock);
	fastrpc_context_put(ctx);

	if (err)
		dev_dbg(fl->sctx->dev, "Error: Invoke Failed %d\n", err);

	return err;
}

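/*
 * Spawn a new dynamic process on the DSP: pass the process group id, the
 * process name, the file supplied by userspace (init.filefd) and a freshly
 * allocated init memory region to FASTRPC_RMID_INIT_CREATE(_ATTR).
 */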
static int fastrpc_init_create_process(struct fastrpc_user *fl,
					char __user *argp)
{
	struct fastrpc_init_create init;
	struct fastrpc_invoke_args *args;
	struct fastrpc_phy_page pages[1];
	struct fastrpc_map *map = NULL;
	struct fastrpc_buf *imem = NULL;
	int memlen;
	int err;
	struct {
		int pgid;
		u32 namelen;
		u32 filelen;
		u32 pageslen;
		u32 attrs;
		u32 siglen;
	} inbuf;
	u32 sc;

	args = kcalloc(FASTRPC_CREATE_PROCESS_NARGS, sizeof(*args), GFP_KERNEL);
	if (!args)
		return -ENOMEM;

	if (copy_from_user(&init, argp, sizeof(init))) {
		err = -EFAULT;
		goto bail;
	}

	if (init.filelen > INIT_FILELEN_MAX) {
		err = -EINVAL;
		goto bail;
	}

	inbuf.pgid = fl->tgid;
	inbuf.namelen = strlen(current->comm) + 1;
	inbuf.filelen = init.filelen;
	inbuf.pageslen = 1;
	inbuf.attrs = init.attrs;
	inbuf.siglen = init.siglen;
	fl->pd = 1;

	if (init.filelen && init.filefd) {
		err = fastrpc_map_create(fl, init.filefd, init.filelen, &map);
		if (err)
			goto bail;
	}

	memlen = ALIGN(max(INIT_FILELEN_MAX, (int)init.filelen * 4),
		       1024 * 1024);
	err = fastrpc_buf_alloc(fl, fl->sctx->dev, memlen,
				&imem);
	if (err) {
		fastrpc_map_put(map);
		goto bail;
	}

	fl->init_mem = imem;
	args[0].ptr = (u64)(uintptr_t)&inbuf;
	args[0].length = sizeof(inbuf);
	args[0].fd = -1;

	args[1].ptr = (u64)(uintptr_t)current->comm;
	args[1].length = inbuf.namelen;
	args[1].fd = -1;

	args[2].ptr = (u64) init.file;
	args[2].length = inbuf.filelen;
	args[2].fd = init.filefd;

	pages[0].addr = imem->phys;
	pages[0].size = imem->size;

	args[3].ptr = (u64)(uintptr_t) pages;
	args[3].length = 1 * sizeof(*pages);
	args[3].fd = -1;

	args[4].ptr = (u64)(uintptr_t)&inbuf.attrs;
	args[4].length = sizeof(inbuf.attrs);
	args[4].fd = -1;

	args[5].ptr = (u64)(uintptr_t) &inbuf.siglen;
	args[5].length = sizeof(inbuf.siglen);
	args[5].fd = -1;

	sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE, 4, 0);
	if (init.attrs)
		sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE_ATTR, 6, 0);

	err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
				      sc, args);

	if (err) {
		fastrpc_map_put(map);
		fastrpc_buf_free(imem);
	}

bail:
	kfree(args);

	return err;
}

static struct fastrpc_session_ctx *fastrpc_session_alloc(
					struct fastrpc_channel_ctx *cctx)
{
	struct fastrpc_session_ctx *session = NULL;
	int i;

	spin_lock(&cctx->lock);
	for (i = 0; i < cctx->sesscount; i++) {
		if (!cctx->session[i].used && cctx->session[i].valid) {
			cctx->session[i].used = true;
			session = &cctx->session[i];
			break;
		}
	}
	spin_unlock(&cctx->lock);

	return session;
}

static void fastrpc_session_free(struct fastrpc_channel_ctx *cctx,
				 struct fastrpc_session_ctx *session)
{
	spin_lock(&cctx->lock);
	session->used = false;
	spin_unlock(&cctx->lock);
}

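/*
 * Ask the DSP (FASTRPC_RMID_INIT_RELEASE) to tear down the remote process
 * associated with this client's tgid.
 */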
static int fastrpc_release_current_dsp_process(struct fastrpc_user *fl)
{
	struct fastrpc_invoke_args args[1];
	int tgid = 0;
	u32 sc;

	tgid = fl->tgid;
	args[0].ptr = (u64)(uintptr_t) &tgid;
	args[0].length = sizeof(tgid);
	args[0].fd = -1;
	args[0].reserved = 0;
	sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_RELEASE, 1, 0);

	return fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
				       sc, &args[0]);
}

static int fastrpc_device_release(struct inode *inode, struct file *file)
{
	struct fastrpc_user *fl = (struct fastrpc_user *)file->private_data;
	struct fastrpc_channel_ctx *cctx = fl->cctx;
	struct fastrpc_invoke_ctx *ctx, *n;
	struct fastrpc_map *map, *m;

	fastrpc_release_current_dsp_process(fl);

	spin_lock(&cctx->lock);
	list_del(&fl->user);
	spin_unlock(&cctx->lock);

	if (fl->init_mem)
		fastrpc_buf_free(fl->init_mem);

	list_for_each_entry_safe(ctx, n, &fl->pending, node) {
		list_del(&ctx->node);
		fastrpc_context_put(ctx);
	}

	list_for_each_entry_safe(map, m, &fl->maps, node) {
		list_del(&map->node);
		fastrpc_map_put(map);
	}

	fastrpc_session_free(cctx, fl->sctx);

	mutex_destroy(&fl->mutex);
	kfree(fl);
	file->private_data = NULL;

	return 0;
}

static int fastrpc_device_open(struct inode *inode, struct file *filp)
{
	struct fastrpc_channel_ctx *cctx = miscdev_to_cctx(filp->private_data);
	struct fastrpc_user *fl = NULL;

	fl = kzalloc(sizeof(*fl), GFP_KERNEL);
	if (!fl)
		return -ENOMEM;

	filp->private_data = fl;
	spin_lock_init(&fl->lock);
	mutex_init(&fl->mutex);
	INIT_LIST_HEAD(&fl->pending);
	INIT_LIST_HEAD(&fl->maps);
	INIT_LIST_HEAD(&fl->user);
	fl->tgid = current->tgid;
	fl->cctx = cctx;
	spin_lock(&cctx->lock);
	list_add_tail(&fl->user, &cctx->users);
	spin_unlock(&cctx->lock);
	fl->sctx = fastrpc_session_alloc(cctx);

	return 0;
}

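/*
 * Attach this client to the remote processor's existing process domain
 * (FASTRPC_RMID_INIT_ATTACH) rather than spawning a new one; fl->pd is
 * left at 0 in this mode.
 */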
static int fastrpc_init_attach(struct fastrpc_user *fl)
{
	struct fastrpc_invoke_args args[1];
	int tgid = fl->tgid;
	u32 sc;

	args[0].ptr = (u64)(uintptr_t) &tgid;
	args[0].length = sizeof(tgid);
	args[0].fd = -1;
	args[0].reserved = 0;
	sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_ATTACH, 1, 0);
	fl->pd = 0;

	return fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
				       sc, &args[0]);
}

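/*
 * FASTRPC_IOCTL_INVOKE handler: copy the invoke descriptor and its scalar
 * argument array in from userspace, then hand off to
 * fastrpc_internal_invoke().
 */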
static int fastrpc_invoke(struct fastrpc_user *fl, char __user *argp)
{
	struct fastrpc_invoke_args *args = NULL;
	struct fastrpc_invoke inv;
	u32 nscalars;
	int err;

	if (copy_from_user(&inv, argp, sizeof(inv)))
		return -EFAULT;

	/* nscalars is truncated here to max supported value */
	nscalars = REMOTE_SCALARS_LENGTH(inv.sc);
	if (nscalars) {
		args = kcalloc(nscalars, sizeof(*args), GFP_KERNEL);
		if (!args)
			return -ENOMEM;

		if (copy_from_user(args, (void __user *)(uintptr_t)inv.args,
				   nscalars * sizeof(*args))) {
			kfree(args);
			return -EFAULT;
		}
	}

	err = fastrpc_internal_invoke(fl, false, inv.handle, inv.sc, args);
	kfree(args);

	return err;
}

static long fastrpc_device_ioctl(struct file *file, unsigned int cmd,
				 unsigned long arg)
{
	struct fastrpc_user *fl = (struct fastrpc_user *)file->private_data;
	char __user *argp = (char __user *)arg;
	int err;

	switch (cmd) {
	case FASTRPC_IOCTL_INVOKE:
		err = fastrpc_invoke(fl, argp);
		break;
	case FASTRPC_IOCTL_INIT_ATTACH:
		err = fastrpc_init_attach(fl);
		break;
	case FASTRPC_IOCTL_INIT_CREATE:
		err = fastrpc_init_create_process(fl, argp);
		break;
	default:
		err = -ENOTTY;
		break;
	}

	return err;
}

static const struct file_operations fastrpc_fops = {
	.open = fastrpc_device_open,
	.release = fastrpc_device_release,
	.unlocked_ioctl = fastrpc_device_ioctl,
	.compat_ioctl = fastrpc_device_ioctl,
};

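/*
 * Probe a "qcom,fastrpc-compute-cb" child node: register it as one session
 * of the parent channel, read its session id from the "reg" property and
 * optionally clone it "qcom,nsessions" times.
 */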
static int fastrpc_cb_probe(struct platform_device *pdev)
{
	struct fastrpc_channel_ctx *cctx;
	struct fastrpc_session_ctx *sess;
	struct device *dev = &pdev->dev;
	int i, sessions = 0;

	cctx = dev_get_drvdata(dev->parent);
	if (!cctx)
		return -EINVAL;

	of_property_read_u32(dev->of_node, "qcom,nsessions", &sessions);

	spin_lock(&cctx->lock);
	sess = &cctx->session[cctx->sesscount];
	sess->used = false;
	sess->valid = true;
	sess->dev = dev;
	dev_set_drvdata(dev, sess);

	if (of_property_read_u32(dev->of_node, "reg", &sess->sid))
		dev_info(dev, "FastRPC Session ID not specified in DT\n");

	if (sessions > 0) {
		struct fastrpc_session_ctx *dup_sess;

		for (i = 1; i < sessions; i++) {
			if (cctx->sesscount++ >= FASTRPC_MAX_SESSIONS)
				break;
			dup_sess = &cctx->session[cctx->sesscount];
			memcpy(dup_sess, sess, sizeof(*dup_sess));
		}
	}
	cctx->sesscount++;
	spin_unlock(&cctx->lock);
	dma_set_mask(dev, DMA_BIT_MASK(32));

	return 0;
}

static int fastrpc_cb_remove(struct platform_device *pdev)
{
	struct fastrpc_channel_ctx *cctx = dev_get_drvdata(pdev->dev.parent);
	struct fastrpc_session_ctx *sess = dev_get_drvdata(&pdev->dev);
	int i;

	spin_lock(&cctx->lock);
	for (i = 1; i < FASTRPC_MAX_SESSIONS; i++) {
		if (cctx->session[i].sid == sess->sid) {
			cctx->session[i].valid = false;
			cctx->sesscount--;
		}
	}
	spin_unlock(&cctx->lock);

	return 0;
}

static const struct of_device_id fastrpc_match_table[] = {
	{ .compatible = "qcom,fastrpc-compute-cb", },
	{}
};

static struct platform_driver fastrpc_cb_driver = {
	.probe = fastrpc_cb_probe,
	.remove = fastrpc_cb_remove,
	.driver = {
		.name = "qcom,fastrpc-cb",
		.of_match_table = fastrpc_match_table,
		.suppress_bind_attrs = true,
	},
};

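/*
 * One rpmsg channel exists per DSP domain (adsp/mdsp/sdsp/cdsp). Probe
 * registers a fastrpc-<domain> misc device for it and populates the
 * compute context-bank child nodes described in DT.
 */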
static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev)
{
	struct device *rdev = &rpdev->dev;
	struct fastrpc_channel_ctx *data;
	int i, err, domain_id = -1;
	const char *domain;

	data = devm_kzalloc(rdev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	err = of_property_read_string(rdev->of_node, "label", &domain);
	if (err) {
		dev_info(rdev, "FastRPC Domain not specified in DT\n");
		return err;
	}

	for (i = 0; i <= CDSP_DOMAIN_ID; i++) {
		if (!strcmp(domains[i], domain)) {
			domain_id = i;
			break;
		}
	}

	if (domain_id < 0) {
		dev_info(rdev, "FastRPC Invalid Domain ID %d\n", domain_id);
		return -EINVAL;
	}

	data->miscdev.minor = MISC_DYNAMIC_MINOR;
	data->miscdev.name = kasprintf(GFP_KERNEL, "fastrpc-%s",
				       domains[domain_id]);
	data->miscdev.fops = &fastrpc_fops;
	err = misc_register(&data->miscdev);
	if (err)
		return err;

	dev_set_drvdata(&rpdev->dev, data);
	dma_set_mask_and_coherent(rdev, DMA_BIT_MASK(32));
	INIT_LIST_HEAD(&data->users);
	spin_lock_init(&data->lock);
	idr_init(&data->ctx_idr);
	data->domain_id = domain_id;
	data->rpdev = rpdev;

	return of_platform_populate(rdev->of_node, NULL, NULL, rdev);
}

static void fastrpc_notify_users(struct fastrpc_user *user)
{
	struct fastrpc_invoke_ctx *ctx;

	spin_lock(&user->lock);
	list_for_each_entry(ctx, &user->pending, node)
		complete(&ctx->work);
	spin_unlock(&user->lock);
}

static void fastrpc_rpmsg_remove(struct rpmsg_device *rpdev)
{
	struct fastrpc_channel_ctx *cctx = dev_get_drvdata(&rpdev->dev);
	struct fastrpc_user *user;

	spin_lock(&cctx->lock);
	list_for_each_entry(user, &cctx->users, user)
		fastrpc_notify_users(user);
	spin_unlock(&cctx->lock);

	misc_deregister(&cctx->miscdev);
	of_platform_depopulate(&rpdev->dev);
	/* cctx is devm-allocated; it is released along with the rpmsg device */
}

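/*
 * rpmsg receive path: look up the invoke context from the id carried in
 * the response, record the DSP's return value, complete the waiter and
 * drop the reference taken at send time.
 */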
static int fastrpc_rpmsg_callback(struct rpmsg_device *rpdev, void *data,
				  int len, void *priv, u32 addr)
{
	struct fastrpc_channel_ctx *cctx = dev_get_drvdata(&rpdev->dev);
	struct fastrpc_invoke_rsp *rsp = data;
	struct fastrpc_invoke_ctx *ctx;
	unsigned long flags;
	unsigned long ctxid;

	if (len < sizeof(*rsp))
		return -EINVAL;

	ctxid = ((rsp->ctx & FASTRPC_CTXID_MASK) >> 4);

	spin_lock_irqsave(&cctx->lock, flags);
	ctx = idr_find(&cctx->ctx_idr, ctxid);
	spin_unlock_irqrestore(&cctx->lock, flags);

	if (!ctx) {
		dev_err(&rpdev->dev, "No context ID matches response\n");
		return -ENOENT;
	}

	ctx->retval = rsp->retval;
	complete(&ctx->work);
	fastrpc_context_put(ctx);

	return 0;
}

static const struct of_device_id fastrpc_rpmsg_of_match[] = {
	{ .compatible = "qcom,fastrpc" },
	{ },
};
MODULE_DEVICE_TABLE(of, fastrpc_rpmsg_of_match);

static struct rpmsg_driver fastrpc_driver = {
	.probe = fastrpc_rpmsg_probe,
	.remove = fastrpc_rpmsg_remove,
	.callback = fastrpc_rpmsg_callback,
	.drv = {
		.name = "qcom,fastrpc",
		.of_match_table = fastrpc_rpmsg_of_match,
	},
};

static int fastrpc_init(void)
{
	int ret;

	ret = platform_driver_register(&fastrpc_cb_driver);
	if (ret < 0) {
		pr_err("fastrpc: failed to register cb driver\n");
		return ret;
	}

	ret = register_rpmsg_driver(&fastrpc_driver);
	if (ret < 0) {
		pr_err("fastrpc: failed to register rpmsg driver\n");
		platform_driver_unregister(&fastrpc_cb_driver);
		return ret;
	}

	return 0;
}
module_init(fastrpc_init);

static void fastrpc_exit(void)
{
	platform_driver_unregister(&fastrpc_cb_driver);
	unregister_rpmsg_driver(&fastrpc_driver);
}
module_exit(fastrpc_exit);

MODULE_LICENSE("GPL v2");