// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015-2021, Linaro Limited
 * Copyright (c) 2016, EPAM Systems
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/arm-smccc.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irqdomain.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/tee_drv.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include "optee_private.h"
#include "optee_smc.h"
#include "optee_rpc_cmd.h"
#define CREATE_TRACE_POINTS
#include "optee_trace.h"

/*
 * This file implements the SMC ABI used when communicating with secure world
 * OP-TEE OS via raw SMCs.
 * This file is divided into the following sections:
 * 1. Convert between struct tee_param and struct optee_msg_param
 * 2. Low level support functions to register shared memory in secure world
 * 3. Dynamic shared memory pool based on alloc_pages()
 * 4. Do a normal scheduled call into secure world
 * 5. Asynchronous notification
 * 6. Driver initialization
 */

#define OPTEE_SHM_NUM_PRIV_PAGES	CONFIG_OPTEE_SHM_NUM_PRIV_PAGES

/*
 * 1. Convert between struct tee_param and struct optee_msg_param
 *
 * optee_from_msg_param() and optee_to_msg_param() are the main
 * functions.
 */
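
/*
 * The conversion helpers below map attribute types with plain arithmetic
 * on the constant values. This relies on the INPUT/OUTPUT/INOUT constants
 * being declared consecutively and in the same order on both the
 * TEE_IOCTL_PARAM_ATTR_TYPE_* and OPTEE_MSG_ATTR_TYPE_* sides, a property
 * the current headers satisfy.
 */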

static int from_msg_param_tmp_mem(struct tee_param *p, u32 attr,
				  const struct optee_msg_param *mp)
{
	struct tee_shm *shm;
	phys_addr_t pa;
	int rc;

	p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT +
		  attr - OPTEE_MSG_ATTR_TYPE_TMEM_INPUT;
	p->u.memref.size = mp->u.tmem.size;
	shm = (struct tee_shm *)(unsigned long)mp->u.tmem.shm_ref;
	if (!shm) {
		p->u.memref.shm_offs = 0;
		p->u.memref.shm = NULL;
		return 0;
	}

	rc = tee_shm_get_pa(shm, 0, &pa);
	if (rc)
		return rc;

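	/*
	 * tmem parameters carry a physical address; recover the caller's
	 * offset within the shm object from the object's physical base.
	 */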
	p->u.memref.shm_offs = mp->u.tmem.buf_ptr - pa;
	p->u.memref.shm = shm;

	/* Check that the memref is covered by the shm object */
	if (p->u.memref.size) {
		size_t o = p->u.memref.shm_offs +
			   p->u.memref.size - 1;

		rc = tee_shm_get_pa(shm, o, NULL);
		if (rc)
			return rc;
	}

	return 0;
}

static void from_msg_param_reg_mem(struct tee_param *p, u32 attr,
				   const struct optee_msg_param *mp)
{
	struct tee_shm *shm;

	p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT +
		  attr - OPTEE_MSG_ATTR_TYPE_RMEM_INPUT;
	p->u.memref.size = mp->u.rmem.size;
	shm = (struct tee_shm *)(unsigned long)mp->u.rmem.shm_ref;

	if (shm) {
		p->u.memref.shm_offs = mp->u.rmem.offs;
		p->u.memref.shm = shm;
	} else {
		p->u.memref.shm_offs = 0;
		p->u.memref.shm = NULL;
	}
}

/**
 * optee_from_msg_param() - convert from OPTEE_MSG parameters to
 *			    struct tee_param
 * @optee: main service struct
 * @params: subsystem internal parameter representation
 * @num_params: number of elements in the parameter arrays
 * @msg_params: OPTEE_MSG parameters
 * Returns 0 on success or <0 on failure
 */
static int optee_from_msg_param(struct optee *optee, struct tee_param *params,
				size_t num_params,
				const struct optee_msg_param *msg_params)
{
	int rc;
	size_t n;

	for (n = 0; n < num_params; n++) {
		struct tee_param *p = params + n;
		const struct optee_msg_param *mp = msg_params + n;
		u32 attr = mp->attr & OPTEE_MSG_ATTR_TYPE_MASK;

		switch (attr) {
		case OPTEE_MSG_ATTR_TYPE_NONE:
			p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_NONE;
			memset(&p->u, 0, sizeof(p->u));
			break;
		case OPTEE_MSG_ATTR_TYPE_VALUE_INPUT:
		case OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT:
		case OPTEE_MSG_ATTR_TYPE_VALUE_INOUT:
			optee_from_msg_param_value(p, attr, mp);
			break;
		case OPTEE_MSG_ATTR_TYPE_TMEM_INPUT:
		case OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT:
		case OPTEE_MSG_ATTR_TYPE_TMEM_INOUT:
			rc = from_msg_param_tmp_mem(p, attr, mp);
			if (rc)
				return rc;
			break;
		case OPTEE_MSG_ATTR_TYPE_RMEM_INPUT:
		case OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT:
		case OPTEE_MSG_ATTR_TYPE_RMEM_INOUT:
			from_msg_param_reg_mem(p, attr, mp);
			break;
		default:
			return -EINVAL;
		}
	}
	return 0;
}

static int to_msg_param_tmp_mem(struct optee_msg_param *mp,
				const struct tee_param *p)
{
	int rc;
	phys_addr_t pa;

	mp->attr = OPTEE_MSG_ATTR_TYPE_TMEM_INPUT + p->attr -
		   TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;

	mp->u.tmem.shm_ref = (unsigned long)p->u.memref.shm;
	mp->u.tmem.size = p->u.memref.size;

	if (!p->u.memref.shm) {
		mp->u.tmem.buf_ptr = 0;
		return 0;
	}

	rc = tee_shm_get_pa(p->u.memref.shm, p->u.memref.shm_offs, &pa);
	if (rc)
		return rc;

	mp->u.tmem.buf_ptr = pa;
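	/*
	 * Tag the buffer with the predefined cache attributes of the
	 * OP-TEE message ABI, i.e. normal (write-back) cached memory.
	 */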
	mp->attr |= OPTEE_MSG_ATTR_CACHE_PREDEFINED <<
		    OPTEE_MSG_ATTR_CACHE_SHIFT;

	return 0;
}

static int to_msg_param_reg_mem(struct optee_msg_param *mp,
				const struct tee_param *p)
{
	mp->attr = OPTEE_MSG_ATTR_TYPE_RMEM_INPUT + p->attr -
		   TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;

	mp->u.rmem.shm_ref = (unsigned long)p->u.memref.shm;
	mp->u.rmem.size = p->u.memref.size;
	mp->u.rmem.offs = p->u.memref.shm_offs;
	return 0;
}

/**
 * optee_to_msg_param() - convert from struct tee_param to OPTEE_MSG parameters
 * @optee: main service struct
 * @msg_params: OPTEE_MSG parameters
 * @num_params: number of elements in the parameter arrays
 * @params: subsystem internal parameter representation
 * Returns 0 on success or <0 on failure
 */
static int optee_to_msg_param(struct optee *optee,
			      struct optee_msg_param *msg_params,
			      size_t num_params, const struct tee_param *params)
{
	int rc;
	size_t n;

	for (n = 0; n < num_params; n++) {
		const struct tee_param *p = params + n;
		struct optee_msg_param *mp = msg_params + n;

		switch (p->attr) {
		case TEE_IOCTL_PARAM_ATTR_TYPE_NONE:
			mp->attr = OPTEE_MSG_ATTR_TYPE_NONE;
			memset(&mp->u, 0, sizeof(mp->u));
			break;
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
			optee_to_msg_param_value(mp, p);
			break;
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
			if (tee_shm_is_registered(p->u.memref.shm))
				rc = to_msg_param_reg_mem(mp, p);
			else
				rc = to_msg_param_tmp_mem(mp, p);
			if (rc)
				return rc;
			break;
		default:
			return -EINVAL;
		}
	}
	return 0;
}

/*
 * 2. Low level support functions to register shared memory in secure world
 *
 * Functions to enable/disable shared memory caching in secure world, that
 * is, lazy freeing of previously allocated shared memory. Freeing is
 * performed when a request has been completed.
 *
 * Functions to register and unregister shared memory both for normal
 * clients and for tee-supplicant.
 */

/**
 * optee_enable_shm_cache() - Enables caching of some shared memory allocation
 *			      in OP-TEE
 * @optee: main service struct
 */
static void optee_enable_shm_cache(struct optee *optee)
{
	struct optee_call_waiter w;

	/* We need to retry until secure world isn't busy. */
	optee_cq_wait_init(&optee->call_queue, &w);
	while (true) {
		struct arm_smccc_res res;

		optee->smc.invoke_fn(OPTEE_SMC_ENABLE_SHM_CACHE,
				     0, 0, 0, 0, 0, 0, 0, &res);
		if (res.a0 == OPTEE_SMC_RETURN_OK)
			break;
		optee_cq_wait_for_completion(&optee->call_queue, &w);
	}
	optee_cq_wait_final(&optee->call_queue, &w);
}

/**
 * __optee_disable_shm_cache() - Disables caching of some shared memory
 *				 allocation in OP-TEE
 * @optee: main service struct
 * @is_mapped: true if the cached shared memory addresses were mapped by this
 *	       kernel, are safe to dereference, and should be freed
 */
static void __optee_disable_shm_cache(struct optee *optee, bool is_mapped)
{
	struct optee_call_waiter w;

	/* We need to retry until secure world isn't busy. */
	optee_cq_wait_init(&optee->call_queue, &w);
	while (true) {
		union {
			struct arm_smccc_res smccc;
			struct optee_smc_disable_shm_cache_result result;
		} res;

		optee->smc.invoke_fn(OPTEE_SMC_DISABLE_SHM_CACHE,
				     0, 0, 0, 0, 0, 0, 0, &res.smccc);
		if (res.result.status == OPTEE_SMC_RETURN_ENOTAVAIL)
			break; /* All shm's freed */
		if (res.result.status == OPTEE_SMC_RETURN_OK) {
			struct tee_shm *shm;

			/*
			 * Shared memory references that were not mapped by
			 * this kernel must be ignored to prevent a crash.
			 */
			if (!is_mapped)
				continue;

			shm = reg_pair_to_ptr(res.result.shm_upper32,
					      res.result.shm_lower32);
			tee_shm_free(shm);
		} else {
			optee_cq_wait_for_completion(&optee->call_queue, &w);
		}
	}
	optee_cq_wait_final(&optee->call_queue, &w);
}

/**
 * optee_disable_shm_cache() - Disables caching of mapped shared memory
 *			       allocations in OP-TEE
 * @optee: main service struct
 */
static void optee_disable_shm_cache(struct optee *optee)
{
	return __optee_disable_shm_cache(optee, true);
}

/**
 * optee_disable_unmapped_shm_cache() - Disables caching of shared memory
 *					allocations in OP-TEE which are not
 *					currently mapped
 * @optee: main service struct
 */
static void optee_disable_unmapped_shm_cache(struct optee *optee)
{
	return __optee_disable_shm_cache(optee, false);
}

#define PAGELIST_ENTRIES_PER_PAGE				\
	((OPTEE_MSG_NONCONTIG_PAGE_SIZE / sizeof(u64)) - 1)
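
/*
 * With the 4 KiB pages used by the OP-TEE message ABI this works out to
 * 4096 / 8 - 1 = 511 page addresses per pagelist page; the last u64 slot
 * is reserved for the link to the next pagelist page.
 */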

/*
 * The final entry in each pagelist page is a pointer to the next
 * pagelist page.
 */
static size_t get_pages_list_size(size_t num_entries)
{
	int pages = DIV_ROUND_UP(num_entries, PAGELIST_ENTRIES_PER_PAGE);

	return pages * OPTEE_MSG_NONCONTIG_PAGE_SIZE;
}

static u64 *optee_allocate_pages_list(size_t num_entries)
{
	return alloc_pages_exact(get_pages_list_size(num_entries), GFP_KERNEL);
}

static void optee_free_pages_list(void *list, size_t num_entries)
{
	free_pages_exact(list, get_pages_list_size(num_entries));
}

/**
 * optee_fill_pages_list() - write list of user pages to given shared
 * buffer.
 *
 * @dst: page-aligned buffer where list of pages will be stored
 * @pages: array of pages that represents shared buffer
 * @num_pages: number of entries in @pages
 * @page_offset: offset of user buffer from page start
 *
 * @dst should be big enough to hold list of user page addresses and
 * links to the next pages of buffer
 */
static void optee_fill_pages_list(u64 *dst, struct page **pages, int num_pages,
				  size_t page_offset)
{
	int n = 0;
	phys_addr_t optee_page;
	/*
	 * Refer to OPTEE_MSG_ATTR_NONCONTIG description in optee_msg.h
	 * for details.
	 */
	struct {
		u64 pages_list[PAGELIST_ENTRIES_PER_PAGE];
		u64 next_page_data;
	} *pages_data;

	/*
	 * Currently OP-TEE uses 4k page size and it does not look like
	 * this will change in the future. On the other hand, there are
	 * no known ARM architectures with page size < 4k.
	 * Thus the build assert below looks redundant. But the following
	 * code heavily relies on this assumption, so it is better to be
	 * safe than sorry.
	 */
	BUILD_BUG_ON(PAGE_SIZE < OPTEE_MSG_NONCONTIG_PAGE_SIZE);

	pages_data = (void *)dst;
	/*
	 * If a Linux page is bigger than 4k, and the user buffer offset is
	 * larger than 4k/8k/12k/etc, this will skip the first 4k chunks,
	 * because they carry no data of value to OP-TEE.
	 */
	optee_page = page_to_phys(*pages) +
		round_down(page_offset, OPTEE_MSG_NONCONTIG_PAGE_SIZE);

	while (true) {
		pages_data->pages_list[n++] = optee_page;

		if (n == PAGELIST_ENTRIES_PER_PAGE) {
			pages_data->next_page_data =
				virt_to_phys(pages_data + 1);
			pages_data++;
			n = 0;
		}

		optee_page += OPTEE_MSG_NONCONTIG_PAGE_SIZE;
		if (!(optee_page & ~PAGE_MASK)) {
			if (!--num_pages)
				break;
			pages++;
			optee_page = page_to_phys(*pages);
		}
	}
}

static int optee_shm_register(struct tee_context *ctx, struct tee_shm *shm,
			      struct page **pages, size_t num_pages,
			      unsigned long start)
{
	struct optee *optee = tee_get_drvdata(ctx->teedev);
	struct optee_msg_arg *msg_arg;
	struct tee_shm *shm_arg;
	u64 *pages_list;
	int rc;

	if (!num_pages)
		return -EINVAL;

	rc = optee_check_mem_type(start, num_pages);
	if (rc)
		return rc;

	pages_list = optee_allocate_pages_list(num_pages);
	if (!pages_list)
		return -ENOMEM;

	shm_arg = optee_get_msg_arg(ctx, 1, &msg_arg);
	if (IS_ERR(shm_arg)) {
		rc = PTR_ERR(shm_arg);
		goto out;
	}

	optee_fill_pages_list(pages_list, pages, num_pages,
			      tee_shm_get_page_offset(shm));

	msg_arg->cmd = OPTEE_MSG_CMD_REGISTER_SHM;
	msg_arg->params->attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT |
				OPTEE_MSG_ATTR_NONCONTIG;
	msg_arg->params->u.tmem.shm_ref = (unsigned long)shm;
	msg_arg->params->u.tmem.size = tee_shm_get_size(shm);
	/*
	 * In the least bits of msg_arg->params->u.tmem.buf_ptr we
	 * store buffer offset from 4k page, as described in OP-TEE ABI.
	 */
	msg_arg->params->u.tmem.buf_ptr = virt_to_phys(pages_list) |
		(tee_shm_get_page_offset(shm) & (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1));
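	/*
	 * The pagelist itself is page aligned, so the low 12 bits of its
	 * physical address are zero and free to carry the offset. For
	 * example, a list at PA 0x80001000 and a buffer offset of 0x2a0
	 * within its first 4k page would be encoded as
	 * buf_ptr == 0x800012a0 (addresses made up for illustration).
	 */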

	if (optee->ops->do_call_with_arg(ctx, shm_arg) ||
	    msg_arg->ret != TEEC_SUCCESS)
		rc = -EINVAL;

	tee_shm_free(shm_arg);
out:
	optee_free_pages_list(pages_list, num_pages);
	return rc;
}

static int optee_shm_unregister(struct tee_context *ctx, struct tee_shm *shm)
{
	struct optee *optee = tee_get_drvdata(ctx->teedev);
	struct optee_msg_arg *msg_arg;
	struct tee_shm *shm_arg;
	int rc = 0;

	shm_arg = optee_get_msg_arg(ctx, 1, &msg_arg);
	if (IS_ERR(shm_arg))
		return PTR_ERR(shm_arg);

	msg_arg->cmd = OPTEE_MSG_CMD_UNREGISTER_SHM;

	msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_RMEM_INPUT;
	msg_arg->params[0].u.rmem.shm_ref = (unsigned long)shm;

	if (optee->ops->do_call_with_arg(ctx, shm_arg) ||
	    msg_arg->ret != TEEC_SUCCESS)
		rc = -EINVAL;
	tee_shm_free(shm_arg);
	return rc;
}

static int optee_shm_register_supp(struct tee_context *ctx, struct tee_shm *shm,
				   struct page **pages, size_t num_pages,
				   unsigned long start)
{
	/*
	 * We don't want to register supplicant memory in OP-TEE.
	 * Instead, information about it will be passed in RPC code.
	 */
	return optee_check_mem_type(start, num_pages);
}

static int optee_shm_unregister_supp(struct tee_context *ctx,
				     struct tee_shm *shm)
{
	return 0;
}

/*
 * 3. Dynamic shared memory pool based on alloc_pages()
 *
 * Implements an OP-TEE specific shared memory pool which is used
 * when dynamic shared memory is supported by secure world.
 *
 * The main function is optee_shm_pool_alloc_pages().
 */

static int pool_op_alloc(struct tee_shm_pool_mgr *poolm,
			 struct tee_shm *shm, size_t size)
{
	/*
	 * Shared memory private to the OP-TEE driver doesn't need
	 * to be registered with OP-TEE.
	 */
	if (shm->flags & TEE_SHM_PRIV)
		return optee_pool_op_alloc_helper(poolm, shm, size, NULL);

	return optee_pool_op_alloc_helper(poolm, shm, size, optee_shm_register);
}

static void pool_op_free(struct tee_shm_pool_mgr *poolm,
			 struct tee_shm *shm)
{
	if (!(shm->flags & TEE_SHM_PRIV))
		optee_shm_unregister(shm->ctx, shm);

	free_pages((unsigned long)shm->kaddr, get_order(shm->size));
	shm->kaddr = NULL;
}

static void pool_op_destroy_poolmgr(struct tee_shm_pool_mgr *poolm)
{
	kfree(poolm);
}

static const struct tee_shm_pool_mgr_ops pool_ops = {
	.alloc = pool_op_alloc,
	.free = pool_op_free,
	.destroy_poolmgr = pool_op_destroy_poolmgr,
};

/**
 * optee_shm_pool_alloc_pages() - create page-based allocator pool
 *
 * This pool is used when OP-TEE supports dynamic SHM. In this case
 * command buffers and such are allocated from kernel's own memory.
 */
static struct tee_shm_pool_mgr *optee_shm_pool_alloc_pages(void)
{
	struct tee_shm_pool_mgr *mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);

	if (!mgr)
		return ERR_PTR(-ENOMEM);

	mgr->ops = &pool_ops;

	return mgr;
}

/*
 * 4. Do a normal scheduled call into secure world
 *
 * The function optee_smc_do_call_with_arg() performs a normal scheduled
 * call into secure world. During this call secure world may request help
 * from normal world using RPCs, Remote Procedure Calls. This includes
 * delivery of non-secure interrupts to, for instance, allow rescheduling
 * of the current task.
 */

static void handle_rpc_func_cmd_shm_free(struct tee_context *ctx,
					 struct optee_msg_arg *arg)
{
	struct tee_shm *shm;

	arg->ret_origin = TEEC_ORIGIN_COMMS;

	if (arg->num_params != 1 ||
	    arg->params[0].attr != OPTEE_MSG_ATTR_TYPE_VALUE_INPUT) {
		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
		return;
	}

	shm = (struct tee_shm *)(unsigned long)arg->params[0].u.value.b;
	switch (arg->params[0].u.value.a) {
	case OPTEE_RPC_SHM_TYPE_APPL:
		optee_rpc_cmd_free_suppl(ctx, shm);
		break;
	case OPTEE_RPC_SHM_TYPE_KERNEL:
		tee_shm_free(shm);
		break;
	default:
		/* Return so the success code below can't clobber the error */
		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
		return;
	}
	arg->ret = TEEC_SUCCESS;
}

static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
					  struct optee_msg_arg *arg,
					  struct optee_call_ctx *call_ctx)
{
	phys_addr_t pa;
	struct tee_shm *shm;
	size_t sz;
	size_t n;

	arg->ret_origin = TEEC_ORIGIN_COMMS;

	if (!arg->num_params ||
	    arg->params[0].attr != OPTEE_MSG_ATTR_TYPE_VALUE_INPUT) {
		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
		return;
	}

	for (n = 1; n < arg->num_params; n++) {
		if (arg->params[n].attr != OPTEE_MSG_ATTR_TYPE_NONE) {
			arg->ret = TEEC_ERROR_BAD_PARAMETERS;
			return;
		}
	}

	sz = arg->params[0].u.value.b;
	switch (arg->params[0].u.value.a) {
	case OPTEE_RPC_SHM_TYPE_APPL:
		shm = optee_rpc_cmd_alloc_suppl(ctx, sz);
		break;
	case OPTEE_RPC_SHM_TYPE_KERNEL:
		shm = tee_shm_alloc(ctx, sz, TEE_SHM_MAPPED | TEE_SHM_PRIV);
		break;
	default:
		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
		return;
	}

	if (IS_ERR(shm)) {
		arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
		return;
	}

	if (tee_shm_get_pa(shm, 0, &pa)) {
		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
		goto bad;
	}

	sz = tee_shm_get_size(shm);

	if (tee_shm_is_registered(shm)) {
		struct page **pages;
		u64 *pages_list;
		size_t page_num;

		pages = tee_shm_get_pages(shm, &page_num);
		if (!pages || !page_num) {
			arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
			goto bad;
		}

		pages_list = optee_allocate_pages_list(page_num);
		if (!pages_list) {
			arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
			goto bad;
		}

		call_ctx->pages_list = pages_list;
		call_ctx->num_entries = page_num;

		arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT |
				      OPTEE_MSG_ATTR_NONCONTIG;
		/*
		 * In the least bits of u.tmem.buf_ptr we store buffer offset
		 * from 4k page, as described in OP-TEE ABI.
		 */
		arg->params[0].u.tmem.buf_ptr = virt_to_phys(pages_list) |
			(tee_shm_get_page_offset(shm) &
			 (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1));
		arg->params[0].u.tmem.size = tee_shm_get_size(shm);
		arg->params[0].u.tmem.shm_ref = (unsigned long)shm;

		optee_fill_pages_list(pages_list, pages, page_num,
				      tee_shm_get_page_offset(shm));
	} else {
		arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT;
		arg->params[0].u.tmem.buf_ptr = pa;
		arg->params[0].u.tmem.size = sz;
		arg->params[0].u.tmem.shm_ref = (unsigned long)shm;
	}

	arg->ret = TEEC_SUCCESS;
	return;
bad:
	tee_shm_free(shm);
}

static void free_pages_list(struct optee_call_ctx *call_ctx)
{
	if (call_ctx->pages_list) {
		optee_free_pages_list(call_ctx->pages_list,
				      call_ctx->num_entries);
		call_ctx->pages_list = NULL;
		call_ctx->num_entries = 0;
	}
}

static void optee_rpc_finalize_call(struct optee_call_ctx *call_ctx)
{
	free_pages_list(call_ctx);
}

static void handle_rpc_func_cmd(struct tee_context *ctx, struct optee *optee,
				struct tee_shm *shm,
				struct optee_call_ctx *call_ctx)
{
	struct optee_msg_arg *arg;

	arg = tee_shm_get_va(shm, 0);
	if (IS_ERR(arg)) {
		pr_err("%s: tee_shm_get_va %p failed\n", __func__, shm);
		return;
	}

	switch (arg->cmd) {
	case OPTEE_RPC_CMD_SHM_ALLOC:
		free_pages_list(call_ctx);
		handle_rpc_func_cmd_shm_alloc(ctx, arg, call_ctx);
		break;
	case OPTEE_RPC_CMD_SHM_FREE:
		handle_rpc_func_cmd_shm_free(ctx, arg);
		break;
	default:
		optee_rpc_cmd(ctx, optee, arg);
	}
}

/**
 * optee_handle_rpc() - handle RPC from secure world
 * @ctx: context doing the RPC
 * @param: value of registers for the RPC
 * @call_ctx: call context. Preserved during one OP-TEE invocation
 *
 * Result of RPC is written back into @param.
 */
static void optee_handle_rpc(struct tee_context *ctx,
			     struct optee_rpc_param *param,
			     struct optee_call_ctx *call_ctx)
{
	struct tee_device *teedev = ctx->teedev;
	struct optee *optee = tee_get_drvdata(teedev);
	struct tee_shm *shm;
	phys_addr_t pa;

	switch (OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0)) {
	case OPTEE_SMC_RPC_FUNC_ALLOC:
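		/*
		 * Secure world asks for a driver-private shm buffer of a1
		 * bytes. Return its physical address split across a1/a2
		 * and the tee_shm cookie across a4/a5, or zeros on failure.
		 */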
		shm = tee_shm_alloc(ctx, param->a1,
				    TEE_SHM_MAPPED | TEE_SHM_PRIV);
		if (!IS_ERR(shm) && !tee_shm_get_pa(shm, 0, &pa)) {
			reg_pair_from_64(&param->a1, &param->a2, pa);
			reg_pair_from_64(&param->a4, &param->a5,
					 (unsigned long)shm);
		} else {
			param->a1 = 0;
			param->a2 = 0;
			param->a4 = 0;
			param->a5 = 0;
		}
		break;
	case OPTEE_SMC_RPC_FUNC_FREE:
		shm = reg_pair_to_ptr(param->a1, param->a2);
		tee_shm_free(shm);
		break;
	case OPTEE_SMC_RPC_FUNC_FOREIGN_INTR:
		/*
		 * A foreign interrupt was raised while secure world was
		 * executing. Since it is handled in Linux, a dummy RPC is
		 * performed to let Linux take the interrupt through the
		 * normal vector.
		 */
		break;
	case OPTEE_SMC_RPC_FUNC_CMD:
		shm = reg_pair_to_ptr(param->a1, param->a2);
		handle_rpc_func_cmd(ctx, optee, shm, call_ctx);
		break;
	default:
		pr_warn("Unknown RPC func 0x%x\n",
			(u32)OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0));
		break;
	}

	param->a0 = OPTEE_SMC_CALL_RETURN_FROM_RPC;
}

/**
 * optee_smc_do_call_with_arg() - Do an SMC to OP-TEE in secure world
 * @ctx: calling context
 * @arg: shared memory holding the message to pass to secure world
 *
 * Does an SMC to OP-TEE in secure world and handles eventual resulting
 * Remote Procedure Calls (RPC) from OP-TEE.
 *
 * Returns return code from secure world, 0 is OK
 */
static int optee_smc_do_call_with_arg(struct tee_context *ctx,
				      struct tee_shm *arg)
{
	struct optee *optee = tee_get_drvdata(ctx->teedev);
	struct optee_call_waiter w;
	struct optee_rpc_param param = { };
	struct optee_call_ctx call_ctx = { };
	phys_addr_t parg;
	int rc;

	rc = tee_shm_get_pa(arg, 0, &parg);
	if (rc)
		return rc;

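	/* The physical address of the argument struct is passed in a1/a2 */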
	param.a0 = OPTEE_SMC_CALL_WITH_ARG;
	reg_pair_from_64(&param.a1, &param.a2, parg);
	/* Initialize waiter */
	optee_cq_wait_init(&optee->call_queue, &w);
	while (true) {
		struct arm_smccc_res res;

		trace_optee_invoke_fn_begin(&param);
		optee->smc.invoke_fn(param.a0, param.a1, param.a2, param.a3,
				     param.a4, param.a5, param.a6, param.a7,
				     &res);
		trace_optee_invoke_fn_end(&param, &res);

		if (res.a0 == OPTEE_SMC_RETURN_ETHREAD_LIMIT) {
			/*
			 * Out of threads in secure world, wait for a
			 * thread to become available.
			 */
			optee_cq_wait_for_completion(&optee->call_queue, &w);
		} else if (OPTEE_SMC_RETURN_IS_RPC(res.a0)) {
			cond_resched();
			param.a0 = res.a0;
			param.a1 = res.a1;
			param.a2 = res.a2;
			param.a3 = res.a3;
			optee_handle_rpc(ctx, &param, &call_ctx);
		} else {
			rc = res.a0;
			break;
		}
	}

	optee_rpc_finalize_call(&call_ctx);
	/*
	 * We're done with our thread in secure world, if there are any
	 * thread waiters wake up one.
	 */
	optee_cq_wait_final(&optee->call_queue, &w);

	return rc;
}

static int simple_call_with_arg(struct tee_context *ctx, u32 cmd)
{
	struct optee_msg_arg *msg_arg;
	struct tee_shm *shm;

	shm = optee_get_msg_arg(ctx, 0, &msg_arg);
	if (IS_ERR(shm))
		return PTR_ERR(shm);

	msg_arg->cmd = cmd;
	optee_smc_do_call_with_arg(ctx, shm);

	tee_shm_free(shm);
	return 0;
}

static int optee_smc_do_bottom_half(struct tee_context *ctx)
{
	return simple_call_with_arg(ctx, OPTEE_MSG_CMD_DO_BOTTOM_HALF);
}

static int optee_smc_stop_async_notif(struct tee_context *ctx)
{
	return simple_call_with_arg(ctx, OPTEE_MSG_CMD_STOP_ASYNC_NOTIF);
}

/*
 * 5. Asynchronous notification
 */

static u32 get_async_notif_value(optee_invoke_fn *invoke_fn, bool *value_valid,
				 bool *value_pending)
{
	struct arm_smccc_res res;

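	/*
	 * The SMC returns the status in a0 (0 on success), the notification
	 * value in a1 and the VALID/PENDING flag bits in a2.
	 */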
	invoke_fn(OPTEE_SMC_GET_ASYNC_NOTIF_VALUE, 0, 0, 0, 0, 0, 0, 0, &res);

	if (res.a0)
		return 0;
	*value_valid = (res.a2 & OPTEE_SMC_ASYNC_NOTIF_VALUE_VALID);
	*value_pending = (res.a2 & OPTEE_SMC_ASYNC_NOTIF_VALUE_PENDING);
	return res.a1;
}

static irqreturn_t notif_irq_handler(int irq, void *dev_id)
{
	struct optee *optee = dev_id;
	bool do_bottom_half = false;
	bool value_valid;
	bool value_pending;
	u32 value;

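	/*
	 * Drain all queued notification values; value_pending tells
	 * whether secure world has more values waiting to be fetched.
	 */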
	do {
		value = get_async_notif_value(optee->smc.invoke_fn,
					      &value_valid, &value_pending);
		if (!value_valid)
			break;

		if (value == OPTEE_SMC_ASYNC_NOTIF_VALUE_DO_BOTTOM_HALF)
			do_bottom_half = true;
		else
			optee_notif_send(optee, value);
	} while (value_pending);

	if (do_bottom_half)
		return IRQ_WAKE_THREAD;
	return IRQ_HANDLED;
}

static irqreturn_t notif_irq_thread_fn(int irq, void *dev_id)
{
	struct optee *optee = dev_id;

	optee_smc_do_bottom_half(optee->notif.ctx);

	return IRQ_HANDLED;
}

static int optee_smc_notif_init_irq(struct optee *optee, u_int irq)
{
	struct tee_context *ctx;
	int rc;

	ctx = teedev_open(optee->teedev);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	optee->notif.ctx = ctx;
	rc = request_threaded_irq(irq, notif_irq_handler,
				  notif_irq_thread_fn,
				  0, "optee_notification", optee);
	if (rc)
		goto err_close_ctx;

	optee->smc.notif_irq = irq;

	return 0;

err_close_ctx:
	teedev_close_context(optee->notif.ctx);
	optee->notif.ctx = NULL;

	return rc;
}

static void optee_smc_notif_uninit_irq(struct optee *optee)
{
	if (optee->notif.ctx) {
		optee_smc_stop_async_notif(optee->notif.ctx);
		if (optee->smc.notif_irq) {
			free_irq(optee->smc.notif_irq, optee);
			irq_dispose_mapping(optee->smc.notif_irq);
		}

		/*
		 * The thread normally working with optee->notif.ctx was
		 * stopped with free_irq() above.
		 *
		 * Note we're not using tee_client_close_context() since
		 * we have already called tee_device_put() while
		 * initializing to avoid a circular reference counting.
		 */
		teedev_close_context(optee->notif.ctx);
	}
}

/*
 * 6. Driver initialization
 *
 * During driver initialization secure world is probed to find out which
 * features it supports so the driver can be initialized with a matching
 * configuration. This involves for instance support for dynamic shared
 * memory instead of a static memory carveout.
 */

static void optee_get_version(struct tee_device *teedev,
			      struct tee_ioctl_version_data *vers)
{
	struct tee_ioctl_version_data v = {
		.impl_id = TEE_IMPL_ID_OPTEE,
		.impl_caps = TEE_OPTEE_CAP_TZ,
		.gen_caps = TEE_GEN_CAP_GP,
	};
	struct optee *optee = tee_get_drvdata(teedev);

	if (optee->smc.sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
		v.gen_caps |= TEE_GEN_CAP_REG_MEM;
	if (optee->smc.sec_caps & OPTEE_SMC_SEC_CAP_MEMREF_NULL)
		v.gen_caps |= TEE_GEN_CAP_MEMREF_NULL;
	*vers = v;
}

static int optee_smc_open(struct tee_context *ctx)
{
	struct optee *optee = tee_get_drvdata(ctx->teedev);
	u32 sec_caps = optee->smc.sec_caps;

	return optee_open(ctx, sec_caps & OPTEE_SMC_SEC_CAP_MEMREF_NULL);
}

static const struct tee_driver_ops optee_clnt_ops = {
	.get_version = optee_get_version,
	.open = optee_smc_open,
	.release = optee_release,
	.open_session = optee_open_session,
	.close_session = optee_close_session,
	.invoke_func = optee_invoke_func,
	.cancel_req = optee_cancel_req,
	.shm_register = optee_shm_register,
	.shm_unregister = optee_shm_unregister,
};

static const struct tee_desc optee_clnt_desc = {
	.name = DRIVER_NAME "-clnt",
	.ops = &optee_clnt_ops,
	.owner = THIS_MODULE,
};

static const struct tee_driver_ops optee_supp_ops = {
	.get_version = optee_get_version,
	.open = optee_smc_open,
	.release = optee_release_supp,
	.supp_recv = optee_supp_recv,
	.supp_send = optee_supp_send,
	.shm_register = optee_shm_register_supp,
	.shm_unregister = optee_shm_unregister_supp,
};

static const struct tee_desc optee_supp_desc = {
	.name = DRIVER_NAME "-supp",
	.ops = &optee_supp_ops,
	.owner = THIS_MODULE,
	.flags = TEE_DESC_PRIVILEGED,
};

static const struct optee_ops optee_ops = {
	.do_call_with_arg = optee_smc_do_call_with_arg,
	.to_msg_param = optee_to_msg_param,
	.from_msg_param = optee_from_msg_param,
};

static int enable_async_notif(optee_invoke_fn *invoke_fn)
{
	struct arm_smccc_res res;

	invoke_fn(OPTEE_SMC_ENABLE_ASYNC_NOTIF, 0, 0, 0, 0, 0, 0, 0, &res);

	if (res.a0)
		return -EINVAL;
	return 0;
}

static bool optee_msg_api_uid_is_optee_api(optee_invoke_fn *invoke_fn)
{
	struct arm_smccc_res res;

	invoke_fn(OPTEE_SMC_CALLS_UID, 0, 0, 0, 0, 0, 0, 0, &res);

	if (res.a0 == OPTEE_MSG_UID_0 && res.a1 == OPTEE_MSG_UID_1 &&
	    res.a2 == OPTEE_MSG_UID_2 && res.a3 == OPTEE_MSG_UID_3)
		return true;
	return false;
}

static void optee_msg_get_os_revision(optee_invoke_fn *invoke_fn)
{
	union {
		struct arm_smccc_res smccc;
		struct optee_smc_call_get_os_revision_result result;
	} res = {
		.result = {
			.build_id = 0
		}
	};

	invoke_fn(OPTEE_SMC_CALL_GET_OS_REVISION, 0, 0, 0, 0, 0, 0, 0,
		  &res.smccc);

	if (res.result.build_id)
		pr_info("revision %lu.%lu (%08lx)", res.result.major,
			res.result.minor, res.result.build_id);
	else
		pr_info("revision %lu.%lu", res.result.major, res.result.minor);
}

static bool optee_msg_api_revision_is_compatible(optee_invoke_fn *invoke_fn)
{
	union {
		struct arm_smccc_res smccc;
		struct optee_smc_calls_revision_result result;
	} res;

	invoke_fn(OPTEE_SMC_CALLS_REVISION, 0, 0, 0, 0, 0, 0, 0, &res.smccc);

	if (res.result.major == OPTEE_MSG_REVISION_MAJOR &&
	    (int)res.result.minor >= OPTEE_MSG_REVISION_MINOR)
		return true;
	return false;
}

static bool optee_msg_exchange_capabilities(optee_invoke_fn *invoke_fn,
					    u32 *sec_caps, u32 *max_notif_value)
{
	union {
		struct arm_smccc_res smccc;
		struct optee_smc_exchange_capabilities_result result;
	} res;
	u32 a1 = 0;

	/*
	 * TODO This isn't enough to tell if it's UP system (from kernel
	 * point of view) or not, is_smp() returns the information
	 * needed, but can't be called directly from here.
	 */
	if (!IS_ENABLED(CONFIG_SMP) || nr_cpu_ids == 1)
		a1 |= OPTEE_SMC_NSEC_CAP_UNIPROCESSOR;

	invoke_fn(OPTEE_SMC_EXCHANGE_CAPABILITIES, a1, 0, 0, 0, 0, 0, 0,
		  &res.smccc);

	if (res.result.status != OPTEE_SMC_RETURN_OK)
		return false;

	*sec_caps = res.result.capabilities;
	if (*sec_caps & OPTEE_SMC_SEC_CAP_ASYNC_NOTIF)
		*max_notif_value = res.result.max_notif_value;
	else
		*max_notif_value = OPTEE_DEFAULT_MAX_NOTIF_VALUE;

	return true;
}

static struct tee_shm_pool *optee_config_dyn_shm(void)
{
	struct tee_shm_pool_mgr *priv_mgr;
	struct tee_shm_pool_mgr *dmabuf_mgr;
	void *rc;

	rc = optee_shm_pool_alloc_pages();
	if (IS_ERR(rc))
		return rc;
	priv_mgr = rc;

	rc = optee_shm_pool_alloc_pages();
	if (IS_ERR(rc)) {
		tee_shm_pool_mgr_destroy(priv_mgr);
		return rc;
	}
	dmabuf_mgr = rc;

	rc = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr);
	if (IS_ERR(rc)) {
		tee_shm_pool_mgr_destroy(priv_mgr);
		tee_shm_pool_mgr_destroy(dmabuf_mgr);
	}

	return rc;
}

static struct tee_shm_pool *
optee_config_shm_memremap(optee_invoke_fn *invoke_fn, void **memremaped_shm)
{
	union {
		struct arm_smccc_res smccc;
		struct optee_smc_get_shm_config_result result;
	} res;
	unsigned long vaddr;
	phys_addr_t paddr;
	size_t size;
	phys_addr_t begin;
	phys_addr_t end;
	void *va;
	struct tee_shm_pool_mgr *priv_mgr;
	struct tee_shm_pool_mgr *dmabuf_mgr;
	void *rc;
	const int sz = OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE;

	invoke_fn(OPTEE_SMC_GET_SHM_CONFIG, 0, 0, 0, 0, 0, 0, 0, &res.smccc);
	if (res.result.status != OPTEE_SMC_RETURN_OK) {
		pr_err("static shm service not available\n");
		return ERR_PTR(-ENOENT);
	}

	if (res.result.settings != OPTEE_SMC_SHM_CACHED) {
		pr_err("only normal cached shared memory supported\n");
		return ERR_PTR(-EINVAL);
	}

	begin = roundup(res.result.start, PAGE_SIZE);
	end = rounddown(res.result.start + res.result.size, PAGE_SIZE);
	paddr = begin;
	size = end - begin;

	if (size < 2 * OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE) {
		pr_err("too small shared memory area\n");
		return ERR_PTR(-EINVAL);
	}

	va = memremap(paddr, size, MEMREMAP_WB);
	if (!va) {
		pr_err("shared memory memremap failed\n");
		return ERR_PTR(-EINVAL);
	}
	vaddr = (unsigned long)va;

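	/*
	 * Split the carveout: the first OPTEE_SHM_NUM_PRIV_PAGES pages back
	 * the driver-private pool (message args and the like), while the
	 * remainder backs the dma-buf pool handed out to clients.
	 */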
	rc = tee_shm_pool_mgr_alloc_res_mem(vaddr, paddr, sz,
					    3 /* 8 bytes aligned */);
	if (IS_ERR(rc))
		goto err_memunmap;
	priv_mgr = rc;

	vaddr += sz;
	paddr += sz;
	size -= sz;

	rc = tee_shm_pool_mgr_alloc_res_mem(vaddr, paddr, size, PAGE_SHIFT);
	if (IS_ERR(rc))
		goto err_free_priv_mgr;
	dmabuf_mgr = rc;

	rc = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr);
	if (IS_ERR(rc))
		goto err_free_dmabuf_mgr;

	*memremaped_shm = va;

	return rc;

err_free_dmabuf_mgr:
	tee_shm_pool_mgr_destroy(dmabuf_mgr);
err_free_priv_mgr:
	tee_shm_pool_mgr_destroy(priv_mgr);
err_memunmap:
	memunmap(va);
	return rc;
}

/* Simple wrapper functions to be able to use a function pointer */
static void optee_smccc_smc(unsigned long a0, unsigned long a1,
			    unsigned long a2, unsigned long a3,
			    unsigned long a4, unsigned long a5,
			    unsigned long a6, unsigned long a7,
			    struct arm_smccc_res *res)
{
	arm_smccc_smc(a0, a1, a2, a3, a4, a5, a6, a7, res);
}

static void optee_smccc_hvc(unsigned long a0, unsigned long a1,
			    unsigned long a2, unsigned long a3,
			    unsigned long a4, unsigned long a5,
			    unsigned long a6, unsigned long a7,
			    struct arm_smccc_res *res)
{
	arm_smccc_hvc(a0, a1, a2, a3, a4, a5, a6, a7, res);
}

static optee_invoke_fn *get_invoke_func(struct device *dev)
{
	const char *method;

	pr_info("probing for conduit method.\n");

	if (device_property_read_string(dev, "method", &method)) {
		pr_warn("missing \"method\" property\n");
		return ERR_PTR(-ENXIO);
	}

	if (!strcmp("hvc", method))
		return optee_smccc_hvc;
	else if (!strcmp("smc", method))
		return optee_smccc_smc;

	pr_warn("invalid \"method\" property: %s\n", method);
	return ERR_PTR(-EINVAL);
}

/* optee_smc_remove - Device Removal Routine
 * @pdev: platform device information struct
 *
 * optee_smc_remove is called by platform subsystem to alert the driver
 * that it should release the device
 */
static int optee_smc_remove(struct platform_device *pdev)
{
	struct optee *optee = platform_get_drvdata(pdev);

	/*
	 * Ask OP-TEE to free all cached shared memory objects to decrease
	 * reference counters and also avoid wild pointers in secure world
	 * into the old shared memory range.
	 */
	optee_disable_shm_cache(optee);

	optee_smc_notif_uninit_irq(optee);

	optee_remove_common(optee);

	if (optee->smc.memremaped_shm)
		memunmap(optee->smc.memremaped_shm);

	kfree(optee);

	return 0;
}

/* optee_shutdown - Device Shutdown Routine
 * @pdev: platform device information struct
 *
 * optee_shutdown is called by the platform subsystem to alert
 * the driver that a shutdown, reboot, or kexec is happening and
 * the device must be disabled.
 */
static void optee_shutdown(struct platform_device *pdev)
{
	optee_disable_shm_cache(platform_get_drvdata(pdev));
}

static int optee_probe(struct platform_device *pdev)
{
	optee_invoke_fn *invoke_fn;
	struct tee_shm_pool *pool = ERR_PTR(-EINVAL);
	struct optee *optee = NULL;
	void *memremaped_shm = NULL;
	struct tee_device *teedev;
	u32 max_notif_value;
	u32 sec_caps;
	int rc;

	invoke_fn = get_invoke_func(&pdev->dev);
	if (IS_ERR(invoke_fn))
		return PTR_ERR(invoke_fn);

	if (!optee_msg_api_uid_is_optee_api(invoke_fn)) {
		pr_warn("api uid mismatch\n");
		return -EINVAL;
	}

	optee_msg_get_os_revision(invoke_fn);

	if (!optee_msg_api_revision_is_compatible(invoke_fn)) {
		pr_warn("api revision mismatch\n");
		return -EINVAL;
	}

	if (!optee_msg_exchange_capabilities(invoke_fn, &sec_caps,
					     &max_notif_value)) {
		pr_warn("capabilities mismatch\n");
		return -EINVAL;
	}

	/*
	 * Try to use dynamic shared memory if possible
	 */
	if (sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
		pool = optee_config_dyn_shm();

	/*
	 * If dynamic shared memory is not available or failed - try static one
	 */
	if (IS_ERR(pool) && (sec_caps & OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM))
		pool = optee_config_shm_memremap(invoke_fn, &memremaped_shm);

	if (IS_ERR(pool))
		return PTR_ERR(pool);

	optee = kzalloc(sizeof(*optee), GFP_KERNEL);
	if (!optee) {
		rc = -ENOMEM;
		goto err_free_pool;
	}

	optee->ops = &optee_ops;
	optee->smc.invoke_fn = invoke_fn;
	optee->smc.sec_caps = sec_caps;

	teedev = tee_device_alloc(&optee_clnt_desc, NULL, pool, optee);
	if (IS_ERR(teedev)) {
		rc = PTR_ERR(teedev);
		goto err_free_optee;
	}
	optee->teedev = teedev;

	teedev = tee_device_alloc(&optee_supp_desc, NULL, pool, optee);
	if (IS_ERR(teedev)) {
		rc = PTR_ERR(teedev);
		goto err_unreg_teedev;
	}
	optee->supp_teedev = teedev;

	rc = tee_device_register(optee->teedev);
	if (rc)
		goto err_unreg_supp_teedev;

	rc = tee_device_register(optee->supp_teedev);
	if (rc)
		goto err_unreg_supp_teedev;

	mutex_init(&optee->call_queue.mutex);
	INIT_LIST_HEAD(&optee->call_queue.waiters);
	optee_supp_init(&optee->supp);
	optee->smc.memremaped_shm = memremaped_shm;
	optee->pool = pool;

	platform_set_drvdata(pdev, optee);
	rc = optee_notif_init(optee, max_notif_value);
	if (rc)
		goto err_supp_uninit;

	if (sec_caps & OPTEE_SMC_SEC_CAP_ASYNC_NOTIF) {
		unsigned int irq;

		rc = platform_get_irq(pdev, 0);
		if (rc < 0) {
			pr_err("platform_get_irq: ret %d\n", rc);
			goto err_notif_uninit;
		}
		irq = rc;

		rc = optee_smc_notif_init_irq(optee, irq);
		if (rc) {
			irq_dispose_mapping(irq);
			goto err_notif_uninit;
		}
		enable_async_notif(optee->smc.invoke_fn);
		pr_info("Asynchronous notifications enabled\n");
	}

	/*
	 * Ensure that there are no pre-existing shm objects before enabling
	 * the shm cache so that there's no chance of receiving an invalid
	 * address during shutdown. This could occur, for example, if we're
	 * kexec booting from an older kernel that did not properly cleanup the
	 * shm cache.
	 */
	optee_disable_unmapped_shm_cache(optee);

	optee_enable_shm_cache(optee);

	if (optee->smc.sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
		pr_info("dynamic shared memory is enabled\n");

	rc = optee_enumerate_devices(PTA_CMD_GET_DEVICES);
	if (rc)
		goto err_disable_shm_cache;

	pr_info("initialized driver\n");
	return 0;

err_disable_shm_cache:
	optee_disable_shm_cache(optee);
	optee_smc_notif_uninit_irq(optee);
	optee_unregister_devices();
err_notif_uninit:
	optee_notif_uninit(optee);
err_supp_uninit:
	optee_supp_uninit(&optee->supp);
	mutex_destroy(&optee->call_queue.mutex);
err_unreg_supp_teedev:
	tee_device_unregister(optee->supp_teedev);
err_unreg_teedev:
	tee_device_unregister(optee->teedev);
err_free_optee:
	kfree(optee);
err_free_pool:
	tee_shm_pool_free(pool);
	/* optee may be NULL or freed here, so use the local memremaped_shm */
	if (memremaped_shm)
		memunmap(memremaped_shm);
	return rc;
}
static const struct of_device_id optee_dt_match[] = {
	{ .compatible = "linaro,optee-tz" },
	{},
};
MODULE_DEVICE_TABLE(of, optee_dt_match);

static struct platform_driver optee_driver = {
	.probe  = optee_probe,
	.remove = optee_smc_remove,
	.shutdown = optee_shutdown,
	.driver = {
		.name = "optee",
		.of_match_table = optee_dt_match,
	},
};

int optee_smc_abi_register(void)
{
	return platform_driver_register(&optee_driver);
}

void optee_smc_abi_unregister(void)
{
	platform_driver_unregister(&optee_driver);
}