blob: d767eebf30bdd5625b10350904b019edbe8166ca [file] [log] [blame]
Thomas Gleixner9c92ab62019-05-29 07:17:56 -07001// SPDX-License-Identifier: GPL-2.0-only
Volodymyr Babchukabd135b2017-11-29 14:48:35 +02002/*
3 * Copyright (c) 2015, Linaro Limited
4 * Copyright (c) 2017, EPAM Systems
Volodymyr Babchukabd135b2017-11-29 14:48:35 +02005 */
6#include <linux/device.h>
7#include <linux/dma-buf.h>
8#include <linux/genalloc.h>
9#include <linux/slab.h>
10#include <linux/tee_drv.h>
11#include "optee_private.h"
12#include "optee_smc.h"
13#include "shm_pool.h"
14
15static int pool_op_alloc(struct tee_shm_pool_mgr *poolm,
16 struct tee_shm *shm, size_t size)
17{
18 unsigned int order = get_order(size);
19 struct page *page;
Sumit Garga249dd22019-11-08 16:57:14 +053020 int rc = 0;
Volodymyr Babchukabd135b2017-11-29 14:48:35 +020021
22 page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
23 if (!page)
24 return -ENOMEM;
25
26 shm->kaddr = page_address(page);
27 shm->paddr = page_to_phys(page);
28 shm->size = PAGE_SIZE << order;
29
Sumit Garga249dd22019-11-08 16:57:14 +053030 if (shm->flags & TEE_SHM_DMA_BUF) {
Sumit Garg5a769f62019-12-30 18:52:40 +053031 unsigned int nr_pages = 1 << order, i;
32 struct page **pages;
33
34 pages = kcalloc(nr_pages, sizeof(pages), GFP_KERNEL);
35 if (!pages)
36 return -ENOMEM;
37
38 for (i = 0; i < nr_pages; i++) {
39 pages[i] = page;
40 page++;
41 }
42
Sumit Garga249dd22019-11-08 16:57:14 +053043 shm->flags |= TEE_SHM_REGISTER;
Sumit Garg5a769f62019-12-30 18:52:40 +053044 rc = optee_shm_register(shm->ctx, shm, pages, nr_pages,
Sumit Garga249dd22019-11-08 16:57:14 +053045 (unsigned long)shm->kaddr);
Sumit Garg5a769f62019-12-30 18:52:40 +053046 kfree(pages);
Sumit Garga249dd22019-11-08 16:57:14 +053047 }
48
49 return rc;
Volodymyr Babchukabd135b2017-11-29 14:48:35 +020050}
51
52static void pool_op_free(struct tee_shm_pool_mgr *poolm,
53 struct tee_shm *shm)
54{
Sumit Garga249dd22019-11-08 16:57:14 +053055 if (shm->flags & TEE_SHM_DMA_BUF)
56 optee_shm_unregister(shm->ctx, shm);
57
Volodymyr Babchukabd135b2017-11-29 14:48:35 +020058 free_pages((unsigned long)shm->kaddr, get_order(shm->size));
59 shm->kaddr = NULL;
60}
61
/* Free the pool manager itself; individual shm objects are freed separately. */
static void pool_op_destroy_poolmgr(struct tee_shm_pool_mgr *poolm)
{
	kfree(poolm);
}
66
67static const struct tee_shm_pool_mgr_ops pool_ops = {
68 .alloc = pool_op_alloc,
69 .free = pool_op_free,
70 .destroy_poolmgr = pool_op_destroy_poolmgr,
71};
72
73/**
74 * optee_shm_pool_alloc_pages() - create page-based allocator pool
75 *
76 * This pool is used when OP-TEE supports dymanic SHM. In this case
77 * command buffers and such are allocated from kernel's own memory.
78 */
79struct tee_shm_pool_mgr *optee_shm_pool_alloc_pages(void)
80{
81 struct tee_shm_pool_mgr *mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
82
83 if (!mgr)
84 return ERR_PTR(-ENOMEM);
85
86 mgr->ops = &pool_ops;
87
88 return mgr;
89}