// SPDX-License-Identifier: GPL-2.0
/*
 * Framework for userspace DMA-BUF allocations
 *
 * Copyright (C) 2011 Google, Inc.
 * Copyright (C) 2019 Linaro Ltd.
 */

#include <linux/cdev.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/err.h>
#include <linux/xarray.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <linux/dma-heap.h>
#include <uapi/linux/dma-heap.h>

#define DEVNAME "dma_heap"

#define NUM_HEAP_MINORS 128

/**
 * struct dma_heap - represents a dmabuf heap in the system
 * @name:	used for debugging/device-node name
 * @ops:	ops struct for this heap
 * @priv:	private per-subdriver data for this heap
 * @heap_devt:	heap device node
 * @list:	list head connecting to list of heaps
 * @heap_cdev:	heap char device
 *
 * Represents a heap of memory from which buffers can be made.
 */
struct dma_heap {
	const char *name;
	const struct dma_heap_ops *ops;
	void *priv;
	dev_t heap_devt;
	struct list_head list;
	struct cdev heap_cdev;
};

static LIST_HEAD(heap_list);
static DEFINE_MUTEX(heap_list_lock);
static dev_t dma_heap_devt;
static struct class *dma_heap_class;
static DEFINE_XARRAY_ALLOC(dma_heap_minors);

static int dma_heap_buffer_alloc(struct dma_heap *heap, size_t len,
				 unsigned int fd_flags,
				 unsigned int heap_flags)
{
	struct dma_buf *dmabuf;
	int fd;

	/*
	 * Allocations from all heaps have to begin
	 * and end on page boundaries.
	 */
	len = PAGE_ALIGN(len);
	if (!len)
		return -EINVAL;

	dmabuf = heap->ops->allocate(heap, len, fd_flags, heap_flags);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	fd = dma_buf_fd(dmabuf, fd_flags);
	if (fd < 0) {
		dma_buf_put(dmabuf);
		/* just return, as put will call release and that will free */
	}
	return fd;
}

static int dma_heap_open(struct inode *inode, struct file *file)
{
	struct dma_heap *heap;

	heap = xa_load(&dma_heap_minors, iminor(inode));
	if (!heap) {
		pr_err("dma_heap: minor %d unknown.\n", iminor(inode));
		return -ENODEV;
	}

	/* instance data as context */
	file->private_data = heap;
	nonseekable_open(inode, file);

	return 0;
}

static long dma_heap_ioctl_allocate(struct file *file, void *data)
{
	struct dma_heap_allocation_data *heap_allocation = data;
	struct dma_heap *heap = file->private_data;
	int fd;

	if (heap_allocation->fd)
		return -EINVAL;

	if (heap_allocation->fd_flags & ~DMA_HEAP_VALID_FD_FLAGS)
		return -EINVAL;

	if (heap_allocation->heap_flags & ~DMA_HEAP_VALID_HEAP_FLAGS)
		return -EINVAL;

	fd = dma_heap_buffer_alloc(heap, heap_allocation->len,
				   heap_allocation->fd_flags,
				   heap_allocation->heap_flags);
	if (fd < 0)
		return fd;

	heap_allocation->fd = fd;

	return 0;
}
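
/*
 * Illustrative userspace use of the allocation ioctl (a sketch, not part of
 * this file; the "system" heap name is only an example of a heap that may be
 * registered on a given system):
 *
 *	struct dma_heap_allocation_data data = {
 *		.len = 4096,
 *		.fd_flags = O_RDWR | O_CLOEXEC,
 *	};
 *	int heap_fd = open("/dev/dma_heap/system", O_RDWR | O_CLOEXEC);
 *
 *	if (heap_fd >= 0 && ioctl(heap_fd, DMA_HEAP_IOCTL_ALLOC, &data) == 0)
 *		; // data.fd now holds a dma-buf fd for the new buffer
 */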

static unsigned int dma_heap_ioctl_cmds[] = {
	DMA_HEAP_IOCTL_ALLOC,
};

static long dma_heap_ioctl(struct file *file, unsigned int ucmd,
			   unsigned long arg)
{
	char stack_kdata[128];
	char *kdata = stack_kdata;
	unsigned int kcmd;
	unsigned int in_size, out_size, drv_size, ksize;
	int nr = _IOC_NR(ucmd);
	int ret = 0;

	if (nr >= ARRAY_SIZE(dma_heap_ioctl_cmds))
		return -EINVAL;

	/* Get the kernel ioctl cmd that matches */
	kcmd = dma_heap_ioctl_cmds[nr];

	/* Figure out the delta between user cmd size and kernel cmd size */
	drv_size = _IOC_SIZE(kcmd);
	out_size = _IOC_SIZE(ucmd);
	in_size = out_size;
	if ((ucmd & kcmd & IOC_IN) == 0)
		in_size = 0;
	if ((ucmd & kcmd & IOC_OUT) == 0)
		out_size = 0;
	ksize = max(max(in_size, out_size), drv_size);

	/* If necessary, allocate buffer for ioctl argument */
	if (ksize > sizeof(stack_kdata)) {
		kdata = kmalloc(ksize, GFP_KERNEL);
		if (!kdata)
			return -ENOMEM;
	}

	if (copy_from_user(kdata, (void __user *)arg, in_size) != 0) {
		ret = -EFAULT;
		goto err;
	}

	/* zero out any difference between the kernel/user structure size */
	if (ksize > in_size)
		memset(kdata + in_size, 0, ksize - in_size);

	switch (kcmd) {
	case DMA_HEAP_IOCTL_ALLOC:
		ret = dma_heap_ioctl_allocate(file, kdata);
		break;
	default:
		ret = -ENOTTY;
		goto err;
	}

	if (copy_to_user((void __user *)arg, kdata, out_size) != 0)
		ret = -EFAULT;
err:
	if (kdata != stack_kdata)
		kfree(kdata);
	return ret;
}

static const struct file_operations dma_heap_fops = {
	.owner = THIS_MODULE,
	.open = dma_heap_open,
	.unlocked_ioctl = dma_heap_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = dma_heap_ioctl,
#endif
};

/**
 * dma_heap_get_drvdata() - get per-subdriver data for the heap
 * @heap: DMA-Heap to retrieve private data for
 *
 * Returns:
 * The per-subdriver data for the heap.
 */
void *dma_heap_get_drvdata(struct dma_heap *heap)
{
	return heap->priv;
}

/**
 * dma_heap_get_name() - get heap name
 * @heap: DMA-Heap to retrieve the name of
 *
 * Returns:
 * The char* for the heap name.
 */
const char *dma_heap_get_name(struct dma_heap *heap)
{
	return heap->name;
}
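
/*
 * Accessor usage sketch for a heap subdriver's allocate callback
 * (illustrative only; my_heap_data and my_heap_allocate are hypothetical
 * and would live in the subdriver, not in this file):
 *
 *	static struct dma_buf *my_heap_allocate(struct dma_heap *heap,
 *						unsigned long len,
 *						unsigned long fd_flags,
 *						unsigned long heap_flags)
 *	{
 *		struct my_heap_data *data = dma_heap_get_drvdata(heap);
 *
 *		pr_debug("allocating %lu bytes from %s\n",
 *			 len, dma_heap_get_name(heap));
 *		...
 *	}
 */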

struct dma_heap *dma_heap_add(const struct dma_heap_export_info *exp_info)
{
	struct dma_heap *heap, *h, *err_ret;
	struct device *dev_ret;
	unsigned int minor;
	int ret;

	if (!exp_info->name || !strcmp(exp_info->name, "")) {
		pr_err("dma_heap: Cannot add heap without a name\n");
		return ERR_PTR(-EINVAL);
	}

	if (!exp_info->ops || !exp_info->ops->allocate) {
		pr_err("dma_heap: Cannot add heap with invalid ops struct\n");
		return ERR_PTR(-EINVAL);
	}

	/* check the name is unique */
	mutex_lock(&heap_list_lock);
	list_for_each_entry(h, &heap_list, list) {
		if (!strcmp(h->name, exp_info->name)) {
			mutex_unlock(&heap_list_lock);
			pr_err("dma_heap: Already registered heap named %s\n",
			       exp_info->name);
			return ERR_PTR(-EINVAL);
		}
	}
	mutex_unlock(&heap_list_lock);

	heap = kzalloc(sizeof(*heap), GFP_KERNEL);
	if (!heap)
		return ERR_PTR(-ENOMEM);

	heap->name = exp_info->name;
	heap->ops = exp_info->ops;
	heap->priv = exp_info->priv;

	/* Find unused minor number */
	ret = xa_alloc(&dma_heap_minors, &minor, heap,
		       XA_LIMIT(0, NUM_HEAP_MINORS - 1), GFP_KERNEL);
	if (ret < 0) {
		pr_err("dma_heap: Unable to get minor number for heap\n");
		err_ret = ERR_PTR(ret);
		goto err0;
	}

	/* Create device */
	heap->heap_devt = MKDEV(MAJOR(dma_heap_devt), minor);

	cdev_init(&heap->heap_cdev, &dma_heap_fops);
	ret = cdev_add(&heap->heap_cdev, heap->heap_devt, 1);
	if (ret < 0) {
		pr_err("dma_heap: Unable to add char device\n");
		err_ret = ERR_PTR(ret);
		goto err1;
	}

	dev_ret = device_create(dma_heap_class,
				NULL,
				heap->heap_devt,
				NULL,
				heap->name);
	if (IS_ERR(dev_ret)) {
		pr_err("dma_heap: Unable to create device\n");
		err_ret = ERR_CAST(dev_ret);
		goto err2;
	}
	/* Add heap to the list */
	mutex_lock(&heap_list_lock);
	list_add(&heap->list, &heap_list);
	mutex_unlock(&heap_list_lock);

	return heap;

err2:
	cdev_del(&heap->heap_cdev);
err1:
	xa_erase(&dma_heap_minors, minor);
err0:
	kfree(heap);
	return err_ret;
}
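
/*
 * Registration sketch for a heap provider (illustrative only; my_heap_ops,
 * my_heap_allocate and my_heap_data are hypothetical subdriver symbols):
 *
 *	static const struct dma_heap_ops my_heap_ops = {
 *		.allocate = my_heap_allocate,
 *	};
 *
 *	struct dma_heap_export_info exp_info = {
 *		.name = "my_heap",
 *		.ops = &my_heap_ops,
 *		.priv = my_heap_data,
 *	};
 *	struct dma_heap *heap = dma_heap_add(&exp_info);
 *
 *	if (IS_ERR(heap))
 *		return PTR_ERR(heap);
 */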
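
/* devnode callback: heap device nodes are created under /dev/dma_heap/<name> */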
static char *dma_heap_devnode(struct device *dev, umode_t *mode)
{
	return kasprintf(GFP_KERNEL, "dma_heap/%s", dev_name(dev));
}

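/*
 * Runs as a subsys_initcall so the chrdev region and class are in place
 * before heap providers register themselves from later device initcalls.
 */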
static int dma_heap_init(void)
{
	int ret;

	ret = alloc_chrdev_region(&dma_heap_devt, 0, NUM_HEAP_MINORS, DEVNAME);
	if (ret)
		return ret;

	dma_heap_class = class_create(THIS_MODULE, DEVNAME);
	if (IS_ERR(dma_heap_class)) {
		unregister_chrdev_region(dma_heap_devt, NUM_HEAP_MINORS);
		return PTR_ERR(dma_heap_class);
	}
	dma_heap_class->devnode = dma_heap_devnode;

	return 0;
}
subsys_initcall(dma_heap_init);