// SPDX-License-Identifier: GPL-2.0
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/miscdevice.h>
#include <linux/dma-buf.h>
#include <linux/highmem.h>
#include <linux/cred.h>
#include <linux/shmem_fs.h>
#include <linux/memfd.h>

#include <uapi/linux/udmabuf.h>

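/*
 * A udmabuf instance holds a reference on each memfd page backing the
 * exported dma-buf; the pages stay pinned until release_udmabuf().
 */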
struct udmabuf {
	u32 pagecount;
	struct page **pages;
};

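/*
 * CPU access via mmap(): the fault handler hands out the already-pinned
 * shmem page for the faulting offset, so the only possible error is an
 * out-of-range page offset.
 */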
static vm_fault_t udmabuf_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct udmabuf *ubuf = vma->vm_private_data;

	if (WARN_ON(vmf->pgoff >= ubuf->pagecount))
		return VM_FAULT_SIGBUS;

	vmf->page = ubuf->pages[vmf->pgoff];
	get_page(vmf->page);
	return 0;
}

static const struct vm_operations_struct udmabuf_vm_ops = {
	.fault = udmabuf_vm_fault,
};

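/*
 * Only shared mappings make sense for a buffer that aliases shmem
 * pages; a private (COW) mapping would silently diverge from the
 * exported buffer.
 */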
static int mmap_udmabuf(struct dma_buf *buf, struct vm_area_struct *vma)
{
	struct udmabuf *ubuf = buf->priv;

	if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
		return -EINVAL;

	vma->vm_ops = &udmabuf_vm_ops;
	vma->vm_private_data = ubuf;
	return 0;
}

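/*
 * Attachment mapping: build a scatter/gather table over the pinned
 * pages and map it for the importing device. Physically contiguous
 * runs are merged by sg_alloc_table_from_pages(), so nents may be
 * smaller than pagecount.
 */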
static struct sg_table *map_udmabuf(struct dma_buf_attachment *at,
				    enum dma_data_direction direction)
{
	struct udmabuf *ubuf = at->dmabuf->priv;
	struct sg_table *sg;

	sg = kzalloc(sizeof(*sg), GFP_KERNEL);
	if (!sg)
		goto err1;
	if (sg_alloc_table_from_pages(sg, ubuf->pages, ubuf->pagecount,
				      0, ubuf->pagecount << PAGE_SHIFT,
				      GFP_KERNEL) < 0)
		goto err2;
	if (!dma_map_sg(at->dev, sg->sgl, sg->nents, direction))
		goto err3;

	return sg;

err3:
	sg_free_table(sg);
err2:
	kfree(sg);
err1:
	return ERR_PTR(-ENOMEM);
}

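/* Tear down an attachment mapping created by map_udmabuf(). */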
static void unmap_udmabuf(struct dma_buf_attachment *at,
			  struct sg_table *sg,
			  enum dma_data_direction direction)
{
	/* Undo the dma_map_sg() done in map_udmabuf() before freeing. */
	dma_unmap_sg(at->dev, sg->sgl, sg->nents, direction);
	sg_free_table(sg);
	kfree(sg);
}

static void release_udmabuf(struct dma_buf *buf)
{
	struct udmabuf *ubuf = buf->priv;
	pgoff_t pg;

	for (pg = 0; pg < ubuf->pagecount; pg++)
		put_page(ubuf->pages[pg]);
	kfree(ubuf->pages);
	kfree(ubuf);
}

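/*
 * Per-page CPU access through the dma-buf kmap interface; the pages
 * are already pinned, so this is a plain kmap()/kunmap() pair.
 */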
static void *kmap_udmabuf(struct dma_buf *buf, unsigned long page_num)
{
	struct udmabuf *ubuf = buf->priv;
	struct page *page = ubuf->pages[page_num];

	return kmap(page);
}

static void kunmap_udmabuf(struct dma_buf *buf, unsigned long page_num,
			   void *vaddr)
{
	struct udmabuf *ubuf = buf->priv;

	/* kunmap() takes the struct page, not the kernel vaddr. */
	kunmap(ubuf->pages[page_num]);
}

static const struct dma_buf_ops udmabuf_ops = {
	.map_dma_buf = map_udmabuf,
	.unmap_dma_buf = unmap_udmabuf,
	.release = release_udmabuf,
	.map = kmap_udmabuf,
	.unmap = kunmap_udmabuf,
	.mmap = mmap_udmabuf,
};

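/*
 * The memfd must be sealed against shrinking, since the pinned pages
 * must stay valid for the lifetime of the buffer, and must not be
 * sealed against writes, since the exported dma-buf would otherwise
 * provide a way around that seal.
 */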
#define SEALS_WANTED (F_SEAL_SHRINK)
#define SEALS_DENIED (F_SEAL_WRITE)

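/*
 * Build one dma-buf from a list of (memfd, offset, size) ranges:
 * validate alignment and seals, pin every covered shmem page, then
 * export the result and return a file descriptor for it.
 */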
static long udmabuf_create(struct udmabuf_create_list *head,
			   struct udmabuf_create_item *list)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct file *memfd = NULL;
	struct udmabuf *ubuf;
	struct dma_buf *buf;
	pgoff_t pgoff, pgcnt, pgidx, pgbuf;
	struct page *page;
	int seals, ret = -EINVAL;
	u32 i, flags;

	ubuf = kzalloc(sizeof(struct udmabuf), GFP_KERNEL);
	if (!ubuf)
		return -ENOMEM;

	/* All ranges must be page-aligned; sum up the total page count. */
	for (i = 0; i < head->count; i++) {
		if (!IS_ALIGNED(list[i].offset, PAGE_SIZE))
			goto err_free_ubuf;
		if (!IS_ALIGNED(list[i].size, PAGE_SIZE))
			goto err_free_ubuf;
		ubuf->pagecount += list[i].size >> PAGE_SHIFT;
	}
	ubuf->pages = kmalloc_array(ubuf->pagecount, sizeof(struct page *),
				    GFP_KERNEL);
	if (!ubuf->pages) {
		ret = -ENOMEM;
		goto err_free_ubuf;
	}

	pgbuf = 0;
	for (i = 0; i < head->count; i++) {
		memfd = fget(list[i].memfd);
		if (!memfd)
			goto err_put_pages;
		if (!shmem_mapping(file_inode(memfd)->i_mapping))
			goto err_put_pages;
		seals = memfd_fcntl(memfd, F_GET_SEALS, 0);
		if (seals == -EINVAL ||
		    (seals & SEALS_WANTED) != SEALS_WANTED ||
		    (seals & SEALS_DENIED) != 0)
			goto err_put_pages;
		pgoff = list[i].offset >> PAGE_SHIFT;
		pgcnt = list[i].size >> PAGE_SHIFT;
		/* Pin each covered page; references are dropped on release. */
		for (pgidx = 0; pgidx < pgcnt; pgidx++) {
			page = shmem_read_mapping_page(
				file_inode(memfd)->i_mapping, pgoff + pgidx);
			if (IS_ERR(page)) {
				ret = PTR_ERR(page);
				goto err_put_pages;
			}
			ubuf->pages[pgbuf++] = page;
		}
		fput(memfd);
	}
	memfd = NULL;

	exp_info.ops = &udmabuf_ops;
	exp_info.size = ubuf->pagecount << PAGE_SHIFT;
	exp_info.priv = ubuf;

	buf = dma_buf_export(&exp_info);
	if (IS_ERR(buf)) {
		ret = PTR_ERR(buf);
		goto err_put_pages;
	}

	flags = 0;
	if (head->flags & UDMABUF_FLAGS_CLOEXEC)
		flags |= O_CLOEXEC;
	return dma_buf_fd(buf, flags);

err_put_pages:
	while (pgbuf > 0)
		put_page(ubuf->pages[--pgbuf]);
err_free_ubuf:
	if (memfd)
		fput(memfd);
	kfree(ubuf->pages);
	kfree(ubuf);
	return ret;
}

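/*
 * A minimal userspace sketch of the single-buffer path (error handling
 * omitted; assumes <linux/udmabuf.h> provides struct udmabuf_create and
 * UDMABUF_CREATE as used below):
 *
 *	int memfd = memfd_create("buf", MFD_ALLOW_SEALING);
 *	ftruncate(memfd, size);                    // size page-aligned
 *	fcntl(memfd, F_ADD_SEALS, F_SEAL_SHRINK);  // required seal
 *
 *	struct udmabuf_create create = {
 *		.memfd  = memfd,
 *		.flags  = UDMABUF_FLAGS_CLOEXEC,
 *		.offset = 0,
 *		.size   = size,
 *	};
 *	int devfd = open("/dev/udmabuf", O_RDWR);
 *	int buffd = ioctl(devfd, UDMABUF_CREATE, &create);
 */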
static long udmabuf_ioctl_create(struct file *filp, unsigned long arg)
{
	struct udmabuf_create create;
	struct udmabuf_create_list head;
	struct udmabuf_create_item list;

	if (copy_from_user(&create, (void __user *)arg,
			   sizeof(struct udmabuf_create)))
		return -EFAULT;

	/* Wrap the single-buffer request as a one-item list. */
	head.flags = create.flags;
	head.count = 1;
	list.memfd = create.memfd;
	list.offset = create.offset;
	list.size = create.size;

	return udmabuf_create(&head, &list);
}

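/*
 * UDMABUF_CREATE_LIST: the ioctl argument is a struct
 * udmabuf_create_list header immediately followed by head.count item
 * structs; the count is capped at 1024 to bound the allocation.
 */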
static long udmabuf_ioctl_create_list(struct file *filp, unsigned long arg)
{
	struct udmabuf_create_list head;
	struct udmabuf_create_item *list;
	int ret = -EINVAL;
	u32 lsize;

	if (copy_from_user(&head, (void __user *)arg, sizeof(head)))
		return -EFAULT;
	if (head.count > 1024)
		return -EINVAL;
	lsize = sizeof(struct udmabuf_create_item) * head.count;
	list = memdup_user((void __user *)(arg + sizeof(head)), lsize);
	if (IS_ERR(list))
		return PTR_ERR(list);

	ret = udmabuf_create(&head, list);
	kfree(list);
	return ret;
}

static long udmabuf_ioctl(struct file *filp, unsigned int ioctl,
			  unsigned long arg)
{
	long ret;

	switch (ioctl) {
	case UDMABUF_CREATE:
		ret = udmabuf_ioctl_create(filp, arg);
		break;
	case UDMABUF_CREATE_LIST:
		ret = udmabuf_ioctl_create_list(filp, arg);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret;
}

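/*
 * The driver registers as a misc character device, so the ioctl
 * interface appears at /dev/udmabuf (dynamic minor).
 */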
static const struct file_operations udmabuf_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = udmabuf_ioctl,
};

static struct miscdevice udmabuf_misc = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "udmabuf",
	.fops = &udmabuf_fops,
};

static int __init udmabuf_dev_init(void)
{
	return misc_register(&udmabuf_misc);
}

static void __exit udmabuf_dev_exit(void)
{
	misc_deregister(&udmabuf_misc);
}

module_init(udmabuf_dev_init)
module_exit(udmabuf_dev_exit)

MODULE_AUTHOR("Gerd Hoffmann <kraxel@redhat.com>");
MODULE_LICENSE("GPL v2");