// SPDX-License-Identifier: GPL-2.0-only
/*
 * Framework for buffer objects that can be shared across devices/subsystems.
 *
 * Copyright(C) 2011 Linaro Limited. All rights reserved.
 * Author: Sumit Semwal <sumit.semwal@ti.com>
 *
 * Many thanks to the linaro-mm-sig list, and especially
 * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and
 * Daniel Vetter <daniel@ffwll.ch> for their support in the creation and
 * refining of this idea.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>
#include <linux/dma-fence.h>
#include <linux/anon_inodes.h>
#include <linux/export.h>
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/dma-resv.h>
#include <linux/mm.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>

#include <uapi/linux/dma-buf.h>
#include <uapi/linux/magic.h>

static inline int is_dma_buf_file(struct file *);

struct dma_buf_list {
        struct list_head head;
        struct mutex lock;
};

static struct dma_buf_list db_list;

static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen)
{
        struct dma_buf *dmabuf;
        char name[DMA_BUF_NAME_LEN];
        size_t ret = 0;

        dmabuf = dentry->d_fsdata;
        spin_lock(&dmabuf->name_lock);
        if (dmabuf->name)
                ret = strlcpy(name, dmabuf->name, DMA_BUF_NAME_LEN);
        spin_unlock(&dmabuf->name_lock);

        return dynamic_dname(dentry, buffer, buflen, "/%s:%s",
                             dentry->d_name.name, ret > 0 ? name : "");
}

static void dma_buf_release(struct dentry *dentry)
{
        struct dma_buf *dmabuf;

        dmabuf = dentry->d_fsdata;
        if (unlikely(!dmabuf))
                return;

        BUG_ON(dmabuf->vmapping_counter);

        /*
         * Any fences that a dma-buf poll can wait on should be signaled
         * before releasing dma-buf. This is the responsibility of each
         * driver that uses the reservation objects.
         *
         * If you hit this BUG() it means someone dropped their ref to the
         * dma-buf while still having pending operations on the buffer.
         */
        BUG_ON(dmabuf->cb_shared.active || dmabuf->cb_excl.active);

        dmabuf->ops->release(dmabuf);

        if (dmabuf->resv == (struct dma_resv *)&dmabuf[1])
                dma_resv_fini(dmabuf->resv);

        module_put(dmabuf->owner);
        kfree(dmabuf->name);
        kfree(dmabuf);
}

static int dma_buf_file_release(struct inode *inode, struct file *file)
{
        struct dma_buf *dmabuf;

        if (!is_dma_buf_file(file))
                return -EINVAL;

        dmabuf = file->private_data;

        mutex_lock(&db_list.lock);
        list_del(&dmabuf->list_node);
        mutex_unlock(&db_list.lock);

        return 0;
}

static const struct dentry_operations dma_buf_dentry_ops = {
        .d_dname = dmabuffs_dname,
        .d_release = dma_buf_release,
};

static struct vfsmount *dma_buf_mnt;

static int dma_buf_fs_init_context(struct fs_context *fc)
{
        struct pseudo_fs_context *ctx;

        ctx = init_pseudo(fc, DMA_BUF_MAGIC);
        if (!ctx)
                return -ENOMEM;
        ctx->dops = &dma_buf_dentry_ops;
        return 0;
}

static struct file_system_type dma_buf_fs_type = {
        .name = "dmabuf",
        .init_fs_context = dma_buf_fs_init_context,
        .kill_sb = kill_anon_super,
};

static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
{
        struct dma_buf *dmabuf;

        if (!is_dma_buf_file(file))
                return -EINVAL;

        dmabuf = file->private_data;

        /* check if buffer supports mmap */
        if (!dmabuf->ops->mmap)
                return -EINVAL;

        /* check for overflowing the buffer's size */
        if (vma->vm_pgoff + vma_pages(vma) >
            dmabuf->size >> PAGE_SHIFT)
                return -EINVAL;

        return dmabuf->ops->mmap(dmabuf, vma);
}

static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
{
        struct dma_buf *dmabuf;
        loff_t base;

        if (!is_dma_buf_file(file))
                return -EBADF;

        dmabuf = file->private_data;

        /* only support discovering the end of the buffer,
         * but also allow SEEK_SET to maintain the idiomatic
         * SEEK_END(0), SEEK_CUR(0) pattern
         */
        if (whence == SEEK_END)
                base = dmabuf->size;
        else if (whence == SEEK_SET)
                base = 0;
        else
                return -EINVAL;

        if (offset != 0)
                return -EINVAL;

        return base + offset;
}

/**
 * DOC: implicit fence polling
 *
 * To support cross-device and cross-driver synchronization of buffer access,
 * implicit fences (represented internally in the kernel with &struct dma_fence)
 * can be attached to a &dma_buf. The glue for that and a few related things are
 * provided in the &dma_resv structure.
 *
 * Userspace can query the state of these implicitly tracked fences using poll()
 * and related system calls:
 *
 * - Checking for EPOLLIN, i.e. read access, can be used to query the state of
 *   the most recent write or exclusive fence.
 *
 * - Checking for EPOLLOUT, i.e. write access, can be used to query the state of
 *   all attached fences, shared and exclusive ones.
 *
 * Note that this only signals the completion of the respective fences, i.e. the
 * DMA transfers are complete. Cache flushing and any other necessary
 * preparations before CPU access can begin still need to happen.
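 *
 * A minimal userspace sketch of such a query (hypothetical dmabuf_fd obtained
 * from some exporter; error handling elided)::
 *
 *     struct pollfd pfd = { .fd = dmabuf_fd, .events = POLLIN };
 *
 *     // block until the most recent write or exclusive fence has signaled
 *     if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN)) {
 *             // DMA writes are done; cache flushing may still be needed
 *             // before the CPU can safely read the buffer
 *     }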
 */

static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
        struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb;
        unsigned long flags;

        spin_lock_irqsave(&dcb->poll->lock, flags);
        wake_up_locked_poll(dcb->poll, dcb->active);
        dcb->active = 0;
        spin_unlock_irqrestore(&dcb->poll->lock, flags);
}

static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
{
        struct dma_buf *dmabuf;
        struct dma_resv *resv;
        struct dma_resv_list *fobj;
        struct dma_fence *fence_excl;
        __poll_t events;
        unsigned shared_count, seq;

        dmabuf = file->private_data;
        if (!dmabuf || !dmabuf->resv)
                return EPOLLERR;

        resv = dmabuf->resv;

        poll_wait(file, &dmabuf->poll, poll);

        events = poll_requested_events(poll) & (EPOLLIN | EPOLLOUT);
        if (!events)
                return 0;

retry:
        seq = read_seqcount_begin(&resv->seq);
        rcu_read_lock();

        fobj = rcu_dereference(resv->fence);
        if (fobj)
                shared_count = fobj->shared_count;
        else
                shared_count = 0;
        fence_excl = rcu_dereference(resv->fence_excl);
        if (read_seqcount_retry(&resv->seq, seq)) {
                rcu_read_unlock();
                goto retry;
        }

        if (fence_excl && (!(events & EPOLLOUT) || shared_count == 0)) {
                struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_excl;
                __poll_t pevents = EPOLLIN;

                if (shared_count == 0)
                        pevents |= EPOLLOUT;

                spin_lock_irq(&dmabuf->poll.lock);
                if (dcb->active) {
                        dcb->active |= pevents;
                        events &= ~pevents;
                } else
                        dcb->active = pevents;
                spin_unlock_irq(&dmabuf->poll.lock);

                if (events & pevents) {
                        if (!dma_fence_get_rcu(fence_excl)) {
                                /* force a recheck */
                                events &= ~pevents;
                                dma_buf_poll_cb(NULL, &dcb->cb);
                        } else if (!dma_fence_add_callback(fence_excl, &dcb->cb,
                                                           dma_buf_poll_cb)) {
                                events &= ~pevents;
                                dma_fence_put(fence_excl);
                        } else {
                                /*
                                 * No callback queued, wake up any additional
                                 * waiters.
                                 */
                                dma_fence_put(fence_excl);
                                dma_buf_poll_cb(NULL, &dcb->cb);
                        }
                }
        }

        if ((events & EPOLLOUT) && shared_count > 0) {
                struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_shared;
                int i;

                /* Only queue a new callback if no event has fired yet */
                spin_lock_irq(&dmabuf->poll.lock);
                if (dcb->active)
                        events &= ~EPOLLOUT;
                else
                        dcb->active = EPOLLOUT;
                spin_unlock_irq(&dmabuf->poll.lock);

                if (!(events & EPOLLOUT))
                        goto out;

                for (i = 0; i < shared_count; ++i) {
                        struct dma_fence *fence = rcu_dereference(fobj->shared[i]);

                        if (!dma_fence_get_rcu(fence)) {
                                /*
                                 * fence refcount dropped to zero, this means
                                 * that fobj has been freed
                                 *
                                 * call dma_buf_poll_cb and force a recheck!
                                 */
                                events &= ~EPOLLOUT;
                                dma_buf_poll_cb(NULL, &dcb->cb);
                                break;
                        }
                        if (!dma_fence_add_callback(fence, &dcb->cb,
                                                    dma_buf_poll_cb)) {
                                dma_fence_put(fence);
                                events &= ~EPOLLOUT;
                                break;
                        }
                        dma_fence_put(fence);
                }

                /* No callback queued, wake up any additional waiters. */
                if (i == shared_count)
                        dma_buf_poll_cb(NULL, &dcb->cb);
        }

out:
        rcu_read_unlock();
        return events;
}

/**
 * dma_buf_set_name - Set a name on a specific dma_buf to track its usage.
 * The name of the dma-buf buffer can only be set when the dma-buf is not
 * attached to any devices. It could theoretically support changing the
 * name of the dma-buf if the same piece of memory is used for multiple
 * purposes between different devices.
 *
 * @dmabuf: [in] dmabuf buffer that will be renamed.
 * @buf:    [in] A piece of userspace memory that contains the name of
 *               the dma-buf.
 *
 * Returns 0 on success. If the dma-buf buffer is already attached to
 * devices, returns -EBUSY.
 *
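 * A minimal userspace sketch (hypothetical dmabuf_fd; DMA_BUF_SET_NAME is the
 * uapi ioctl number, and the call only succeeds while nothing is attached)::
 *
 *     ioctl(dmabuf_fd, DMA_BUF_SET_NAME, "my-buffer");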
 */
static long dma_buf_set_name(struct dma_buf *dmabuf, const char __user *buf)
{
        char *name = strndup_user(buf, DMA_BUF_NAME_LEN);
        long ret = 0;

        if (IS_ERR(name))
                return PTR_ERR(name);

        dma_resv_lock(dmabuf->resv, NULL);
        if (!list_empty(&dmabuf->attachments)) {
                ret = -EBUSY;
                kfree(name);
                goto out_unlock;
        }
        spin_lock(&dmabuf->name_lock);
        kfree(dmabuf->name);
        dmabuf->name = name;
        spin_unlock(&dmabuf->name_lock);

out_unlock:
        dma_resv_unlock(dmabuf->resv);
        return ret;
}

static long dma_buf_ioctl(struct file *file,
                          unsigned int cmd, unsigned long arg)
{
        struct dma_buf *dmabuf;
        struct dma_buf_sync sync;
        enum dma_data_direction direction;
        int ret;

        dmabuf = file->private_data;

        switch (cmd) {
        case DMA_BUF_IOCTL_SYNC:
                if (copy_from_user(&sync, (void __user *) arg, sizeof(sync)))
                        return -EFAULT;

                if (sync.flags & ~DMA_BUF_SYNC_VALID_FLAGS_MASK)
                        return -EINVAL;

                switch (sync.flags & DMA_BUF_SYNC_RW) {
                case DMA_BUF_SYNC_READ:
                        direction = DMA_FROM_DEVICE;
                        break;
                case DMA_BUF_SYNC_WRITE:
                        direction = DMA_TO_DEVICE;
                        break;
                case DMA_BUF_SYNC_RW:
                        direction = DMA_BIDIRECTIONAL;
                        break;
                default:
                        return -EINVAL;
                }

                if (sync.flags & DMA_BUF_SYNC_END)
                        ret = dma_buf_end_cpu_access(dmabuf, direction);
                else
                        ret = dma_buf_begin_cpu_access(dmabuf, direction);

                return ret;

        case DMA_BUF_SET_NAME_A:
        case DMA_BUF_SET_NAME_B:
                return dma_buf_set_name(dmabuf, (const char __user *)arg);

        default:
                return -ENOTTY;
        }
}

static void dma_buf_show_fdinfo(struct seq_file *m, struct file *file)
{
        struct dma_buf *dmabuf = file->private_data;

        seq_printf(m, "size:\t%zu\n", dmabuf->size);
        /* Don't count the temporary reference taken inside procfs seq_show */
        seq_printf(m, "count:\t%ld\n", file_count(dmabuf->file) - 1);
        seq_printf(m, "exp_name:\t%s\n", dmabuf->exp_name);
        spin_lock(&dmabuf->name_lock);
        if (dmabuf->name)
                seq_printf(m, "name:\t%s\n", dmabuf->name);
        spin_unlock(&dmabuf->name_lock);
}

static const struct file_operations dma_buf_fops = {
        .release        = dma_buf_file_release,
        .mmap           = dma_buf_mmap_internal,
        .llseek         = dma_buf_llseek,
        .poll           = dma_buf_poll,
        .unlocked_ioctl = dma_buf_ioctl,
        .compat_ioctl   = compat_ptr_ioctl,
        .show_fdinfo    = dma_buf_show_fdinfo,
};

/*
 * is_dma_buf_file - Check if struct file* is associated with dma_buf
 */
static inline int is_dma_buf_file(struct file *file)
{
        return file->f_op == &dma_buf_fops;
}

static struct file *dma_buf_getfile(struct dma_buf *dmabuf, int flags)
{
        struct file *file;
        struct inode *inode = alloc_anon_inode(dma_buf_mnt->mnt_sb);

        if (IS_ERR(inode))
                return ERR_CAST(inode);

        inode->i_size = dmabuf->size;
        inode_set_bytes(inode, dmabuf->size);

        file = alloc_file_pseudo(inode, dma_buf_mnt, "dmabuf",
                                 flags, &dma_buf_fops);
        if (IS_ERR(file))
                goto err_alloc_file;
        file->f_flags = flags & (O_ACCMODE | O_NONBLOCK);
        file->private_data = dmabuf;
        file->f_path.dentry->d_fsdata = dmabuf;

        return file;

err_alloc_file:
        iput(inode);
        return file;
}

/**
 * DOC: dma buf device access
 *
 * For device DMA access to a shared DMA buffer the usual sequence of operations
 * is fairly simple:
 *
 * 1. The exporter defines its exporter instance using
 *    DEFINE_DMA_BUF_EXPORT_INFO() and calls dma_buf_export() to wrap a private
 *    buffer object into a &dma_buf. It then exports that &dma_buf to userspace
 *    as a file descriptor by calling dma_buf_fd().
 *
 * 2. Userspace passes this file descriptor to all drivers it wants this buffer
 *    to share with: First the file descriptor is converted to a &dma_buf using
 *    dma_buf_get(). Then the buffer is attached to the device using
 *    dma_buf_attach().
 *
 *    Up to this stage the exporter is still free to migrate or reallocate the
 *    backing storage.
 *
 * 3. Once the buffer is attached to all devices userspace can initiate DMA
 *    access to the shared buffer. In the kernel this is done by calling
 *    dma_buf_map_attachment() and dma_buf_unmap_attachment().
 *
 * 4. Once a driver is done with a shared buffer it needs to call
 *    dma_buf_detach() (after cleaning up any mappings) and then release the
 *    reference acquired with dma_buf_get() by calling dma_buf_put().
 *
 * For the detailed semantics exporters are expected to implement see
 * &dma_buf_ops. A minimal importer-side sketch of steps 2-4 follows.
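 *
 * The sketch below is hypothetical driver code; the fd, dev and the direction
 * choice are assumptions for illustration, and all error handling is elided::
 *
 *     struct dma_buf *dmabuf = dma_buf_get(fd);
 *     struct dma_buf_attachment *attach;
 *     struct sg_table *sgt;
 *
 *     attach = dma_buf_attach(dmabuf, dev);
 *     sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
 *
 *     // program the device using the addresses/lengths in sgt ...
 *
 *     dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
 *     dma_buf_detach(dmabuf, attach);
 *     dma_buf_put(dmabuf);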
 */

/**
 * dma_buf_export - Creates a new dma_buf, and associates an anon file
 * with this buffer, so it can be exported.
 * Also connects the allocator-specific data and ops to the buffer.
 * Additionally, provides a name string for the exporter; useful in debugging.
 *
 * @exp_info: [in] holds all the export related information provided
 *                 by the exporter. See &struct dma_buf_export_info
 *                 for further details.
 *
 * On success, returns a newly created struct dma_buf object, which wraps the
 * supplied private data and operations for struct dma_buf_ops. On missing ops
 * or an error in allocating struct dma_buf, returns a negative error wrapped
 * in a pointer.
 *
 * For most cases the easiest way to create @exp_info is through the
 * %DEFINE_DMA_BUF_EXPORT_INFO macro.
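 *
 * A minimal exporter-side sketch (hypothetical: my_dmabuf_ops, size and
 * my_priv stand in for the exporter's own ops, buffer size and private
 * data)::
 *
 *     DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 *     struct dma_buf *dmabuf;
 *
 *     exp_info.ops = &my_dmabuf_ops;
 *     exp_info.size = size;
 *     exp_info.flags = O_CLOEXEC;
 *     exp_info.priv = my_priv;
 *     dmabuf = dma_buf_export(&exp_info);
 *     if (IS_ERR(dmabuf))
 *             return PTR_ERR(dmabuf);
 *     fd = dma_buf_fd(dmabuf, O_CLOEXEC);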
 */
struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
{
        struct dma_buf *dmabuf;
        struct dma_resv *resv = exp_info->resv;
        struct file *file;
        size_t alloc_size = sizeof(struct dma_buf);
        int ret;

        if (!exp_info->resv)
                alloc_size += sizeof(struct dma_resv);
        else
                /* prevent &dma_buf[1] == dma_buf->resv */
                alloc_size += 1;

        if (WARN_ON(!exp_info->priv
                    || !exp_info->ops
                    || !exp_info->ops->map_dma_buf
                    || !exp_info->ops->unmap_dma_buf
                    || !exp_info->ops->release)) {
                return ERR_PTR(-EINVAL);
        }

        if (WARN_ON(exp_info->ops->cache_sgt_mapping &&
                    (exp_info->ops->pin || exp_info->ops->unpin)))
                return ERR_PTR(-EINVAL);

        if (WARN_ON(!exp_info->ops->pin != !exp_info->ops->unpin))
                return ERR_PTR(-EINVAL);

        if (!try_module_get(exp_info->owner))
                return ERR_PTR(-ENOENT);

        dmabuf = kzalloc(alloc_size, GFP_KERNEL);
        if (!dmabuf) {
                ret = -ENOMEM;
                goto err_module;
        }

        dmabuf->priv = exp_info->priv;
        dmabuf->ops = exp_info->ops;
        dmabuf->size = exp_info->size;
        dmabuf->exp_name = exp_info->exp_name;
        dmabuf->owner = exp_info->owner;
        spin_lock_init(&dmabuf->name_lock);
        init_waitqueue_head(&dmabuf->poll);
        dmabuf->cb_excl.poll = dmabuf->cb_shared.poll = &dmabuf->poll;
        dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0;

        if (!resv) {
                resv = (struct dma_resv *)&dmabuf[1];
                dma_resv_init(resv);
        }
        dmabuf->resv = resv;

        file = dma_buf_getfile(dmabuf, exp_info->flags);
        if (IS_ERR(file)) {
                ret = PTR_ERR(file);
                goto err_dmabuf;
        }

        file->f_mode |= FMODE_LSEEK;
        dmabuf->file = file;

        mutex_init(&dmabuf->lock);
        INIT_LIST_HEAD(&dmabuf->attachments);

        mutex_lock(&db_list.lock);
        list_add(&dmabuf->list_node, &db_list.head);
        mutex_unlock(&db_list.lock);

        return dmabuf;

err_dmabuf:
        kfree(dmabuf);
err_module:
        module_put(exp_info->owner);
        return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(dma_buf_export);

/**
 * dma_buf_fd - returns a file descriptor for the given struct dma_buf
 * @dmabuf: [in] pointer to dma_buf for which fd is required.
 * @flags:  [in] flags to give to fd
 *
 * On success, returns an associated 'fd'. Else, returns error.
 */
int dma_buf_fd(struct dma_buf *dmabuf, int flags)
{
        int fd;

        if (!dmabuf || !dmabuf->file)
                return -EINVAL;

        fd = get_unused_fd_flags(flags);
        if (fd < 0)
                return fd;

        fd_install(fd, dmabuf->file);

        return fd;
}
EXPORT_SYMBOL_GPL(dma_buf_fd);

/**
 * dma_buf_get - returns the struct dma_buf related to an fd
 * @fd: [in] fd associated with the struct dma_buf to be returned
 *
 * On success, returns the struct dma_buf associated with an fd; uses
 * the file's refcounting done by fget to increase the refcount. Returns
 * ERR_PTR otherwise.
 */
struct dma_buf *dma_buf_get(int fd)
{
        struct file *file;

        file = fget(fd);

        if (!file)
                return ERR_PTR(-EBADF);

        if (!is_dma_buf_file(file)) {
                fput(file);
                return ERR_PTR(-EINVAL);
        }

        return file->private_data;
}
EXPORT_SYMBOL_GPL(dma_buf_get);

/**
 * dma_buf_put - decreases refcount of the buffer
 * @dmabuf: [in] buffer to reduce refcount of
 *
 * Uses the file's refcounting done implicitly by fput().
 *
 * If, as a result of this call, the refcount becomes 0, the 'release' file
 * operation related to this fd is called. It calls &dma_buf_ops.release vfunc
 * in turn, and frees the memory allocated for dmabuf when exported.
 */
void dma_buf_put(struct dma_buf *dmabuf)
{
        if (WARN_ON(!dmabuf || !dmabuf->file))
                return;

        fput(dmabuf->file);
}
EXPORT_SYMBOL_GPL(dma_buf_put);

static void mangle_sg_table(struct sg_table *sg_table)
{
#ifdef CONFIG_DMABUF_DEBUG
        int i;
        struct scatterlist *sg;

        /*
         * To catch abuse of the underlying struct page by importers, mix
         * up the bits, but take care to preserve the low SG_ bits to
         * not corrupt the sgt. The mixing is undone in __unmap_dma_buf
         * before passing the sgt back to the exporter.
         */
        for_each_sgtable_sg(sg_table, sg, i)
                sg->page_link ^= ~0xffUL;
#endif
}

static struct sg_table *__map_dma_buf(struct dma_buf_attachment *attach,
                                      enum dma_data_direction direction)
{
        struct sg_table *sg_table;

        sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);

        if (!IS_ERR_OR_NULL(sg_table))
                mangle_sg_table(sg_table);

        return sg_table;
}

/**
 * dma_buf_dynamic_attach - Add the device to dma_buf's attachments list
 * @dmabuf:        [in] buffer to attach device to.
 * @dev:           [in] device to be attached.
 * @importer_ops:  [in] importer operations for the attachment
 * @importer_priv: [in] importer private pointer for the attachment
 *
 * Attachments must be cleaned up by calling dma_buf_detach().
 *
 * Optionally this calls &dma_buf_ops.attach to allow device-specific attach
 * functionality.
 *
 * Returns:
 *
 * A pointer to the newly created &dma_buf_attachment on success, or a negative
 * error code wrapped into a pointer on failure.
 *
 * Note that this can fail if the backing storage of @dmabuf is in a place not
 * accessible to @dev, and cannot be moved to a more suitable place. This is
 * indicated with the error code -EBUSY.
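 *
 * A minimal sketch of a dynamic importer's attach (hypothetical;
 * my_move_notify and my_priv are placeholders for the importer's own
 * callback and private data)::
 *
 *     static const struct dma_buf_attach_ops my_importer_ops = {
 *             .allow_peer2peer = true,
 *             .move_notify = my_move_notify,
 *     };
 *
 *     attach = dma_buf_dynamic_attach(dmabuf, dev, &my_importer_ops, my_priv);
 *     if (IS_ERR(attach))
 *             return PTR_ERR(attach);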
Sumit Semwald15bd7e2011-12-26 14:53:15 +0530718 */
Christian König15fd5522018-07-03 16:42:26 +0200719struct dma_buf_attachment *
720dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
Christian Königbb42df42018-07-03 16:42:26 +0200721 const struct dma_buf_attach_ops *importer_ops,
722 void *importer_priv)
Sumit Semwald15bd7e2011-12-26 14:53:15 +0530723{
724 struct dma_buf_attachment *attach;
725 int ret;
726
Laurent Pinchartd1aa06a2012-01-26 12:27:23 +0100727 if (WARN_ON(!dmabuf || !dev))
Sumit Semwald15bd7e2011-12-26 14:53:15 +0530728 return ERR_PTR(-EINVAL);
729
Christian König4981cdb2020-02-19 13:32:43 +0100730 if (WARN_ON(importer_ops && !importer_ops->move_notify))
731 return ERR_PTR(-EINVAL);
732
Markus Elfringdb7942b2017-05-08 10:50:09 +0200733 attach = kzalloc(sizeof(*attach), GFP_KERNEL);
Markus Elfring34d84ec2017-05-08 10:54:17 +0200734 if (!attach)
Laurent Pincharta9fbc3b2012-01-26 12:27:24 +0100735 return ERR_PTR(-ENOMEM);
Sumit Semwald15bd7e2011-12-26 14:53:15 +0530736
Sumit Semwald15bd7e2011-12-26 14:53:15 +0530737 attach->dev = dev;
738 attach->dmabuf = dmabuf;
Christian König09606b52018-03-22 17:09:42 +0100739 if (importer_ops)
740 attach->peer2peer = importer_ops->allow_peer2peer;
Christian Königbb42df42018-07-03 16:42:26 +0200741 attach->importer_ops = importer_ops;
742 attach->importer_priv = importer_priv;
Laurent Pinchart2ed92012012-01-26 12:27:25 +0100743
Sumit Semwald15bd7e2011-12-26 14:53:15 +0530744 if (dmabuf->ops->attach) {
Christian Königa19741e2018-05-28 11:47:52 +0200745 ret = dmabuf->ops->attach(dmabuf, attach);
Sumit Semwald15bd7e2011-12-26 14:53:15 +0530746 if (ret)
747 goto err_attach;
748 }
Christian König15fd5522018-07-03 16:42:26 +0200749 dma_resv_lock(dmabuf->resv, NULL);
Sumit Semwald15bd7e2011-12-26 14:53:15 +0530750 list_add(&attach->node, &dmabuf->attachments);
Christian König15fd5522018-07-03 16:42:26 +0200751 dma_resv_unlock(dmabuf->resv);
Sumit Semwald15bd7e2011-12-26 14:53:15 +0530752
Christian König15fd5522018-07-03 16:42:26 +0200753 /* When either the importer or the exporter can't handle dynamic
754 * mappings we cache the mapping here to avoid issues with the
755 * reservation object lock.
756 */
757 if (dma_buf_attachment_is_dynamic(attach) !=
758 dma_buf_is_dynamic(dmabuf)) {
759 struct sg_table *sgt;
760
Christian Königbb42df42018-07-03 16:42:26 +0200761 if (dma_buf_is_dynamic(attach->dmabuf)) {
Christian König15fd5522018-07-03 16:42:26 +0200762 dma_resv_lock(attach->dmabuf->resv, NULL);
Christian König7e008b02021-05-17 13:20:17 +0200763 ret = dmabuf->ops->pin(attach);
Christian Königbb42df42018-07-03 16:42:26 +0200764 if (ret)
765 goto err_unlock;
766 }
Christian König15fd5522018-07-03 16:42:26 +0200767
Daniel Vetter84335672021-01-15 17:47:39 +0100768 sgt = __map_dma_buf(attach, DMA_BIDIRECTIONAL);
Christian König15fd5522018-07-03 16:42:26 +0200769 if (!sgt)
770 sgt = ERR_PTR(-ENOMEM);
771 if (IS_ERR(sgt)) {
772 ret = PTR_ERR(sgt);
Christian Königbb42df42018-07-03 16:42:26 +0200773 goto err_unpin;
Christian König15fd5522018-07-03 16:42:26 +0200774 }
775 if (dma_buf_is_dynamic(attach->dmabuf))
776 dma_resv_unlock(attach->dmabuf->resv);
777 attach->sgt = sgt;
778 attach->dir = DMA_BIDIRECTIONAL;
779 }
780
Sumit Semwald15bd7e2011-12-26 14:53:15 +0530781 return attach;
782
Sumit Semwald15bd7e2011-12-26 14:53:15 +0530783err_attach:
784 kfree(attach);
Sumit Semwald15bd7e2011-12-26 14:53:15 +0530785 return ERR_PTR(ret);
Christian König15fd5522018-07-03 16:42:26 +0200786
Christian Königbb42df42018-07-03 16:42:26 +0200787err_unpin:
788 if (dma_buf_is_dynamic(attach->dmabuf))
Christian König7e008b02021-05-17 13:20:17 +0200789 dmabuf->ops->unpin(attach);
Christian Königbb42df42018-07-03 16:42:26 +0200790
Christian König15fd5522018-07-03 16:42:26 +0200791err_unlock:
792 if (dma_buf_is_dynamic(attach->dmabuf))
793 dma_resv_unlock(attach->dmabuf->resv);
794
795 dma_buf_detach(dmabuf, attach);
796 return ERR_PTR(ret);
797}
798EXPORT_SYMBOL_GPL(dma_buf_dynamic_attach);
799
800/**
801 * dma_buf_attach - Wrapper for dma_buf_dynamic_attach
802 * @dmabuf: [in] buffer to attach device to.
803 * @dev: [in] device to be attached.
804 *
805 * Wrapper to call dma_buf_dynamic_attach() for drivers which still use a static
806 * mapping.
807 */
808struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
809 struct device *dev)
810{
Christian Königbb42df42018-07-03 16:42:26 +0200811 return dma_buf_dynamic_attach(dmabuf, dev, NULL, NULL);
Sumit Semwald15bd7e2011-12-26 14:53:15 +0530812}
813EXPORT_SYMBOL_GPL(dma_buf_attach);
814
Daniel Vetter84335672021-01-15 17:47:39 +0100815static void __unmap_dma_buf(struct dma_buf_attachment *attach,
816 struct sg_table *sg_table,
817 enum dma_data_direction direction)
818{
819 /* uses XOR, hence this unmangles */
820 mangle_sg_table(sg_table);
821
822 attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
823}
824
/**
 * dma_buf_detach - Remove the given attachment from dmabuf's attachments list
 * @dmabuf: [in] buffer to detach from.
 * @attach: [in] attachment to be detached; it is freed after this call.
 *
 * Clean up a device attachment obtained by calling dma_buf_attach().
 *
 * Optionally this calls &dma_buf_ops.detach for device-specific detach.
 */
void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
{
        if (WARN_ON(!dmabuf || !attach))
                return;

        if (attach->sgt) {
                if (dma_buf_is_dynamic(attach->dmabuf))
                        dma_resv_lock(attach->dmabuf->resv, NULL);

                __unmap_dma_buf(attach, attach->sgt, attach->dir);

                if (dma_buf_is_dynamic(attach->dmabuf)) {
                        dmabuf->ops->unpin(attach);
                        dma_resv_unlock(attach->dmabuf->resv);
                }
        }

        dma_resv_lock(dmabuf->resv, NULL);
        list_del(&attach->node);
        dma_resv_unlock(dmabuf->resv);
        if (dmabuf->ops->detach)
                dmabuf->ops->detach(dmabuf, attach);

        kfree(attach);
}
EXPORT_SYMBOL_GPL(dma_buf_detach);

/**
 * dma_buf_pin - Lock down the DMA-buf
 * @attach: [in] attachment which should be pinned
 *
 * Only dynamic importers (who set up @attach with dma_buf_dynamic_attach()) may
 * call this, and only for limited use cases like scanout and not for temporary
 * pin operations. It is not permitted to allow userspace to pin arbitrary
 * amounts of buffers through this interface.
 *
 * Buffers must be unpinned by calling dma_buf_unpin().
 *
 * Returns:
 * 0 on success, negative error code on failure.
 */
int dma_buf_pin(struct dma_buf_attachment *attach)
{
        struct dma_buf *dmabuf = attach->dmabuf;
        int ret = 0;

        WARN_ON(!dma_buf_attachment_is_dynamic(attach));

        dma_resv_assert_held(dmabuf->resv);

        if (dmabuf->ops->pin)
                ret = dmabuf->ops->pin(attach);

        return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_pin);

/**
 * dma_buf_unpin - Unpin a DMA-buf
 * @attach: [in] attachment which should be unpinned
 *
 * This unpins a buffer pinned by dma_buf_pin() and allows the exporter to move
 * any mapping of @attach again and inform the importer through
 * &dma_buf_attach_ops.move_notify.
 */
void dma_buf_unpin(struct dma_buf_attachment *attach)
{
        struct dma_buf *dmabuf = attach->dmabuf;

        WARN_ON(!dma_buf_attachment_is_dynamic(attach));

        dma_resv_assert_held(dmabuf->resv);

        if (dmabuf->ops->unpin)
                dmabuf->ops->unpin(attach);
}
EXPORT_SYMBOL_GPL(dma_buf_unpin);

912/**
Sumit Semwald15bd7e2011-12-26 14:53:15 +0530913 * dma_buf_map_attachment - Returns the scatterlist table of the attachment;
914 * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
915 * dma_buf_ops.
916 * @attach: [in] attachment whose scatterlist is to be returned
917 * @direction: [in] direction of DMA transfer
918 *
Colin Crossfee0c542013-12-20 16:43:50 -0800919 * Returns sg_table containing the scatterlist to be returned; returns ERR_PTR
Daniel Vetter2904a8c2016-12-09 19:53:07 +0100920 * on error. May return -EINTR if it is interrupted by a signal.
921 *
Jianxin Xiongac80cd12020-10-14 09:16:01 -0700922 * On success, the DMA addresses and lengths in the returned scatterlist are
923 * PAGE_SIZE aligned.
924 *
Liviu Dudauc1387822017-11-01 14:06:30 +0000925 * A mapping must be unmapped by using dma_buf_unmap_attachment(). Note that
Daniel Vetter2904a8c2016-12-09 19:53:07 +0100926 * the underlying backing storage is pinned for as long as a mapping exists,
927 * therefore users/importers should not hold onto a mapping for undue amounts of
928 * time.
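 *
 * A minimal importer sketch (hypothetical: a static importer, so no
 * reservation locking is shown, and program_device() stands in for whatever
 * actually hands the addresses to the hardware)::
 *
 *     sgt = dma_buf_map_attachment(attach, DMA_FROM_DEVICE);
 *     if (IS_ERR(sgt))
 *             return PTR_ERR(sgt);
 *     for_each_sgtable_dma_sg(sgt, sg, i)
 *             program_device(sg_dma_address(sg), sg_dma_len(sg));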
Sumit Semwald15bd7e2011-12-26 14:53:15 +0530929 */
930struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
931 enum dma_data_direction direction)
932{
Colin Ian King531beb02017-09-15 00:05:16 +0100933 struct sg_table *sg_table;
Christian Königbb42df42018-07-03 16:42:26 +0200934 int r;
Sumit Semwald15bd7e2011-12-26 14:53:15 +0530935
936 might_sleep();
937
Laurent Pinchartd1aa06a2012-01-26 12:27:23 +0100938 if (WARN_ON(!attach || !attach->dmabuf))
Sumit Semwald15bd7e2011-12-26 14:53:15 +0530939 return ERR_PTR(-EINVAL);
940
Christian König15fd5522018-07-03 16:42:26 +0200941 if (dma_buf_attachment_is_dynamic(attach))
942 dma_resv_assert_held(attach->dmabuf->resv);
943
Christian Königf13e1432018-07-03 16:42:26 +0200944 if (attach->sgt) {
945 /*
946 * Two mappings with different directions for the same
947 * attachment are not allowed.
948 */
949 if (attach->dir != direction &&
950 attach->dir != DMA_BIDIRECTIONAL)
951 return ERR_PTR(-EBUSY);
952
953 return attach->sgt;
954 }
955
Christian Königbb42df42018-07-03 16:42:26 +0200956 if (dma_buf_is_dynamic(attach->dmabuf)) {
Christian König15fd5522018-07-03 16:42:26 +0200957 dma_resv_assert_held(attach->dmabuf->resv);
Christian König4981cdb2020-02-19 13:32:43 +0100958 if (!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) {
Christian König7e008b02021-05-17 13:20:17 +0200959 r = attach->dmabuf->ops->pin(attach);
Christian Königbb42df42018-07-03 16:42:26 +0200960 if (r)
961 return ERR_PTR(r);
962 }
963 }
Christian König15fd5522018-07-03 16:42:26 +0200964
Daniel Vetter84335672021-01-15 17:47:39 +0100965 sg_table = __map_dma_buf(attach, direction);
Colin Crossfee0c542013-12-20 16:43:50 -0800966 if (!sg_table)
967 sg_table = ERR_PTR(-ENOMEM);
Sumit Semwald15bd7e2011-12-26 14:53:15 +0530968
Christian Königbb42df42018-07-03 16:42:26 +0200969 if (IS_ERR(sg_table) && dma_buf_is_dynamic(attach->dmabuf) &&
Christian König4981cdb2020-02-19 13:32:43 +0100970 !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
Christian König7e008b02021-05-17 13:20:17 +0200971 attach->dmabuf->ops->unpin(attach);
Christian Königbb42df42018-07-03 16:42:26 +0200972
Christian Königf13e1432018-07-03 16:42:26 +0200973 if (!IS_ERR(sg_table) && attach->dmabuf->ops->cache_sgt_mapping) {
974 attach->sgt = sg_table;
975 attach->dir = direction;
976 }
977
Jianxin Xiongac80cd12020-10-14 09:16:01 -0700978#ifdef CONFIG_DMA_API_DEBUG
Jianxin Xiong00efd652020-11-02 19:51:58 -0800979 if (!IS_ERR(sg_table)) {
Jianxin Xiongac80cd12020-10-14 09:16:01 -0700980 struct scatterlist *sg;
981 u64 addr;
982 int len;
983 int i;
984
985 for_each_sgtable_dma_sg(sg_table, sg, i) {
986 addr = sg_dma_address(sg);
987 len = sg_dma_len(sg);
988 if (!PAGE_ALIGNED(addr) || !PAGE_ALIGNED(len)) {
989 pr_debug("%s: addr %llx or len %x is not page aligned!\n",
990 __func__, addr, len);
991 }
992 }
993 }
994#endif /* CONFIG_DMA_API_DEBUG */
995
Sumit Semwald15bd7e2011-12-26 14:53:15 +0530996 return sg_table;
997}
998EXPORT_SYMBOL_GPL(dma_buf_map_attachment);

/**
 * dma_buf_unmap_attachment - unmaps and decreases usecount of the buffer; might
 * deallocate the associated scatterlist. Is a wrapper for unmap_dma_buf() of
 * dma_buf_ops.
 * @attach:    [in] attachment to unmap buffer from
 * @sg_table:  [in] scatterlist info of the buffer to unmap
 * @direction: [in] direction of DMA transfer
 *
 * This unmaps a DMA mapping for @attach obtained by dma_buf_map_attachment().
 */
void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
                              struct sg_table *sg_table,
                              enum dma_data_direction direction)
{
        might_sleep();

        if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
                return;

        if (dma_buf_attachment_is_dynamic(attach))
                dma_resv_assert_held(attach->dmabuf->resv);

        if (attach->sgt == sg_table)
                return;

        if (dma_buf_is_dynamic(attach->dmabuf))
                dma_resv_assert_held(attach->dmabuf->resv);

        __unmap_dma_buf(attach, sg_table, direction);

        if (dma_buf_is_dynamic(attach->dmabuf) &&
            !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
                dma_buf_unpin(attach);
}
EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);

/**
 * dma_buf_move_notify - notify attachments that DMA-buf is moving
 *
 * @dmabuf: [in] buffer which is moving
 *
 * Informs all attachments that they need to destroy and recreate all their
 * mappings.
 */
void dma_buf_move_notify(struct dma_buf *dmabuf)
{
        struct dma_buf_attachment *attach;

        dma_resv_assert_held(dmabuf->resv);

        list_for_each_entry(attach, &dmabuf->attachments, node)
                if (attach->importer_ops)
                        attach->importer_ops->move_notify(attach);
}
EXPORT_SYMBOL_GPL(dma_buf_move_notify);

/**
 * DOC: cpu access
 *
 * There are multiple reasons for supporting CPU access to a dma buffer object:
 *
 * - Fallback operations in the kernel, for example when a device is connected
 *   over USB and the kernel needs to shuffle the data around first before
 *   sending it away. Cache coherency is handled by bracketing any transactions
 *   with calls to dma_buf_begin_cpu_access() and dma_buf_end_cpu_access().
 *
 *   Since most kernel-internal dma-buf accesses need the entire buffer, a
 *   vmap interface is introduced. Note that on very old 32-bit architectures
 *   vmalloc space might be limited and result in vmap calls failing.
 *
 *   Interfaces::
 *
 *      void \*dma_buf_vmap(struct dma_buf \*dmabuf)
 *      void dma_buf_vunmap(struct dma_buf \*dmabuf, void \*vaddr)
 *
 *   The vmap call can fail if there is no vmap support in the exporter, or if
 *   it runs out of vmalloc space. Note that the dma-buf layer keeps a reference
 *   count for all vmap access and calls down into the exporter's vmap function
 *   only when no vmapping exists, and only unmaps it once. Protection against
 *   concurrent vmap/vunmap calls is provided by taking the &dma_buf.lock mutex.
 *
 * - For full compatibility on the importer side with existing userspace
 *   interfaces, which might already support mmap'ing buffers. This is needed in
 *   many processing pipelines (e.g. feeding a software rendered image into a
 *   hardware pipeline, thumbnail creation, snapshots, ...). Also, Android's ION
 *   framework already supported this and for DMA buffer file descriptors to
 *   replace ION buffers mmap support was needed.
 *
 *   There are no special interfaces, userspace simply calls mmap on the dma-buf
 *   fd. But like for CPU access there's a need to bracket the actual access,
 *   which is handled by the ioctl (DMA_BUF_IOCTL_SYNC). Note that
 *   DMA_BUF_IOCTL_SYNC can fail with -EAGAIN or -EINTR, in which case it must
 *   be restarted.
 *
 *   Some systems might need some sort of cache coherency management e.g. when
 *   CPU and GPU domains are being accessed through dma-buf at the same time.
 *   To circumvent this problem there are begin/end coherency markers, that
 *   forward directly to existing dma-buf device drivers vfunc hooks. Userspace
 *   can make use of those markers through the DMA_BUF_IOCTL_SYNC ioctl. The
 *   sequence would be used like following:
 *
 *     - mmap dma-buf fd
 *     - for each drawing/upload cycle in CPU 1. SYNC_START ioctl, 2. read/write
 *       to mmap area 3. SYNC_END ioctl. This can be repeated as often as you
 *       want (with the new data being consumed by say the GPU or the scanout
 *       device)
 *     - munmap once you don't need the buffer any more
 *
 *   For correctness and optimal performance, it is always required to use
 *   SYNC_START and SYNC_END before and after, respectively, when accessing the
 *   mapped address, as in the sketch below. Userspace cannot rely on coherent
 *   access, even when there are systems where it just works without calling
 *   these ioctls.
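 *
 *   A minimal userspace sketch of one such cycle (hypothetical dmabuf_fd and
 *   mapped pointer; error handling and -EAGAIN/-EINTR restarts elided)::
 *
 *       struct dma_buf_sync sync = { .flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_WRITE };
 *
 *       ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
 *       memcpy(mapped, src, len);        // CPU writes to the mmap'ed area
 *       sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_WRITE;
 *       ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);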
 *
 * - And as a CPU fallback in userspace processing pipelines.
 *
 *   Similar to the motivation for kernel cpu access it is again important that
 *   the userspace code of a given importing subsystem can use the same
 *   interfaces with an imported dma-buf buffer object as with a native buffer
 *   object. This is especially important for drm where the userspace part of
 *   contemporary OpenGL, X, and other drivers is huge, and reworking them to
 *   use a different way to mmap a buffer would be rather invasive.
 *
 *   The assumption in the current dma-buf interfaces is that redirecting the
 *   initial mmap is all that's needed. A survey of some of the existing
 *   subsystems shows that no driver seems to do any nefarious thing like
 *   syncing up with outstanding asynchronous processing on the device or
 *   allocating special resources at fault time. So hopefully this is good
 *   enough, since adding interfaces to intercept pagefaults and allow pte
 *   shootdowns would increase the complexity quite a bit.
 *
 *   Interface::
 *
 *      int dma_buf_mmap(struct dma_buf \*, struct vm_area_struct \*,
 *                       unsigned long);
 *
 *   If the importing subsystem simply provides a special-purpose mmap call to
 *   set up a mapping in userspace, calling do_mmap with &dma_buf.file will
 *   equally achieve that for a dma-buf object.
 */

static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
                                      enum dma_data_direction direction)
{
        bool write = (direction == DMA_BIDIRECTIONAL ||
                      direction == DMA_TO_DEVICE);
        struct dma_resv *resv = dmabuf->resv;
        long ret;

        /* Wait on any implicit rendering fences */
        ret = dma_resv_wait_timeout_rcu(resv, write, true,
                                        MAX_SCHEDULE_TIMEOUT);
        if (ret < 0)
                return ret;

        return 0;
}

/**
 * dma_buf_begin_cpu_access - Must be called before accessing a dma_buf from the
 * cpu in the kernel context. Calls begin_cpu_access to allow exporter-specific
 * preparations. Coherency is only guaranteed in the specified range for the
 * specified access direction.
 * @dmabuf:    [in] buffer to prepare cpu access for.
 * @direction: [in] direction of cpu access.
 *
 * After the cpu access is complete the caller should call
 * dma_buf_end_cpu_access(). Only when cpu access is bracketed by both calls is
 * it guaranteed to be coherent with other DMA access.
 *
 * This function will also wait for any DMA transactions tracked through
 * implicit synchronization in &dma_buf.resv. For DMA transactions with explicit
 * synchronization this function will only ensure cache coherency, callers must
 * ensure synchronization with such DMA transactions on their own.
 *
 * Can return negative error values, returns 0 on success.
1176 */
Tiago Vignatti831e9da2015-12-22 19:36:45 -02001177int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
Daniel Vetterfc130202012-03-20 00:02:37 +01001178 enum dma_data_direction direction)
1179{
1180 int ret = 0;
1181
1182 if (WARN_ON(!dmabuf))
1183 return -EINVAL;
1184
Daniel Vetter8ccf0a22020-12-14 18:16:22 +01001185 might_lock(&dmabuf->resv->lock.base);
1186
Daniel Vetterfc130202012-03-20 00:02:37 +01001187 if (dmabuf->ops->begin_cpu_access)
Tiago Vignatti831e9da2015-12-22 19:36:45 -02001188 ret = dmabuf->ops->begin_cpu_access(dmabuf, direction);
Daniel Vetterfc130202012-03-20 00:02:37 +01001189
Chris Wilsonae4e46b2016-08-15 16:42:18 +01001190 /* Ensure that all fences are waited upon - but we first allow
1191 * the native handler the chance to do so more efficiently if it
1192 * chooses. A double invocation here will be reasonably cheap no-op.
1193 */
1194 if (ret == 0)
1195 ret = __dma_buf_begin_cpu_access(dmabuf, direction);
1196
Daniel Vetterfc130202012-03-20 00:02:37 +01001197 return ret;
1198}
1199EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access);

/**
 * dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the
 * cpu in the kernel context. Calls end_cpu_access to allow exporter-specific
 * actions. Coherency is only guaranteed in the specified range for the
 * specified access direction.
 * @dmabuf:	[in]	buffer to complete cpu access for.
 * @direction:	[in]	direction of access.
 *
 * This terminates CPU access started with dma_buf_begin_cpu_access().
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
			   enum dma_data_direction direction)
{
	int ret = 0;

	WARN_ON(!dmabuf);

	might_lock(&dmabuf->resv->lock.base);

	if (dmabuf->ops->end_cpu_access)
		ret = dmabuf->ops->end_cpu_access(dmabuf, direction);

	return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);
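
/*
 * Hypothetical usage sketch (illustration only, not part of the dma-buf
 * core): a kernel-context reader must bracket its CPU access with the two
 * calls above so the exporter can manage coherency. The variable names and
 * the DMA_FROM_DEVICE direction are assumptions for the example.
 *
 *	int ret;
 *
 *	ret = dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
 *	if (ret)
 *		return ret;
 *
 *	... CPU reads of the buffer contents ...
 *
 *	ret = dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
 */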
1228
Daniel Vetter4c785132012-04-24 14:38:52 +05301229
1230/**
1231 * dma_buf_mmap - Setup up a userspace mmap with the given vma
Sumit Semwal12c47272012-05-23 15:27:40 +05301232 * @dmabuf: [in] buffer that should back the vma
Daniel Vetter4c785132012-04-24 14:38:52 +05301233 * @vma: [in] vma for the mmap
1234 * @pgoff: [in] offset in pages where this mmap should start within the
Jagan Teki51366292015-05-21 01:09:31 +05301235 * dma-buf buffer.
Daniel Vetter4c785132012-04-24 14:38:52 +05301236 *
1237 * This function adjusts the passed in vma so that it points at the file of the
Javier Martinez Canillasecf1dba2014-04-10 01:30:05 +02001238 * dma_buf operation. It also adjusts the starting pgoff and does bounds
Daniel Vetter4c785132012-04-24 14:38:52 +05301239 * checking on the size of the vma. Then it calls the exporters mmap function to
1240 * set up the mapping.
1241 *
1242 * Can return negative error values, returns 0 on success.
1243 */
1244int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
1245 unsigned long pgoff)
1246{
1247 if (WARN_ON(!dmabuf || !vma))
1248 return -EINVAL;
1249
Andrew F. Davise3a9d6c2019-03-29 11:52:01 -05001250 /* check if buffer supports mmap */
1251 if (!dmabuf->ops->mmap)
1252 return -EINVAL;
1253
Daniel Vetter4c785132012-04-24 14:38:52 +05301254 /* check for offset overflow */
Muhammad Falak R Wanib02da6f2016-05-23 17:08:42 +05301255 if (pgoff + vma_pages(vma) < pgoff)
Daniel Vetter4c785132012-04-24 14:38:52 +05301256 return -EOVERFLOW;
1257
1258 /* check for overflowing the buffer's size */
Muhammad Falak R Wanib02da6f2016-05-23 17:08:42 +05301259 if (pgoff + vma_pages(vma) >
Daniel Vetter4c785132012-04-24 14:38:52 +05301260 dmabuf->size >> PAGE_SHIFT)
1261 return -EINVAL;
1262
1263 /* readjust the vma */
Christian König295992f2020-09-14 15:09:33 +02001264 vma_set_file(vma, dmabuf->file);
Daniel Vetter4c785132012-04-24 14:38:52 +05301265 vma->vm_pgoff = pgoff;
1266
Christian König1527f922020-10-09 15:08:55 +02001267 return dmabuf->ops->mmap(dmabuf, vma);
Daniel Vetter4c785132012-04-24 14:38:52 +05301268}
1269EXPORT_SYMBOL_GPL(dma_buf_mmap);
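
/*
 * Hypothetical usage sketch (illustration only): an importing driver can
 * forward its own mmap file operation to dma_buf_mmap() and let the
 * exporter set up the mapping. struct my_obj and its dmabuf member are
 * assumptions for the example.
 *
 *	static int my_drv_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct my_obj *obj = file->private_data;
 *
 *		return dma_buf_mmap(obj->dmabuf, vma, 0);
 *	}
 */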

/**
 * dma_buf_vmap - Create virtual mapping for the buffer object into kernel
 * address space. Same restrictions as for vmap and friends apply.
 * @dmabuf:	[in]	buffer to vmap
 * @map:	[out]	returns the vmap pointer
 *
 * This call may fail due to lack of virtual mapping address space.
 * These calls are optional in drivers. The intended use for them
 * is for mapping objects linearly in kernel space for frequently accessed
 * objects.
 *
 * To ensure coherency users must call dma_buf_begin_cpu_access() and
 * dma_buf_end_cpu_access() around any cpu access performed through this
 * mapping.
 *
 * Returns 0 on success, or a negative errno code otherwise.
 */
int dma_buf_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
{
	struct dma_buf_map ptr;
	int ret = 0;

	dma_buf_map_clear(map);

	if (WARN_ON(!dmabuf))
		return -EINVAL;

	if (!dmabuf->ops->vmap)
		return -EINVAL;

	mutex_lock(&dmabuf->lock);
	if (dmabuf->vmapping_counter) {
		dmabuf->vmapping_counter++;
		BUG_ON(dma_buf_map_is_null(&dmabuf->vmap_ptr));
		*map = dmabuf->vmap_ptr;
		goto out_unlock;
	}

	BUG_ON(dma_buf_map_is_set(&dmabuf->vmap_ptr));

	ret = dmabuf->ops->vmap(dmabuf, &ptr);
	if (WARN_ON_ONCE(ret))
		goto out_unlock;

	dmabuf->vmap_ptr = ptr;
	dmabuf->vmapping_counter = 1;

	*map = dmabuf->vmap_ptr;

out_unlock:
	mutex_unlock(&dmabuf->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_vmap);

/**
 * dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap.
 * @dmabuf:	[in]	buffer to vunmap
 * @map:	[in]	vmap pointer to vunmap
 */
void dma_buf_vunmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
{
	if (WARN_ON(!dmabuf))
		return;

	BUG_ON(dma_buf_map_is_null(&dmabuf->vmap_ptr));
	BUG_ON(dmabuf->vmapping_counter == 0);
	BUG_ON(!dma_buf_map_is_equal(&dmabuf->vmap_ptr, map));

	mutex_lock(&dmabuf->lock);
	if (--dmabuf->vmapping_counter == 0) {
		if (dmabuf->ops->vunmap)
			dmabuf->ops->vunmap(dmabuf, map);
		dma_buf_map_clear(&dmabuf->vmap_ptr);
	}
	mutex_unlock(&dmabuf->lock);
}
EXPORT_SYMBOL_GPL(dma_buf_vunmap);
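
/*
 * Hypothetical usage sketch (illustration only): map the buffer into the
 * kernel's address space, access it through the dma_buf_map wrapper while
 * bracketing the access as documented for dma_buf_vmap(), then drop the
 * mapping again. The DMA_FROM_DEVICE direction is an assumption for the
 * example.
 *
 *	struct dma_buf_map map;
 *	int ret;
 *
 *	ret = dma_buf_vmap(dmabuf, &map);
 *	if (ret)
 *		return ret;
 *
 *	ret = dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
 *	if (!ret) {
 *		... read via map.vaddr, or map.vaddr_iomem if map.is_iomem ...
 *		dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
 *	}
 *
 *	dma_buf_vunmap(dmabuf, &map);
 */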

#ifdef CONFIG_DEBUG_FS
static int dma_buf_debug_show(struct seq_file *s, void *unused)
{
	int ret;
	struct dma_buf *buf_obj;
	struct dma_buf_attachment *attach_obj;
	struct dma_resv *robj;
	struct dma_resv_list *fobj;
	struct dma_fence *fence;
	unsigned int seq;
	int count = 0, attach_count, shared_count, i;
	size_t size = 0;

	ret = mutex_lock_interruptible(&db_list.lock);

	if (ret)
		return ret;

	seq_puts(s, "\nDma-buf Objects:\n");
	seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\texp_name\t%-8s\n",
		   "size", "flags", "mode", "count", "ino");

	list_for_each_entry(buf_obj, &db_list.head, list_node) {

		ret = dma_resv_lock_interruptible(buf_obj->resv, NULL);
		if (ret)
			goto error_unlock;

		seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\t%08lu\t%s\n",
			   buf_obj->size,
			   buf_obj->file->f_flags, buf_obj->file->f_mode,
			   file_count(buf_obj->file),
			   buf_obj->exp_name,
			   file_inode(buf_obj->file)->i_ino,
			   buf_obj->name ?: "");

		robj = buf_obj->resv;
		while (true) {
			seq = read_seqcount_begin(&robj->seq);
			rcu_read_lock();
			fobj = rcu_dereference(robj->fence);
			shared_count = fobj ? fobj->shared_count : 0;
			fence = rcu_dereference(robj->fence_excl);
			if (!read_seqcount_retry(&robj->seq, seq))
				break;
			rcu_read_unlock();
		}

		if (fence)
			seq_printf(s, "\tExclusive fence: %s %s %ssignalled\n",
				   fence->ops->get_driver_name(fence),
				   fence->ops->get_timeline_name(fence),
				   dma_fence_is_signaled(fence) ? "" : "un");
		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			if (!dma_fence_get_rcu(fence))
				continue;
			seq_printf(s, "\tShared fence: %s %s %ssignalled\n",
				   fence->ops->get_driver_name(fence),
				   fence->ops->get_timeline_name(fence),
				   dma_fence_is_signaled(fence) ? "" : "un");
			dma_fence_put(fence);
		}
		rcu_read_unlock();

		seq_puts(s, "\tAttached Devices:\n");
		attach_count = 0;

		list_for_each_entry(attach_obj, &buf_obj->attachments, node) {
			seq_printf(s, "\t%s\n", dev_name(attach_obj->dev));
			attach_count++;
		}
		dma_resv_unlock(buf_obj->resv);

		seq_printf(s, "Total %d devices attached\n\n",
			   attach_count);

		count++;
		size += buf_obj->size;
	}

	seq_printf(s, "\nTotal %d objects, %zu bytes\n", count, size);

	mutex_unlock(&db_list.lock);
	return 0;

error_unlock:
	mutex_unlock(&db_list.lock);
	return ret;
}

DEFINE_SHOW_ATTRIBUTE(dma_buf_debug);

static struct dentry *dma_buf_debugfs_dir;

static int dma_buf_init_debugfs(void)
{
	struct dentry *d;
	int err = 0;

	d = debugfs_create_dir("dma_buf", NULL);
	if (IS_ERR(d))
		return PTR_ERR(d);

	dma_buf_debugfs_dir = d;

	d = debugfs_create_file("bufinfo", S_IRUGO, dma_buf_debugfs_dir,
				NULL, &dma_buf_debug_fops);
	if (IS_ERR(d)) {
		pr_debug("dma_buf: debugfs: failed to create node bufinfo\n");
		debugfs_remove_recursive(dma_buf_debugfs_dir);
		dma_buf_debugfs_dir = NULL;
		err = PTR_ERR(d);
	}

	return err;
}

static void dma_buf_uninit_debugfs(void)
{
	debugfs_remove_recursive(dma_buf_debugfs_dir);
}
#else
static inline int dma_buf_init_debugfs(void)
{
	return 0;
}
static inline void dma_buf_uninit_debugfs(void)
{
}
#endif

static int __init dma_buf_init(void)
{
	dma_buf_mnt = kern_mount(&dma_buf_fs_type);
	if (IS_ERR(dma_buf_mnt))
		return PTR_ERR(dma_buf_mnt);

	mutex_init(&db_list.lock);
	INIT_LIST_HEAD(&db_list.head);
	dma_buf_init_debugfs();
	return 0;
}
subsys_initcall(dma_buf_init);

static void __exit dma_buf_deinit(void)
{
	dma_buf_uninit_debugfs();
	kern_unmount(dma_buf_mnt);
}
__exitcall(dma_buf_deinit);