/*
 * videobuf2-dma-contig.c - DMA contig memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/dma-buf.h>
#include <linux/module.h>
#include <linux/refcount.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>
#include <media/videobuf2-memops.h>

struct vb2_dc_buf {
	struct device			*dev;
	void				*vaddr;
	unsigned long			size;
	void				*cookie;
	dma_addr_t			dma_addr;
	unsigned long			attrs;
	enum dma_data_direction		dma_dir;
	struct sg_table			*dma_sgt;
	struct frame_vector		*vec;

	/* MMAP related */
	struct vb2_vmarea_handler	handler;
	refcount_t			refcount;
	struct sg_table			*sgt_base;

	/* DMABUF related */
	struct dma_buf_attachment	*db_attach;
};

static inline bool vb2_dc_buffer_consistent(unsigned long attr)
{
	return !(attr & DMA_ATTR_NON_CONSISTENT);
}

/*********************************************/
/*        scatterlist table functions        */
/*********************************************/

static unsigned long vb2_dc_get_contiguous_size(struct sg_table *sgt)
{
	struct scatterlist *s;
	dma_addr_t expected = sg_dma_address(sgt->sgl);
	unsigned int i;
	unsigned long size = 0;

	for_each_sg(sgt->sgl, s, sgt->nents, i) {
		if (sg_dma_address(s) != expected)
			break;
		expected = sg_dma_address(s) + sg_dma_len(s);
		size += sg_dma_len(s);
	}
	return size;
}
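
/*
 * Worked example (illustrative, not from the original source): if the
 * mapped table holds DMA segments [0x10000000, len 0x1000],
 * [0x10001000, len 0x2000] and [0x20000000, len 0x1000], the first two
 * segments are contiguous in DMA address space and the third is not,
 * so vb2_dc_get_contiguous_size() returns 0x3000.
 */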

/*********************************************/
/*         callbacks for all buffers         */
/*********************************************/

static void *vb2_dc_cookie(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return &buf->dma_addr;
}
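
/*
 * Usage sketch (hypothetical driver code, not part of this file):
 * drivers normally consume this cookie through
 * vb2_dma_contig_plane_dma_addr() from <media/videobuf2-dma-contig.h>,
 * e.g. when programming a DMA engine in their buf_queue() handler.
 * The register offset below is made up for illustration:
 *
 *	dma_addr_t addr = vb2_dma_contig_plane_dma_addr(vb, 0);
 *
 *	writel(addr, dev->regs + HYPOTHETICAL_DMA_ADDR_REG);
 */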

static void *vb2_dc_vaddr(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct dma_buf_map map;
	int ret;

	if (!buf->vaddr && buf->db_attach) {
		ret = dma_buf_vmap(buf->db_attach->dmabuf, &map);
		buf->vaddr = ret ? NULL : map.vaddr;
	}

	return buf->vaddr;
}

static unsigned int vb2_dc_num_users(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return refcount_read(&buf->refcount);
}

static void vb2_dc_prepare(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	if (!sgt)
		return;

	dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->orig_nents,
			       buf->dma_dir);
}

static void vb2_dc_finish(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	if (!sgt)
		return;

	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
}

/*********************************************/
/*        callbacks for MMAP buffers         */
/*********************************************/

static void vb2_dc_put(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	if (!refcount_dec_and_test(&buf->refcount))
		return;

	if (buf->sgt_base) {
		sg_free_table(buf->sgt_base);
		kfree(buf->sgt_base);
	}
	dma_free_attrs(buf->dev, buf->size, buf->cookie, buf->dma_addr,
		       buf->attrs);
	put_device(buf->dev);
	kfree(buf);
}

static void *vb2_dc_alloc(struct device *dev, unsigned long attrs,
			  unsigned long size, enum dma_data_direction dma_dir,
			  gfp_t gfp_flags)
{
	struct vb2_dc_buf *buf;

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->attrs = attrs;
	buf->cookie = dma_alloc_attrs(dev, size, &buf->dma_addr,
				      GFP_KERNEL | gfp_flags, buf->attrs);
	if (!buf->cookie) {
		dev_err(dev, "dma_alloc_attrs of size %ld failed\n", size);
		kfree(buf);
		return ERR_PTR(-ENOMEM);
	}

	if ((buf->attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0)
		buf->vaddr = buf->cookie;

	/* Prevent the device from being released while the buffer is used */
	buf->dev = get_device(dev);
	buf->size = size;
	buf->dma_dir = dma_dir;

	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_dc_put;
	buf->handler.arg = buf;

	refcount_set(&buf->refcount, 1);

	return buf;
}

static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_dc_buf *buf = buf_priv;
	int ret;

	if (!buf) {
		printk(KERN_ERR "No buffer to map\n");
		return -EINVAL;
	}

	ret = dma_mmap_attrs(buf->dev, vma, buf->cookie,
		buf->dma_addr, buf->size, buf->attrs);

	if (ret) {
		pr_err("Remapping memory failed, error: %d\n", ret);
		return ret;
	}

	vma->vm_flags		|= VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_private_data	= &buf->handler;
	vma->vm_ops		= &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	pr_debug("%s: mapped dma addr 0x%08lx at 0x%08lx, size %ld\n",
		 __func__, (unsigned long)buf->dma_addr, vma->vm_start,
		 buf->size);

	return 0;
}

/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

struct vb2_dc_attachment {
	struct sg_table sgt;
	enum dma_data_direction dma_dir;
};

static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf,
	struct dma_buf_attachment *dbuf_attach)
{
	struct vb2_dc_attachment *attach;
	unsigned int i;
	struct scatterlist *rd, *wr;
	struct sg_table *sgt;
	struct vb2_dc_buf *buf = dbuf->priv;
	int ret;

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return -ENOMEM;

	sgt = &attach->sgt;
240 /* Copy the buf->base_sgt scatter list to the attachment, as we can't
241 * map the same scatter list to multiple attachments at the same time.
242 */
	ret = sg_alloc_table(sgt, buf->sgt_base->orig_nents, GFP_KERNEL);
	if (ret) {
		kfree(attach);
		return -ENOMEM;
	}

	rd = buf->sgt_base->sgl;
	wr = sgt->sgl;
	for (i = 0; i < sgt->orig_nents; ++i) {
		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
		rd = sg_next(rd);
		wr = sg_next(wr);
	}

	attach->dma_dir = DMA_NONE;
	dbuf_attach->priv = attach;

	return 0;
}

static void vb2_dc_dmabuf_ops_detach(struct dma_buf *dbuf,
	struct dma_buf_attachment *db_attach)
{
	struct vb2_dc_attachment *attach = db_attach->priv;
	struct sg_table *sgt;

	if (!attach)
		return;

	sgt = &attach->sgt;

	/* release the scatterlist cache */
	if (attach->dma_dir != DMA_NONE)
		/*
		 * Cache sync can be skipped here, as the vb2_dc memory is
		 * allocated from device coherent memory, which means the
		 * memory locations do not require any explicit cache
		 * maintenance prior or after being used by the device.
		 */
		dma_unmap_sg_attrs(db_attach->dev, sgt->sgl, sgt->orig_nents,
				   attach->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
	sg_free_table(sgt);
	kfree(attach);
	db_attach->priv = NULL;
}

static struct sg_table *vb2_dc_dmabuf_ops_map(
	struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
	struct vb2_dc_attachment *attach = db_attach->priv;
	/* stealing dmabuf mutex to serialize map/unmap operations */
	struct mutex *lock = &db_attach->dmabuf->lock;
	struct sg_table *sgt;

	mutex_lock(lock);

	sgt = &attach->sgt;
	/* return previously mapped sg table */
	if (attach->dma_dir == dma_dir) {
		mutex_unlock(lock);
		return sgt;
	}

	/* release any previous cache */
	if (attach->dma_dir != DMA_NONE) {
		dma_unmap_sg_attrs(db_attach->dev, sgt->sgl, sgt->orig_nents,
				   attach->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
		attach->dma_dir = DMA_NONE;
	}

	/*
	 * mapping to the client with new direction, no cache sync
	 * required see comment in vb2_dc_dmabuf_ops_detach()
	 */
	sgt->nents = dma_map_sg_attrs(db_attach->dev, sgt->sgl, sgt->orig_nents,
				      dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (!sgt->nents) {
		pr_err("failed to map scatterlist\n");
		mutex_unlock(lock);
		return ERR_PTR(-EIO);
	}

	attach->dma_dir = dma_dir;

	mutex_unlock(lock);

	return sgt;
}

static void vb2_dc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
	struct sg_table *sgt, enum dma_data_direction dma_dir)
{
	/* nothing to be done here */
}

static void vb2_dc_dmabuf_ops_release(struct dma_buf *dbuf)
{
	/* drop reference obtained in vb2_dc_get_dmabuf */
	vb2_dc_put(dbuf->priv);
}

static int
vb2_dc_dmabuf_ops_begin_cpu_access(struct dma_buf *dbuf,
				   enum dma_data_direction direction)
{
	struct vb2_dc_buf *buf = dbuf->priv;
	struct sg_table *sgt = buf->dma_sgt;

	if (vb2_dc_buffer_consistent(buf->attrs))
		return 0;

	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
	return 0;
}

static int
vb2_dc_dmabuf_ops_end_cpu_access(struct dma_buf *dbuf,
				 enum dma_data_direction direction)
{
	struct vb2_dc_buf *buf = dbuf->priv;
	struct sg_table *sgt = buf->dma_sgt;

	if (vb2_dc_buffer_consistent(buf->attrs))
		return 0;

	dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
	return 0;
}

static int vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf, struct dma_buf_map *map)
{
	struct vb2_dc_buf *buf = dbuf->priv;

	dma_buf_map_set_vaddr(map, buf->vaddr);

	return 0;
}

static int vb2_dc_dmabuf_ops_mmap(struct dma_buf *dbuf,
	struct vm_area_struct *vma)
{
	return vb2_dc_mmap(dbuf->priv, vma);
}

static const struct dma_buf_ops vb2_dc_dmabuf_ops = {
	.attach = vb2_dc_dmabuf_ops_attach,
	.detach = vb2_dc_dmabuf_ops_detach,
	.map_dma_buf = vb2_dc_dmabuf_ops_map,
	.unmap_dma_buf = vb2_dc_dmabuf_ops_unmap,
	.begin_cpu_access = vb2_dc_dmabuf_ops_begin_cpu_access,
	.end_cpu_access = vb2_dc_dmabuf_ops_end_cpu_access,
	.vmap = vb2_dc_dmabuf_ops_vmap,
	.mmap = vb2_dc_dmabuf_ops_mmap,
	.release = vb2_dc_dmabuf_ops_release,
};

static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf)
{
	int ret;
	struct sg_table *sgt;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		dev_err(buf->dev, "failed to alloc sg table\n");
		return NULL;
	}

	ret = dma_get_sgtable_attrs(buf->dev, sgt, buf->cookie, buf->dma_addr,
				    buf->size, buf->attrs);
	if (ret < 0) {
		dev_err(buf->dev, "failed to get scatterlist from DMA API\n");
		kfree(sgt);
		return NULL;
	}

	return sgt;
}

static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv, unsigned long flags)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct dma_buf *dbuf;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &vb2_dc_dmabuf_ops;
	exp_info.size = buf->size;
	exp_info.flags = flags;
	exp_info.priv = buf;

	if (!buf->sgt_base)
		buf->sgt_base = vb2_dc_get_base_sgt(buf);

	if (WARN_ON(!buf->sgt_base))
		return NULL;

	dbuf = dma_buf_export(&exp_info);
	if (IS_ERR(dbuf))
		return NULL;

	/* dmabuf keeps reference to vb2 buffer */
	refcount_inc(&buf->refcount);

	return dbuf;
}

/*********************************************/
/*       callbacks for USERPTR buffers       */
/*********************************************/

static void vb2_dc_put_userptr(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;
	int i;
	struct page **pages;

	if (sgt) {
		/*
		 * No need to sync to CPU, it's already synced to the CPU
		 * since the finish() memop will have been called before this.
		 */
		dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
				   buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
		pages = frame_vector_pages(buf->vec);
		/* sgt should exist only if vector contains pages... */
		BUG_ON(IS_ERR(pages));
		if (buf->dma_dir == DMA_FROM_DEVICE ||
		    buf->dma_dir == DMA_BIDIRECTIONAL)
			for (i = 0; i < frame_vector_count(buf->vec); i++)
				set_page_dirty_lock(pages[i]);
		sg_free_table(sgt);
		kfree(sgt);
	} else {
		dma_unmap_resource(buf->dev, buf->dma_addr, buf->size,
				   buf->dma_dir, 0);
	}
	vb2_destroy_framevec(buf->vec);
	kfree(buf);
}

static void *vb2_dc_get_userptr(struct device *dev, unsigned long vaddr,
	unsigned long size, enum dma_data_direction dma_dir)
{
	struct vb2_dc_buf *buf;
	struct frame_vector *vec;
	unsigned int offset;
	int n_pages, i;
	int ret = 0;
	struct sg_table *sgt;
	unsigned long contig_size;
	unsigned long dma_align = dma_get_cache_alignment();

	/* Only cache aligned DMA transfers are reliable */
	if (!IS_ALIGNED(vaddr | size, dma_align)) {
		pr_debug("user data must be aligned to %lu bytes\n", dma_align);
		return ERR_PTR(-EINVAL);
	}

	if (!size) {
		pr_debug("size is zero\n");
		return ERR_PTR(-EINVAL);
	}

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = dev;
	buf->dma_dir = dma_dir;

	offset = lower_32_bits(offset_in_page(vaddr));
	vec = vb2_create_framevec(vaddr, size);
	if (IS_ERR(vec)) {
		ret = PTR_ERR(vec);
		goto fail_buf;
	}
	buf->vec = vec;
	n_pages = frame_vector_count(vec);
	ret = frame_vector_to_pages(vec);
	if (ret < 0) {
		unsigned long *nums = frame_vector_pfns(vec);

		/*
		 * Failed to convert to pages... Check the memory is physically
		 * contiguous and use direct mapping
		 */
		for (i = 1; i < n_pages; i++)
			if (nums[i-1] + 1 != nums[i])
				goto fail_pfnvec;
		buf->dma_addr = dma_map_resource(buf->dev,
				__pfn_to_phys(nums[0]), size, buf->dma_dir, 0);
		if (dma_mapping_error(buf->dev, buf->dma_addr)) {
			ret = -ENOMEM;
			goto fail_pfnvec;
		}
		goto out;
	}

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		pr_err("failed to allocate sg table\n");
		ret = -ENOMEM;
		goto fail_pfnvec;
	}

	ret = sg_alloc_table_from_pages(sgt, frame_vector_pages(vec), n_pages,
		offset, size, GFP_KERNEL);
	if (ret) {
		pr_err("failed to initialize sg table\n");
		goto fail_sgt;
	}

	/*
	 * No need to sync to the device, this will happen later when the
	 * prepare() memop is called.
	 */
	sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
				      buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (sgt->nents <= 0) {
		pr_err("failed to map scatterlist\n");
		ret = -EIO;
		goto fail_sgt_init;
	}

	contig_size = vb2_dc_get_contiguous_size(sgt);
	if (contig_size < size) {
		pr_err("contiguous mapping is too small %lu/%lu\n",
			contig_size, size);
		ret = -EFAULT;
		goto fail_map_sg;
	}

	buf->dma_addr = sg_dma_address(sgt->sgl);
	buf->dma_sgt = sgt;
out:
	buf->size = size;

	return buf;

fail_map_sg:
	dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
			   buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);

fail_sgt_init:
	sg_free_table(sgt);

fail_sgt:
	kfree(sgt);

fail_pfnvec:
	vb2_destroy_framevec(vec);

fail_buf:
	kfree(buf);

	return ERR_PTR(ret);
}

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

static int vb2_dc_map_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;
	struct sg_table *sgt;
	unsigned long contig_size;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to pin a non attached buffer\n");
		return -EINVAL;
	}

	if (WARN_ON(buf->dma_sgt)) {
		pr_err("dmabuf buffer is already pinned\n");
		return 0;
	}

	/* get the associated scatterlist for this buffer */
	sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
	if (IS_ERR(sgt)) {
		pr_err("Error getting dmabuf scatterlist\n");
		return -EINVAL;
	}

	/* checking if dmabuf is big enough to store contiguous chunk */
	contig_size = vb2_dc_get_contiguous_size(sgt);
	if (contig_size < buf->size) {
		pr_err("contiguous chunk is too small %lu/%lu\n",
		       contig_size, buf->size);
		dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
		return -EFAULT;
	}

	buf->dma_addr = sg_dma_address(sgt->sgl);
	buf->dma_sgt = sgt;
	buf->vaddr = NULL;

	return 0;
}

static void vb2_dc_unmap_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;
	struct sg_table *sgt = buf->dma_sgt;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to unpin a not attached buffer\n");
		return;
	}

	if (WARN_ON(!sgt)) {
		pr_err("dmabuf buffer is already unpinned\n");
		return;
	}

	if (buf->vaddr) {
		dma_buf_vunmap(buf->db_attach->dmabuf, buf->vaddr);
		buf->vaddr = NULL;
	}
	dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);

	buf->dma_addr = 0;
	buf->dma_sgt = NULL;
}

static void vb2_dc_detach_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;

	/* if vb2 works correctly you should never detach mapped buffer */
	if (WARN_ON(buf->dma_addr))
		vb2_dc_unmap_dmabuf(buf);

	/* detach this attachment */
	dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
	kfree(buf);
}

static void *vb2_dc_attach_dmabuf(struct device *dev, struct dma_buf *dbuf,
	unsigned long size, enum dma_data_direction dma_dir)
{
	struct vb2_dc_buf *buf;
	struct dma_buf_attachment *dba;

	if (dbuf->size < size)
		return ERR_PTR(-EFAULT);

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = dev;
	/* create attachment for the dmabuf with the user device */
	dba = dma_buf_attach(dbuf, buf->dev);
	if (IS_ERR(dba)) {
		pr_err("failed to attach dmabuf\n");
		kfree(buf);
		return dba;
	}

	buf->dma_dir = dma_dir;
	buf->size = size;
	buf->db_attach = dba;

	return buf;
}

/*********************************************/
/*       DMA CONTIG exported functions       */
/*********************************************/

const struct vb2_mem_ops vb2_dma_contig_memops = {
	.alloc		= vb2_dc_alloc,
	.put		= vb2_dc_put,
	.get_dmabuf	= vb2_dc_get_dmabuf,
	.cookie		= vb2_dc_cookie,
	.vaddr		= vb2_dc_vaddr,
	.mmap		= vb2_dc_mmap,
	.get_userptr	= vb2_dc_get_userptr,
	.put_userptr	= vb2_dc_put_userptr,
	.prepare	= vb2_dc_prepare,
	.finish		= vb2_dc_finish,
	.map_dmabuf	= vb2_dc_map_dmabuf,
	.unmap_dmabuf	= vb2_dc_unmap_dmabuf,
	.attach_dmabuf	= vb2_dc_attach_dmabuf,
	.detach_dmabuf	= vb2_dc_detach_dmabuf,
	.num_users	= vb2_dc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_dma_contig_memops);
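
/*
 * Usage sketch (hypothetical driver code, not part of this file): a
 * driver selects this allocator by pointing its vb2_queue at
 * vb2_dma_contig_memops before calling vb2_queue_init(). This is a
 * minimal sketch, not a complete queue configuration; "pdev" is
 * assumed to be the driver's platform device, and q->dev must point
 * at the device that actually performs the DMA:
 *
 *	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 *	q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
 *	q->mem_ops = &vb2_dma_contig_memops;
 *	q->dev = &pdev->dev;
 *	ret = vb2_queue_init(q);
 */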

/**
 * vb2_dma_contig_set_max_seg_size() - configure DMA max segment size
 * @dev: device for configuring DMA parameters
 * @size: size of DMA max segment size to set
 *
 * To allow mapping the scatter-list into a single chunk in the DMA
 * address space, the device is required to have the DMA max segment
 * size parameter set to a value larger than the buffer size. Otherwise,
 * the DMA-mapping subsystem will split the mapping into max segment
 * size chunks. This function sets the DMA max segment size
 * parameter to let DMA-mapping map a buffer as a single chunk in DMA
 * address space.
 * This code assumes that the DMA-mapping subsystem will merge all
 * scatterlist segments if this is really possible (for example when
 * an IOMMU is available and enabled).
 * Ideally, this parameter should be set by the generic bus code, but it
 * is left with the default 64KiB value due to historical limitations in
 * other subsystems (like limited USB host drivers) and there is no good
 * place to set it to the proper value.
 * This function should be called from the drivers, which are known to
 * operate on platforms with IOMMU and provide access to shared buffers
 * (either USERPTR or DMABUF). This should be done before initializing
 * videobuf2 queue.
 */
int vb2_dma_contig_set_max_seg_size(struct device *dev, unsigned int size)
{
	if (!dev->dma_parms) {
		dev_err(dev, "Failed to set max_seg_size: dma_parms is NULL\n");
		return -ENODEV;
	}
	if (dma_get_max_seg_size(dev) < size)
		return dma_set_max_seg_size(dev, size);

	return 0;
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_set_max_seg_size);
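
/*
 * Usage sketch (hypothetical driver code, not part of this file): a
 * driver on an IOMMU-capable platform would typically call this from
 * its probe() routine, before initializing the vb2 queue:
 *
 *	ret = vb2_dma_contig_set_max_seg_size(&pdev->dev, DMA_BIT_MASK(32));
 *	if (ret)
 *		return ret;
 */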

MODULE_DESCRIPTION("DMA-contig memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");