/*
 * videobuf2-dma-contig.c - DMA contig memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/dma-buf.h>
#include <linux/module.h>
#include <linux/refcount.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>
#include <media/videobuf2-memops.h>

struct vb2_dc_buf {
	struct device			*dev;
	void				*vaddr;
	unsigned long			size;
	void				*cookie;
	dma_addr_t			dma_addr;
	unsigned long			attrs;
	enum dma_data_direction		dma_dir;
	struct sg_table			*dma_sgt;
	struct frame_vector		*vec;

	/* MMAP related */
	struct vb2_vmarea_handler	handler;
	refcount_t			refcount;
	struct sg_table			*sgt_base;

	/* DMABUF related */
	struct dma_buf_attachment	*db_attach;
};

/*********************************************/
/*        scatterlist table functions        */
/*********************************************/

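/*
 * Return the size of the initial run of DMA-contiguous entries in @sgt,
 * i.e. how many bytes are addressable as a single chunk starting at the
 * first DMA address of the (already mapped) scatterlist.
 */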
static unsigned long vb2_dc_get_contiguous_size(struct sg_table *sgt)
{
	struct scatterlist *s;
	dma_addr_t expected = sg_dma_address(sgt->sgl);
	unsigned int i;
	unsigned long size = 0;

	for_each_sg(sgt->sgl, s, sgt->nents, i) {
		if (sg_dma_address(s) != expected)
			break;
		expected = sg_dma_address(s) + sg_dma_len(s);
		size += sg_dma_len(s);
	}
	return size;
}

/*********************************************/
/*         callbacks for all buffers         */
/*********************************************/

static void *vb2_dc_cookie(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return &buf->dma_addr;
}

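/*
 * For DMABUF-imported buffers the kernel mapping is created lazily, on
 * the first vaddr request, via dma_buf_vmap() on the attached dma-buf.
 */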
static void *vb2_dc_vaddr(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	if (!buf->vaddr && buf->db_attach)
		buf->vaddr = dma_buf_vmap(buf->db_attach->dmabuf);

	return buf->vaddr;
}

static unsigned int vb2_dc_num_users(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return refcount_read(&buf->refcount);
}

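/*
 * Sync the buffer for device access. Only USERPTR buffers carry a
 * dma_sgt at this point; MMAP buffers come from dma_alloc_attrs() and
 * have no sg table here, and DMABUF imports are synced by the exporter.
 */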
static void vb2_dc_prepare(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* DMABUF exporter will flush the cache for us */
	if (!sgt || buf->db_attach)
		return;

	dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->orig_nents,
			       buf->dma_dir);
}

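/*
 * Sync the buffer back for CPU access once the device is done with it;
 * as in vb2_dc_prepare(), this only applies to USERPTR buffers.
 */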
static void vb2_dc_finish(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* DMABUF exporter will flush the cache for us */
	if (!sgt || buf->db_attach)
		return;

	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
}

/*********************************************/
/*        callbacks for MMAP buffers         */
/*********************************************/

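/*
 * Drop one reference to the buffer; on the last put free the exported
 * sg table (if any), release the DMA memory and the device reference.
 */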
static void vb2_dc_put(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	if (!refcount_dec_and_test(&buf->refcount))
		return;

	if (buf->sgt_base) {
		sg_free_table(buf->sgt_base);
		kfree(buf->sgt_base);
	}
	dma_free_attrs(buf->dev, buf->size, buf->cookie, buf->dma_addr,
		       buf->attrs);
	put_device(buf->dev);
	kfree(buf);
}

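/*
 * Allocate a physically contiguous buffer of @size bytes for @dev and
 * set up the refcounted handler used by the mmap vm_ops.
 */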
static void *vb2_dc_alloc(struct device *dev, unsigned long attrs,
			  unsigned long size, enum dma_data_direction dma_dir,
			  gfp_t gfp_flags)
{
	struct vb2_dc_buf *buf;

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	if (attrs)
		buf->attrs = attrs;
	buf->cookie = dma_alloc_attrs(dev, size, &buf->dma_addr,
				      GFP_KERNEL | gfp_flags, buf->attrs);
	if (!buf->cookie) {
		dev_err(dev, "dma_alloc_attrs of size %lu failed\n", size);
		kfree(buf);
		return ERR_PTR(-ENOMEM);
	}

	if ((buf->attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0)
		buf->vaddr = buf->cookie;

	/* Prevent the device from being released while the buffer is used */
	buf->dev = get_device(dev);
	buf->size = size;
	buf->dma_dir = dma_dir;

	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_dc_put;
	buf->handler.arg = buf;

	refcount_set(&buf->refcount, 1);

	return buf;
}

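/*
 * Map the whole buffer into the caller's address space with
 * dma_mmap_attrs() and hook up vb2's common vm_ops so the mapping holds
 * a reference on the buffer.
 */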
static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_dc_buf *buf = buf_priv;
	int ret;

	if (!buf) {
		pr_err("No buffer to map\n");
		return -EINVAL;
	}

	/*
	 * dma_mmap_* uses vm_pgoff as an in-buffer offset, but we want to
	 * map the whole buffer.
	 */
	vma->vm_pgoff = 0;

	ret = dma_mmap_attrs(buf->dev, vma, buf->cookie,
		buf->dma_addr, buf->size, buf->attrs);

	if (ret) {
		pr_err("Remapping memory failed, error: %d\n", ret);
		return ret;
	}

	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_private_data = &buf->handler;
	vma->vm_ops = &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	pr_debug("%s: mapped dma addr 0x%08lx at 0x%08lx, size %lu\n",
		 __func__, (unsigned long)buf->dma_addr, vma->vm_start,
		 buf->size);

	return 0;
}

/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

struct vb2_dc_attachment {
	struct sg_table sgt;
	enum dma_data_direction dma_dir;
};

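/*
 * Attach callback of the exported dma-buf: give each attachment its own
 * copy of the base scatterlist, since one sg table cannot be mapped for
 * several importers at the same time.
 */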
static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf,
	struct dma_buf_attachment *dbuf_attach)
{
	struct vb2_dc_attachment *attach;
	unsigned int i;
	struct scatterlist *rd, *wr;
	struct sg_table *sgt;
	struct vb2_dc_buf *buf = dbuf->priv;
	int ret;

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return -ENOMEM;

	sgt = &attach->sgt;
	/*
	 * Copy the buf->sgt_base scatter list to the attachment, as we can't
	 * map the same scatter list to multiple attachments at the same time.
	 */
	ret = sg_alloc_table(sgt, buf->sgt_base->orig_nents, GFP_KERNEL);
	if (ret) {
		kfree(attach);
		return -ENOMEM;
	}

	rd = buf->sgt_base->sgl;
	wr = sgt->sgl;
	for (i = 0; i < sgt->orig_nents; ++i) {
		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
		rd = sg_next(rd);
		wr = sg_next(wr);
	}

	attach->dma_dir = DMA_NONE;
	dbuf_attach->priv = attach;

	return 0;
}

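/* Detach callback: drop the cached mapping (if any) and free the sg copy. */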
static void vb2_dc_dmabuf_ops_detach(struct dma_buf *dbuf,
	struct dma_buf_attachment *db_attach)
{
	struct vb2_dc_attachment *attach = db_attach->priv;
	struct sg_table *sgt;

	if (!attach)
		return;

	sgt = &attach->sgt;

	/* release the scatterlist cache */
	if (attach->dma_dir != DMA_NONE)
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			attach->dma_dir);
	sg_free_table(sgt);
	kfree(attach);
	db_attach->priv = NULL;
}

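/*
 * Map callback: reuse the cached mapping when the requested direction
 * matches the current one; otherwise unmap and remap the attachment's
 * scatterlist for the importing device.
 */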
static struct sg_table *vb2_dc_dmabuf_ops_map(
	struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
	struct vb2_dc_attachment *attach = db_attach->priv;
	/* stealing dmabuf mutex to serialize map/unmap operations */
	struct mutex *lock = &db_attach->dmabuf->lock;
	struct sg_table *sgt;

	mutex_lock(lock);

	sgt = &attach->sgt;
	/* return previously mapped sg table */
	if (attach->dma_dir == dma_dir) {
		mutex_unlock(lock);
		return sgt;
	}

	/* release any previous cache */
	if (attach->dma_dir != DMA_NONE) {
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			attach->dma_dir);
		attach->dma_dir = DMA_NONE;
	}

	/* mapping to the client with new direction */
	sgt->nents = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
				dma_dir);
	if (!sgt->nents) {
		pr_err("failed to map scatterlist\n");
		mutex_unlock(lock);
		return ERR_PTR(-EIO);
	}

	attach->dma_dir = dma_dir;

	mutex_unlock(lock);

	return sgt;
}

static void vb2_dc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
	struct sg_table *sgt, enum dma_data_direction dma_dir)
{
	/* nothing to be done here */
}

static void vb2_dc_dmabuf_ops_release(struct dma_buf *dbuf)
{
	/* drop reference obtained in vb2_dc_get_dmabuf */
	vb2_dc_put(dbuf->priv);
}

static void *vb2_dc_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
{
	struct vb2_dc_buf *buf = dbuf->priv;

	return buf->vaddr ? buf->vaddr + pgnum * PAGE_SIZE : NULL;
}

static void *vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf)
{
	struct vb2_dc_buf *buf = dbuf->priv;

	return buf->vaddr;
}

static int vb2_dc_dmabuf_ops_mmap(struct dma_buf *dbuf,
	struct vm_area_struct *vma)
{
	return vb2_dc_mmap(dbuf->priv, vma);
}

static const struct dma_buf_ops vb2_dc_dmabuf_ops = {
	.attach = vb2_dc_dmabuf_ops_attach,
	.detach = vb2_dc_dmabuf_ops_detach,
	.map_dma_buf = vb2_dc_dmabuf_ops_map,
	.unmap_dma_buf = vb2_dc_dmabuf_ops_unmap,
	.map = vb2_dc_dmabuf_ops_kmap,
	.vmap = vb2_dc_dmabuf_ops_vmap,
	.mmap = vb2_dc_dmabuf_ops_mmap,
	.release = vb2_dc_dmabuf_ops_release,
};

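/*
 * Build an sg table describing the pages backing the DMA allocation,
 * using dma_get_sgtable_attrs(); this becomes the template copied into
 * each dma-buf attachment.
 */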
static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf)
{
	int ret;
	struct sg_table *sgt;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		dev_err(buf->dev, "failed to alloc sg table\n");
		return NULL;
	}

	ret = dma_get_sgtable_attrs(buf->dev, sgt, buf->cookie, buf->dma_addr,
		buf->size, buf->attrs);
	if (ret < 0) {
		dev_err(buf->dev, "failed to get scatterlist from DMA API\n");
		kfree(sgt);
		return NULL;
	}

	return sgt;
}

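/*
 * Export an MMAP buffer as a dma-buf; the exported buffer holds an
 * extra reference on the vb2 buffer, dropped in the dma-buf release
 * callback.
 */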
static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv, unsigned long flags)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct dma_buf *dbuf;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &vb2_dc_dmabuf_ops;
	exp_info.size = buf->size;
	exp_info.flags = flags;
	exp_info.priv = buf;

	if (!buf->sgt_base)
		buf->sgt_base = vb2_dc_get_base_sgt(buf);

	if (WARN_ON(!buf->sgt_base))
		return NULL;

	dbuf = dma_buf_export(&exp_info);
	if (IS_ERR(dbuf))
		return NULL;

	/* dmabuf keeps reference to vb2 buffer */
	refcount_inc(&buf->refcount);

	return dbuf;
}

/*********************************************/
/*       callbacks for USERPTR buffers       */
/*********************************************/

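/*
 * Release a USERPTR buffer: unmap it (dma_unmap_sg_attrs() for a
 * page-backed vector, dma_unmap_resource() for PFN-mapped memory),
 * mark the pages dirty if the device may have written to them, and
 * free the frame vector.
 */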
static void vb2_dc_put_userptr(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;
	int i;
	struct page **pages;

	if (sgt) {
		/*
		 * No need to sync to CPU, it's already synced to the CPU
		 * since the finish() memop will have been called before this.
		 */
		dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
				   buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
		pages = frame_vector_pages(buf->vec);
		/* sgt should exist only if vector contains pages... */
		BUG_ON(IS_ERR(pages));
		if (buf->dma_dir == DMA_FROM_DEVICE ||
		    buf->dma_dir == DMA_BIDIRECTIONAL)
			for (i = 0; i < frame_vector_count(buf->vec); i++)
				set_page_dirty_lock(pages[i]);
		sg_free_table(sgt);
		kfree(sgt);
	} else {
		dma_unmap_resource(buf->dev, buf->dma_addr, buf->size,
				   buf->dma_dir, 0);
	}
	vb2_destroy_framevec(buf->vec);
	kfree(buf);
}

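/*
 * Pin a userspace range for DMA: pin the pages into a frame vector and
 * map them as one contiguous chunk, or fall back to dma_map_resource()
 * when the range is physically contiguous but has no struct pages
 * (e.g. a PFN-mapped reserved memory region).
 */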
static void *vb2_dc_get_userptr(struct device *dev, unsigned long vaddr,
	unsigned long size, enum dma_data_direction dma_dir)
{
	struct vb2_dc_buf *buf;
	struct frame_vector *vec;
	unsigned int offset;
	int n_pages, i;
	int ret = 0;
	struct sg_table *sgt;
	unsigned long contig_size;
	unsigned long dma_align = dma_get_cache_alignment();

	/* Only cache aligned DMA transfers are reliable */
	if (!IS_ALIGNED(vaddr | size, dma_align)) {
		pr_debug("user data must be aligned to %lu bytes\n", dma_align);
		return ERR_PTR(-EINVAL);
	}

	if (!size) {
		pr_debug("size is zero\n");
		return ERR_PTR(-EINVAL);
	}

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = dev;
	buf->dma_dir = dma_dir;

	offset = lower_32_bits(offset_in_page(vaddr));
	vec = vb2_create_framevec(vaddr, size, dma_dir == DMA_FROM_DEVICE ||
					       dma_dir == DMA_BIDIRECTIONAL);
	if (IS_ERR(vec)) {
		ret = PTR_ERR(vec);
		goto fail_buf;
	}
	buf->vec = vec;
	n_pages = frame_vector_count(vec);
	ret = frame_vector_to_pages(vec);
	if (ret < 0) {
		unsigned long *nums = frame_vector_pfns(vec);

		/*
		 * Failed to convert to pages... Check whether the memory is
		 * physically contiguous and use direct mapping.
		 */
		for (i = 1; i < n_pages; i++)
			if (nums[i-1] + 1 != nums[i])
				goto fail_pfnvec;
		buf->dma_addr = dma_map_resource(buf->dev,
				__pfn_to_phys(nums[0]), size, buf->dma_dir, 0);
		if (dma_mapping_error(buf->dev, buf->dma_addr)) {
			ret = -ENOMEM;
			goto fail_pfnvec;
		}
		goto out;
	}

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		pr_err("failed to allocate sg table\n");
		ret = -ENOMEM;
		goto fail_pfnvec;
	}

	ret = sg_alloc_table_from_pages(sgt, frame_vector_pages(vec), n_pages,
		offset, size, GFP_KERNEL);
	if (ret) {
		pr_err("failed to initialize sg table\n");
		goto fail_sgt;
	}

	/*
	 * No need to sync to the device, this will happen later when the
	 * prepare() memop is called.
	 */
	sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
				      buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (sgt->nents <= 0) {
		pr_err("failed to map scatterlist\n");
		ret = -EIO;
		goto fail_sgt_init;
	}

	contig_size = vb2_dc_get_contiguous_size(sgt);
	if (contig_size < size) {
		pr_err("contiguous mapping is too small %lu/%lu\n",
			contig_size, size);
		ret = -EFAULT;
		goto fail_map_sg;
	}

	buf->dma_addr = sg_dma_address(sgt->sgl);
	buf->dma_sgt = sgt;
out:
	buf->size = size;

	return buf;

fail_map_sg:
	dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
			   buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);

fail_sgt_init:
	sg_free_table(sgt);

fail_sgt:
	kfree(sgt);

fail_pfnvec:
	vb2_destroy_framevec(vec);

fail_buf:
	kfree(buf);

	return ERR_PTR(ret);
}

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

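/*
 * Map an imported dma-buf for this device and verify that the resulting
 * scatterlist is contiguous in DMA address space and large enough.
 */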
static int vb2_dc_map_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;
	struct sg_table *sgt;
	unsigned long contig_size;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to pin a non-attached buffer\n");
		return -EINVAL;
	}

	if (WARN_ON(buf->dma_sgt)) {
		pr_err("dmabuf buffer is already pinned\n");
		return 0;
	}

	/* get the associated scatterlist for this buffer */
	sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
	if (IS_ERR(sgt)) {
		pr_err("Error getting dmabuf scatterlist\n");
		return -EINVAL;
	}

	/* check that the dmabuf is big enough to store a contiguous chunk */
	contig_size = vb2_dc_get_contiguous_size(sgt);
	if (contig_size < buf->size) {
		pr_err("contiguous chunk is too small %lu/%lu b\n",
		       contig_size, buf->size);
		dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
		return -EFAULT;
	}

	buf->dma_addr = sg_dma_address(sgt->sgl);
	buf->dma_sgt = sgt;
	buf->vaddr = NULL;

	return 0;
}

static void vb2_dc_unmap_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;
	struct sg_table *sgt = buf->dma_sgt;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to unpin a non-attached buffer\n");
		return;
	}

	if (WARN_ON(!sgt)) {
		pr_err("dmabuf buffer is already unpinned\n");
		return;
	}

	if (buf->vaddr) {
		dma_buf_vunmap(buf->db_attach->dmabuf, buf->vaddr);
		buf->vaddr = NULL;
	}
	dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);

	buf->dma_addr = 0;
	buf->dma_sgt = NULL;
}

static void vb2_dc_detach_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;

	/* if vb2 works correctly you should never detach a mapped buffer */
	if (WARN_ON(buf->dma_addr))
		vb2_dc_unmap_dmabuf(buf);

	/* detach this attachment */
	dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
	kfree(buf);
}

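/*
 * Attach a dma-buf to this device; the buffer is not mapped yet, that
 * happens later in vb2_dc_map_dmabuf().
 */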
static void *vb2_dc_attach_dmabuf(struct device *dev, struct dma_buf *dbuf,
	unsigned long size, enum dma_data_direction dma_dir)
{
	struct vb2_dc_buf *buf;
	struct dma_buf_attachment *dba;

	if (dbuf->size < size)
		return ERR_PTR(-EFAULT);

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = dev;
	/* create attachment for the dmabuf with the user device */
	dba = dma_buf_attach(dbuf, buf->dev);
	if (IS_ERR(dba)) {
		pr_err("failed to attach dmabuf\n");
		kfree(buf);
		return dba;
	}

	buf->dma_dir = dma_dir;
	buf->size = size;
	buf->db_attach = dba;

	return buf;
}

/*********************************************/
/*       DMA CONTIG exported functions       */
/*********************************************/

const struct vb2_mem_ops vb2_dma_contig_memops = {
	.alloc		= vb2_dc_alloc,
	.put		= vb2_dc_put,
	.get_dmabuf	= vb2_dc_get_dmabuf,
	.cookie		= vb2_dc_cookie,
	.vaddr		= vb2_dc_vaddr,
	.mmap		= vb2_dc_mmap,
	.get_userptr	= vb2_dc_get_userptr,
	.put_userptr	= vb2_dc_put_userptr,
	.prepare	= vb2_dc_prepare,
	.finish		= vb2_dc_finish,
	.map_dmabuf	= vb2_dc_map_dmabuf,
	.unmap_dmabuf	= vb2_dc_unmap_dmabuf,
	.attach_dmabuf	= vb2_dc_attach_dmabuf,
	.detach_dmabuf	= vb2_dc_detach_dmabuf,
	.num_users	= vb2_dc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_dma_contig_memops);

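/*
 * Illustrative sketch, not part of this file: a driver selects this
 * allocator by pointing its vb2_queue at vb2_dma_contig_memops before
 * calling vb2_queue_init(). The snippet below is a minimal, hypothetical
 * example; "q" and "pdev" stand in for driver-specific objects.
 *
 *	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 *	q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
 *	q->mem_ops = &vb2_dma_contig_memops;
 *	q->dev = &pdev->dev;
 *	ret = vb2_queue_init(q);
 */
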
/**
 * vb2_dma_contig_set_max_seg_size() - configure DMA max segment size
 * @dev: device for configuring DMA parameters
 * @size: DMA max segment size to set
 *
 * To allow mapping the scatter-list into a single chunk in the DMA
 * address space, the device is required to have the DMA max segment
 * size parameter set to a value larger than the buffer size. Otherwise,
 * the DMA-mapping subsystem will split the mapping into max segment
 * size chunks. This function sets the DMA max segment size
 * parameter to let DMA-mapping map a buffer as a single chunk in DMA
 * address space.
 * This code assumes that the DMA-mapping subsystem will merge all
 * scatterlist segments if this is really possible (for example when
 * an IOMMU is available and enabled).
 * Ideally, this parameter should be set by the generic bus code, but it
 * is left with the default 64KiB value due to historical limitations in
 * other subsystems (like limited USB host drivers) and there is no good
 * place to set it to the proper value.
 * This function should be called from drivers that are known to
 * operate on platforms with an IOMMU and provide access to shared
 * buffers (either USERPTR or DMABUF). This should be done before
 * initializing the videobuf2 queue.
 */
int vb2_dma_contig_set_max_seg_size(struct device *dev, unsigned int size)
{
	if (!dev->dma_parms) {
		dev->dma_parms = kzalloc(sizeof(*dev->dma_parms), GFP_KERNEL);
		if (!dev->dma_parms)
			return -ENOMEM;
	}
	if (dma_get_max_seg_size(dev) < size)
		return dma_set_max_seg_size(dev, size);

	return 0;
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_set_max_seg_size);

/*
 * vb2_dma_contig_clear_max_seg_size() - release resources for DMA parameters
 * @dev: device for configuring DMA parameters
 *
 * This function releases resources allocated to configure DMA parameters
 * (see vb2_dma_contig_set_max_seg_size() function). It should be called from
 * device drivers on driver remove.
 */
void vb2_dma_contig_clear_max_seg_size(struct device *dev)
{
	kfree(dev->dma_parms);
	dev->dma_parms = NULL;
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_clear_max_seg_size);

MODULE_DESCRIPTION("DMA-contig memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");