/*
 * udl_dmabuf.c
 *
 * Copyright (c) 2014 The Chromium OS Authors
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <drm/drmP.h>
#include "udl_drv.h"
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>

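/*
 * Per-attachment state: the scatter/gather table handed out to this
 * importer, the DMA direction it was mapped with, and whether a mapping
 * has been set up yet.
 */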
struct udl_drm_dmabuf_attachment {
	struct sg_table sgt;
	enum dma_data_direction dir;
	bool is_mapped;
};

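/*
 * Called when an importer attaches to the buffer: allocate the
 * per-attachment state and mark it as not yet DMA-mapped.
 */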
static int udl_attach_dma_buf(struct dma_buf *dmabuf,
			      struct dma_buf_attachment *attach)
{
	struct udl_drm_dmabuf_attachment *udl_attach;

	DRM_DEBUG_PRIME("[DEV:%s] size:%zd\n", dev_name(attach->dev),
			attach->dmabuf->size);

	udl_attach = kzalloc(sizeof(*udl_attach), GFP_KERNEL);
	if (!udl_attach)
		return -ENOMEM;

	udl_attach->dir = DMA_NONE;
	attach->priv = udl_attach;

	return 0;
}

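/*
 * Undo udl_attach_dma_buf(): DMA-unmap the scatterlist if it was mapped,
 * then free the table and the per-attachment state.
 */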
static void udl_detach_dma_buf(struct dma_buf *dmabuf,
			       struct dma_buf_attachment *attach)
{
	struct udl_drm_dmabuf_attachment *udl_attach = attach->priv;
	struct sg_table *sgt;

	if (!udl_attach)
		return;

	DRM_DEBUG_PRIME("[DEV:%s] size:%zd\n", dev_name(attach->dev),
			attach->dmabuf->size);

	sgt = &udl_attach->sgt;

	if (udl_attach->dir != DMA_NONE)
		dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
			     udl_attach->dir);

	sg_free_table(sgt);
	kfree(udl_attach);
	attach->priv = NULL;
}

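/*
 * Build (or reuse) the scatter/gather table for an importer: pin the
 * object's pages, clone the object's sg table into the per-attachment
 * copy, and DMA-map it for the requested direction. The cached table
 * is returned directly when the same direction is already mapped.
 */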
static struct sg_table *udl_map_dma_buf(struct dma_buf_attachment *attach,
					enum dma_data_direction dir)
{
	struct udl_drm_dmabuf_attachment *udl_attach = attach->priv;
	struct udl_gem_object *obj = to_udl_bo(attach->dmabuf->priv);
	struct drm_device *dev = obj->base.dev;
	struct udl_device *udl = dev->dev_private;
	struct scatterlist *rd, *wr;
	struct sg_table *sgt = NULL;
	unsigned int i;
	int page_count;
	int nents, ret;

	DRM_DEBUG_PRIME("[DEV:%s] size:%zd dir=%d\n", dev_name(attach->dev),
			attach->dmabuf->size, dir);

	/* just return current sgt if already requested. */
	if (udl_attach->dir == dir && udl_attach->is_mapped)
		return &udl_attach->sgt;

	if (!obj->pages) {
		ret = udl_gem_get_pages(obj);
		if (ret) {
			DRM_ERROR("failed to map pages.\n");
			return ERR_PTR(ret);
		}
	}

	page_count = obj->base.size / PAGE_SIZE;
	obj->sg = drm_prime_pages_to_sg(obj->pages, page_count);
	if (IS_ERR(obj->sg)) {
		DRM_ERROR("failed to allocate sgt.\n");
		return ERR_CAST(obj->sg);
	}

	sgt = &udl_attach->sgt;

	ret = sg_alloc_table(sgt, obj->sg->orig_nents, GFP_KERNEL);
	if (ret) {
		DRM_ERROR("failed to alloc sgt.\n");
		return ERR_PTR(-ENOMEM);
	}

	mutex_lock(&udl->gem_lock);

	rd = obj->sg->sgl;
	wr = sgt->sgl;
	for (i = 0; i < sgt->orig_nents; ++i) {
		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
		rd = sg_next(rd);
		wr = sg_next(wr);
	}

	if (dir != DMA_NONE) {
		nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
		if (!nents) {
			DRM_ERROR("failed to map sgl with iommu.\n");
			sg_free_table(sgt);
			sgt = ERR_PTR(-EIO);
			goto err_unlock;
		}
	}

	udl_attach->is_mapped = true;
	udl_attach->dir = dir;
	attach->priv = udl_attach;

err_unlock:
	mutex_unlock(&udl->gem_lock);
	return sgt;
}

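/*
 * No per-call teardown: the mapping is kept for the lifetime of the
 * attachment and released in udl_detach_dma_buf().
 */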
static void udl_unmap_dma_buf(struct dma_buf_attachment *attach,
			      struct sg_table *sgt,
			      enum dma_data_direction dir)
{
	/* Nothing to do. */
	DRM_DEBUG_PRIME("[DEV:%s] size:%zd dir:%d\n", dev_name(attach->dev),
			attach->dmabuf->size, dir);
}

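/*
 * kmap, kunmap and mmap are unimplemented stubs: kmap returns NULL and
 * mmap fails with -EINVAL, so importers cannot CPU-map the buffer
 * through these hooks.
 */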
static void *udl_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
{
	/* TODO */

	return NULL;
}

static void udl_dmabuf_kunmap(struct dma_buf *dma_buf,
			      unsigned long page_num, void *addr)
{
	/* TODO */
}

static int udl_dmabuf_mmap(struct dma_buf *dma_buf,
			   struct vm_area_struct *vma)
{
	/* TODO */

	return -EINVAL;
}

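/* dma-buf operations udl exposes for the buffers it exports. */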
static const struct dma_buf_ops udl_dmabuf_ops = {
	.attach = udl_attach_dma_buf,
	.detach = udl_detach_dma_buf,
	.map_dma_buf = udl_map_dma_buf,
	.unmap_dma_buf = udl_unmap_dma_buf,
	.map = udl_dmabuf_kmap,
	.unmap = udl_dmabuf_kunmap,
	.mmap = udl_dmabuf_mmap,
	.release = drm_gem_dmabuf_release,
};

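/*
 * Export a udl GEM object as a dma-buf, wiring it up to the ops table
 * above via the common DRM export helper.
 */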
struct dma_buf *udl_gem_prime_export(struct drm_device *dev,
				     struct drm_gem_object *obj, int flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &udl_dmabuf_ops;
	exp_info.size = obj->size;
	exp_info.flags = flags;
	exp_info.priv = obj;

	return drm_gem_dmabuf_export(dev, &exp_info);
}

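/*
 * Wrap an imported scatter/gather table in a new udl GEM object and
 * build the object's page array from the sg entries.
 */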
static int udl_prime_create(struct drm_device *dev,
			    size_t size,
			    struct sg_table *sg,
			    struct udl_gem_object **obj_p)
{
	struct udl_gem_object *obj;
	int npages;

	npages = size / PAGE_SIZE;

	*obj_p = NULL;
	obj = udl_gem_alloc_object(dev, npages * PAGE_SIZE);
	if (!obj)
		return -ENOMEM;

	obj->sg = sg;
	obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (obj->pages == NULL) {
		DRM_ERROR("failed to allocate page array for %d pages\n",
			  npages);
		/* drop the reference taken by udl_gem_alloc_object() */
		drm_gem_object_put_unlocked(&obj->base);
		return -ENOMEM;
	}

	drm_prime_sg_to_page_addr_arrays(sg, obj->pages, NULL, npages);

	*obj_p = obj;
	return 0;
}

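/*
 * Import a dma-buf exported by another driver: attach to it, map it
 * for bidirectional DMA, and wrap the resulting sg table in a udl GEM
 * object marked write-combined.
 */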
struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev,
					    struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sg;
	struct udl_gem_object *uobj;
	int ret;

	/* need to attach */
	get_device(dev->dev);
	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach)) {
		put_device(dev->dev);
		return ERR_CAST(attach);
	}

	get_dma_buf(dma_buf);

	sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sg)) {
		ret = PTR_ERR(sg);
		goto fail_detach;
	}

	ret = udl_prime_create(dev, dma_buf->size, sg, &uobj);
	if (ret)
		goto fail_unmap;

	uobj->base.import_attach = attach;
	uobj->flags = UDL_BO_WC;

	return &uobj->base;

fail_unmap:
	dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);
	put_device(dev->dev);
	return ERR_PTR(ret);
}