/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"

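/* Physical address of a VRAM-carveout backed object: the object's offset
 * within the carveout's drm_mm node plus the carveout base address.
 */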
static dma_addr_t physaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
			priv->vram.paddr;
}

static bool use_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	return !msm_obj->vram_node;
}

/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj,
		int npages)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	dma_addr_t paddr;
	struct page **p;
	int ret, i;

	p = drm_malloc_ab(npages, sizeof(struct page *));
	if (!p)
		return ERR_PTR(-ENOMEM);

	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
	if (ret) {
		drm_free_large(p);
		return ERR_PTR(ret);
	}

	paddr = physaddr(obj);
	for (i = 0; i < npages; i++) {
		p[i] = phys_to_page(paddr);
		paddr += PAGE_SIZE;
	}

	return p;
}

/* called with dev->struct_mutex held */
static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p;
		int npages = obj->size >> PAGE_SHIFT;

		if (use_pages(obj))
			p = drm_gem_get_pages(obj);
		else
			p = get_pages_vram(obj, npages);

		if (IS_ERR(p)) {
			dev_err(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			dev_err(dev->dev, "failed to allocate sgt\n");
			return ERR_CAST(msm_obj->sgt);
		}

		msm_obj->pages = p;

		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_map_sg(dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	}

	return msm_obj->pages;
}

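/* Release the backing pages obtained by get_pages(): undo the DMA mapping
 * for non-cached buffers, free the sg table, and return the pages to shmem
 * (or release the VRAM carveout node).  Called with struct_mutex held.
 */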
static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
		sg_free_table(msm_obj->sgt);
		kfree(msm_obj->sgt);

		if (use_pages(obj))
			drm_gem_put_pages(obj, msm_obj->pages, true, false);
		else {
			drm_mm_remove_node(msm_obj->vram_node);
			drm_free_large(msm_obj->pages);
		}

		msm_obj->pages = NULL;
	}
}

struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct page **p;
	mutex_lock(&dev->struct_mutex);
	p = get_pages(obj);
	mutex_unlock(&dev->struct_mutex);
	return p;
}

void msm_gem_put_pages(struct drm_gem_object *obj)
{
	/* when we start tracking the pin count, then do something here */
}

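/* Set up a vma for CPU access to the object: pick the page protection
 * from the object's caching flags, and for cached objects redirect the
 * mapping to the GEM object's own shmem file.
 */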
int msm_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (msm_obj->flags & MSM_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (msm_obj->flags & MSM_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(obj->filp);
		vma->vm_pgoff = 0;
		vma->vm_file = obj->filp;

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return msm_gem_mmap_obj(vma->vm_private_data, vma);
}

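/* Page fault handler: backing pages are faulted in lazily, one page per
 * fault, by looking up the page for the faulting offset and inserting it
 * into the vma with vm_insert_mixed().
 */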
int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int ret;

	/* This should only happen if userspace tries to pass a mmap'd
	 * but unfaulted gem bo vaddr into submit ioctl, triggering
	 * a page fault while struct_mutex is already held.  This is
	 * not a valid use-case so just bail.
	 */
	if (priv->struct_mutex_task == current)
		return VM_FAULT_SIGBUS;

	/* Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet
	 */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	ret = vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));

out_unlock:
	mutex_unlock(&dev->struct_mutex);
out:
	switch (ret) {
	case -EAGAIN:
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);

	if (ret) {
		dev_err(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;
	mutex_lock(&obj->dev->struct_mutex);
	offset = mmap_offset(obj);
	mutex_unlock(&obj->dev->struct_mutex);
	return offset;
}

static void
put_iova(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = obj->dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int id;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
		if (!priv->aspace[id])
			continue;
		msm_gem_unmap_vma(priv->aspace[id],
				&msm_obj->domain[id], msm_obj->sgt);
	}
}

/* should be called under struct_mutex.. although it can be called
 * from atomic context without struct_mutex to acquire an extra
 * iova ref if you know one is already held.
 *
 * That means when I do eventually need to add support for unpinning
 * the refcnt counter needs to be atomic_t.
 */
int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
		uint64_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	if (!msm_obj->domain[id].iova) {
		struct msm_drm_private *priv = obj->dev->dev_private;
		struct page **pages = get_pages(obj);

		if (IS_ERR(pages))
			return PTR_ERR(pages);

		if (iommu_present(&platform_bus_type)) {
			ret = msm_gem_map_vma(priv->aspace[id], &msm_obj->domain[id],
					msm_obj->sgt, obj->size >> PAGE_SHIFT);
		} else {
			msm_obj->domain[id].iova = physaddr(obj);
		}
	}

	if (!ret)
		*iova = msm_obj->domain[id].iova;

	return ret;
}

/* get iova, taking a reference.  Should have a matching put */
int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint64_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret;

	/* this is safe right now because we don't unmap until the
	 * bo is deleted:
	 */
	if (msm_obj->domain[id].iova) {
		*iova = msm_obj->domain[id].iova;
		return 0;
	}

	mutex_lock(&obj->dev->struct_mutex);
	ret = msm_gem_get_iova_locked(obj, id, iova);
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

/* get iova without taking a reference, used in places where you have
 * already done a 'msm_gem_get_iova()'.
 */
uint64_t msm_gem_iova(struct drm_gem_object *obj, int id)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(!msm_obj->domain[id].iova);
	return msm_obj->domain[id].iova;
}

void msm_gem_put_iova(struct drm_gem_object *obj, int id)
{
	// XXX TODO ..
	// NOTE: probably don't need a _locked() version.. we wouldn't
	// normally unmap here, but instead just mark that it could be
	// unmapped (if the iova refcnt drops to zero), but then later
	// if another _get_iova_locked() fails we can start unmapping
	// things that are no longer needed..
}

int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle);
}

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_unreference_unlocked(obj);

fail:
	return ret;
}

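/* Kernel virtual mapping of the object's pages, created on first use and
 * refcounted via vmap_count.  A rough sketch of the expected pairing from
 * a caller's point of view:
 *
 *	void *ptr = msm_gem_get_vaddr(obj);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *	... CPU access through ptr ...
 *	msm_gem_put_vaddr(obj);
 */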
void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
	if (!msm_obj->vaddr) {
		struct page **pages = get_pages(obj);
		if (IS_ERR(pages))
			return ERR_CAST(pages);
		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
		if (msm_obj->vaddr == NULL)
			return ERR_PTR(-ENOMEM);
	}
	msm_obj->vmap_count++;
	return msm_obj->vaddr;
}

void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{
	void *ret;
	mutex_lock(&obj->dev->struct_mutex);
	ret = msm_gem_get_vaddr_locked(obj);
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
	WARN_ON(msm_obj->vmap_count < 1);
	msm_obj->vmap_count--;
}

void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
	mutex_lock(&obj->dev->struct_mutex);
	msm_gem_put_vaddr_locked(obj);
	mutex_unlock(&obj->dev->struct_mutex);
}

/* Update madvise status, returns true if the object has not been purged,
 * else false.
 */
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	if (msm_obj->madv != __MSM_MADV_PURGED)
		msm_obj->madv = madv;

	return (msm_obj->madv != __MSM_MADV_PURGED);
}

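/* Drop all backing storage for a purgeable object: release its iova
 * mappings, kernel vmap, and pages, then truncate the shmem backing so
 * the memory is returned to the system immediately.
 */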
void msm_gem_purge(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
	WARN_ON(!is_purgeable(msm_obj));
	WARN_ON(obj->import_attach);

	put_iova(obj);

	msm_gem_vunmap(obj);

	put_pages(obj);

	msm_obj->madv = __MSM_MADV_PURGED;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
			0, (loff_t)-1);
}

void msm_gem_vunmap(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
		return;

	vunmap(msm_obj->vaddr);
	msm_obj->vaddr = NULL;
}

/* must be called before _move_to_active().. */
int msm_gem_sync_object(struct drm_gem_object *obj,
		struct msm_fence_context *fctx, bool exclusive)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct reservation_object_list *fobj;
	struct dma_fence *fence;
	int i, ret;

	if (!exclusive) {
		/* NOTE: _reserve_shared() must happen before _add_shared_fence(),
		 * which makes this a slightly strange place to call it.  OTOH this
		 * is a convenient can-fail point to hook it in.  (And similar to
		 * how etnaviv and nouveau handle this.)
		 */
		ret = reservation_object_reserve_shared(msm_obj->resv);
		if (ret)
			return ret;
	}

	fobj = reservation_object_get_list(msm_obj->resv);
	if (!fobj || (fobj->shared_count == 0)) {
		fence = reservation_object_get_excl(msm_obj->resv);
		/* don't need to wait on our own fences, since ring is fifo */
		if (fence && (fence->context != fctx->context)) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	if (!exclusive || !fobj)
		return 0;

	for (i = 0; i < fobj->shared_count; i++) {
		fence = rcu_dereference_protected(fobj->shared[i],
						reservation_object_held(msm_obj->resv));
		if (fence->context != fctx->context) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	return 0;
}

void msm_gem_move_to_active(struct drm_gem_object *obj,
		struct msm_gpu *gpu, bool exclusive, struct dma_fence *fence)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
	msm_obj->gpu = gpu;
	if (exclusive)
		reservation_object_add_excl_fence(msm_obj->resv, fence);
	else
		reservation_object_add_shared_fence(msm_obj->resv, fence);
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &gpu->active_list);
}

void msm_gem_move_to_inactive(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	msm_obj->gpu = NULL;
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
}

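/* Prepare the object for CPU access: wait (with a timeout) for any GPU
 * work tracked in the object's reservation object to complete, unless
 * MSM_PREP_NOSYNC was requested, in which case a busy object fails with
 * -EBUSY.
 */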
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	bool write = !!(op & MSM_PREP_WRITE);
	unsigned long remain =
		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
	long ret;

	ret = reservation_object_wait_timeout_rcu(msm_obj->resv, write,
						  true, remain);
	if (ret == 0)
		return remain == 0 ? -EBUSY : -ETIMEDOUT;
	else if (ret < 0)
		return ret;

	/* TODO cache maintenance */

	return 0;
}

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}

#ifdef CONFIG_DEBUG_FS
static void describe_fence(struct dma_fence *fence, const char *type,
		struct seq_file *m)
{
	if (!dma_fence_is_signaled(fence))
		seq_printf(m, "\t%9s: %s %s seq %u\n", type,
				fence->ops->get_driver_name(fence),
				fence->ops->get_timeline_name(fence),
				fence->seqno);
}

void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct reservation_object *robj = msm_obj->resv;
	struct reservation_object_list *fobj;
	struct msm_drm_private *priv = obj->dev->dev_private;
	struct dma_fence *fence;
	uint64_t off = drm_vma_node_start(&obj->vma_node);
	const char *madv;
	unsigned id;

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	switch (msm_obj->madv) {
	case __MSM_MADV_PURGED:
		madv = " purged";
		break;
	case MSM_MADV_DONTNEED:
		madv = " purgeable";
		break;
	case MSM_MADV_WILLNEED:
	default:
		madv = "";
		break;
	}

	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p\t",
			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
			obj->name, obj->refcount.refcount.counter,
			off, msm_obj->vaddr);

	for (id = 0; id < priv->num_aspaces; id++)
		seq_printf(m, " %08llx", msm_obj->domain[id].iova);

	seq_printf(m, " %zu%s\n", obj->size, madv);

	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			describe_fence(fence, "Shared", m);
		}
	}

	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_object *msm_obj;
	int count = 0;
	size_t size = 0;

	list_for_each_entry(msm_obj, list, mm_list) {
		struct drm_gem_object *obj = &msm_obj->base;
		seq_printf(m, "   ");
		msm_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

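/* Final teardown of a GEM object once its last reference is dropped:
 * remove it from the inactive list, release iova mappings and backing
 * pages (or the imported dma-buf resources), and free the object.
 */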
void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* object should not be on active list: */
	WARN_ON(is_active(msm_obj));

	list_del(&msm_obj->mm_list);

	put_iova(obj);

	if (obj->import_attach) {
		if (msm_obj->vaddr)
			dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		if (msm_obj->pages)
			drm_free_large(msm_obj->pages);

		drm_prime_gem_destroy(obj, msm_obj->sgt);
	} else {
		msm_gem_vunmap(obj);
		put_pages(obj);
	}

	if (msm_obj->resv == &msm_obj->_resv)
		reservation_object_fini(msm_obj->resv);

	drm_gem_object_release(obj);

	kfree(msm_obj);
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle)
{
	struct drm_gem_object *obj;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	obj = msm_gem_new(dev, size, flags);

	mutex_unlock(&dev->struct_mutex);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(obj);

	return ret;
}

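/* Common allocation path shared by msm_gem_new() and msm_gem_import():
 * validate the caching flags, decide between shmem and VRAM-carveout
 * backing, and initialize the msm_gem_object bookkeeping.
 */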
static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct reservation_object *resv,
		struct drm_gem_object **obj)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;
	bool use_vram = false;

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_UNCACHED:
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	default:
		dev_err(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	if (!iommu_present(&platform_bus_type))
		use_vram = true;
	else if ((flags & MSM_BO_STOLEN) && priv->vram.size)
		use_vram = true;

	if (WARN_ON(use_vram && !priv->vram.size))
		return -EINVAL;

	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	if (use_vram)
		msm_obj->vram_node = &msm_obj->domain[0].node;

	msm_obj->flags = flags;
	msm_obj->madv = MSM_MADV_WILLNEED;

	if (resv) {
		msm_obj->resv = resv;
	} else {
		msm_obj->resv = &msm_obj->_resv;
		reservation_object_init(msm_obj->resv);
	}

	INIT_LIST_HEAD(&msm_obj->submit_entry);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);

	*obj = &msm_obj->base;

	return 0;
}

struct drm_gem_object *msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	struct drm_gem_object *obj = NULL;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	size = PAGE_ALIGN(size);

	ret = msm_gem_new_impl(dev, size, flags, NULL, &obj);
	if (ret)
		goto fail;

	if (use_pages(obj)) {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;
	} else {
		drm_gem_private_object_init(dev, obj, size);
	}

	return obj;

fail:
	drm_gem_object_unreference(obj);
	return ERR_PTR(ret);
}

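/* Wrap an imported dma-buf in a GEM object: reuse the importer-provided
 * sg_table and reservation object, and populate the page array from the
 * scatterlist (only supported when an IOMMU is present).
 */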
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt)
{
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj;
	uint32_t size;
	int ret, npages;

	/* if we don't have IOMMU, don't bother pretending we can import: */
	if (!iommu_present(&platform_bus_type)) {
		dev_err(dev->dev, "cannot import without IOMMU\n");
		return ERR_PTR(-EINVAL);
	}

	size = PAGE_ALIGN(dmabuf->size);

	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj);
	if (ret)
		goto fail;

	drm_gem_private_object_init(dev, obj, size);

	npages = size / PAGE_SIZE;

	msm_obj = to_msm_bo(obj);
	msm_obj->sgt = sgt;
	msm_obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
	if (!msm_obj->pages) {
		ret = -ENOMEM;
		goto fail;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
	if (ret)
		goto fail;

	return obj;

fail:
	drm_gem_object_unreference_unlocked(obj);
	return ERR_PTR(ret);
}