// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"

static void msm_gem_vunmap_locked(struct drm_gem_object *obj);


static dma_addr_t physaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
			priv->vram.paddr;
}
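
/*
 * Example: vram_node->start is in units of pages, so with PAGE_SHIFT == 12,
 * a node starting at page 3 of a carveout based at vram.paddr == 0x80000000
 * gives physaddr() == (3 << 12) + 0x80000000 == 0x80003000.
 */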

static bool use_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	return !msm_obj->vram_node;
}

/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	dma_addr_t paddr;
	struct page **p;
	int ret, i;

	p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	spin_lock(&priv->vram.lock);
	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
	spin_unlock(&priv->vram.lock);
	if (ret) {
		kvfree(p);
		return ERR_PTR(ret);
	}

	paddr = physaddr(obj);
	for (i = 0; i < npages; i++) {
		p[i] = phys_to_page(paddr);
		paddr += PAGE_SIZE;
	}

	return p;
}

static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p;
		int npages = obj->size >> PAGE_SHIFT;

		if (use_pages(obj))
			p = drm_gem_get_pages(obj);
		else
			p = get_pages_vram(obj, npages);

		if (IS_ERR(p)) {
			DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->pages = p;

		msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			void *ptr = ERR_CAST(msm_obj->sgt);

			DRM_DEV_ERROR(dev->dev, "failed to allocate sgt\n");
			msm_obj->sgt = NULL;
			return ptr;
		}

		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_map_sg(dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	}

	return msm_obj->pages;
}
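
/*
 * Note: the dma_map_sg() above is used for its cache-maintenance side
 * effect (cleaning the freshly allocated pages out of the CPU caches);
 * the GPU-visible mapping itself is managed separately, via
 * msm_gem_map_vma() in the pin path below.
 */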

static void put_pages_vram(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	spin_lock(&priv->vram.lock);
	drm_mm_remove_node(msm_obj->vram_node);
	spin_unlock(&priv->vram.lock);

	kvfree(msm_obj->pages);
}

static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		if (msm_obj->sgt) {
			/* For non-cached buffers, ensure the new
			 * pages are clean because display controller,
			 * GPU, etc. are not coherent:
			 */
			if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
				dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
					     msm_obj->sgt->nents,
					     DMA_BIDIRECTIONAL);

			sg_free_table(msm_obj->sgt);
			kfree(msm_obj->sgt);
		}

		if (use_pages(obj))
			drm_gem_put_pages(obj, msm_obj->pages, true, false);
		else
			put_pages_vram(obj);

		msm_obj->pages = NULL;
	}
}

struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **p;

	mutex_lock(&msm_obj->lock);

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return ERR_PTR(-EBUSY);
	}

	p = get_pages(obj);
	mutex_unlock(&msm_obj->lock);
	return p;
}

void msm_gem_put_pages(struct drm_gem_object *obj)
{
	/* when we start tracking the pin count, then do something here */
}

int msm_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (msm_obj->flags & MSM_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (msm_obj->flags & MSM_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(obj->filp);
		vma->vm_pgoff = 0;
		vma->vm_file = obj->filp;

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return msm_gem_mmap_obj(vma->vm_private_data, vma);
}

vm_fault_t msm_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int err;
	vm_fault_t ret;

	/*
	 * vm_ops.open/drm_gem_mmap_obj and close get and put
	 * a reference on obj. So, we don't need to hold one here.
	 */
	err = mutex_lock_interruptible(&msm_obj->lock);
	if (err) {
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return VM_FAULT_SIGBUS;
	}

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = vmf_error(PTR_ERR(pages));
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	ret = vmf_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
out_unlock:
	mutex_unlock(&msm_obj->lock);
out:
	return ret;
}
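
/*
 * Note on the fault path above: if mutex_lock_interruptible() is
 * interrupted by a signal, the handler returns VM_FAULT_NOPAGE, so the
 * faulting access is simply retried once the signal has been handled
 * rather than being reported as an error.
 */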

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);

	if (ret) {
		DRM_DEV_ERROR(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);
	offset = mmap_offset(obj);
	mutex_unlock(&msm_obj->lock);
	return offset;
}

static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
	if (!vma)
		return ERR_PTR(-ENOMEM);

	vma->aspace = aspace;

	list_add_tail(&vma->list, &msm_obj->vmas);

	return vma;
}

static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	list_for_each_entry(vma, &msm_obj->vmas, list) {
		if (vma->aspace == aspace)
			return vma;
	}

	return NULL;
}

static void del_vma(struct msm_gem_vma *vma)
{
	if (!vma)
		return;

	list_del(&vma->list);
	kfree(vma);
}

/* Called with msm_obj->lock locked */
static void
put_iova(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma, *tmp;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
		msm_gem_purge_vma(vma->aspace, vma);
		msm_gem_close_vma(vma->aspace, vma);
		del_vma(vma);
	}
}

static int msm_gem_get_iova_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;
	int ret = 0;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	vma = lookup_vma(obj, aspace);

	if (!vma) {
		vma = add_vma(obj, aspace);
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		ret = msm_gem_init_vma(aspace, vma, obj->size >> PAGE_SHIFT);
		if (ret) {
			del_vma(vma);
			return ret;
		}
	}

	*iova = vma->iova;
	return 0;
}

static int msm_gem_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;
	struct page **pages;
	int prot = IOMMU_READ;

	if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
		prot |= IOMMU_WRITE;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
		return -EBUSY;

	vma = lookup_vma(obj, aspace);
	if (WARN_ON(!vma))
		return -EINVAL;

	pages = get_pages(obj);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	return msm_gem_map_vma(aspace, vma, prot,
			msm_obj->sgt, obj->size >> PAGE_SHIFT);
}

/* get iova and pin it. Should have a matching put */
int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	u64 local;
	int ret;

	mutex_lock(&msm_obj->lock);

	ret = msm_gem_get_iova_locked(obj, aspace, &local);

	if (!ret)
		ret = msm_gem_pin_iova(obj, aspace);

	if (!ret)
		*iova = local;

	mutex_unlock(&msm_obj->lock);
	return ret;
}
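
/*
 * Usage sketch (illustrative): a caller that needs a GPU-visible address
 * pins the iova for as long as the hardware uses it, then drops the pin:
 *
 *	uint64_t iova;
 *	int ret = msm_gem_get_and_pin_iova(obj, aspace, &iova);
 *	if (ret)
 *		return ret;
 *	// ... program 'iova' into the hardware ...
 *	msm_gem_unpin_iova(obj, aspace);
 */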

/*
 * Get an iova but don't pin it. Doesn't need a put because iovas are currently
 * valid for the life of the object
 */
int msm_gem_get_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret;

	mutex_lock(&msm_obj->lock);
	ret = msm_gem_get_iova_locked(obj, aspace, iova);
	mutex_unlock(&msm_obj->lock);

	return ret;
}

/* get iova without taking a reference, used in places where you have
 * already done a 'msm_gem_get_and_pin_iova' or 'msm_gem_get_iova'
 */
uint64_t msm_gem_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	mutex_lock(&msm_obj->lock);
	vma = lookup_vma(obj, aspace);
	mutex_unlock(&msm_obj->lock);
	WARN_ON(!vma);

	return vma ? vma->iova : 0;
}

/*
 * Unpin an iova by updating the reference counts. The memory isn't actually
 * purged until something else (shrinker, mm_notifier, destroy, etc) decides
 * to get rid of it
 */
void msm_gem_unpin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	mutex_lock(&msm_obj->lock);
	vma = lookup_vma(obj, aspace);

	if (!WARN_ON(!vma))
		msm_gem_unmap_vma(aspace, vma);

	mutex_unlock(&msm_obj->lock);
}

int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
}

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_put_unlocked(obj);

fail:
	return ret;
}

static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	mutex_lock(&msm_obj->lock);

	if (WARN_ON(msm_obj->madv > madv)) {
		DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
			msm_obj->madv, madv);
		mutex_unlock(&msm_obj->lock);
		return ERR_PTR(-EBUSY);
	}

	/* increment vmap_count *before* vmap() call, so shrinker can
	 * check vmap_count (is_vunmapable()) outside of msm_obj->lock.
	 * This guarantees that we won't try to msm_gem_vunmap() this
	 * same object from within the vmap() call (while we already
	 * hold msm_obj->lock)
	 */
	msm_obj->vmap_count++;

	if (!msm_obj->vaddr) {
		struct page **pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}
		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
		if (msm_obj->vaddr == NULL) {
			ret = -ENOMEM;
			goto fail;
		}
	}

	mutex_unlock(&msm_obj->lock);
	return msm_obj->vaddr;

fail:
	msm_obj->vmap_count--;
	mutex_unlock(&msm_obj->lock);
	return ERR_PTR(ret);
}

void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{
	return get_vaddr(obj, MSM_MADV_WILLNEED);
}

/*
 * Don't use this!  It is for the very special case of dumping
 * submits from GPU hangs or faults, where the bo may already
 * be MSM_MADV_DONTNEED, but we know the buffer is still on the
 * active list.
 */
void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
{
	return get_vaddr(obj, __MSM_MADV_PURGED);
}

void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);
	WARN_ON(msm_obj->vmap_count < 1);
	msm_obj->vmap_count--;
	mutex_unlock(&msm_obj->lock);
}
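
/*
 * Usage sketch (illustrative): CPU access brackets the work with a
 * get/put pair so the shrinker knows the vmap'ing is in use ('data' and
 * 'len' are hypothetical):
 *
 *	void *ptr = msm_gem_get_vaddr(obj);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *	memcpy(ptr, data, len);
 *	msm_gem_put_vaddr(obj);
 */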

/* Update madvise status, returns true if not purged, else
 * false or -errno.
 */
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	if (msm_obj->madv != __MSM_MADV_PURGED)
		msm_obj->madv = madv;

	madv = msm_obj->madv;

	mutex_unlock(&msm_obj->lock);

	return (madv != __MSM_MADV_PURGED);
}
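
/*
 * Note (informal): userspace typically marks idle buffers
 * MSM_MADV_DONTNEED, making them candidates for msm_gem_purge(); when it
 * marks a buffer MSM_MADV_WILLNEED again, a false return here tells the
 * caller that the backing pages were purged in the meantime and the
 * contents are gone.
 */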

void msm_gem_purge(struct drm_gem_object *obj, enum msm_gem_lock subclass)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
	WARN_ON(!is_purgeable(msm_obj));
	WARN_ON(obj->import_attach);

	mutex_lock_nested(&msm_obj->lock, subclass);

	put_iova(obj);

	msm_gem_vunmap_locked(obj);

	put_pages(obj);

	msm_obj->madv = __MSM_MADV_PURGED;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
			0, (loff_t)-1);

	mutex_unlock(&msm_obj->lock);
}

static void msm_gem_vunmap_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
		return;

	vunmap(msm_obj->vaddr);
	msm_obj->vaddr = NULL;
}

void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock_nested(&msm_obj->lock, subclass);
	msm_gem_vunmap_locked(obj);
	mutex_unlock(&msm_obj->lock);
}

/* must be called before _move_to_active().. */
int msm_gem_sync_object(struct drm_gem_object *obj,
		struct msm_fence_context *fctx, bool exclusive)
{
	struct reservation_object_list *fobj;
	struct dma_fence *fence;
	int i, ret;

	fobj = reservation_object_get_list(obj->resv);
	if (!fobj || (fobj->shared_count == 0)) {
		fence = reservation_object_get_excl(obj->resv);
		/* don't need to wait on our own fences, since ring is fifo */
		if (fence && (fence->context != fctx->context)) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	if (!exclusive || !fobj)
		return 0;

	for (i = 0; i < fobj->shared_count; i++) {
		fence = rcu_dereference_protected(fobj->shared[i],
						reservation_object_held(obj->resv));
		if (fence->context != fctx->context) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	return 0;
}

void msm_gem_move_to_active(struct drm_gem_object *obj,
		struct msm_gpu *gpu, bool exclusive, struct dma_fence *fence)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
	msm_obj->gpu = gpu;
	if (exclusive)
		reservation_object_add_excl_fence(obj->resv, fence);
	else
		reservation_object_add_shared_fence(obj->resv, fence);
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &gpu->active_list);
}

void msm_gem_move_to_inactive(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	msm_obj->gpu = NULL;
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
}

int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
	bool write = !!(op & MSM_PREP_WRITE);
	unsigned long remain =
		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
	long ret;

	ret = reservation_object_wait_timeout_rcu(obj->resv, write,
						  true, remain);
	if (ret == 0)
		return remain == 0 ? -EBUSY : -ETIMEDOUT;
	else if (ret < 0)
		return ret;

	/* TODO cache maintenance */

	return 0;
}

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}

#ifdef CONFIG_DEBUG_FS
static void describe_fence(struct dma_fence *fence, const char *type,
		struct seq_file *m)
{
	if (!dma_fence_is_signaled(fence))
		seq_printf(m, "\t%9s: %s %s seq %llu\n", type,
				fence->ops->get_driver_name(fence),
				fence->ops->get_timeline_name(fence),
				fence->seqno);
}

void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct reservation_object *robj = obj->resv;
	struct reservation_object_list *fobj;
	struct dma_fence *fence;
	struct msm_gem_vma *vma;
	uint64_t off = drm_vma_node_start(&obj->vma_node);
	const char *madv;

	mutex_lock(&msm_obj->lock);

	switch (msm_obj->madv) {
	case __MSM_MADV_PURGED:
		madv = " purged";
		break;
	case MSM_MADV_DONTNEED:
		madv = " purgeable";
		break;
	case MSM_MADV_WILLNEED:
	default:
		madv = "";
		break;
	}

	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p",
			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
			obj->name, kref_read(&obj->refcount),
			off, msm_obj->vaddr);

	seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);

	if (!list_empty(&msm_obj->vmas)) {

		seq_puts(m, "      vmas:");

		list_for_each_entry(vma, &msm_obj->vmas, list)
			seq_printf(m, " [%s: %08llx,%s,inuse=%d]",
				vma->aspace != NULL ? vma->aspace->name : NULL,
				vma->iova, vma->mapped ? "mapped" : "unmapped",
				vma->inuse);

		seq_puts(m, "\n");
	}

	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			describe_fence(fence, "Shared", m);
		}
	}

	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();

	mutex_unlock(&msm_obj->lock);
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_object *msm_obj;
	int count = 0;
	size_t size = 0;

	seq_puts(m, "   flags       id ref  offset   kaddr            size     madv      name\n");
	list_for_each_entry(msm_obj, list, mm_list) {
		struct drm_gem_object *obj = &msm_obj->base;
		seq_puts(m, "   ");
		msm_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

/* don't call directly!  Use drm_gem_object_put() and friends */
void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;

	if (llist_add(&msm_obj->freed, &priv->free_list))
		queue_work(priv->wq, &priv->free_work);
}

static void free_object(struct msm_gem_object *msm_obj)
{
	struct drm_gem_object *obj = &msm_obj->base;
	struct drm_device *dev = obj->dev;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* object should not be on active list: */
	WARN_ON(is_active(msm_obj));

	list_del(&msm_obj->mm_list);

	mutex_lock(&msm_obj->lock);

	put_iova(obj);

	if (obj->import_attach) {
		if (msm_obj->vaddr)
			dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		if (msm_obj->pages)
			kvfree(msm_obj->pages);

		drm_prime_gem_destroy(obj, msm_obj->sgt);
	} else {
		msm_gem_vunmap_locked(obj);
		put_pages(obj);
	}

	drm_gem_object_release(obj);

	mutex_unlock(&msm_obj->lock);
	kfree(msm_obj);
}

void msm_gem_free_work(struct work_struct *work)
{
	struct msm_drm_private *priv =
		container_of(work, struct msm_drm_private, free_work);
	struct drm_device *dev = priv->dev;
	struct llist_node *freed;
	struct msm_gem_object *msm_obj, *next;

	while ((freed = llist_del_all(&priv->free_list))) {

		mutex_lock(&dev->struct_mutex);

		llist_for_each_entry_safe(msm_obj, next,
					  freed, freed)
			free_object(msm_obj);

		mutex_unlock(&dev->struct_mutex);

		if (need_resched())
			break;
	}
}
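
/*
 * Design note: freeing is pushed to a workqueue (see msm_gem_free_object()
 * above) so the final drm_gem_object_put() never has to take struct_mutex
 * itself; objects are collected on a lockless llist and reaped here in
 * process context, where struct_mutex and msm_obj->lock can safely be
 * taken.
 */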

/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle,
		char *name)
{
	struct drm_gem_object *obj;
	int ret;

	obj = msm_gem_new(dev, size, flags);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (name)
		msm_gem_object_set_name(obj, "%s", name);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(obj);

	return ret;
}

static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct reservation_object *resv,
		struct drm_gem_object **obj,
		bool struct_mutex_locked)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_UNCACHED:
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	default:
		DRM_DEV_ERROR(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	mutex_init(&msm_obj->lock);

	msm_obj->flags = flags;
	msm_obj->madv = MSM_MADV_WILLNEED;

	if (resv)
		msm_obj->base.resv = resv;

	INIT_LIST_HEAD(&msm_obj->submit_entry);
	INIT_LIST_HEAD(&msm_obj->vmas);

	if (struct_mutex_locked) {
		WARN_ON(!mutex_is_locked(&dev->struct_mutex));
		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
	} else {
		mutex_lock(&dev->struct_mutex);
		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
		mutex_unlock(&dev->struct_mutex);
	}

	*obj = &msm_obj->base;

	return 0;
}

static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags, bool struct_mutex_locked)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_gem_object *obj = NULL;
	bool use_vram = false;
	int ret;

	size = PAGE_ALIGN(size);

	if (!msm_use_mmu(dev))
		use_vram = true;
	else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size)
		use_vram = true;

	if (WARN_ON(use_vram && !priv->vram.size))
		return ERR_PTR(-EINVAL);

	/* Disallow zero sized objects as they make the underlying
	 * infrastructure grumpy
	 */
	if (size == 0)
		return ERR_PTR(-EINVAL);

	ret = msm_gem_new_impl(dev, size, flags, NULL, &obj, struct_mutex_locked);
	if (ret)
		goto fail;

	if (use_vram) {
		struct msm_gem_vma *vma;
		struct page **pages;
		struct msm_gem_object *msm_obj = to_msm_bo(obj);

		mutex_lock(&msm_obj->lock);

		vma = add_vma(obj, NULL);
		mutex_unlock(&msm_obj->lock);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto fail;
		}

		to_msm_bo(obj)->vram_node = &vma->node;

		drm_gem_private_object_init(dev, obj, size);

		pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		vma->iova = physaddr(obj);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;
		/*
		 * Our buffers are kept pinned, so allocating them from the
		 * MOVABLE zone is a really bad idea, and conflicts with CMA.
		 * See comments above new_inode() why this is required _and_
		 * expected if you're going to pin these pages.
		 */
		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
	}

	return obj;

fail:
	drm_gem_object_put_unlocked(obj);
	return ERR_PTR(ret);
}

struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, true);
}

struct drm_gem_object *msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, false);
}

struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt)
{
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj;
	uint32_t size;
	int ret, npages;

	/* if we don't have IOMMU, don't bother pretending we can import: */
	if (!msm_use_mmu(dev)) {
		DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n");
		return ERR_PTR(-EINVAL);
	}

	size = PAGE_ALIGN(dmabuf->size);

	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj, false);
	if (ret)
		goto fail;

	drm_gem_private_object_init(dev, obj, size);

	npages = size / PAGE_SIZE;

	msm_obj = to_msm_bo(obj);
	mutex_lock(&msm_obj->lock);
	msm_obj->sgt = sgt;
	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!msm_obj->pages) {
		mutex_unlock(&msm_obj->lock);
		ret = -ENOMEM;
		goto fail;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
	if (ret) {
		mutex_unlock(&msm_obj->lock);
		goto fail;
	}

	mutex_unlock(&msm_obj->lock);
	return obj;

fail:
	drm_gem_object_put_unlocked(obj);
	return ERR_PTR(ret);
}

static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova, bool locked)
{
	void *vaddr;
	struct drm_gem_object *obj = _msm_gem_new(dev, size, flags, locked);
	int ret;

	if (IS_ERR(obj))
		return ERR_CAST(obj);

	if (iova) {
		ret = msm_gem_get_and_pin_iova(obj, aspace, iova);
		if (ret)
			goto err;
	}

	vaddr = msm_gem_get_vaddr(obj);
	if (IS_ERR(vaddr)) {
		msm_gem_unpin_iova(obj, aspace);
		ret = PTR_ERR(vaddr);
		goto err;
	}

	if (bo)
		*bo = obj;

	return vaddr;
err:
	if (locked)
		drm_gem_object_put(obj);
	else
		drm_gem_object_put_unlocked(obj);

	return ERR_PTR(ret);
}

void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, false);
}

void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, true);
}

void msm_gem_kernel_put(struct drm_gem_object *bo,
		struct msm_gem_address_space *aspace, bool locked)
{
	if (IS_ERR_OR_NULL(bo))
		return;

	msm_gem_put_vaddr(bo);
	msm_gem_unpin_iova(bo, aspace);

	if (locked)
		drm_gem_object_put(bo);
	else
		drm_gem_object_put_unlocked(bo);
}
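
/*
 * Usage sketch (illustrative): a kernel-internal allocation pairs
 * msm_gem_kernel_new() with msm_gem_kernel_put(); 'gpu' is a hypothetical
 * struct msm_gpu pointer:
 *
 *	struct drm_gem_object *bo;
 *	uint64_t iova;
 *	void *ptr = msm_gem_kernel_new(gpu->dev, SZ_4K, MSM_BO_WC,
 *			gpu->aspace, &bo, &iova);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *	// ... use 'ptr' and 'iova' ...
 *	msm_gem_kernel_put(bo, gpu->aspace, false);
 */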

void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
{
	struct msm_gem_object *msm_obj = to_msm_bo(bo);
	va_list ap;

	if (!fmt)
		return;

	va_start(ap, fmt);
	vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap);
	va_end(ap);
}