/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"

static void msm_gem_vunmap_locked(struct drm_gem_object *obj);


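/*
 * Buffers on devices with no IOMMU come from a contiguous VRAM carveout;
 * physaddr() resolves a carveout-backed object's drm_mm node to the
 * physical address of its first page.
 */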
static dma_addr_t physaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
			priv->vram.paddr;
}

static bool use_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	return !msm_obj->vram_node;
}

/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	dma_addr_t paddr;
	struct page **p;
	int ret, i;

	p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	spin_lock(&priv->vram.lock);
	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
	spin_unlock(&priv->vram.lock);
	if (ret) {
		kvfree(p);
		return ERR_PTR(ret);
	}

	paddr = physaddr(obj);
	for (i = 0; i < npages; i++) {
		p[i] = phys_to_page(paddr);
		paddr += PAGE_SIZE;
	}

	return p;
}

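/*
 * Lazily allocate backing pages on first use: via shmem
 * (drm_gem_get_pages()) when an MMU is present, or from the VRAM
 * carveout otherwise.  For WC/uncached buffers the new pages are also
 * dma-mapped here, since the GPU and display are not cache-coherent
 * with the CPU.
 */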
static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p;
		int npages = obj->size >> PAGE_SHIFT;

		if (use_pages(obj))
			p = drm_gem_get_pages(obj);
		else
			p = get_pages_vram(obj, npages);

		if (IS_ERR(p)) {
			DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->pages = p;

		msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			void *ptr = ERR_CAST(msm_obj->sgt);

			DRM_DEV_ERROR(dev->dev, "failed to allocate sgt\n");
			msm_obj->sgt = NULL;
			return ptr;
		}

		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_map_sg(dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	}

	return msm_obj->pages;
}

static void put_pages_vram(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	spin_lock(&priv->vram.lock);
	drm_mm_remove_node(msm_obj->vram_node);
	spin_unlock(&priv->vram.lock);

	kvfree(msm_obj->pages);
}

static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		if (msm_obj->sgt) {
			/* For non-cached buffers, ensure the new
			 * pages are clean because display controller,
			 * GPU, etc. are not coherent:
			 */
			if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
				dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
					     msm_obj->sgt->nents,
					     DMA_BIDIRECTIONAL);

			sg_free_table(msm_obj->sgt);
			kfree(msm_obj->sgt);
		}

		if (use_pages(obj))
			drm_gem_put_pages(obj, msm_obj->pages, true, false);
		else
			put_pages_vram(obj);

		msm_obj->pages = NULL;
	}
}

struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **p;

	mutex_lock(&msm_obj->lock);

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return ERR_PTR(-EBUSY);
	}

	p = get_pages(obj);
	mutex_unlock(&msm_obj->lock);
	return p;
}

void msm_gem_put_pages(struct drm_gem_object *obj)
{
	/* when we start tracking the pin count, then do something here */
}

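/*
 * Set up pgprot and backing file for a userspace mapping: WC and
 * uncached objects only need their page protection adjusted, while
 * cached objects are redirected to the shmem file (see comment below).
 */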
int msm_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (msm_obj->flags & MSM_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (msm_obj->flags & MSM_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(obj->filp);
		vma->vm_pgoff = 0;
		vma->vm_file = obj->filp;

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return msm_gem_mmap_obj(vma->vm_private_data, vma);
}

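/*
 * Page fault handler for userspace mappings: attach the backing pages
 * if needed and insert the faulting page into the vma as a mixed
 * mapping.
 */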
vm_fault_t msm_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int err;
	vm_fault_t ret;

	/*
	 * vm_ops.open/drm_gem_mmap_obj and close get and put
	 * a reference on obj. So, we don't need to hold one here.
	 */
	err = mutex_lock_interruptible(&msm_obj->lock);
	if (err) {
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return VM_FAULT_SIGBUS;
	}

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = vmf_error(PTR_ERR(pages));
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	ret = vmf_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
out_unlock:
	mutex_unlock(&msm_obj->lock);
out:
	return ret;
}

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);

	if (ret) {
		DRM_DEV_ERROR(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);
	offset = mmap_offset(obj);
	mutex_unlock(&msm_obj->lock);
	return offset;
}

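/*
 * Each object keeps a list of msm_gem_vma, one per address space it is
 * mapped into; add_vma()/lookup_vma()/del_vma() manage that list under
 * msm_obj->lock.
 */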
static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
	if (!vma)
		return ERR_PTR(-ENOMEM);

	vma->aspace = aspace;

	list_add_tail(&vma->list, &msm_obj->vmas);

	return vma;
}

static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	list_for_each_entry(vma, &msm_obj->vmas, list) {
		if (vma->aspace == aspace)
			return vma;
	}

	return NULL;
}

static void del_vma(struct msm_gem_vma *vma)
{
	if (!vma)
		return;

	list_del(&vma->list);
	kfree(vma);
}

/* Called with msm_obj->lock locked */
static void
put_iova(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma, *tmp;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
		msm_gem_purge_vma(vma->aspace, vma);
		msm_gem_close_vma(vma->aspace, vma);
		del_vma(vma);
	}
}

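/*
 * Getting an iova is split in two steps: msm_gem_get_iova_locked()
 * reserves the address range in the given address space (creating the
 * vma on first use), and msm_gem_pin_iova() pins the backing pages and
 * maps them into the IOMMU with the appropriate read/write protection.
 */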
static int msm_gem_get_iova_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;
	int ret = 0;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	vma = lookup_vma(obj, aspace);

	if (!vma) {
		vma = add_vma(obj, aspace);
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		ret = msm_gem_init_vma(aspace, vma, obj->size >> PAGE_SHIFT);
		if (ret) {
			del_vma(vma);
			return ret;
		}
	}

	*iova = vma->iova;
	return 0;
}

static int msm_gem_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;
	struct page **pages;
	int prot = IOMMU_READ;

	if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
		prot |= IOMMU_WRITE;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
		return -EBUSY;

	vma = lookup_vma(obj, aspace);
	if (WARN_ON(!vma))
		return -EINVAL;

	pages = get_pages(obj);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	return msm_gem_map_vma(aspace, vma, prot,
			msm_obj->sgt, obj->size >> PAGE_SHIFT);
}

/* get iova and pin it. Should have a matching put */
int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	u64 local;
	int ret;

	mutex_lock(&msm_obj->lock);

	ret = msm_gem_get_iova_locked(obj, aspace, &local);

	if (!ret)
		ret = msm_gem_pin_iova(obj, aspace);

	if (!ret)
		*iova = local;

	mutex_unlock(&msm_obj->lock);
	return ret;
}

/*
 * Get an iova but don't pin it. Doesn't need a put because iovas are currently
 * valid for the life of the object
 */
int msm_gem_get_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret;

	mutex_lock(&msm_obj->lock);
	ret = msm_gem_get_iova_locked(obj, aspace, iova);
	mutex_unlock(&msm_obj->lock);

	return ret;
}

/* get iova without taking a reference, used in places where you have
 * already done a 'msm_gem_get_and_pin_iova' or 'msm_gem_get_iova'
 */
uint64_t msm_gem_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	mutex_lock(&msm_obj->lock);
	vma = lookup_vma(obj, aspace);
	mutex_unlock(&msm_obj->lock);
	WARN_ON(!vma);

	return vma ? vma->iova : 0;
}

/*
 * Unpin an iova by updating the reference counts. The memory isn't actually
 * purged until something else (shrinker, mm_notifier, destroy, etc) decides
 * to get rid of it
 */
void msm_gem_unpin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	mutex_lock(&msm_obj->lock);
	vma = lookup_vma(obj, aspace);

	if (!WARN_ON(!vma))
		msm_gem_unmap_vma(aspace, vma);

	mutex_unlock(&msm_obj->lock);
}

int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
}

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_put_unlocked(obj);

fail:
	return ret;
}

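/*
 * Common helper behind msm_gem_get_vaddr() and _active(): vmap() the
 * object into the kernel, refusing objects whose madvise state is
 * further along the purge path than the caller is willing to accept.
 */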
static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	mutex_lock(&msm_obj->lock);

	if (WARN_ON(msm_obj->madv > madv)) {
		DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
			msm_obj->madv, madv);
		mutex_unlock(&msm_obj->lock);
		return ERR_PTR(-EBUSY);
	}

	/* increment vmap_count *before* vmap() call, so shrinker can
	 * check vmap_count (is_vunmapable()) outside of msm_obj->lock.
	 * This guarantees that we won't try to msm_gem_vunmap() this
	 * same object from within the vmap() call (while we already
	 * hold msm_obj->lock)
	 */
	msm_obj->vmap_count++;

	if (!msm_obj->vaddr) {
		struct page **pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}
		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
		if (msm_obj->vaddr == NULL) {
			ret = -ENOMEM;
			goto fail;
		}
	}

	mutex_unlock(&msm_obj->lock);
	return msm_obj->vaddr;

fail:
	msm_obj->vmap_count--;
	mutex_unlock(&msm_obj->lock);
	return ERR_PTR(ret);
}

void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{
	return get_vaddr(obj, MSM_MADV_WILLNEED);
}

/*
 * Don't use this!  It is for the very special case of dumping
 * submits from GPU hangs or faults, where the bo may already
 * be MSM_MADV_DONTNEED, but we know the buffer is still on the
 * active list.
 */
void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
{
	return get_vaddr(obj, __MSM_MADV_PURGED);
}

void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);
	WARN_ON(msm_obj->vmap_count < 1);
	msm_obj->vmap_count--;
	mutex_unlock(&msm_obj->lock);
}

/* Update madvise status, returns true if not purged, else
 * false or -errno.
 */
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	if (msm_obj->madv != __MSM_MADV_PURGED)
		msm_obj->madv = madv;

	madv = msm_obj->madv;

	mutex_unlock(&msm_obj->lock);

	return (madv != __MSM_MADV_PURGED);
}

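/*
 * Release everything for a purgeable object (typically from the
 * shrinker, under struct_mutex): iovas, kernel vmap, backing pages, and
 * the shmem pages themselves, so the memory goes back to the system.
 */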
void msm_gem_purge(struct drm_gem_object *obj, enum msm_gem_lock subclass)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
	WARN_ON(!is_purgeable(msm_obj));
	WARN_ON(obj->import_attach);

	mutex_lock_nested(&msm_obj->lock, subclass);

	put_iova(obj);

	msm_gem_vunmap_locked(obj);

	put_pages(obj);

	msm_obj->madv = __MSM_MADV_PURGED;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
			0, (loff_t)-1);

	mutex_unlock(&msm_obj->lock);
}

static void msm_gem_vunmap_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
		return;

	vunmap(msm_obj->vaddr);
	msm_obj->vaddr = NULL;
}

void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock_nested(&msm_obj->lock, subclass);
	msm_gem_vunmap_locked(obj);
	mutex_unlock(&msm_obj->lock);
}

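/*
 * Wait for fences from other fence contexts before the object goes
 * active: an exclusive (write) submit must also wait on all shared
 * fences, while fences from our own context are skipped since the ring
 * is FIFO.
 */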
/* must be called before _move_to_active().. */
int msm_gem_sync_object(struct drm_gem_object *obj,
		struct msm_fence_context *fctx, bool exclusive)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct reservation_object_list *fobj;
	struct dma_fence *fence;
	int i, ret;

	fobj = reservation_object_get_list(msm_obj->resv);
	if (!fobj || (fobj->shared_count == 0)) {
		fence = reservation_object_get_excl(msm_obj->resv);
		/* don't need to wait on our own fences, since ring is fifo */
		if (fence && (fence->context != fctx->context)) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	if (!exclusive || !fobj)
		return 0;

	for (i = 0; i < fobj->shared_count; i++) {
		fence = rcu_dereference_protected(fobj->shared[i],
						reservation_object_held(msm_obj->resv));
		if (fence->context != fctx->context) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	return 0;
}

void msm_gem_move_to_active(struct drm_gem_object *obj,
		struct msm_gpu *gpu, bool exclusive, struct dma_fence *fence)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
	msm_obj->gpu = gpu;
	if (exclusive)
		reservation_object_add_excl_fence(msm_obj->resv, fence);
	else
		reservation_object_add_shared_fence(msm_obj->resv, fence);
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &gpu->active_list);
}

void msm_gem_move_to_inactive(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	msm_obj->gpu = NULL;
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
}

int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	bool write = !!(op & MSM_PREP_WRITE);
	unsigned long remain =
		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
	long ret;

	ret = reservation_object_wait_timeout_rcu(msm_obj->resv, write,
						  true,  remain);
	if (ret == 0)
		return remain == 0 ? -EBUSY : -ETIMEDOUT;
	else if (ret < 0)
		return ret;

	/* TODO cache maintenance */

	return 0;
}

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}

#ifdef CONFIG_DEBUG_FS
static void describe_fence(struct dma_fence *fence, const char *type,
		struct seq_file *m)
{
	if (!dma_fence_is_signaled(fence))
		seq_printf(m, "\t%9s: %s %s seq %llu\n", type,
				fence->ops->get_driver_name(fence),
				fence->ops->get_timeline_name(fence),
				fence->seqno);
}

void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct reservation_object *robj = msm_obj->resv;
	struct reservation_object_list *fobj;
	struct dma_fence *fence;
	struct msm_gem_vma *vma;
	uint64_t off = drm_vma_node_start(&obj->vma_node);
	const char *madv;

	mutex_lock(&msm_obj->lock);

	switch (msm_obj->madv) {
	case __MSM_MADV_PURGED:
		madv = " purged";
		break;
	case MSM_MADV_DONTNEED:
		madv = " purgeable";
		break;
	case MSM_MADV_WILLNEED:
	default:
		madv = "";
		break;
	}

	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p",
			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
			obj->name, kref_read(&obj->refcount),
			off, msm_obj->vaddr);

	seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);

	if (!list_empty(&msm_obj->vmas)) {

		seq_puts(m, "      vmas:");

		list_for_each_entry(vma, &msm_obj->vmas, list)
			seq_printf(m, " [%s: %08llx,%s,inuse=%d]", vma->aspace->name,
				vma->iova, vma->mapped ? "mapped" : "unmapped",
				vma->inuse);

		seq_puts(m, "\n");
	}

	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			describe_fence(fence, "Shared", m);
		}
	}

	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();

	mutex_unlock(&msm_obj->lock);
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_object *msm_obj;
	int count = 0;
	size_t size = 0;

	seq_puts(m, "   flags       id ref  offset   kaddr            size     madv      name\n");
	list_for_each_entry(msm_obj, list, mm_list) {
		struct drm_gem_object *obj = &msm_obj->base;
		seq_puts(m, "   ");
		msm_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

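/*
 * Final freeing is deferred to a workqueue: the last reference can be
 * dropped from contexts where struct_mutex (required by free_object())
 * cannot be taken, so objects are queued on an llist and reaped in
 * msm_gem_free_work().
 */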
/* don't call directly!  Use drm_gem_object_put() and friends */
void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;

	if (llist_add(&msm_obj->freed, &priv->free_list))
		queue_work(priv->wq, &priv->free_work);
}

static void free_object(struct msm_gem_object *msm_obj)
{
	struct drm_gem_object *obj = &msm_obj->base;
	struct drm_device *dev = obj->dev;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* object should not be on active list: */
	WARN_ON(is_active(msm_obj));

	list_del(&msm_obj->mm_list);

	mutex_lock(&msm_obj->lock);

	put_iova(obj);

	if (obj->import_attach) {
		if (msm_obj->vaddr)
			dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		if (msm_obj->pages)
			kvfree(msm_obj->pages);

		drm_prime_gem_destroy(obj, msm_obj->sgt);
	} else {
		msm_gem_vunmap_locked(obj);
		put_pages(obj);
	}

	if (msm_obj->resv == &msm_obj->_resv)
		reservation_object_fini(msm_obj->resv);

	drm_gem_object_release(obj);

	mutex_unlock(&msm_obj->lock);
	kfree(msm_obj);
}

void msm_gem_free_work(struct work_struct *work)
{
	struct msm_drm_private *priv =
		container_of(work, struct msm_drm_private, free_work);
	struct drm_device *dev = priv->dev;
	struct llist_node *freed;
	struct msm_gem_object *msm_obj, *next;

	while ((freed = llist_del_all(&priv->free_list))) {

		mutex_lock(&dev->struct_mutex);

		llist_for_each_entry_safe(msm_obj, next,
					  freed, freed)
			free_object(msm_obj);

		mutex_unlock(&dev->struct_mutex);

		if (need_resched())
			break;
	}
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle,
		char *name)
{
	struct drm_gem_object *obj;
	int ret;

	obj = msm_gem_new(dev, size, flags);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (name)
		msm_gem_object_set_name(obj, "%s", name);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(obj);

	return ret;
}

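/*
 * Common allocation path: validates the cache flags, sets up the
 * reservation object (our own, or one shared with an imported dma-buf),
 * and puts the new object on the inactive list.
 */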
static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct reservation_object *resv,
		struct drm_gem_object **obj,
		bool struct_mutex_locked)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_UNCACHED:
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	default:
		DRM_DEV_ERROR(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	mutex_init(&msm_obj->lock);

	msm_obj->flags = flags;
	msm_obj->madv = MSM_MADV_WILLNEED;

	if (resv) {
		msm_obj->resv = resv;
	} else {
		msm_obj->resv = &msm_obj->_resv;
		reservation_object_init(msm_obj->resv);
	}

	INIT_LIST_HEAD(&msm_obj->submit_entry);
	INIT_LIST_HEAD(&msm_obj->vmas);

	if (struct_mutex_locked) {
		WARN_ON(!mutex_is_locked(&dev->struct_mutex));
		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
	} else {
		mutex_lock(&dev->struct_mutex);
		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
		mutex_unlock(&dev->struct_mutex);
	}

	*obj = &msm_obj->base;

	return 0;
}

static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags, bool struct_mutex_locked)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_gem_object *obj = NULL;
	bool use_vram = false;
	int ret;

	size = PAGE_ALIGN(size);

	if (!msm_use_mmu(dev))
		use_vram = true;
	else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size)
		use_vram = true;

	if (WARN_ON(use_vram && !priv->vram.size))
		return ERR_PTR(-EINVAL);

	/* Disallow zero sized objects as they make the underlying
	 * infrastructure grumpy
	 */
	if (size == 0)
		return ERR_PTR(-EINVAL);

	ret = msm_gem_new_impl(dev, size, flags, NULL, &obj, struct_mutex_locked);
	if (ret)
		goto fail;

	if (use_vram) {
		struct msm_gem_vma *vma;
		struct page **pages;
		struct msm_gem_object *msm_obj = to_msm_bo(obj);

		mutex_lock(&msm_obj->lock);

		vma = add_vma(obj, NULL);
		mutex_unlock(&msm_obj->lock);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto fail;
		}

		to_msm_bo(obj)->vram_node = &vma->node;

		drm_gem_private_object_init(dev, obj, size);

		pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		vma->iova = physaddr(obj);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;
		/*
		 * Our buffers are kept pinned, so allocating them from the
		 * MOVABLE zone is a really bad idea, and conflicts with CMA.
		 * See comments above new_inode() why this is required _and_
		 * expected if you're going to pin these pages.
		 */
		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
	}

	return obj;

fail:
	drm_gem_object_put_unlocked(obj);
	return ERR_PTR(ret);
}

struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, true);
}

struct drm_gem_object *msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, false);
}

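/*
 * Wrap an imported dma-buf in a GEM object.  The pages remain owned by
 * the exporter; we only keep a page array built from its sg_table.
 */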
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt)
{
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj;
	uint32_t size;
	int ret, npages;

	/* if we don't have IOMMU, don't bother pretending we can import: */
	if (!msm_use_mmu(dev)) {
		DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n");
		return ERR_PTR(-EINVAL);
	}

	size = PAGE_ALIGN(dmabuf->size);

	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj, false);
	if (ret)
		goto fail;

	drm_gem_private_object_init(dev, obj, size);

	npages = size / PAGE_SIZE;

	msm_obj = to_msm_bo(obj);
	mutex_lock(&msm_obj->lock);
	msm_obj->sgt = sgt;
	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!msm_obj->pages) {
		mutex_unlock(&msm_obj->lock);
		ret = -ENOMEM;
		goto fail;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
	if (ret) {
		mutex_unlock(&msm_obj->lock);
		goto fail;
	}

	mutex_unlock(&msm_obj->lock);
	return obj;

fail:
	drm_gem_object_put_unlocked(obj);
	return ERR_PTR(ret);
}

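/*
 * Allocate a kernel-owned buffer and map it for both the GPU (iova) and
 * the CPU (vaddr) in one call.  An illustrative (not taken from a real
 * caller) use of the unlocked wrapper below:
 *
 *	struct drm_gem_object *bo;
 *	uint64_t iova;
 *	void *vaddr;
 *
 *	vaddr = msm_gem_kernel_new(dev, size, MSM_BO_WC, aspace, &bo, &iova);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	...
 *	msm_gem_kernel_put(bo, aspace, false);
 */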
static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova, bool locked)
{
	void *vaddr;
	struct drm_gem_object *obj = _msm_gem_new(dev, size, flags, locked);
	int ret;

	if (IS_ERR(obj))
		return ERR_CAST(obj);

	if (iova) {
		ret = msm_gem_get_and_pin_iova(obj, aspace, iova);
		if (ret)
			goto err;
	}

	vaddr = msm_gem_get_vaddr(obj);
	if (IS_ERR(vaddr)) {
		msm_gem_unpin_iova(obj, aspace);
		ret = PTR_ERR(vaddr);
		goto err;
	}

	if (bo)
		*bo = obj;

	return vaddr;
err:
	if (locked)
		drm_gem_object_put(obj);
	else
		drm_gem_object_put_unlocked(obj);

	return ERR_PTR(ret);
}

void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, false);
}

void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, true);
}

void msm_gem_kernel_put(struct drm_gem_object *bo,
		struct msm_gem_address_space *aspace, bool locked)
{
	if (IS_ERR_OR_NULL(bo))
		return;

	msm_gem_put_vaddr(bo);
	msm_gem_unpin_iova(bo, aspace);

	if (locked)
		drm_gem_object_put(bo);
	else
		drm_gem_object_put_unlocked(bo);
}

void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
{
	struct msm_gem_object *msm_obj = to_msm_bo(bo);
	va_list ap;

	if (!fmt)
		return;

	va_start(ap, fmt);
	vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap);
	va_end(ap);
}