// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>

#include <drm/drm_prime.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"

static void msm_gem_vunmap_locked(struct drm_gem_object *obj);


static dma_addr_t physaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
			priv->vram.paddr;
}

static bool use_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	return !msm_obj->vram_node;
}

/*
 * Cache sync.. this is a bit over-complicated, to fit dma-mapping
 * API. Really GPU cache is out of scope here (handled on cmdstream)
 * and all we need to do is invalidate newly allocated pages before
 * mapping to CPU as uncached/writecombine.
 *
 * On top of this, we have the added headache, that depending on
 * display generation, the display's iommu may be wired up to either
 * the toplevel drm device (mdss), or to the mdp sub-node, meaning
 * that here we either have dma-direct or iommu ops.
 *
 * Let this be a cautionary tale of abstraction gone wrong.
 */

static void sync_for_device(struct msm_gem_object *msm_obj)
{
	struct device *dev = msm_obj->base.dev->dev;

	if (get_dma_ops(dev) && IS_ENABLED(CONFIG_ARM64)) {
		dma_sync_sg_for_device(dev, msm_obj->sgt->sgl,
			msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	} else {
		dma_map_sg(dev, msm_obj->sgt->sgl,
			msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	}
}

static void sync_for_cpu(struct msm_gem_object *msm_obj)
{
	struct device *dev = msm_obj->base.dev->dev;

	if (get_dma_ops(dev) && IS_ENABLED(CONFIG_ARM64)) {
		dma_sync_sg_for_cpu(dev, msm_obj->sgt->sgl,
			msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	} else {
		dma_unmap_sg(dev, msm_obj->sgt->sgl,
			msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	}
}

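/*
 * Note (a reading of the code above, not a statement from the original
 * authors): in the dma-direct branch, dma_map_sg()/dma_unmap_sg() are
 * used purely for their cache clean/invalidate side effects; the DMA
 * addresses they produce are not consumed here, since GPU and display
 * mappings are managed separately through the driver's own IOMMU paths.
 */
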
/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	dma_addr_t paddr;
	struct page **p;
	int ret, i;

	p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	spin_lock(&priv->vram.lock);
	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
	spin_unlock(&priv->vram.lock);
	if (ret) {
		kvfree(p);
		return ERR_PTR(ret);
	}

	paddr = physaddr(obj);
	for (i = 0; i < npages; i++) {
		p[i] = phys_to_page(paddr);
		paddr += PAGE_SIZE;
	}

	return p;
}

static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p;
		int npages = obj->size >> PAGE_SHIFT;

		if (use_pages(obj))
			p = drm_gem_get_pages(obj);
		else
			p = get_pages_vram(obj, npages);

		if (IS_ERR(p)) {
			DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->pages = p;

		msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			void *ptr = ERR_CAST(msm_obj->sgt);

			DRM_DEV_ERROR(dev->dev, "failed to allocate sgt\n");
			msm_obj->sgt = NULL;
			return ptr;
		}

		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			sync_for_device(msm_obj);
	}

	return msm_obj->pages;
}

static void put_pages_vram(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	spin_lock(&priv->vram.lock);
	drm_mm_remove_node(msm_obj->vram_node);
	spin_unlock(&priv->vram.lock);

	kvfree(msm_obj->pages);
}

static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		if (msm_obj->sgt) {
			/* For non-cached buffers, ensure the new
			 * pages are clean because display controller,
			 * GPU, etc. are not coherent:
			 */
			if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
				sync_for_cpu(msm_obj);

			sg_free_table(msm_obj->sgt);
			kfree(msm_obj->sgt);
		}

		if (use_pages(obj))
			drm_gem_put_pages(obj, msm_obj->pages, true, false);
		else
			put_pages_vram(obj);

		msm_obj->pages = NULL;
	}
}

struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **p;

	mutex_lock(&msm_obj->lock);

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return ERR_PTR(-EBUSY);
	}

	p = get_pages(obj);
	mutex_unlock(&msm_obj->lock);
	return p;
}

void msm_gem_put_pages(struct drm_gem_object *obj)
{
	/* when we start tracking the pin count, then do something here */
}

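/*
 * Minimal usage sketch for the pages API (illustrative only; error
 * handling trimmed):
 *
 *	struct page **pages = msm_gem_get_pages(obj);
 *	if (IS_ERR(pages))
 *		return PTR_ERR(pages);
 *	...
 *	msm_gem_put_pages(obj);
 */
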
int msm_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (msm_obj->flags & MSM_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (msm_obj->flags & MSM_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(obj->filp);
		vma->vm_pgoff = 0;
		vma->vm_file = obj->filp;

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return msm_gem_mmap_obj(vma->vm_private_data, vma);
}

vm_fault_t msm_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int err;
	vm_fault_t ret;

	/*
	 * vm_ops.open/drm_gem_mmap_obj and close get and put
	 * a reference on obj. So, we don't need to hold one here.
	 */
	err = mutex_lock_interruptible(&msm_obj->lock);
	if (err) {
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return VM_FAULT_SIGBUS;
	}

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = vmf_error(PTR_ERR(pages));
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	ret = vmf_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
out_unlock:
	mutex_unlock(&msm_obj->lock);
out:
	return ret;
}

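/*
 * Note: returning VM_FAULT_NOPAGE when mutex_lock_interruptible() is
 * interrupted (above) lets the fault simply be retried once the signal
 * has been handled, rather than raising SIGBUS for a transient condition.
 */
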
/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);

	if (ret) {
		DRM_DEV_ERROR(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);
	offset = mmap_offset(obj);
	mutex_unlock(&msm_obj->lock);
	return offset;
}

static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
	if (!vma)
		return ERR_PTR(-ENOMEM);

	vma->aspace = aspace;

	list_add_tail(&vma->list, &msm_obj->vmas);

	return vma;
}

static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	list_for_each_entry(vma, &msm_obj->vmas, list) {
		if (vma->aspace == aspace)
			return vma;
	}

	return NULL;
}

static void del_vma(struct msm_gem_vma *vma)
{
	if (!vma)
		return;

	list_del(&vma->list);
	kfree(vma);
}

/* Called with msm_obj->lock locked */
static void
put_iova(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma, *tmp;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
		if (vma->aspace) {
			msm_gem_purge_vma(vma->aspace, vma);
			msm_gem_close_vma(vma->aspace, vma);
		}
		del_vma(vma);
	}
}

static int msm_gem_get_iova_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova,
		u64 range_start, u64 range_end)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;
	int ret = 0;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	vma = lookup_vma(obj, aspace);

	if (!vma) {
		vma = add_vma(obj, aspace);
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		ret = msm_gem_init_vma(aspace, vma, obj->size >> PAGE_SHIFT,
			range_start, range_end);
		if (ret) {
			del_vma(vma);
			return ret;
		}
	}

	*iova = vma->iova;
	return 0;
}

static int msm_gem_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;
	struct page **pages;
	int prot = IOMMU_READ;

	if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
		prot |= IOMMU_WRITE;

	if (msm_obj->flags & MSM_BO_MAP_PRIV)
		prot |= IOMMU_PRIV;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
		return -EBUSY;

	vma = lookup_vma(obj, aspace);
	if (WARN_ON(!vma))
		return -EINVAL;

	pages = get_pages(obj);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	return msm_gem_map_vma(aspace, vma, prot,
			msm_obj->sgt, obj->size >> PAGE_SHIFT);
}

/*
 * Get the iova and pin it.  Should have a matching put.
 * Limits the iova to the specified range (in pages).
 */
int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova,
		u64 range_start, u64 range_end)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	u64 local;
	int ret;

	mutex_lock(&msm_obj->lock);

	ret = msm_gem_get_iova_locked(obj, aspace, &local,
		range_start, range_end);

	if (!ret)
		ret = msm_gem_pin_iova(obj, aspace);

	if (!ret)
		*iova = local;

	mutex_unlock(&msm_obj->lock);
	return ret;
}

/* get iova and pin it. Should have a matching put */
int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	return msm_gem_get_and_pin_iova_range(obj, aspace, iova, 0, U64_MAX);
}

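/*
 * Minimal pin/unpin usage sketch (illustrative only; the real callers are
 * the submit and display paths):
 *
 *	uint64_t iova;
 *	int ret = msm_gem_get_and_pin_iova(obj, aspace, &iova);
 *	if (ret)
 *		return ret;
 *	... program 'iova' into the GPU or display ...
 *	msm_gem_unpin_iova(obj, aspace);
 */
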
/*
 * Get an iova but don't pin it. Doesn't need a put because iovas are currently
 * valid for the life of the object
 */
int msm_gem_get_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret;

	mutex_lock(&msm_obj->lock);
	ret = msm_gem_get_iova_locked(obj, aspace, iova, 0, U64_MAX);
	mutex_unlock(&msm_obj->lock);

	return ret;
}

/* get iova without taking a reference, used in places where you have
 * already done a 'msm_gem_get_and_pin_iova' or 'msm_gem_get_iova'
 */
uint64_t msm_gem_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	mutex_lock(&msm_obj->lock);
	vma = lookup_vma(obj, aspace);
	mutex_unlock(&msm_obj->lock);
	WARN_ON(!vma);

	return vma ? vma->iova : 0;
}

/*
 * Unpin an iova by updating the reference counts. The memory isn't actually
 * purged until something else (shrinker, mm_notifier, destroy, etc) decides
 * to get rid of it
 */
void msm_gem_unpin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	mutex_lock(&msm_obj->lock);
	vma = lookup_vma(obj, aspace);

	if (!WARN_ON(!vma))
		msm_gem_unmap_vma(aspace, vma);

	mutex_unlock(&msm_obj->lock);
}

int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
}

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_put_unlocked(obj);

fail:
	return ret;
}

static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	mutex_lock(&msm_obj->lock);

	if (WARN_ON(msm_obj->madv > madv)) {
		DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
			msm_obj->madv, madv);
		mutex_unlock(&msm_obj->lock);
		return ERR_PTR(-EBUSY);
	}

	/* increment vmap_count *before* vmap() call, so shrinker can
	 * check vmap_count (is_vunmapable()) outside of msm_obj->lock.
	 * This guarantees that we won't try to msm_gem_vunmap() this
	 * same object from within the vmap() call (while we already
	 * hold msm_obj->lock)
	 */
	msm_obj->vmap_count++;

	if (!msm_obj->vaddr) {
		struct page **pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}
		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
		if (msm_obj->vaddr == NULL) {
			ret = -ENOMEM;
			goto fail;
		}
	}

	mutex_unlock(&msm_obj->lock);
	return msm_obj->vaddr;

fail:
	msm_obj->vmap_count--;
	mutex_unlock(&msm_obj->lock);
	return ERR_PTR(ret);
}

void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{
	return get_vaddr(obj, MSM_MADV_WILLNEED);
}

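/*
 * Minimal kernel-mapping usage sketch (illustrative only):
 *
 *	void *vaddr = msm_gem_get_vaddr(obj);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	memcpy(vaddr, data, len);
 *	msm_gem_put_vaddr(obj);
 */
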
/*
 * Don't use this!  It is for the very special case of dumping
 * submits from GPU hangs or faults, where the bo may already
 * be MSM_MADV_DONTNEED, but we know the buffer is still on the
 * active list.
 */
void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
{
	return get_vaddr(obj, __MSM_MADV_PURGED);
}

void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);
	WARN_ON(msm_obj->vmap_count < 1);
	msm_obj->vmap_count--;
	mutex_unlock(&msm_obj->lock);
}

/* Update madvise status, returns true if not purged, else
 * false or -errno.
 */
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	if (msm_obj->madv != __MSM_MADV_PURGED)
		msm_obj->madv = madv;

	madv = msm_obj->madv;

	mutex_unlock(&msm_obj->lock);

	return (madv != __MSM_MADV_PURGED);
}

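/*
 * Once a buffer is marked MSM_MADV_DONTNEED, the shrinker may use
 * msm_gem_purge() below to drop its backing store; a later
 * msm_gem_madvise(obj, MSM_MADV_WILLNEED) then returns false to signal
 * that the contents were lost.
 */
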
void msm_gem_purge(struct drm_gem_object *obj, enum msm_gem_lock subclass)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
	WARN_ON(!is_purgeable(msm_obj));
	WARN_ON(obj->import_attach);

	mutex_lock_nested(&msm_obj->lock, subclass);

	put_iova(obj);

	msm_gem_vunmap_locked(obj);

	put_pages(obj);

	msm_obj->madv = __MSM_MADV_PURGED;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
			0, (loff_t)-1);

	mutex_unlock(&msm_obj->lock);
}

static void msm_gem_vunmap_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
		return;

	vunmap(msm_obj->vaddr);
	msm_obj->vaddr = NULL;
}

void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock_nested(&msm_obj->lock, subclass);
	msm_gem_vunmap_locked(obj);
	mutex_unlock(&msm_obj->lock);
}

/* must be called before _move_to_active().. */
int msm_gem_sync_object(struct drm_gem_object *obj,
		struct msm_fence_context *fctx, bool exclusive)
{
	struct dma_resv_list *fobj;
	struct dma_fence *fence;
	int i, ret;

	fobj = dma_resv_get_list(obj->resv);
	if (!fobj || (fobj->shared_count == 0)) {
		fence = dma_resv_get_excl(obj->resv);
		/* don't need to wait on our own fences, since ring is fifo */
		if (fence && (fence->context != fctx->context)) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	if (!exclusive || !fobj)
		return 0;

	for (i = 0; i < fobj->shared_count; i++) {
		fence = rcu_dereference_protected(fobj->shared[i],
						dma_resv_held(obj->resv));
		if (fence->context != fctx->context) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	return 0;
}

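/*
 * Implicit-sync note (as read from the code above): fences from our own
 * fence context are skipped since the ring is FIFO, and an exclusive
 * (write) submit additionally waits on all shared fences before the
 * object is moved onto the GPU's active list below.
 */
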
void msm_gem_move_to_active(struct drm_gem_object *obj,
		struct msm_gpu *gpu, bool exclusive, struct dma_fence *fence)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
	msm_obj->gpu = gpu;
	if (exclusive)
		dma_resv_add_excl_fence(obj->resv, fence);
	else
		dma_resv_add_shared_fence(obj->resv, fence);
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &gpu->active_list);
}

void msm_gem_move_to_inactive(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	msm_obj->gpu = NULL;
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
}

int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
	bool write = !!(op & MSM_PREP_WRITE);
	unsigned long remain =
		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
	long ret;

	ret = dma_resv_wait_timeout_rcu(obj->resv, write,
			true, remain);
	if (ret == 0)
		return remain == 0 ? -EBUSY : -ETIMEDOUT;
	else if (ret < 0)
		return ret;

	/* TODO cache maintenance */

	return 0;
}

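/*
 * Note: with MSM_PREP_NOSYNC the timeout above collapses to zero, so the
 * wait becomes a non-blocking poll that reports -EBUSY while the object
 * is still busy.
 */
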
int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}

#ifdef CONFIG_DEBUG_FS
static void describe_fence(struct dma_fence *fence, const char *type,
		struct seq_file *m)
{
	if (!dma_fence_is_signaled(fence))
		seq_printf(m, "\t%9s: %s %s seq %llu\n", type,
				fence->ops->get_driver_name(fence),
				fence->ops->get_timeline_name(fence),
				fence->seqno);
}

void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct dma_resv *robj = obj->resv;
	struct dma_resv_list *fobj;
	struct dma_fence *fence;
	struct msm_gem_vma *vma;
	uint64_t off = drm_vma_node_start(&obj->vma_node);
	const char *madv;

	mutex_lock(&msm_obj->lock);

	switch (msm_obj->madv) {
	case __MSM_MADV_PURGED:
		madv = " purged";
		break;
	case MSM_MADV_DONTNEED:
		madv = " purgeable";
		break;
	case MSM_MADV_WILLNEED:
	default:
		madv = "";
		break;
	}

	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p",
			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
			obj->name, kref_read(&obj->refcount),
			off, msm_obj->vaddr);

	seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);

	if (!list_empty(&msm_obj->vmas)) {

		seq_puts(m, " vmas:");

		list_for_each_entry(vma, &msm_obj->vmas, list)
			seq_printf(m, " [%s: %08llx,%s,inuse=%d]",
				vma->aspace != NULL ? vma->aspace->name : NULL,
				vma->iova, vma->mapped ? "mapped" : "unmapped",
				vma->inuse);

		seq_puts(m, "\n");
	}

	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			describe_fence(fence, "Shared", m);
		}
	}

	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();

	mutex_unlock(&msm_obj->lock);
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_object *msm_obj;
	int count = 0;
	size_t size = 0;

	seq_puts(m, " flags id ref offset kaddr size madv name\n");
	list_for_each_entry(msm_obj, list, mm_list) {
		struct drm_gem_object *obj = &msm_obj->base;
		seq_puts(m, " ");
		msm_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

/* don't call directly! Use drm_gem_object_put() and friends */
void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;

	if (llist_add(&msm_obj->freed, &priv->free_list))
		queue_work(priv->wq, &priv->free_work);
}

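/*
 * Freeing is deferred to the free_work worker because the final teardown
 * in free_object() requires dev->struct_mutex, which callers dropping
 * their last reference may not be able to take safely.
 */
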
static void free_object(struct msm_gem_object *msm_obj)
{
	struct drm_gem_object *obj = &msm_obj->base;
	struct drm_device *dev = obj->dev;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* object should not be on active list: */
	WARN_ON(is_active(msm_obj));

	list_del(&msm_obj->mm_list);

	mutex_lock(&msm_obj->lock);

	put_iova(obj);

	if (obj->import_attach) {
		if (msm_obj->vaddr)
			dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		if (msm_obj->pages)
			kvfree(msm_obj->pages);

		drm_prime_gem_destroy(obj, msm_obj->sgt);
	} else {
		msm_gem_vunmap_locked(obj);
		put_pages(obj);
	}

	drm_gem_object_release(obj);

	mutex_unlock(&msm_obj->lock);
	kfree(msm_obj);
}

void msm_gem_free_work(struct work_struct *work)
{
	struct msm_drm_private *priv =
		container_of(work, struct msm_drm_private, free_work);
	struct drm_device *dev = priv->dev;
	struct llist_node *freed;
	struct msm_gem_object *msm_obj, *next;

	while ((freed = llist_del_all(&priv->free_list))) {

		mutex_lock(&dev->struct_mutex);

		llist_for_each_entry_safe(msm_obj, next,
					  freed, freed)
			free_object(msm_obj);

		mutex_unlock(&dev->struct_mutex);

		if (need_resched())
			break;
	}
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle,
		char *name)
{
	struct drm_gem_object *obj;
	int ret;

	obj = msm_gem_new(dev, size, flags);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (name)
		msm_gem_object_set_name(obj, "%s", name);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(obj);

	return ret;
}

static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct drm_gem_object **obj,
		bool struct_mutex_locked)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_UNCACHED:
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	default:
		DRM_DEV_ERROR(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	mutex_init(&msm_obj->lock);

	msm_obj->flags = flags;
	msm_obj->madv = MSM_MADV_WILLNEED;

	INIT_LIST_HEAD(&msm_obj->submit_entry);
	INIT_LIST_HEAD(&msm_obj->vmas);

	if (struct_mutex_locked) {
		WARN_ON(!mutex_is_locked(&dev->struct_mutex));
		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
	} else {
		mutex_lock(&dev->struct_mutex);
		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
		mutex_unlock(&dev->struct_mutex);
	}

	*obj = &msm_obj->base;

	return 0;
}

static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags, bool struct_mutex_locked)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_gem_object *obj = NULL;
	bool use_vram = false;
	int ret;

	size = PAGE_ALIGN(size);

	if (!msm_use_mmu(dev))
		use_vram = true;
	else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size)
		use_vram = true;

	if (WARN_ON(use_vram && !priv->vram.size))
		return ERR_PTR(-EINVAL);

	/* Disallow zero sized objects as they make the underlying
	 * infrastructure grumpy
	 */
	if (size == 0)
		return ERR_PTR(-EINVAL);

	ret = msm_gem_new_impl(dev, size, flags, &obj, struct_mutex_locked);
	if (ret)
		goto fail;

	if (use_vram) {
		struct msm_gem_vma *vma;
		struct page **pages;
		struct msm_gem_object *msm_obj = to_msm_bo(obj);

		mutex_lock(&msm_obj->lock);

		vma = add_vma(obj, NULL);
		mutex_unlock(&msm_obj->lock);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto fail;
		}

		to_msm_bo(obj)->vram_node = &vma->node;

		drm_gem_private_object_init(dev, obj, size);

		pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		vma->iova = physaddr(obj);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;
		/*
		 * Our buffers are kept pinned, so allocating them from the
		 * MOVABLE zone is a really bad idea, and conflicts with CMA.
		 * See comments above new_inode() why this is required _and_
		 * expected if you're going to pin these pages.
		 */
		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
	}

	return obj;

fail:
	drm_gem_object_put_unlocked(obj);
	return ERR_PTR(ret);
}

struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, true);
}

struct drm_gem_object *msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, false);
}

struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt)
{
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj;
	uint32_t size;
	int ret, npages;

	/* if we don't have IOMMU, don't bother pretending we can import: */
	if (!msm_use_mmu(dev)) {
		DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n");
		return ERR_PTR(-EINVAL);
	}

	size = PAGE_ALIGN(dmabuf->size);

	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj, false);
	if (ret)
		goto fail;

	drm_gem_private_object_init(dev, obj, size);

	npages = size / PAGE_SIZE;

	msm_obj = to_msm_bo(obj);
	mutex_lock(&msm_obj->lock);
	msm_obj->sgt = sgt;
	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!msm_obj->pages) {
		mutex_unlock(&msm_obj->lock);
		ret = -ENOMEM;
		goto fail;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
	if (ret) {
		mutex_unlock(&msm_obj->lock);
		goto fail;
	}

	mutex_unlock(&msm_obj->lock);
	return obj;

fail:
	drm_gem_object_put_unlocked(obj);
	return ERR_PTR(ret);
}

static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova, bool locked)
{
	void *vaddr;
	struct drm_gem_object *obj = _msm_gem_new(dev, size, flags, locked);
	int ret;

	if (IS_ERR(obj))
		return ERR_CAST(obj);

	if (iova) {
		ret = msm_gem_get_and_pin_iova(obj, aspace, iova);
		if (ret)
			goto err;
	}

	vaddr = msm_gem_get_vaddr(obj);
	if (IS_ERR(vaddr)) {
		msm_gem_unpin_iova(obj, aspace);
		ret = PTR_ERR(vaddr);
		goto err;
	}

	if (bo)
		*bo = obj;

	return vaddr;
err:
	if (locked)
		drm_gem_object_put(obj);
	else
		drm_gem_object_put_unlocked(obj);

	return ERR_PTR(ret);
}

void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, false);
}

void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, true);
}

void msm_gem_kernel_put(struct drm_gem_object *bo,
		struct msm_gem_address_space *aspace, bool locked)
{
	if (IS_ERR_OR_NULL(bo))
		return;

	msm_gem_put_vaddr(bo);
	msm_gem_unpin_iova(bo, aspace);

	if (locked)
		drm_gem_object_put(bo);
	else
		drm_gem_object_put_unlocked(bo);
}

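/*
 * Minimal usage sketch for the kernel-buffer helpers (illustrative only):
 *
 *	struct drm_gem_object *bo;
 *	uint64_t iova;
 *	void *vaddr = msm_gem_kernel_new(dev, size, MSM_BO_WC, aspace,
 *			&bo, &iova);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	...
 *	msm_gem_kernel_put(bo, aspace, false);
 */
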
void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
{
	struct msm_gem_object *msm_obj = to_msm_bo(bo);
	va_list ap;

	if (!fmt)
		return;

	va_start(ap, fmt);
	vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap);
	va_end(ap);
}