// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/dma-map-ops.h>
#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>

#include <drm/drm_prime.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"

static void update_inactive(struct msm_gem_object *msm_obj);

static dma_addr_t physaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
			priv->vram.paddr;
}

static bool use_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	return !msm_obj->vram_node;
}

/*
 * Cache sync.. this is a bit over-complicated, to fit the dma-mapping
 * API.  Really GPU cache is out of scope here (handled on cmdstream)
 * and all we need to do is invalidate newly allocated pages before
 * mapping to CPU as uncached/writecombine.
 *
 * On top of this, we have the added headache that, depending on
 * display generation, the display's iommu may be wired up to either
 * the toplevel drm device (mdss), or to the mdp sub-node, meaning
 * that here we either have dma-direct or iommu ops.
 *
 * Let this be a cautionary tale of abstraction gone wrong.
 */

static void sync_for_device(struct msm_gem_object *msm_obj)
{
	struct device *dev = msm_obj->base.dev->dev;

	dma_map_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
}

static void sync_for_cpu(struct msm_gem_object *msm_obj)
{
	struct device *dev = msm_obj->base.dev->dev;

	dma_unmap_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
}

/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	dma_addr_t paddr;
	struct page **p;
	int ret, i;

	p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	spin_lock(&priv->vram.lock);
	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
	spin_unlock(&priv->vram.lock);
	if (ret) {
		kvfree(p);
		return ERR_PTR(ret);
	}

	paddr = physaddr(obj);
	for (i = 0; i < npages; i++) {
		p[i] = phys_to_page(paddr);
		paddr += PAGE_SIZE;
	}

	return p;
}

static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p;
		int npages = obj->size >> PAGE_SHIFT;

		if (use_pages(obj))
			p = drm_gem_get_pages(obj);
		else
			p = get_pages_vram(obj, npages);

		if (IS_ERR(p)) {
			DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->pages = p;

		msm_obj->sgt = drm_prime_pages_to_sg(obj->dev, p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			void *ptr = ERR_CAST(msm_obj->sgt);

			DRM_DEV_ERROR(dev->dev, "failed to allocate sgt\n");
			msm_obj->sgt = NULL;
			return ptr;
		}

		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			sync_for_device(msm_obj);

		GEM_WARN_ON(msm_obj->active_count);
		update_inactive(msm_obj);
	}

	return msm_obj->pages;
}

static void put_pages_vram(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	spin_lock(&priv->vram.lock);
	drm_mm_remove_node(msm_obj->vram_node);
	spin_unlock(&priv->vram.lock);

	kvfree(msm_obj->pages);
}

static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		if (msm_obj->sgt) {
			/* For non-cached buffers, ensure the new
			 * pages are clean because display controller,
			 * GPU, etc. are not coherent:
			 */
			if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
				sync_for_cpu(msm_obj);

			sg_free_table(msm_obj->sgt);
			kfree(msm_obj->sgt);
			msm_obj->sgt = NULL;
		}

		if (use_pages(obj))
			drm_gem_put_pages(obj, msm_obj->pages, true, false);
		else
			put_pages_vram(obj);

		msm_obj->pages = NULL;
	}
}

struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **p;

	msm_gem_lock(obj);

	if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		msm_gem_unlock(obj);
		return ERR_PTR(-EBUSY);
	}

	p = get_pages(obj);

	if (!IS_ERR(p)) {
		msm_obj->pin_count++;
		update_inactive(msm_obj);
	}

	msm_gem_unlock(obj);
	return p;
}

void msm_gem_put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_lock(obj);
	msm_obj->pin_count--;
	GEM_WARN_ON(msm_obj->pin_count < 0);
	update_inactive(msm_obj);
	msm_gem_unlock(obj);
}

int msm_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (msm_obj->flags & MSM_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (msm_obj->flags & MSM_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		vma->vm_pgoff = 0;
		vma_set_file(vma, obj->filp);

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return msm_gem_mmap_obj(vma->vm_private_data, vma);
}

static vm_fault_t msm_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int err;
	vm_fault_t ret;

	/*
	 * vm_ops.open/drm_gem_mmap_obj and close get and put
	 * a reference on obj. So, we don't need to hold one here.
	 */
	err = msm_gem_lock_interruptible(obj);
	if (err) {
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		msm_gem_unlock(obj);
		return VM_FAULT_SIGBUS;
	}

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = vmf_error(PTR_ERR(pages));
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	ret = vmf_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
out_unlock:
	msm_gem_unlock(obj);
out:
	return ret;
}

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);

	if (ret) {
		DRM_DEV_ERROR(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;

	msm_gem_lock(obj);
	offset = mmap_offset(obj);
	msm_gem_unlock(obj);
	return offset;
}

static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
	if (!vma)
		return ERR_PTR(-ENOMEM);

	vma->aspace = aspace;

	list_add_tail(&vma->list, &msm_obj->vmas);

	return vma;
}

static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	list_for_each_entry(vma, &msm_obj->vmas, list) {
		if (vma->aspace == aspace)
			return vma;
	}

	return NULL;
}

static void del_vma(struct msm_gem_vma *vma)
{
	if (!vma)
		return;

	list_del(&vma->list);
	kfree(vma);
}

/**
 * If close is true, this also closes the VMA (releasing the allocated
 * iova range) in addition to removing the iommu mapping. In the eviction
 * case (!close), we keep the iova allocated, but only remove the iommu
 * mapping.
 */
static void
put_iova_spaces(struct drm_gem_object *obj, bool close)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	list_for_each_entry(vma, &msm_obj->vmas, list) {
		if (vma->aspace) {
			msm_gem_purge_vma(vma->aspace, vma);
			if (close)
				msm_gem_close_vma(vma->aspace, vma);
		}
	}
}

/* Called with msm_obj locked */
static void
put_iova_vmas(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma, *tmp;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
		del_vma(vma);
	}
}

static int get_iova_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova,
		u64 range_start, u64 range_end)
{
	struct msm_gem_vma *vma;
	int ret = 0;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	vma = lookup_vma(obj, aspace);

	if (!vma) {
		vma = add_vma(obj, aspace);
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		ret = msm_gem_init_vma(aspace, vma, obj->size >> PAGE_SHIFT,
			range_start, range_end);
		if (ret) {
			del_vma(vma);
			return ret;
		}
	}

	*iova = vma->iova;
	return 0;
}

static int msm_gem_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;
	struct page **pages;
	int ret, prot = IOMMU_READ;

	if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
		prot |= IOMMU_WRITE;

	if (msm_obj->flags & MSM_BO_MAP_PRIV)
		prot |= IOMMU_PRIV;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
		return -EBUSY;

	vma = lookup_vma(obj, aspace);
	if (GEM_WARN_ON(!vma))
		return -EINVAL;

	pages = get_pages(obj);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	ret = msm_gem_map_vma(aspace, vma, prot,
			msm_obj->sgt, obj->size >> PAGE_SHIFT);

	if (!ret)
		msm_obj->pin_count++;

	return ret;
}

static int get_and_pin_iova_range_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova,
		u64 range_start, u64 range_end)
{
	u64 local;
	int ret;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	ret = get_iova_locked(obj, aspace, &local,
		range_start, range_end);

	if (!ret)
		ret = msm_gem_pin_iova(obj, aspace);

	if (!ret)
		*iova = local;

	return ret;
}

/*
 * Get iova and pin it. Should have a matching put; limits the iova
 * to the specified range (in pages).
 */
int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova,
		u64 range_start, u64 range_end)
{
	int ret;

	msm_gem_lock(obj);
	ret = get_and_pin_iova_range_locked(obj, aspace, iova, range_start, range_end);
	msm_gem_unlock(obj);

	return ret;
}

int msm_gem_get_and_pin_iova_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	return get_and_pin_iova_range_locked(obj, aspace, iova, 0, U64_MAX);
}

/* get iova and pin it. Should have a matching put */
int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	return msm_gem_get_and_pin_iova_range(obj, aspace, iova, 0, U64_MAX);
}

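/*
 * Usage sketch (illustrative, not part of the driver): a typical caller
 * pins a buffer into a GPU address space for the duration of a submit
 * and drops the pin when the submit retires.  The gpu->aspace pointer
 * below is an assumption about the caller's context:
 *
 *	uint64_t iova;
 *	int ret = msm_gem_get_and_pin_iova(obj, gpu->aspace, &iova);
 *	if (ret)
 *		return ret;
 *	// ... emit iova into the cmdstream ...
 *	msm_gem_unpin_iova(obj, gpu->aspace);
 */
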
/*
 * Get an iova but don't pin it. Doesn't need a put because iovas are currently
 * valid for the life of the object
 */
int msm_gem_get_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	int ret;

	msm_gem_lock(obj);
	ret = get_iova_locked(obj, aspace, iova, 0, U64_MAX);
	msm_gem_unlock(obj);

	return ret;
}

/* get iova without taking a reference, used in places where you have
 * already done a 'msm_gem_get_and_pin_iova' or 'msm_gem_get_iova'
 */
uint64_t msm_gem_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_vma *vma;

	msm_gem_lock(obj);
	vma = lookup_vma(obj, aspace);
	msm_gem_unlock(obj);
	GEM_WARN_ON(!vma);

	return vma ? vma->iova : 0;
}

/*
 * Locked variant of msm_gem_unpin_iova()
 */
void msm_gem_unpin_iova_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	vma = lookup_vma(obj, aspace);

	if (!GEM_WARN_ON(!vma)) {
		msm_gem_unmap_vma(aspace, vma);

		msm_obj->pin_count--;
		GEM_WARN_ON(msm_obj->pin_count < 0);

		update_inactive(msm_obj);
	}
}

/*
 * Unpin an iova by updating the reference counts. The memory isn't actually
 * purged until something else (shrinker, mm_notifier, destroy, etc) decides
 * to get rid of it
 */
void msm_gem_unpin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	msm_gem_lock(obj);
	msm_gem_unpin_iova_locked(obj, aspace);
	msm_gem_unlock(obj);
}

int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size  = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
}

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_put(obj);

fail:
	return ret;
}

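/*
 * Userspace view (illustrative sketch, not part of the driver): a dumb
 * buffer created through the ioctls above is CPU-mapped by asking for
 * its fake mmap offset and handing that offset to mmap(2) on the drm fd:
 *
 *	struct drm_mode_map_dumb req = { .handle = handle };
 *	drmIoctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &req);
 *	void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, req.offset);
 */
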
static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	if (obj->import_attach)
		return ERR_PTR(-ENODEV);

	if (GEM_WARN_ON(msm_obj->madv > madv)) {
		DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
			msm_obj->madv, madv);
		return ERR_PTR(-EBUSY);
	}

	/* increment vmap_count *before* vmap() call, so shrinker can
	 * check vmap_count (is_vunmapable()) outside of msm_obj lock.
	 * This guarantees that we won't try to msm_gem_vunmap() this
	 * same object from within the vmap() call (while we already
	 * hold msm_obj lock)
	 */
	msm_obj->vmap_count++;

	if (!msm_obj->vaddr) {
		struct page **pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}
		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
		if (msm_obj->vaddr == NULL) {
			ret = -ENOMEM;
			goto fail;
		}

		update_inactive(msm_obj);
	}

	return msm_obj->vaddr;

fail:
	msm_obj->vmap_count--;
	return ERR_PTR(ret);
}

void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj)
{
	return get_vaddr(obj, MSM_MADV_WILLNEED);
}

void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{
	void *ret;

	msm_gem_lock(obj);
	ret = msm_gem_get_vaddr_locked(obj);
	msm_gem_unlock(obj);

	return ret;
}

/*
 * Don't use this!  It is for the very special case of dumping
 * submits from GPU hangs or faults, where the bo may already
 * be MSM_MADV_DONTNEED, but we know the buffer is still on the
 * active list.
 */
void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
{
	return get_vaddr(obj, __MSM_MADV_PURGED);
}

void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	GEM_WARN_ON(!msm_gem_is_locked(obj));
	GEM_WARN_ON(msm_obj->vmap_count < 1);

	msm_obj->vmap_count--;
}

void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
	msm_gem_lock(obj);
	msm_gem_put_vaddr_locked(obj);
	msm_gem_unlock(obj);
}

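/*
 * Usage sketch (illustrative): kernel-side CPU access is always
 * bracketed by a get/put pair, so the shrinker can tell when the
 * vmap()'d region may be torn down.  The data/len names here are
 * assumed from the caller:
 *
 *	void *vaddr = msm_gem_get_vaddr(obj);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	memcpy(vaddr, data, len);
 *	msm_gem_put_vaddr(obj);
 */
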
/* Update madvise status. Returns true if not purged, else
 * false or -errno.
 */
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_lock(obj);

	if (msm_obj->madv != __MSM_MADV_PURGED)
		msm_obj->madv = madv;

	madv = msm_obj->madv;

	/* If the obj is inactive, we might need to move it
	 * between inactive lists
	 */
	if (msm_obj->active_count == 0)
		update_inactive(msm_obj);

	msm_gem_unlock(obj);

	return (madv != __MSM_MADV_PURGED);
}

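/*
 * Usage sketch (illustrative): userspace marks idle buffers purgeable
 * via DRM_IOCTL_MSM_GEM_MADVISE and must re-check before reusing one,
 * since the shrinker may have purged the backing pages in the meantime:
 *
 *	struct drm_msm_gem_madvise req = {
 *		.handle = handle, .madv = MSM_MADV_WILLNEED,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_MSM_GEM_MADVISE, &req);
 *	if (!req.retained)
 *		reinit_buffer(handle);	// hypothetical: contents were purged
 */
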
void msm_gem_purge(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	GEM_WARN_ON(!msm_gem_is_locked(obj));
	GEM_WARN_ON(!is_purgeable(msm_obj));

	/* Get rid of any iommu mapping(s): */
	put_iova_spaces(obj, true);

	msm_gem_vunmap(obj);

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);

	put_pages(obj);

	put_iova_vmas(obj);

	msm_obj->madv = __MSM_MADV_PURGED;
	update_inactive(msm_obj);

	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
			0, (loff_t)-1);
}

/**
 * Unpin the backing pages and make them available to be swapped out.
 */
void msm_gem_evict(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	GEM_WARN_ON(!msm_gem_is_locked(obj));
	GEM_WARN_ON(is_unevictable(msm_obj));
	GEM_WARN_ON(!msm_obj->evictable);
	GEM_WARN_ON(msm_obj->active_count);

	/* Get rid of any iommu mapping(s): */
	put_iova_spaces(obj, false);

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);

	put_pages(obj);

	update_inactive(msm_obj);
}

void msm_gem_vunmap(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	if (!msm_obj->vaddr || GEM_WARN_ON(!is_vunmapable(msm_obj)))
		return;

	vunmap(msm_obj->vaddr);
	msm_obj->vaddr = NULL;
}

/* must be called before _move_to_active().. */
int msm_gem_sync_object(struct drm_gem_object *obj,
		struct msm_fence_context *fctx, bool exclusive)
{
	struct dma_resv_list *fobj;
	struct dma_fence *fence;
	int i, ret;

	fence = dma_resv_excl_fence(obj->resv);
	/* don't need to wait on our own fences, since ring is fifo */
	if (fence && (fence->context != fctx->context)) {
		ret = dma_fence_wait(fence, true);
		if (ret)
			return ret;
	}

	fobj = dma_resv_shared_list(obj->resv);
	if (!exclusive || !fobj)
		return 0;

	for (i = 0; i < fobj->shared_count; i++) {
		fence = rcu_dereference_protected(fobj->shared[i],
						dma_resv_held(obj->resv));
		if (fence->context != fctx->context) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	return 0;
}

void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	might_sleep();
	GEM_WARN_ON(!msm_gem_is_locked(obj));
	GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
	GEM_WARN_ON(msm_obj->dontneed);
	GEM_WARN_ON(!msm_obj->sgt);

	if (msm_obj->active_count++ == 0) {
		mutex_lock(&priv->mm_lock);
		if (msm_obj->evictable)
			mark_unevictable(msm_obj);
		list_del(&msm_obj->mm_list);
		list_add_tail(&msm_obj->mm_list, &gpu->active_list);
		mutex_unlock(&priv->mm_lock);
	}
}

void msm_gem_active_put(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	might_sleep();
	GEM_WARN_ON(!msm_gem_is_locked(obj));

	if (--msm_obj->active_count == 0) {
		update_inactive(msm_obj);
	}
}

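/*
 * Lifecycle sketch (illustrative): the submit path marks every bo in a
 * submit active for as long as the GPU may access it, and the retire
 * path drops it back onto the appropriate inactive list:
 *
 *	msm_gem_lock(obj);
 *	msm_gem_active_get(obj, gpu);	// at submit
 *	msm_gem_unlock(obj);
 *	// ... GPU executes the submit ...
 *	msm_gem_lock(obj);
 *	msm_gem_active_put(obj);	// at retire
 *	msm_gem_unlock(obj);
 */
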
static void update_inactive(struct msm_gem_object *msm_obj)
{
	struct msm_drm_private *priv = msm_obj->base.dev->dev_private;

	GEM_WARN_ON(!msm_gem_is_locked(&msm_obj->base));

	if (msm_obj->active_count != 0)
		return;

	mutex_lock(&priv->mm_lock);

	if (msm_obj->dontneed)
		mark_unpurgeable(msm_obj);
	if (msm_obj->evictable)
		mark_unevictable(msm_obj);

	list_del(&msm_obj->mm_list);
	if ((msm_obj->madv == MSM_MADV_WILLNEED) && msm_obj->sgt) {
		list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed);
		mark_evictable(msm_obj);
	} else if (msm_obj->madv == MSM_MADV_DONTNEED) {
		list_add_tail(&msm_obj->mm_list, &priv->inactive_dontneed);
		mark_purgeable(msm_obj);
	} else {
		GEM_WARN_ON((msm_obj->madv != __MSM_MADV_PURGED) && msm_obj->sgt);
		list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned);
	}

	mutex_unlock(&priv->mm_lock);
}

int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
	bool write = !!(op & MSM_PREP_WRITE);
	unsigned long remain =
		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
	long ret;

	ret = dma_resv_wait_timeout(obj->resv, write, true, remain);
	if (ret == 0)
		return remain == 0 ? -EBUSY : -ETIMEDOUT;
	else if (ret < 0)
		return ret;

	/* TODO cache maintenance */

	return 0;
}

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}

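/*
 * Usage sketch (illustrative): userspace brackets CPU access with the
 * CPU_PREP/CPU_FINI ioctls so it never races the GPU.  A real caller
 * also fills in prep.timeout; it is omitted here for brevity:
 *
 *	struct drm_msm_gem_cpu_prep prep = {
 *		.handle = handle, .op = MSM_PREP_WRITE,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_MSM_GEM_CPU_PREP, &prep);
 *	// ... CPU writes to the mmap'd buffer ...
 *	struct drm_msm_gem_cpu_fini fini = { .handle = handle };
 *	drmIoctl(fd, DRM_IOCTL_MSM_GEM_CPU_FINI, &fini);
 */
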
#ifdef CONFIG_DEBUG_FS
static void describe_fence(struct dma_fence *fence, const char *type,
		struct seq_file *m)
{
	if (!dma_fence_is_signaled(fence))
		seq_printf(m, "\t%9s: %s %s seq %llu\n", type,
				fence->ops->get_driver_name(fence),
				fence->ops->get_timeline_name(fence),
				fence->seqno);
}

void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
		struct msm_gem_stats *stats)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct dma_resv *robj = obj->resv;
	struct dma_resv_list *fobj;
	struct dma_fence *fence;
	struct msm_gem_vma *vma;
	uint64_t off = drm_vma_node_start(&obj->vma_node);
	const char *madv;

	msm_gem_lock(obj);

	stats->all.count++;
	stats->all.size += obj->size;

	if (is_active(msm_obj)) {
		stats->active.count++;
		stats->active.size += obj->size;
	}

	if (msm_obj->pages) {
		stats->resident.count++;
		stats->resident.size += obj->size;
	}

	switch (msm_obj->madv) {
	case __MSM_MADV_PURGED:
		stats->purged.count++;
		stats->purged.size += obj->size;
		madv = " purged";
		break;
	case MSM_MADV_DONTNEED:
		stats->purgeable.count++;
		stats->purgeable.size += obj->size;
		madv = " purgeable";
		break;
	case MSM_MADV_WILLNEED:
	default:
		madv = "";
		break;
	}

	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p",
			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
			obj->name, kref_read(&obj->refcount),
			off, msm_obj->vaddr);

	seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);

	if (!list_empty(&msm_obj->vmas)) {

		seq_puts(m, "      vmas:");

		list_for_each_entry(vma, &msm_obj->vmas, list) {
			const char *name, *comm;
			if (vma->aspace) {
				struct msm_gem_address_space *aspace = vma->aspace;
				struct task_struct *task =
					get_pid_task(aspace->pid, PIDTYPE_PID);
				if (task) {
					comm = kstrdup(task->comm, GFP_KERNEL);
				} else {
					comm = NULL;
				}
				name = aspace->name;
			} else {
				name = comm = NULL;
			}
			seq_printf(m, " [%s%s%s: aspace=%p, %08llx,%s,inuse=%d]",
				name, comm ? ":" : "", comm ? comm : "",
				vma->aspace, vma->iova,
				vma->mapped ? "mapped" : "unmapped",
				vma->inuse);
			kfree(comm);
		}

		seq_puts(m, "\n");
	}

	rcu_read_lock();
	fobj = dma_resv_shared_list(robj);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			describe_fence(fence, "Shared", m);
		}
	}

	fence = dma_resv_excl_fence(robj);
	if (fence)
		describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();

	msm_gem_unlock(obj);
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_stats stats = {};
	struct msm_gem_object *msm_obj;

	seq_puts(m, "   flags       id ref  offset   kaddr            size     madv      name\n");
	list_for_each_entry(msm_obj, list, node) {
		struct drm_gem_object *obj = &msm_obj->base;
		seq_puts(m, "   ");
		msm_gem_describe(obj, m, &stats);
	}

	seq_printf(m, "Total:     %4d objects, %9zu bytes\n",
			stats.all.count, stats.all.size);
	seq_printf(m, "Active:    %4d objects, %9zu bytes\n",
			stats.active.count, stats.active.size);
	seq_printf(m, "Resident:  %4d objects, %9zu bytes\n",
			stats.resident.count, stats.resident.size);
	seq_printf(m, "Purgeable: %4d objects, %9zu bytes\n",
			stats.purgeable.count, stats.purgeable.size);
	seq_printf(m, "Purged:    %4d objects, %9zu bytes\n",
			stats.purged.count, stats.purged.size);
}
#endif

/* don't call directly!  Use drm_gem_object_put_locked() and friends */
void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;

	mutex_lock(&priv->obj_lock);
	list_del(&msm_obj->node);
	mutex_unlock(&priv->obj_lock);

	mutex_lock(&priv->mm_lock);
	if (msm_obj->dontneed)
		mark_unpurgeable(msm_obj);
	list_del(&msm_obj->mm_list);
	mutex_unlock(&priv->mm_lock);

	msm_gem_lock(obj);

	/* object should not be on active list: */
	GEM_WARN_ON(is_active(msm_obj));

	put_iova_spaces(obj, true);

	if (obj->import_attach) {
		GEM_WARN_ON(msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		kvfree(msm_obj->pages);

		put_iova_vmas(obj);

		/* dma_buf_detach() grabs resv lock, so we need to unlock
		 * prior to drm_prime_gem_destroy
		 */
		msm_gem_unlock(obj);

		drm_prime_gem_destroy(obj, msm_obj->sgt);
	} else {
		msm_gem_vunmap(obj);
		put_pages(obj);
		put_iova_vmas(obj);
		msm_gem_unlock(obj);
	}

	drm_gem_object_release(obj);

	kfree(msm_obj);
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle,
		char *name)
{
	struct drm_gem_object *obj;
	int ret;

	obj = msm_gem_new(dev, size, flags);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (name)
		msm_gem_object_set_name(obj, "%s", name);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(obj);

	return ret;
}

static const struct vm_operations_struct vm_ops = {
	.fault = msm_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct drm_gem_object_funcs msm_gem_object_funcs = {
	.free = msm_gem_free_object,
	.pin = msm_gem_prime_pin,
	.unpin = msm_gem_prime_unpin,
	.get_sg_table = msm_gem_prime_get_sg_table,
	.vmap = msm_gem_prime_vmap,
	.vunmap = msm_gem_prime_vunmap,
	.vm_ops = &vm_ops,
};

static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct drm_gem_object **obj)
{
	struct msm_gem_object *msm_obj;

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_UNCACHED:
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	default:
		DRM_DEV_ERROR(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	msm_obj->flags = flags;
	msm_obj->madv = MSM_MADV_WILLNEED;

	INIT_LIST_HEAD(&msm_obj->submit_entry);
	INIT_LIST_HEAD(&msm_obj->vmas);

	*obj = &msm_obj->base;
	(*obj)->funcs = &msm_gem_object_funcs;

	return 0;
}

static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags, bool struct_mutex_locked)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj = NULL;
	bool use_vram = false;
	int ret;

	size = PAGE_ALIGN(size);

	if (!msm_use_mmu(dev))
		use_vram = true;
	else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size)
		use_vram = true;

	if (GEM_WARN_ON(use_vram && !priv->vram.size))
		return ERR_PTR(-EINVAL);

	/* Disallow zero sized objects as they make the underlying
	 * infrastructure grumpy
	 */
	if (size == 0)
		return ERR_PTR(-EINVAL);

	ret = msm_gem_new_impl(dev, size, flags, &obj);
	if (ret)
		goto fail;

	msm_obj = to_msm_bo(obj);

	if (use_vram) {
		struct msm_gem_vma *vma;
		struct page **pages;

		drm_gem_private_object_init(dev, obj, size);

		msm_gem_lock(obj);

		vma = add_vma(obj, NULL);
		msm_gem_unlock(obj);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto fail;
		}

		to_msm_bo(obj)->vram_node = &vma->node;

		msm_gem_lock(obj);
		pages = get_pages(obj);
		msm_gem_unlock(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		vma->iova = physaddr(obj);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;
		/*
		 * Our buffers are kept pinned, so allocating them from the
		 * MOVABLE zone is a really bad idea, and conflicts with CMA.
		 * See comments above new_inode() why this is required _and_
		 * expected if you're going to pin these pages.
		 */
		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
	}

	mutex_lock(&priv->mm_lock);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned);
	mutex_unlock(&priv->mm_lock);

	mutex_lock(&priv->obj_lock);
	list_add_tail(&msm_obj->node, &priv->objects);
	mutex_unlock(&priv->obj_lock);

	return obj;

fail:
	if (struct_mutex_locked) {
		drm_gem_object_put_locked(obj);
	} else {
		drm_gem_object_put(obj);
	}
	return ERR_PTR(ret);
}

struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, true);
}

struct drm_gem_object *msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, false);
}

struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj;
	uint32_t size;
	int ret, npages;

	/* if we don't have IOMMU, don't bother pretending we can import: */
	if (!msm_use_mmu(dev)) {
		DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n");
		return ERR_PTR(-EINVAL);
	}

	size = PAGE_ALIGN(dmabuf->size);

	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
	if (ret)
		goto fail;

	drm_gem_private_object_init(dev, obj, size);

	npages = size / PAGE_SIZE;

	msm_obj = to_msm_bo(obj);
	msm_gem_lock(obj);
	msm_obj->sgt = sgt;
	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!msm_obj->pages) {
		msm_gem_unlock(obj);
		ret = -ENOMEM;
		goto fail;
	}

	ret = drm_prime_sg_to_page_array(sgt, msm_obj->pages, npages);
	if (ret) {
		msm_gem_unlock(obj);
		goto fail;
	}

	msm_gem_unlock(obj);

	mutex_lock(&priv->mm_lock);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned);
	mutex_unlock(&priv->mm_lock);

	mutex_lock(&priv->obj_lock);
	list_add_tail(&msm_obj->node, &priv->objects);
	mutex_unlock(&priv->obj_lock);

	return obj;

fail:
	drm_gem_object_put(obj);
	return ERR_PTR(ret);
}

static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova, bool locked)
{
	void *vaddr;
	struct drm_gem_object *obj = _msm_gem_new(dev, size, flags, locked);
	int ret;

	if (IS_ERR(obj))
		return ERR_CAST(obj);

	if (iova) {
		ret = msm_gem_get_and_pin_iova(obj, aspace, iova);
		if (ret)
			goto err;
	}

	vaddr = msm_gem_get_vaddr(obj);
	if (IS_ERR(vaddr)) {
		msm_gem_unpin_iova(obj, aspace);
		ret = PTR_ERR(vaddr);
		goto err;
	}

	if (bo)
		*bo = obj;

	return vaddr;
err:
	if (locked)
		drm_gem_object_put_locked(obj);
	else
		drm_gem_object_put(obj);

	return ERR_PTR(ret);
}

void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, false);
}

void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, true);
}

void msm_gem_kernel_put(struct drm_gem_object *bo,
		struct msm_gem_address_space *aspace, bool locked)
{
	if (IS_ERR_OR_NULL(bo))
		return;

	msm_gem_put_vaddr(bo);
	msm_gem_unpin_iova(bo, aspace);

	if (locked)
		drm_gem_object_put_locked(bo);
	else
		drm_gem_object_put(bo);
}

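/*
 * Usage sketch (illustrative): a driver-internal buffer (e.g. a
 * ringbuffer or memptrs page) is allocated, pinned and vmap'd in one
 * call, and torn down with the matching put.  The gpu->aspace pointer
 * is assumed from the caller's context:
 *
 *	struct drm_gem_object *bo;
 *	uint64_t iova;
 *	void *ptr = msm_gem_kernel_new(dev, size, MSM_BO_WC,
 *			gpu->aspace, &bo, &iova);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *	// ... use ptr for CPU access and iova for GPU access ...
 *	msm_gem_kernel_put(bo, gpu->aspace, false);
 */
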
void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
{
	struct msm_gem_object *msm_obj = to_msm_bo(bo);
	va_list ap;

	if (!fmt)
		return;

	va_start(ap, fmt);
	vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap);
	va_end(ap);
}