// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/dma-map-ops.h>
#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>

#include <drm/drm_prime.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"

static void update_inactive(struct msm_gem_object *msm_obj);

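/*
 * Physical address of a VRAM-carveout bo.  Only meaningful when the bo
 * has a vram_node, i.e. when use_pages() below returns false.
 */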
static dma_addr_t physaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
			priv->vram.paddr;
}

static bool use_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	return !msm_obj->vram_node;
}

/*
 * Cache sync.. this is a bit over-complicated, to fit dma-mapping
 * API. Really GPU cache is out of scope here (handled on cmdstream)
 * and all we need to do is invalidate newly allocated pages before
 * mapping to CPU as uncached/writecombine.
 *
 * On top of this, we have the added headache, that depending on
 * display generation, the display's iommu may be wired up to either
 * the toplevel drm device (mdss), or to the mdp sub-node, meaning
 * that here we either have dma-direct or iommu ops.
 *
 * Let this be a cautionary tale of abstraction gone wrong.
 */

static void sync_for_device(struct msm_gem_object *msm_obj)
{
	struct device *dev = msm_obj->base.dev->dev;

	dma_map_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
}

static void sync_for_cpu(struct msm_gem_object *msm_obj)
{
	struct device *dev = msm_obj->base.dev->dev;

	dma_unmap_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
}

/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	dma_addr_t paddr;
	struct page **p;
	int ret, i;

	p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	spin_lock(&priv->vram.lock);
	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
	spin_unlock(&priv->vram.lock);
	if (ret) {
		kvfree(p);
		return ERR_PTR(ret);
	}

	paddr = physaddr(obj);
	for (i = 0; i < npages; i++) {
		p[i] = phys_to_page(paddr);
		paddr += PAGE_SIZE;
	}

	return p;
}

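/*
 * Get (allocating on first use) the backing pages and sg table for a
 * bo, from shmem or from the VRAM carveout.  Caller must hold the
 * object lock.
 */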
static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p;
		int npages = obj->size >> PAGE_SHIFT;

		if (use_pages(obj))
			p = drm_gem_get_pages(obj);
		else
			p = get_pages_vram(obj, npages);

		if (IS_ERR(p)) {
			DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->pages = p;

		msm_obj->sgt = drm_prime_pages_to_sg(obj->dev, p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			void *ptr = ERR_CAST(msm_obj->sgt);

			DRM_DEV_ERROR(dev->dev, "failed to allocate sgt\n");
			msm_obj->sgt = NULL;
			return ptr;
		}

		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			sync_for_device(msm_obj);
	}

	return msm_obj->pages;
}

static void put_pages_vram(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	spin_lock(&priv->vram.lock);
	drm_mm_remove_node(msm_obj->vram_node);
	spin_unlock(&priv->vram.lock);

	kvfree(msm_obj->pages);
}

static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		if (msm_obj->sgt) {
			/* For non-cached buffers, ensure the new
			 * pages are clean because display controller,
			 * GPU, etc. are not coherent:
			 */
			if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
				sync_for_cpu(msm_obj);

			sg_free_table(msm_obj->sgt);
			kfree(msm_obj->sgt);
			msm_obj->sgt = NULL;
		}

		if (use_pages(obj))
			drm_gem_put_pages(obj, msm_obj->pages, true, false);
		else
			put_pages_vram(obj);

		msm_obj->pages = NULL;
	}
}

struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **p;

	msm_gem_lock(obj);

	if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		msm_gem_unlock(obj);
		return ERR_PTR(-EBUSY);
	}

	p = get_pages(obj);
	msm_gem_unlock(obj);
	return p;
}

void msm_gem_put_pages(struct drm_gem_object *obj)
{
	/* when we start tracking the pin count, then do something here */
}

int msm_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (msm_obj->flags & MSM_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (msm_obj->flags & MSM_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		vma->vm_pgoff = 0;
		vma_set_file(vma, obj->filp);

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return msm_gem_mmap_obj(vma->vm_private_data, vma);
}

static vm_fault_t msm_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int err;
	vm_fault_t ret;

	/*
	 * vm_ops.open/drm_gem_mmap_obj and close get and put
	 * a reference on obj. So, we don't need to hold one here.
	 */
	err = msm_gem_lock_interruptible(obj);
	if (err) {
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		msm_gem_unlock(obj);
		return VM_FAULT_SIGBUS;
	}

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = vmf_error(PTR_ERR(pages));
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	ret = vmf_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
out_unlock:
	msm_gem_unlock(obj);
out:
	return ret;
}

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);

	if (ret) {
		DRM_DEV_ERROR(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;

	msm_gem_lock(obj);
	offset = mmap_offset(obj);
	msm_gem_unlock(obj);
	return offset;
}

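/*
 * Per-address-space vma tracking: each bo keeps a list of vmas, one
 * per address space it is mapped into.  add_vma() and lookup_vma()
 * assert the object lock; del_vma() relies on its callers holding it.
 */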
static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
	if (!vma)
		return ERR_PTR(-ENOMEM);

	vma->aspace = aspace;

	list_add_tail(&vma->list, &msm_obj->vmas);

	return vma;
}

static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	list_for_each_entry(vma, &msm_obj->vmas, list) {
		if (vma->aspace == aspace)
			return vma;
	}

	return NULL;
}

static void del_vma(struct msm_gem_vma *vma)
{
	if (!vma)
		return;

	list_del(&vma->list);
	kfree(vma);
}
359
Rob Clark20d0ae22021-04-05 10:45:27 -0700360/**
361 * If close is true, this also closes the VMA (releasing the allocated
362 * iova range) in addition to removing the iommu mapping. In the eviction
363 * case (!close), we keep the iova allocated, but only remove the iommu
364 * mapping.
365 */
Rob Clark4fe5f652016-06-01 11:38:28 -0400366static void
Rob Clark20d0ae22021-04-05 10:45:27 -0700367put_iova_spaces(struct drm_gem_object *obj, bool close)
Rob Clark4fe5f652016-06-01 11:38:28 -0400368{
Rob Clark4fe5f652016-06-01 11:38:28 -0400369 struct msm_gem_object *msm_obj = to_msm_bo(obj);
Iskren Chernev9b73bde2020-11-26 15:02:23 +0200370 struct msm_gem_vma *vma;
Rob Clark4fe5f652016-06-01 11:38:28 -0400371
Rob Clark90643a22021-04-05 10:45:24 -0700372 GEM_WARN_ON(!msm_gem_is_locked(obj));
Rob Clark4fe5f652016-06-01 11:38:28 -0400373
Iskren Chernev9b73bde2020-11-26 15:02:23 +0200374 list_for_each_entry(vma, &msm_obj->vmas, list) {
Brian Masneyd67f1b62019-06-02 21:01:31 -0400375 if (vma->aspace) {
376 msm_gem_purge_vma(vma->aspace, vma);
Rob Clark20d0ae22021-04-05 10:45:27 -0700377 if (close)
378 msm_gem_close_vma(vma->aspace, vma);
Brian Masneyd67f1b62019-06-02 21:01:31 -0400379 }
Iskren Chernev9b73bde2020-11-26 15:02:23 +0200380 }
381}
382
383/* Called with msm_obj locked */
384static void
385put_iova_vmas(struct drm_gem_object *obj)
Rob Clark4fe5f652016-06-01 11:38:28 -0400386{
387 struct msm_gem_object *msm_obj = to_msm_bo(obj);
388 struct msm_gem_vma *vma, *tmp;
389
Rob Clark90643a22021-04-05 10:45:24 -0700390 GEM_WARN_ON(!msm_gem_is_locked(obj));
Rob Clark4fe5f652016-06-01 11:38:28 -0400391
Rob Clark4b85f7f2017-06-13 13:54:13 -0400392 list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
Rob Clark4b85f7f2017-06-13 13:54:13 -0400393 del_vma(vma);
Rob Clark4fe5f652016-06-01 11:38:28 -0400394 }
395}
396
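/*
 * Look up (creating if needed) the vma for the given address space
 * and allocate an iova range for it; the pages are not mapped into
 * the iommu here.  Caller must hold the object lock.
 */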
static int get_iova_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova,
		u64 range_start, u64 range_end)
{
	struct msm_gem_vma *vma;
	int ret = 0;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	vma = lookup_vma(obj, aspace);

	if (!vma) {
		vma = add_vma(obj, aspace);
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		ret = msm_gem_init_vma(aspace, vma, obj->size >> PAGE_SHIFT,
			range_start, range_end);
		if (ret) {
			del_vma(vma);
			return ret;
		}
	}

	*iova = vma->iova;
	return 0;
}

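/*
 * Map the backing pages into the iommu for an already-created vma,
 * deriving the mapping protection (read-only / privileged) from the
 * bo flags.  Caller must hold the object lock.
 */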
static int msm_gem_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;
	struct page **pages;
	int prot = IOMMU_READ;

	if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
		prot |= IOMMU_WRITE;

	if (msm_obj->flags & MSM_BO_MAP_PRIV)
		prot |= IOMMU_PRIV;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
		return -EBUSY;

	vma = lookup_vma(obj, aspace);
	if (GEM_WARN_ON(!vma))
		return -EINVAL;

	pages = get_pages(obj);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	return msm_gem_map_vma(aspace, vma, prot,
			msm_obj->sgt, obj->size >> PAGE_SHIFT);
}

static int get_and_pin_iova_range_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova,
		u64 range_start, u64 range_end)
{
	u64 local;
	int ret;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	ret = get_iova_locked(obj, aspace, &local,
		range_start, range_end);

	if (!ret)
		ret = msm_gem_pin_iova(obj, aspace);

	if (!ret)
		*iova = local;

	return ret;
}

/*
 * Get the iova and pin it. Should have a matching put.
 * Limits the iova to the specified range (in pages).
 */
int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova,
		u64 range_start, u64 range_end)
{
	int ret;

	msm_gem_lock(obj);
	ret = get_and_pin_iova_range_locked(obj, aspace, iova, range_start, range_end);
	msm_gem_unlock(obj);

	return ret;
}

int msm_gem_get_and_pin_iova_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	return get_and_pin_iova_range_locked(obj, aspace, iova, 0, U64_MAX);
}

/* get iova and pin it. Should have a matching put */
int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	return msm_gem_get_and_pin_iova_range(obj, aspace, iova, 0, U64_MAX);
}

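/*
 * A minimal usage sketch of the pin/unpin pairing (not lifted from a
 * real caller; error handling is trimmed, and "aspace" is whichever
 * address space the caller already owns):
 *
 *	uint64_t iova;
 *
 *	ret = msm_gem_get_and_pin_iova(obj, aspace, &iova);
 *	if (!ret) {
 *		... program iova into the hw ...
 *		msm_gem_unpin_iova(obj, aspace);
 *	}
 */
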
/*
 * Get an iova but don't pin it. Doesn't need a put because iovas are currently
 * valid for the life of the object
 */
int msm_gem_get_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	int ret;

	msm_gem_lock(obj);
	ret = get_iova_locked(obj, aspace, iova, 0, U64_MAX);
	msm_gem_unlock(obj);

	return ret;
}

/* get iova without taking a reference, used in places where you have
 * already done a 'msm_gem_get_and_pin_iova' or 'msm_gem_get_iova'
 */
uint64_t msm_gem_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_vma *vma;

	msm_gem_lock(obj);
	vma = lookup_vma(obj, aspace);
	msm_gem_unlock(obj);
	GEM_WARN_ON(!vma);

	return vma ? vma->iova : 0;
}

/*
 * Locked variant of msm_gem_unpin_iova()
 */
void msm_gem_unpin_iova_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_vma *vma;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	vma = lookup_vma(obj, aspace);

	if (!GEM_WARN_ON(!vma))
		msm_gem_unmap_vma(aspace, vma);
}

/*
 * Unpin an iova by updating the reference counts. The memory isn't actually
 * purged until something else (shrinker, mm_notifier, destroy, etc) decides
 * to get rid of it
 */
void msm_gem_unpin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	msm_gem_lock(obj);
	msm_gem_unpin_iova_locked(obj, aspace);
	msm_gem_unlock(obj);
}

int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
}

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_put(obj);

fail:
	return ret;
}

static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	if (obj->import_attach)
		return ERR_PTR(-ENODEV);

	if (GEM_WARN_ON(msm_obj->madv > madv)) {
		DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
			msm_obj->madv, madv);
		return ERR_PTR(-EBUSY);
	}

	/* increment vmap_count *before* vmap() call, so shrinker can
	 * check vmap_count (is_vunmapable()) outside of msm_obj lock.
	 * This guarantees that we won't try to msm_gem_vunmap() this
	 * same object from within the vmap() call (while we already
	 * hold msm_obj lock)
	 */
	msm_obj->vmap_count++;

	if (!msm_obj->vaddr) {
		struct page **pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}
		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
		if (msm_obj->vaddr == NULL) {
			ret = -ENOMEM;
			goto fail;
		}
	}

	return msm_obj->vaddr;

fail:
	msm_obj->vmap_count--;
	return ERR_PTR(ret);
}

void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj)
{
	return get_vaddr(obj, MSM_MADV_WILLNEED);
}

void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{
	void *ret;

	msm_gem_lock(obj);
	ret = msm_gem_get_vaddr_locked(obj);
	msm_gem_unlock(obj);

	return ret;
}

/*
 * Don't use this! It is for the very special case of dumping
 * submits from GPU hangs or faults, where the bo may already
 * be MSM_MADV_DONTNEED, but we know the buffer is still on the
 * active list.
 */
void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
{
	return get_vaddr(obj, __MSM_MADV_PURGED);
}

void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	GEM_WARN_ON(!msm_gem_is_locked(obj));
	GEM_WARN_ON(msm_obj->vmap_count < 1);

	msm_obj->vmap_count--;
}

void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
	msm_gem_lock(obj);
	msm_gem_put_vaddr_locked(obj);
	msm_gem_unlock(obj);
}

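/*
 * A minimal sketch of the kernel-vaddr pairing (illustrative only;
 * "data" and "len" are hypothetical, and error handling is trimmed):
 *
 *	void *ptr = msm_gem_get_vaddr(obj);
 *
 *	if (!IS_ERR(ptr)) {
 *		memcpy(ptr, data, len);
 *		msm_gem_put_vaddr(obj);
 *	}
 */
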
/* Update madvise status, returns true if not purged, else
 * false or -errno.
 */
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_lock(obj);

	if (msm_obj->madv != __MSM_MADV_PURGED)
		msm_obj->madv = madv;

	madv = msm_obj->madv;

	/* If the obj is inactive, we might need to move it
	 * between inactive lists
	 */
	if (msm_obj->active_count == 0)
		update_inactive(msm_obj);

	msm_gem_unlock(obj);

	return (madv != __MSM_MADV_PURGED);
}

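/*
 * A sketch of the intended madvise round-trip (not from a real
 * caller): mark a buffer DONTNEED when it goes idle, mark it WILLNEED
 * again before reuse, and use the return value to detect whether it
 * was purged in between:
 *
 *	msm_gem_madvise(obj, MSM_MADV_DONTNEED);
 *	...
 *	if (msm_gem_madvise(obj, MSM_MADV_WILLNEED) <= 0)
 *		... backing pages were purged, recreate contents ...
 */
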
void msm_gem_purge(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	GEM_WARN_ON(!is_purgeable(msm_obj));
	GEM_WARN_ON(obj->import_attach);

	/* Get rid of any iommu mapping(s): */
	put_iova_spaces(obj, true);

	msm_gem_vunmap(obj);

	put_pages(obj);

	put_iova_vmas(obj);

	msm_obj->madv = __MSM_MADV_PURGED;
	update_inactive(msm_obj);

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
			0, (loff_t)-1);
}

void msm_gem_vunmap(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	if (!msm_obj->vaddr || GEM_WARN_ON(!is_vunmapable(msm_obj)))
		return;

	vunmap(msm_obj->vaddr);
	msm_obj->vaddr = NULL;
}

/* must be called before _move_to_active().. */
int msm_gem_sync_object(struct drm_gem_object *obj,
		struct msm_fence_context *fctx, bool exclusive)
{
	struct dma_resv_list *fobj;
	struct dma_fence *fence;
	int i, ret;

	fobj = dma_resv_get_list(obj->resv);
	if (!fobj || (fobj->shared_count == 0)) {
		fence = dma_resv_get_excl(obj->resv);
		/* don't need to wait on our own fences, since ring is fifo */
		if (fence && (fence->context != fctx->context)) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	if (!exclusive || !fobj)
		return 0;

	for (i = 0; i < fobj->shared_count; i++) {
		fence = rcu_dereference_protected(fobj->shared[i],
						dma_resv_held(obj->resv));
		if (fence->context != fctx->context) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	return 0;
}

void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	might_sleep();
	GEM_WARN_ON(!msm_gem_is_locked(obj));
	GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
	GEM_WARN_ON(msm_obj->dontneed);

	if (msm_obj->active_count++ == 0) {
		mutex_lock(&priv->mm_lock);
		list_del(&msm_obj->mm_list);
		list_add_tail(&msm_obj->mm_list, &gpu->active_list);
		mutex_unlock(&priv->mm_lock);
	}
}

void msm_gem_active_put(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	might_sleep();
	GEM_WARN_ON(!msm_gem_is_locked(obj));

	if (--msm_obj->active_count == 0) {
		update_inactive(msm_obj);
	}
}

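/*
 * Move an idle object onto the inactive list matching its madv state
 * (willneed / dontneed / purged), keeping the purgeable accounting in
 * step.  Caller must hold the object lock; priv->mm_lock is taken
 * here.
 */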
static void update_inactive(struct msm_gem_object *msm_obj)
{
	struct msm_drm_private *priv = msm_obj->base.dev->dev_private;

	mutex_lock(&priv->mm_lock);
	GEM_WARN_ON(msm_obj->active_count != 0);

	if (msm_obj->dontneed)
		mark_unpurgeable(msm_obj);

	list_del(&msm_obj->mm_list);
	if (msm_obj->madv == MSM_MADV_WILLNEED) {
		list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed);
	} else if (msm_obj->madv == MSM_MADV_DONTNEED) {
		list_add_tail(&msm_obj->mm_list, &priv->inactive_dontneed);
		mark_purgeable(msm_obj);
	} else {
		GEM_WARN_ON(msm_obj->madv != __MSM_MADV_PURGED);
		list_add_tail(&msm_obj->mm_list, &priv->inactive_purged);
	}

	mutex_unlock(&priv->mm_lock);
}

int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
	bool write = !!(op & MSM_PREP_WRITE);
	unsigned long remain =
		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
	long ret;

	ret = dma_resv_wait_timeout_rcu(obj->resv, write,
						true, remain);
	if (ret == 0)
		return remain == 0 ? -EBUSY : -ETIMEDOUT;
	else if (ret < 0)
		return ret;

	/* TODO cache maintenance */

	return 0;
}

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}

#ifdef CONFIG_DEBUG_FS
static void describe_fence(struct dma_fence *fence, const char *type,
		struct seq_file *m)
{
	if (!dma_fence_is_signaled(fence))
		seq_printf(m, "\t%9s: %s %s seq %llu\n", type,
				fence->ops->get_driver_name(fence),
				fence->ops->get_timeline_name(fence),
				fence->seqno);
}

void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
		struct msm_gem_stats *stats)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct dma_resv *robj = obj->resv;
	struct dma_resv_list *fobj;
	struct dma_fence *fence;
	struct msm_gem_vma *vma;
	uint64_t off = drm_vma_node_start(&obj->vma_node);
	const char *madv;

	msm_gem_lock(obj);

	stats->all.count++;
	stats->all.size += obj->size;

	if (is_active(msm_obj)) {
		stats->active.count++;
		stats->active.size += obj->size;
	}

	switch (msm_obj->madv) {
	case __MSM_MADV_PURGED:
		stats->purged.count++;
		stats->purged.size += obj->size;
		madv = " purged";
		break;
	case MSM_MADV_DONTNEED:
		stats->purgeable.count++;
		stats->purgeable.size += obj->size;
		madv = " purgeable";
		break;
	case MSM_MADV_WILLNEED:
	default:
		madv = "";
		break;
	}

	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p",
			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
			obj->name, kref_read(&obj->refcount),
			off, msm_obj->vaddr);

	seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);

	if (!list_empty(&msm_obj->vmas)) {

		seq_puts(m, "      vmas:");

		list_for_each_entry(vma, &msm_obj->vmas, list) {
			const char *name, *comm;
			if (vma->aspace) {
				struct msm_gem_address_space *aspace = vma->aspace;
				struct task_struct *task =
					get_pid_task(aspace->pid, PIDTYPE_PID);
				if (task) {
					comm = kstrdup(task->comm, GFP_KERNEL);
				} else {
					comm = NULL;
				}
				name = aspace->name;
			} else {
				name = comm = NULL;
			}
			seq_printf(m, " [%s%s%s: aspace=%p, %08llx,%s,inuse=%d]",
				name, comm ? ":" : "", comm ? comm : "",
				vma->aspace, vma->iova,
				vma->mapped ? "mapped" : "unmapped",
				vma->inuse);
			kfree(comm);
		}

		seq_puts(m, "\n");
	}

	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			describe_fence(fence, "Shared", m);
		}
	}

	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();

	msm_gem_unlock(obj);
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_stats stats = {};
	struct msm_gem_object *msm_obj;

	seq_puts(m, "   flags       id ref  offset   kaddr            size     madv      name\n");
	list_for_each_entry(msm_obj, list, node) {
		struct drm_gem_object *obj = &msm_obj->base;
		seq_puts(m, "   ");
		msm_gem_describe(obj, m, &stats);
	}

	seq_printf(m, "Total:     %4d objects, %9zu bytes\n",
			stats.all.count, stats.all.size);
	seq_printf(m, "Active:    %4d objects, %9zu bytes\n",
			stats.active.count, stats.active.size);
	seq_printf(m, "Purgeable: %4d objects, %9zu bytes\n",
			stats.purgeable.count, stats.purgeable.size);
	seq_printf(m, "Purged:    %4d objects, %9zu bytes\n",
			stats.purged.count, stats.purged.size);
}
#endif

/* don't call directly! Use drm_gem_object_put_locked() and friends */
void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;

	mutex_lock(&priv->obj_lock);
	list_del(&msm_obj->node);
	mutex_unlock(&priv->obj_lock);

	mutex_lock(&priv->mm_lock);
	if (msm_obj->dontneed)
		mark_unpurgeable(msm_obj);
	list_del(&msm_obj->mm_list);
	mutex_unlock(&priv->mm_lock);

	msm_gem_lock(obj);

	/* object should not be on active list: */
	GEM_WARN_ON(is_active(msm_obj));

	put_iova_spaces(obj, true);

	if (obj->import_attach) {
		GEM_WARN_ON(msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		kvfree(msm_obj->pages);

		put_iova_vmas(obj);

		/* dma_buf_detach() grabs resv lock, so we need to unlock
		 * prior to drm_prime_gem_destroy
		 */
		msm_gem_unlock(obj);

		drm_prime_gem_destroy(obj, msm_obj->sgt);
	} else {
		msm_gem_vunmap(obj);
		put_pages(obj);
		put_iova_vmas(obj);
		msm_gem_unlock(obj);
	}

	drm_gem_object_release(obj);

	kfree(msm_obj);
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle,
		char *name)
{
	struct drm_gem_object *obj;
	int ret;

	obj = msm_gem_new(dev, size, flags);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (name)
		msm_gem_object_set_name(obj, "%s", name);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(obj);

	return ret;
}

static const struct vm_operations_struct vm_ops = {
	.fault = msm_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct drm_gem_object_funcs msm_gem_object_funcs = {
	.free = msm_gem_free_object,
	.pin = msm_gem_prime_pin,
	.unpin = msm_gem_prime_unpin,
	.get_sg_table = msm_gem_prime_get_sg_table,
	.vmap = msm_gem_prime_vmap,
	.vunmap = msm_gem_prime_vunmap,
	.vm_ops = &vm_ops,
};

static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct drm_gem_object **obj)
{
	struct msm_gem_object *msm_obj;

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_UNCACHED:
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	default:
		DRM_DEV_ERROR(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	msm_obj->flags = flags;
	msm_obj->madv = MSM_MADV_WILLNEED;

	INIT_LIST_HEAD(&msm_obj->submit_entry);
	INIT_LIST_HEAD(&msm_obj->vmas);

	*obj = &msm_obj->base;
	(*obj)->funcs = &msm_gem_object_funcs;

	return 0;
}

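/*
 * Allocate a new bo.  The backing store is decided here: with no MMU
 * (or for stolen/scanout buffers when a carveout is configured) the
 * bo is carved out of VRAM; otherwise it is a shmem-backed object,
 * with GFP_HIGHUSER set on the mapping so the pinned pages stay out
 * of the MOVABLE zone.
 */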
static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags, bool struct_mutex_locked)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj = NULL;
	bool use_vram = false;
	int ret;

	size = PAGE_ALIGN(size);

	if (!msm_use_mmu(dev))
		use_vram = true;
	else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size)
		use_vram = true;

	if (GEM_WARN_ON(use_vram && !priv->vram.size))
		return ERR_PTR(-EINVAL);

	/* Disallow zero sized objects as they make the underlying
	 * infrastructure grumpy
	 */
	if (size == 0)
		return ERR_PTR(-EINVAL);

	ret = msm_gem_new_impl(dev, size, flags, &obj);
	if (ret)
		goto fail;

	msm_obj = to_msm_bo(obj);

	if (use_vram) {
		struct msm_gem_vma *vma;
		struct page **pages;

		drm_gem_private_object_init(dev, obj, size);

		msm_gem_lock(obj);

		vma = add_vma(obj, NULL);
		msm_gem_unlock(obj);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto fail;
		}

		to_msm_bo(obj)->vram_node = &vma->node;

		msm_gem_lock(obj);
		pages = get_pages(obj);
		msm_gem_unlock(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		vma->iova = physaddr(obj);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;
		/*
		 * Our buffers are kept pinned, so allocating them from the
		 * MOVABLE zone is a really bad idea, and conflicts with CMA.
		 * See comments above new_inode() why this is required _and_
		 * expected if you're going to pin these pages.
		 */
		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
	}

	mutex_lock(&priv->mm_lock);
	/* Initially obj is idle, obj->madv == WILLNEED: */
	list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed);
	mutex_unlock(&priv->mm_lock);

	mutex_lock(&priv->obj_lock);
	list_add_tail(&msm_obj->node, &priv->objects);
	mutex_unlock(&priv->obj_lock);

	return obj;

fail:
	if (struct_mutex_locked) {
		drm_gem_object_put_locked(obj);
	} else {
		drm_gem_object_put(obj);
	}
	return ERR_PTR(ret);
}

struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, true);
}

struct drm_gem_object *msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, false);
}

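/*
 * Import a dma-buf: the bo wraps the caller-provided sg table, and the
 * page array is rebuilt from it so the rest of the code can treat the
 * object uniformly.
 */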
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj;
	uint32_t size;
	int ret, npages;

	/* if we don't have IOMMU, don't bother pretending we can import: */
	if (!msm_use_mmu(dev)) {
		DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n");
		return ERR_PTR(-EINVAL);
	}

	size = PAGE_ALIGN(dmabuf->size);

	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
	if (ret)
		goto fail;

	drm_gem_private_object_init(dev, obj, size);

	npages = size / PAGE_SIZE;

	msm_obj = to_msm_bo(obj);
	msm_gem_lock(obj);
	msm_obj->sgt = sgt;
	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!msm_obj->pages) {
		msm_gem_unlock(obj);
		ret = -ENOMEM;
		goto fail;
	}

	ret = drm_prime_sg_to_page_array(sgt, msm_obj->pages, npages);
	if (ret) {
		msm_gem_unlock(obj);
		goto fail;
	}

	msm_gem_unlock(obj);

	mutex_lock(&priv->mm_lock);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed);
	mutex_unlock(&priv->mm_lock);

	mutex_lock(&priv->obj_lock);
	list_add_tail(&msm_obj->node, &priv->objects);
	mutex_unlock(&priv->obj_lock);

	return obj;

fail:
	drm_gem_object_put(obj);
	return ERR_PTR(ret);
}

static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova, bool locked)
{
	void *vaddr;
	struct drm_gem_object *obj = _msm_gem_new(dev, size, flags, locked);
	int ret;

	if (IS_ERR(obj))
		return ERR_CAST(obj);

	if (iova) {
		ret = msm_gem_get_and_pin_iova(obj, aspace, iova);
		if (ret)
			goto err;
	}

	vaddr = msm_gem_get_vaddr(obj);
	if (IS_ERR(vaddr)) {
		msm_gem_unpin_iova(obj, aspace);
		ret = PTR_ERR(vaddr);
		goto err;
	}

	if (bo)
		*bo = obj;

	return vaddr;
err:
	if (locked)
		drm_gem_object_put_locked(obj);
	else
		drm_gem_object_put(obj);

	return ERR_PTR(ret);

}

void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, false);
}

void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, true);
}

void msm_gem_kernel_put(struct drm_gem_object *bo,
		struct msm_gem_address_space *aspace, bool locked)
{
	if (IS_ERR_OR_NULL(bo))
		return;

	msm_gem_put_vaddr(bo);
	msm_gem_unpin_iova(bo, aspace);

	if (locked)
		drm_gem_object_put_locked(bo);
	else
		drm_gem_object_put(bo);
}

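/*
 * A minimal sketch of the kernel_new/kernel_put pairing, as used for
 * driver-internal buffers (the size and flags here are illustrative,
 * not taken from a real caller):
 *
 *	struct drm_gem_object *bo = NULL;
 *	uint64_t iova;
 *	void *ptr;
 *
 *	ptr = msm_gem_kernel_new(dev, PAGE_SIZE, MSM_BO_WC,
 *			aspace, &bo, &iova);
 *	if (!IS_ERR(ptr)) {
 *		... fill *ptr, hand iova to the hw ...
 *		msm_gem_kernel_put(bo, aspace, false);
 *	}
 */
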
void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
{
	struct msm_gem_object *msm_obj = to_msm_bo(bo);
	va_list ap;

	if (!fmt)
		return;

	va_start(ap, fmt);
	vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap);
	va_end(ap);
}