// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/dma-map-ops.h>
#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>

#include <drm/drm_prime.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"

static void update_inactive(struct msm_gem_object *msm_obj);

static dma_addr_t physaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
			priv->vram.paddr;
}

static bool use_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	return !msm_obj->vram_node;
}

/*
 * Cache sync.. this is a bit over-complicated, to fit dma-mapping
 * API.  Really GPU cache is out of scope here (handled on cmdstream)
 * and all we need to do is invalidate newly allocated pages before
 * mapping to CPU as uncached/writecombine.
 *
 * On top of this, we have the added headache, that depending on
 * display generation, the display's iommu may be wired up to either
 * the toplevel drm device (mdss), or to the mdp sub-node, meaning
 * that here we either have dma-direct or iommu ops.
 *
 * Let this be a cautionary tale of abstraction gone wrong.
 */

static void sync_for_device(struct msm_gem_object *msm_obj)
{
	struct device *dev = msm_obj->base.dev->dev;

	dma_map_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
}

static void sync_for_cpu(struct msm_gem_object *msm_obj)
{
	struct device *dev = msm_obj->base.dev->dev;

	dma_unmap_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
}

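/*
 * Illustrative sketch (editor's note, not driver code): the two helpers
 * above follow the usual dma-mapping ownership rules.  get_pages() hands
 * newly allocated pages to the device, and put_pages() hands them back
 * to the CPU before they are freed:
 *
 *	sync_for_device(msm_obj);   // pages now owned by the device
 *	...                         // GPU/display access via msm_obj->sgt
 *	sync_for_cpu(msm_obj);      // ownership returns to the CPU
 */
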
/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	dma_addr_t paddr;
	struct page **p;
	int ret, i;

	p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	spin_lock(&priv->vram.lock);
	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
	spin_unlock(&priv->vram.lock);
	if (ret) {
		kvfree(p);
		return ERR_PTR(ret);
	}

	paddr = physaddr(obj);
	for (i = 0; i < npages; i++) {
		p[i] = phys_to_page(paddr);
		paddr += PAGE_SIZE;
	}

	return p;
}

static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p;
		int npages = obj->size >> PAGE_SHIFT;

		if (use_pages(obj))
			p = drm_gem_get_pages(obj);
		else
			p = get_pages_vram(obj, npages);

		if (IS_ERR(p)) {
			DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->pages = p;

		msm_obj->sgt = drm_prime_pages_to_sg(obj->dev, p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			void *ptr = ERR_CAST(msm_obj->sgt);

			DRM_DEV_ERROR(dev->dev, "failed to allocate sgt\n");
			msm_obj->sgt = NULL;
			return ptr;
		}

		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			sync_for_device(msm_obj);

		GEM_WARN_ON(msm_obj->active_count);
		update_inactive(msm_obj);
	}

	return msm_obj->pages;
}

static void put_pages_vram(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	spin_lock(&priv->vram.lock);
	drm_mm_remove_node(msm_obj->vram_node);
	spin_unlock(&priv->vram.lock);

	kvfree(msm_obj->pages);
}

static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		if (msm_obj->sgt) {
			/* For non-cached buffers, ensure the new
			 * pages are clean because display controller,
			 * GPU, etc. are not coherent:
			 */
			if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
				sync_for_cpu(msm_obj);

			sg_free_table(msm_obj->sgt);
			kfree(msm_obj->sgt);
			msm_obj->sgt = NULL;
		}

		if (use_pages(obj))
			drm_gem_put_pages(obj, msm_obj->pages, true, false);
		else
			put_pages_vram(obj);

		msm_obj->pages = NULL;
	}
}

struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **p;

	msm_gem_lock(obj);

	if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		msm_gem_unlock(obj);
		return ERR_PTR(-EBUSY);
	}

	p = get_pages(obj);
	msm_gem_unlock(obj);
	return p;
}

void msm_gem_put_pages(struct drm_gem_object *obj)
{
	/* when we start tracking the pin count, then do something here */
}

int msm_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (msm_obj->flags & MSM_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (msm_obj->flags & MSM_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		vma->vm_pgoff = 0;
		vma_set_file(vma, obj->filp);

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

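/*
 * Illustrative sketch (editor's note, not driver code): from userspace,
 * the fake offset (see msm_gem_mmap_offset() below) is what gets passed
 * to mmap() on the drm fd, after which msm_gem_fault() populates the
 * pages on demand:
 *
 *	uint64_t offset;            // e.g. from DRM_IOCTL_MODE_MAP_DUMB
 *	void *map = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, drm_fd, offset);
 */
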
int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return msm_gem_mmap_obj(vma->vm_private_data, vma);
}

static vm_fault_t msm_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int err;
	vm_fault_t ret;

	/*
	 * vm_ops.open/drm_gem_mmap_obj and close get and put
	 * a reference on obj. So, we don't need to hold one here.
	 */
	err = msm_gem_lock_interruptible(obj);
	if (err) {
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		msm_gem_unlock(obj);
		return VM_FAULT_SIGBUS;
	}

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = vmf_error(PTR_ERR(pages));
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	ret = vmf_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
out_unlock:
	msm_gem_unlock(obj);
out:
	return ret;
}

/* get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);

	if (ret) {
		DRM_DEV_ERROR(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;

	msm_gem_lock(obj);
	offset = mmap_offset(obj);
	msm_gem_unlock(obj);
	return offset;
}

static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
	if (!vma)
		return ERR_PTR(-ENOMEM);

	vma->aspace = aspace;

	list_add_tail(&vma->list, &msm_obj->vmas);

	return vma;
}

static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	list_for_each_entry(vma, &msm_obj->vmas, list) {
		if (vma->aspace == aspace)
			return vma;
	}

	return NULL;
}

static void del_vma(struct msm_gem_vma *vma)
{
	if (!vma)
		return;

	list_del(&vma->list);
	kfree(vma);
}

/*
 * If close is true, this also closes the VMA (releasing the allocated
 * iova range) in addition to removing the iommu mapping. In the eviction
 * case (!close), we keep the iova allocated, but only remove the iommu
 * mapping.
 */
static void
put_iova_spaces(struct drm_gem_object *obj, bool close)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	list_for_each_entry(vma, &msm_obj->vmas, list) {
		if (vma->aspace) {
			msm_gem_purge_vma(vma->aspace, vma);
			if (close)
				msm_gem_close_vma(vma->aspace, vma);
		}
	}
}

/* Called with msm_obj locked */
static void
put_iova_vmas(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma, *tmp;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
		del_vma(vma);
	}
}

static int get_iova_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova,
		u64 range_start, u64 range_end)
{
	struct msm_gem_vma *vma;
	int ret = 0;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	vma = lookup_vma(obj, aspace);

	if (!vma) {
		vma = add_vma(obj, aspace);
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		ret = msm_gem_init_vma(aspace, vma, obj->size >> PAGE_SHIFT,
			range_start, range_end);
		if (ret) {
			del_vma(vma);
			return ret;
		}
	}

	*iova = vma->iova;
	return 0;
}

static int msm_gem_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;
	struct page **pages;
	int ret, prot = IOMMU_READ;

	if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
		prot |= IOMMU_WRITE;

	if (msm_obj->flags & MSM_BO_MAP_PRIV)
		prot |= IOMMU_PRIV;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
		return -EBUSY;

	vma = lookup_vma(obj, aspace);
	if (GEM_WARN_ON(!vma))
		return -EINVAL;

	pages = get_pages(obj);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	ret = msm_gem_map_vma(aspace, vma, prot,
			msm_obj->sgt, obj->size >> PAGE_SHIFT);

	if (!ret)
		msm_obj->pin_count++;

	return ret;
}

static int get_and_pin_iova_range_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova,
		u64 range_start, u64 range_end)
{
	u64 local;
	int ret;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	ret = get_iova_locked(obj, aspace, &local,
		range_start, range_end);

	if (!ret)
		ret = msm_gem_pin_iova(obj, aspace);

	if (!ret)
		*iova = local;

	return ret;
}

/*
 * Get the iova and pin it. Should have a matching put.
 * Limits the iova to the specified range (in pages).
 */
int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova,
		u64 range_start, u64 range_end)
{
	int ret;

	msm_gem_lock(obj);
	ret = get_and_pin_iova_range_locked(obj, aspace, iova, range_start, range_end);
	msm_gem_unlock(obj);

	return ret;
}

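/*
 * Illustrative sketch (editor's note, not driver code): a typical caller
 * pins an iova while the GPU needs the mapping and drops the pin
 * afterwards; the iova allocation itself stays valid for the life of
 * the object:
 *
 *	uint64_t iova;
 *	int ret = msm_gem_get_and_pin_iova(obj, aspace, &iova);
 *	if (ret)
 *		return ret;
 *	...                         // hand iova to the GPU
 *	msm_gem_unpin_iova(obj, aspace);
 */
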
int msm_gem_get_and_pin_iova_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	return get_and_pin_iova_range_locked(obj, aspace, iova, 0, U64_MAX);
}

/* get iova and pin it. Should have a matching put */
int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	return msm_gem_get_and_pin_iova_range(obj, aspace, iova, 0, U64_MAX);
}

/*
 * Get an iova but don't pin it. Doesn't need a put because iovas are currently
 * valid for the life of the object
 */
int msm_gem_get_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	int ret;

	msm_gem_lock(obj);
	ret = get_iova_locked(obj, aspace, iova, 0, U64_MAX);
	msm_gem_unlock(obj);

	return ret;
}

/* get iova without taking a reference, used in places where you have
 * already done a 'msm_gem_get_and_pin_iova' or 'msm_gem_get_iova'
 */
uint64_t msm_gem_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_vma *vma;

	msm_gem_lock(obj);
	vma = lookup_vma(obj, aspace);
	msm_gem_unlock(obj);
	GEM_WARN_ON(!vma);

	return vma ? vma->iova : 0;
}

/*
 * Locked variant of msm_gem_unpin_iova()
 */
void msm_gem_unpin_iova_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	vma = lookup_vma(obj, aspace);

	if (!GEM_WARN_ON(!vma)) {
		msm_gem_unmap_vma(aspace, vma);

		msm_obj->pin_count--;
		GEM_WARN_ON(msm_obj->pin_count < 0);

		update_inactive(msm_obj);
	}
}

/*
 * Unpin an iova by updating the reference counts. The memory isn't actually
 * purged until something else (shrinker, mm_notifier, destroy, etc) decides
 * to get rid of it
 */
void msm_gem_unpin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	msm_gem_lock(obj);
	msm_gem_unpin_iova_locked(obj, aspace);
	msm_gem_unlock(obj);
}

int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
}

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_put(obj);

fail:
	return ret;
}

static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	if (obj->import_attach)
		return ERR_PTR(-ENODEV);

	if (GEM_WARN_ON(msm_obj->madv > madv)) {
		DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
			msm_obj->madv, madv);
		return ERR_PTR(-EBUSY);
	}

	/* increment vmap_count *before* vmap() call, so shrinker can
	 * check vmap_count (is_vunmapable()) outside of msm_obj lock.
	 * This guarantees that we won't try to msm_gem_vunmap() this
	 * same object from within the vmap() call (while we already
	 * hold msm_obj lock)
	 */
	msm_obj->vmap_count++;

	if (!msm_obj->vaddr) {
		struct page **pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}
		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
		if (msm_obj->vaddr == NULL) {
			ret = -ENOMEM;
			goto fail;
		}
	}

	return msm_obj->vaddr;

fail:
	msm_obj->vmap_count--;
	return ERR_PTR(ret);
}

void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj)
{
	return get_vaddr(obj, MSM_MADV_WILLNEED);
}

void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{
	void *ret;

	msm_gem_lock(obj);
	ret = msm_gem_get_vaddr_locked(obj);
	msm_gem_unlock(obj);

	return ret;
}

/*
 * Don't use this!  It is for the very special case of dumping
 * submits from GPU hangs or faults, where the bo may already
 * be MSM_MADV_DONTNEED, but we know the buffer is still on the
 * active list.
 */
void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
{
	return get_vaddr(obj, __MSM_MADV_PURGED);
}

void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	GEM_WARN_ON(!msm_gem_is_locked(obj));
	GEM_WARN_ON(msm_obj->vmap_count < 1);

	msm_obj->vmap_count--;
}

void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
	msm_gem_lock(obj);
	msm_gem_put_vaddr_locked(obj);
	msm_gem_unlock(obj);
}

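/*
 * Illustrative sketch (editor's note, not driver code): kernel CPU
 * access brackets the mapping with get/put so that the vmap_count
 * bookkeeping the shrinker relies on stays balanced:
 *
 *	void *vaddr = msm_gem_get_vaddr(obj);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	memcpy(vaddr, data, len);
 *	msm_gem_put_vaddr(obj);
 */
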
/* Update madvise status, returns true if not purged, else
 * false or -errno.
 */
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_lock(obj);

	if (msm_obj->madv != __MSM_MADV_PURGED)
		msm_obj->madv = madv;

	madv = msm_obj->madv;

	/* If the obj is inactive, we might need to move it
	 * between inactive lists
	 */
	if (msm_obj->active_count == 0)
		update_inactive(msm_obj);

	msm_gem_unlock(obj);

	return (madv != __MSM_MADV_PURGED);
}

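/*
 * Illustrative sketch (editor's note, not driver code): userspace marks
 * idle buffers purgeable via the DRM_MSM_GEM_MADVISE ioctl, which lands
 * here; when marking a buffer needed again the caller must check the
 * "retained" result, since the shrinker may have purged the backing
 * pages in between:
 *
 *	ret = msm_gem_madvise(obj, MSM_MADV_WILLNEED);
 *	if (ret < 0)
 *		return ret;
 *	if (ret == 0)
 *		...                 // contents were purged, reinitialize
 */
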
void msm_gem_purge(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	GEM_WARN_ON(!msm_gem_is_locked(obj));
	GEM_WARN_ON(!is_purgeable(msm_obj));

	/* Get rid of any iommu mapping(s): */
	put_iova_spaces(obj, true);

	msm_gem_vunmap(obj);

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);

	put_pages(obj);

	put_iova_vmas(obj);

	msm_obj->madv = __MSM_MADV_PURGED;
	update_inactive(msm_obj);

	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
			0, (loff_t)-1);
}

/*
 * Unpin the backing pages and make them available to be swapped out.
 */
void msm_gem_evict(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	GEM_WARN_ON(!msm_gem_is_locked(obj));
	GEM_WARN_ON(is_unevictable(msm_obj));
	GEM_WARN_ON(!msm_obj->evictable);
	GEM_WARN_ON(msm_obj->active_count);

	/* Get rid of any iommu mapping(s): */
	put_iova_spaces(obj, false);

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);

	put_pages(obj);

	update_inactive(msm_obj);
}

void msm_gem_vunmap(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	if (!msm_obj->vaddr || GEM_WARN_ON(!is_vunmapable(msm_obj)))
		return;

	vunmap(msm_obj->vaddr);
	msm_obj->vaddr = NULL;
}

/* must be called before _move_to_active().. */
int msm_gem_sync_object(struct drm_gem_object *obj,
		struct msm_fence_context *fctx, bool exclusive)
{
	struct dma_resv_list *fobj;
	struct dma_fence *fence;
	int i, ret;

	fobj = dma_resv_get_list(obj->resv);
	if (!fobj || (fobj->shared_count == 0)) {
		fence = dma_resv_get_excl(obj->resv);
		/* don't need to wait on our own fences, since ring is fifo */
		if (fence && (fence->context != fctx->context)) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	if (!exclusive || !fobj)
		return 0;

	for (i = 0; i < fobj->shared_count; i++) {
		fence = rcu_dereference_protected(fobj->shared[i],
						dma_resv_held(obj->resv));
		if (fence->context != fctx->context) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	return 0;
}

void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	might_sleep();
	GEM_WARN_ON(!msm_gem_is_locked(obj));
	GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
	GEM_WARN_ON(msm_obj->dontneed);
	GEM_WARN_ON(!msm_obj->sgt);

	if (msm_obj->active_count++ == 0) {
		mutex_lock(&priv->mm_lock);
		if (msm_obj->evictable)
			mark_unevictable(msm_obj);
		list_del(&msm_obj->mm_list);
		list_add_tail(&msm_obj->mm_list, &gpu->active_list);
		mutex_unlock(&priv->mm_lock);
	}
}

void msm_gem_active_put(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	might_sleep();
	GEM_WARN_ON(!msm_gem_is_locked(obj));

	if (--msm_obj->active_count == 0) {
		update_inactive(msm_obj);
	}
}

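/*
 * Editor's summary of the function below: update_inactive() picks the
 * inactive list an idle BO belongs on.  WILLNEED objects with backing
 * pages go to inactive_willneed and become evictable, DONTNEED objects
 * go to inactive_dontneed and become purgeable, and everything else
 * (purged or never pinned) lands on inactive_unpinned.
 */
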
static void update_inactive(struct msm_gem_object *msm_obj)
{
	struct msm_drm_private *priv = msm_obj->base.dev->dev_private;

	GEM_WARN_ON(!msm_gem_is_locked(&msm_obj->base));

	if (msm_obj->active_count != 0)
		return;

	mutex_lock(&priv->mm_lock);

	if (msm_obj->dontneed)
		mark_unpurgeable(msm_obj);
	if (msm_obj->evictable)
		mark_unevictable(msm_obj);

	list_del(&msm_obj->mm_list);
	if ((msm_obj->madv == MSM_MADV_WILLNEED) && msm_obj->sgt) {
		list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed);
		mark_evictable(msm_obj);
	} else if (msm_obj->madv == MSM_MADV_DONTNEED) {
		list_add_tail(&msm_obj->mm_list, &priv->inactive_dontneed);
		mark_purgeable(msm_obj);
	} else {
		GEM_WARN_ON((msm_obj->madv != __MSM_MADV_PURGED) && msm_obj->sgt);
		list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned);
	}

	mutex_unlock(&priv->mm_lock);
}

int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
	bool write = !!(op & MSM_PREP_WRITE);
	unsigned long remain =
		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
	long ret;

	ret = dma_resv_wait_timeout_rcu(obj->resv, write,
						true, remain);
	if (ret == 0)
		return remain == 0 ? -EBUSY : -ETIMEDOUT;
	else if (ret < 0)
		return ret;

	/* TODO cache maintenance */

	return 0;
}

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}

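/*
 * Illustrative sketch (editor's note, not driver code): the CPU access
 * protocol behind the DRM_MSM_GEM_CPU_PREP/CPU_FINI ioctls is to wait
 * for pending GPU work (unless MSM_PREP_NOSYNC), do the access, then
 * signal completion:
 *
 *	ret = msm_gem_cpu_prep(obj, MSM_PREP_READ, &timeout);
 *	if (ret)
 *		return ret;         // -EBUSY or -ETIMEDOUT
 *	...                         // CPU reads the buffer
 *	msm_gem_cpu_fini(obj);
 */
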
#ifdef CONFIG_DEBUG_FS
static void describe_fence(struct dma_fence *fence, const char *type,
		struct seq_file *m)
{
	if (!dma_fence_is_signaled(fence))
		seq_printf(m, "\t%9s: %s %s seq %llu\n", type,
				fence->ops->get_driver_name(fence),
				fence->ops->get_timeline_name(fence),
				fence->seqno);
}

void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
		struct msm_gem_stats *stats)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct dma_resv *robj = obj->resv;
	struct dma_resv_list *fobj;
	struct dma_fence *fence;
	struct msm_gem_vma *vma;
	uint64_t off = drm_vma_node_start(&obj->vma_node);
	const char *madv;

	msm_gem_lock(obj);

	stats->all.count++;
	stats->all.size += obj->size;

	if (is_active(msm_obj)) {
		stats->active.count++;
		stats->active.size += obj->size;
	}

	if (msm_obj->pages) {
		stats->resident.count++;
		stats->resident.size += obj->size;
	}

	switch (msm_obj->madv) {
	case __MSM_MADV_PURGED:
		stats->purged.count++;
		stats->purged.size += obj->size;
		madv = " purged";
		break;
	case MSM_MADV_DONTNEED:
		stats->purgeable.count++;
		stats->purgeable.size += obj->size;
		madv = " purgeable";
		break;
	case MSM_MADV_WILLNEED:
	default:
		madv = "";
		break;
	}

	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p",
			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
			obj->name, kref_read(&obj->refcount),
			off, msm_obj->vaddr);

	seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);

	if (!list_empty(&msm_obj->vmas)) {

		seq_puts(m, "      vmas:");

		list_for_each_entry(vma, &msm_obj->vmas, list) {
			const char *name, *comm;
			if (vma->aspace) {
				struct msm_gem_address_space *aspace = vma->aspace;
				struct task_struct *task =
					get_pid_task(aspace->pid, PIDTYPE_PID);
				if (task) {
					comm = kstrdup(task->comm, GFP_KERNEL);
				} else {
					comm = NULL;
				}
				name = aspace->name;
			} else {
				name = comm = NULL;
			}
			seq_printf(m, " [%s%s%s: aspace=%p, %08llx,%s,inuse=%d]",
				name, comm ? ":" : "", comm ? comm : "",
				vma->aspace, vma->iova,
				vma->mapped ? "mapped" : "unmapped",
				vma->inuse);
			kfree(comm);
		}

		seq_puts(m, "\n");
	}

	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			describe_fence(fence, "Shared", m);
		}
	}

	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();

	msm_gem_unlock(obj);
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_stats stats = {};
	struct msm_gem_object *msm_obj;

	seq_puts(m, "   flags       id ref  offset   kaddr            size     madv      name\n");
	list_for_each_entry(msm_obj, list, node) {
		struct drm_gem_object *obj = &msm_obj->base;
		seq_puts(m, "   ");
		msm_gem_describe(obj, m, &stats);
	}

	seq_printf(m, "Total:     %4d objects, %9zu bytes\n",
			stats.all.count, stats.all.size);
	seq_printf(m, "Active:    %4d objects, %9zu bytes\n",
			stats.active.count, stats.active.size);
	seq_printf(m, "Resident:  %4d objects, %9zu bytes\n",
			stats.resident.count, stats.resident.size);
	seq_printf(m, "Purgeable: %4d objects, %9zu bytes\n",
			stats.purgeable.count, stats.purgeable.size);
	seq_printf(m, "Purged:    %4d objects, %9zu bytes\n",
			stats.purged.count, stats.purged.size);
}
#endif

/* don't call directly!  Use drm_gem_object_put_locked() and friends */
void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;

	mutex_lock(&priv->obj_lock);
	list_del(&msm_obj->node);
	mutex_unlock(&priv->obj_lock);

	mutex_lock(&priv->mm_lock);
	if (msm_obj->dontneed)
		mark_unpurgeable(msm_obj);
	list_del(&msm_obj->mm_list);
	mutex_unlock(&priv->mm_lock);

	msm_gem_lock(obj);

	/* object should not be on active list: */
	GEM_WARN_ON(is_active(msm_obj));

	put_iova_spaces(obj, true);

	if (obj->import_attach) {
		GEM_WARN_ON(msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		kvfree(msm_obj->pages);

		put_iova_vmas(obj);

		/* dma_buf_detach() grabs resv lock, so we need to unlock
		 * prior to drm_prime_gem_destroy
		 */
		msm_gem_unlock(obj);

		drm_prime_gem_destroy(obj, msm_obj->sgt);
	} else {
		msm_gem_vunmap(obj);
		put_pages(obj);
		put_iova_vmas(obj);
		msm_gem_unlock(obj);
	}

	drm_gem_object_release(obj);

	kfree(msm_obj);
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle,
		char *name)
{
	struct drm_gem_object *obj;
	int ret;

	obj = msm_gem_new(dev, size, flags);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (name)
		msm_gem_object_set_name(obj, "%s", name);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(obj);

	return ret;
}

static const struct vm_operations_struct vm_ops = {
	.fault = msm_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct drm_gem_object_funcs msm_gem_object_funcs = {
	.free = msm_gem_free_object,
	.pin = msm_gem_prime_pin,
	.unpin = msm_gem_prime_unpin,
	.get_sg_table = msm_gem_prime_get_sg_table,
	.vmap = msm_gem_prime_vmap,
	.vunmap = msm_gem_prime_vunmap,
	.vm_ops = &vm_ops,
};

static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct drm_gem_object **obj)
{
	struct msm_gem_object *msm_obj;

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_UNCACHED:
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	default:
		DRM_DEV_ERROR(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	msm_obj->flags = flags;
	msm_obj->madv = MSM_MADV_WILLNEED;

	INIT_LIST_HEAD(&msm_obj->submit_entry);
	INIT_LIST_HEAD(&msm_obj->vmas);

	*obj = &msm_obj->base;
	(*obj)->funcs = &msm_gem_object_funcs;

	return 0;
}

static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags, bool struct_mutex_locked)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj = NULL;
	bool use_vram = false;
	int ret;

	size = PAGE_ALIGN(size);

	if (!msm_use_mmu(dev))
		use_vram = true;
	else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size)
		use_vram = true;

	if (GEM_WARN_ON(use_vram && !priv->vram.size))
		return ERR_PTR(-EINVAL);

	/* Disallow zero sized objects as they make the underlying
	 * infrastructure grumpy
	 */
	if (size == 0)
		return ERR_PTR(-EINVAL);

	ret = msm_gem_new_impl(dev, size, flags, &obj);
	if (ret)
		goto fail;

	msm_obj = to_msm_bo(obj);

	if (use_vram) {
		struct msm_gem_vma *vma;
		struct page **pages;

		drm_gem_private_object_init(dev, obj, size);

		msm_gem_lock(obj);

		vma = add_vma(obj, NULL);
		msm_gem_unlock(obj);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto fail;
		}

		to_msm_bo(obj)->vram_node = &vma->node;

		msm_gem_lock(obj);
		pages = get_pages(obj);
		msm_gem_unlock(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		vma->iova = physaddr(obj);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;
		/*
		 * Our buffers are kept pinned, so allocating them from the
		 * MOVABLE zone is a really bad idea, and conflicts with CMA.
		 * See comments above new_inode() why this is required _and_
		 * expected if you're going to pin these pages.
		 */
		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
	}

	mutex_lock(&priv->mm_lock);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned);
	mutex_unlock(&priv->mm_lock);

	mutex_lock(&priv->obj_lock);
	list_add_tail(&msm_obj->node, &priv->objects);
	mutex_unlock(&priv->obj_lock);

	return obj;

fail:
	if (struct_mutex_locked) {
		drm_gem_object_put_locked(obj);
	} else {
		drm_gem_object_put(obj);
	}
	return ERR_PTR(ret);
}

struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, true);
}

struct drm_gem_object *msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, false);
}

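/*
 * Illustrative sketch (editor's note, not driver code): allocating a
 * write-combined BO from a context that does not hold struct_mutex:
 *
 *	struct drm_gem_object *obj = msm_gem_new(dev, size, MSM_BO_WC);
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 */
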
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj;
	uint32_t size;
	int ret, npages;

	/* if we don't have IOMMU, don't bother pretending we can import: */
	if (!msm_use_mmu(dev)) {
		DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n");
		return ERR_PTR(-EINVAL);
	}

	size = PAGE_ALIGN(dmabuf->size);

	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
	if (ret)
		goto fail;

	drm_gem_private_object_init(dev, obj, size);

	npages = size / PAGE_SIZE;

	msm_obj = to_msm_bo(obj);
	msm_gem_lock(obj);
	msm_obj->sgt = sgt;
	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!msm_obj->pages) {
		msm_gem_unlock(obj);
		ret = -ENOMEM;
		goto fail;
	}

	ret = drm_prime_sg_to_page_array(sgt, msm_obj->pages, npages);
	if (ret) {
		msm_gem_unlock(obj);
		goto fail;
	}

	msm_gem_unlock(obj);

	mutex_lock(&priv->mm_lock);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned);
	mutex_unlock(&priv->mm_lock);

	mutex_lock(&priv->obj_lock);
	list_add_tail(&msm_obj->node, &priv->objects);
	mutex_unlock(&priv->obj_lock);

	return obj;

fail:
	drm_gem_object_put(obj);
	return ERR_PTR(ret);
}

static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova, bool locked)
{
	void *vaddr;
	struct drm_gem_object *obj = _msm_gem_new(dev, size, flags, locked);
	int ret;

	if (IS_ERR(obj))
		return ERR_CAST(obj);

	if (iova) {
		ret = msm_gem_get_and_pin_iova(obj, aspace, iova);
		if (ret)
			goto err;
	}

	vaddr = msm_gem_get_vaddr(obj);
	if (IS_ERR(vaddr)) {
		msm_gem_unpin_iova(obj, aspace);
		ret = PTR_ERR(vaddr);
		goto err;
	}

	if (bo)
		*bo = obj;

	return vaddr;
err:
	if (locked)
		drm_gem_object_put_locked(obj);
	else
		drm_gem_object_put(obj);

	return ERR_PTR(ret);
}

void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, false);
}

void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, true);
}

void msm_gem_kernel_put(struct drm_gem_object *bo,
		struct msm_gem_address_space *aspace, bool locked)
{
	if (IS_ERR_OR_NULL(bo))
		return;

	msm_gem_put_vaddr(bo);
	msm_gem_unpin_iova(bo, aspace);

	if (locked)
		drm_gem_object_put_locked(bo);
	else
		drm_gem_object_put(bo);
}
1408void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
1409{
1410 struct msm_gem_object *msm_obj = to_msm_bo(bo);
1411 va_list ap;
1412
1413 if (!fmt)
1414 return;
1415
1416 va_start(ap, fmt);
1417 vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap);
1418 va_end(ap);
1419}
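
/*
 * Illustrative sketch (editor's note, not driver code): callers tag BOs
 * with a short debug name that shows up in the debugfs listing above,
 * for example:
 *
 *	msm_gem_object_set_name(bo, "ring%d", id);
 */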