// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>

#include <drm/drm_prime.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"

static void msm_gem_vunmap_locked(struct drm_gem_object *obj);


static dma_addr_t physaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
			priv->vram.paddr;
}

static bool use_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	return !msm_obj->vram_node;
}

/*
 * Cache sync.. this is a bit over-complicated, to fit the dma-mapping
 * API.  Really the GPU cache is out of scope here (handled on cmdstream)
 * and all we need to do is invalidate newly allocated pages before
 * mapping to CPU as uncached/writecombine.
 *
 * On top of this, we have the added headache that, depending on
 * display generation, the display's iommu may be wired up to either
 * the toplevel drm device (mdss), or to the mdp sub-node, meaning
 * that here we either have dma-direct or iommu ops.
 *
 * Let this be a cautionary tale of abstraction gone wrong.
 */

static void sync_for_device(struct msm_gem_object *msm_obj)
{
	struct device *dev = msm_obj->base.dev->dev;

	if (get_dma_ops(dev) && IS_ENABLED(CONFIG_ARM64)) {
		dma_sync_sg_for_device(dev, msm_obj->sgt->sgl,
			msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	} else {
		dma_map_sg(dev, msm_obj->sgt->sgl,
			msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	}
}

static void sync_for_cpu(struct msm_gem_object *msm_obj)
{
	struct device *dev = msm_obj->base.dev->dev;

	if (get_dma_ops(dev) && IS_ENABLED(CONFIG_ARM64)) {
		dma_sync_sg_for_cpu(dev, msm_obj->sgt->sgl,
			msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	} else {
		dma_unmap_sg(dev, msm_obj->sgt->sgl,
			msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	}
}

/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	dma_addr_t paddr;
	struct page **p;
	int ret, i;

	p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	spin_lock(&priv->vram.lock);
	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
	spin_unlock(&priv->vram.lock);
	if (ret) {
		kvfree(p);
		return ERR_PTR(ret);
	}

	paddr = physaddr(obj);
	for (i = 0; i < npages; i++) {
		p[i] = phys_to_page(paddr);
		paddr += PAGE_SIZE;
	}

	return p;
}

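/* Lazily allocate and cache the backing pages.  Must be called with
 * msm_obj->lock held; the pages (and sg table) stay attached to the
 * object until put_pages().
 */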
static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p;
		int npages = obj->size >> PAGE_SHIFT;

		if (use_pages(obj))
			p = drm_gem_get_pages(obj);
		else
			p = get_pages_vram(obj, npages);

		if (IS_ERR(p)) {
			DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->pages = p;

		msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			void *ptr = ERR_CAST(msm_obj->sgt);

			DRM_DEV_ERROR(dev->dev, "failed to allocate sgt\n");
			msm_obj->sgt = NULL;
			return ptr;
		}

		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			sync_for_device(msm_obj);
	}

	return msm_obj->pages;
}

static void put_pages_vram(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	spin_lock(&priv->vram.lock);
	drm_mm_remove_node(msm_obj->vram_node);
	spin_unlock(&priv->vram.lock);

	kvfree(msm_obj->pages);
}

static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		if (msm_obj->sgt) {
			/* For non-cached buffers, sync the pages back to
			 * the CPU domain before freeing, because display
			 * controller, GPU, etc. are not coherent:
			 */
			if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
				sync_for_cpu(msm_obj);

			sg_free_table(msm_obj->sgt);
			kfree(msm_obj->sgt);
		}

		if (use_pages(obj))
			drm_gem_put_pages(obj, msm_obj->pages, true, false);
		else
			put_pages_vram(obj);

		msm_obj->pages = NULL;
	}
}

struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **p;

	mutex_lock(&msm_obj->lock);

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return ERR_PTR(-EBUSY);
	}

	p = get_pages(obj);
	mutex_unlock(&msm_obj->lock);
	return p;
}

void msm_gem_put_pages(struct drm_gem_object *obj)
{
	/* when we start tracking the pin count, then do something here */
}

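/* Set up vm flags and page protection for CPU mappings, honoring the
 * MSM_BO_WC/MSM_BO_UNCACHED caching flags; the actual pages are
 * inserted lazily by the fault handler below.
 */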
int msm_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (msm_obj->flags & MSM_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (msm_obj->flags & MSM_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(obj->filp);
		vma->vm_pgoff = 0;
		vma->vm_file = obj->filp;

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return msm_gem_mmap_obj(vma->vm_private_data, vma);
}

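/* Fault handler: faults in one page at a time.  If we fail to take
 * msm_obj->lock, return VM_FAULT_NOPAGE so the fault is simply retried.
 */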
vm_fault_t msm_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int err;
	vm_fault_t ret;

	/*
	 * vm_ops.open/drm_gem_mmap_obj and close get and put
	 * a reference on obj.  So, we don't need to hold one here.
	 */
	err = mutex_lock_interruptible(&msm_obj->lock);
	if (err) {
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return VM_FAULT_SIGBUS;
	}

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = vmf_error(PTR_ERR(pages));
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	ret = vmf_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
out_unlock:
	mutex_unlock(&msm_obj->lock);
out:
	return ret;
}

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);

	if (ret) {
		DRM_DEV_ERROR(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);
	offset = mmap_offset(obj);
	mutex_unlock(&msm_obj->lock);
	return offset;
}

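/* Each object tracks one msm_gem_vma per address space it is mapped in.
 * add_vma() and lookup_vma() must be called with msm_obj->lock held;
 * del_vma() expects the vma to already be unmapped and closed.
 */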
static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
	if (!vma)
		return ERR_PTR(-ENOMEM);

	vma->aspace = aspace;

	list_add_tail(&vma->list, &msm_obj->vmas);

	return vma;
}

static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	list_for_each_entry(vma, &msm_obj->vmas, list) {
		if (vma->aspace == aspace)
			return vma;
	}

	return NULL;
}

static void del_vma(struct msm_gem_vma *vma)
{
	if (!vma)
		return;

	list_del(&vma->list);
	kfree(vma);
}

/* Called with msm_obj->lock held */
static void
put_iova(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma, *tmp;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
		if (vma->aspace) {
			msm_gem_purge_vma(vma->aspace, vma);
			msm_gem_close_vma(vma->aspace, vma);
		}
		del_vma(vma);
	}
}

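/* Look up (or lazily create) the vma for the given address space and
 * return its iova.  The vma itself currently lives for the rest of the
 * object's lifetime; see msm_gem_get_iova() below.
 */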
static int msm_gem_get_iova_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova,
		u64 range_start, u64 range_end)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;
	int ret = 0;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	vma = lookup_vma(obj, aspace);

	if (!vma) {
		vma = add_vma(obj, aspace);
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		ret = msm_gem_init_vma(aspace, vma, obj->size >> PAGE_SHIFT,
			range_start, range_end);
		if (ret) {
			del_vma(vma);
			return ret;
		}
	}

	*iova = vma->iova;
	return 0;
}

static int msm_gem_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;
	struct page **pages;
	int prot = IOMMU_READ;

	if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
		prot |= IOMMU_WRITE;

	if (msm_obj->flags & MSM_BO_MAP_PRIV)
		prot |= IOMMU_PRIV;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
		return -EBUSY;

	vma = lookup_vma(obj, aspace);
	if (WARN_ON(!vma))
		return -EINVAL;

	pages = get_pages(obj);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	return msm_gem_map_vma(aspace, vma, prot,
			msm_obj->sgt, obj->size >> PAGE_SHIFT);
}

/*
 * Get the iova and pin it.  Should have a matching put.
 * Limits the iova to the specified range (in pages).
 */
int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova,
		u64 range_start, u64 range_end)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	u64 local;
	int ret;

	mutex_lock(&msm_obj->lock);

	ret = msm_gem_get_iova_locked(obj, aspace, &local,
		range_start, range_end);

	if (!ret)
		ret = msm_gem_pin_iova(obj, aspace);

	if (!ret)
		*iova = local;

	mutex_unlock(&msm_obj->lock);
	return ret;
}

/* get iova and pin it. Should have a matching put */
int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	return msm_gem_get_and_pin_iova_range(obj, aspace, iova, 0, U64_MAX);
}

/*
 * Get an iova but don't pin it.  Doesn't need a put because iovas are
 * currently valid for the life of the object.
 */
int msm_gem_get_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret;

	mutex_lock(&msm_obj->lock);
	ret = msm_gem_get_iova_locked(obj, aspace, iova, 0, U64_MAX);
	mutex_unlock(&msm_obj->lock);

	return ret;
}

/* get iova without taking a reference, used in places where you have
 * already done a 'msm_gem_get_and_pin_iova' or 'msm_gem_get_iova'
 */
uint64_t msm_gem_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	mutex_lock(&msm_obj->lock);
	vma = lookup_vma(obj, aspace);
	mutex_unlock(&msm_obj->lock);
	WARN_ON(!vma);

	return vma ? vma->iova : 0;
}

/*
 * Unpin an iova by updating the reference counts.  The memory isn't
 * actually purged until something else (shrinker, mm_notifier, destroy,
 * etc) decides to get rid of it.
 */
void msm_gem_unpin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	mutex_lock(&msm_obj->lock);
	vma = lookup_vma(obj, aspace);

	if (!WARN_ON(!vma))
		msm_gem_unmap_vma(aspace, vma);

	mutex_unlock(&msm_obj->lock);
}

int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
}

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_put_unlocked(obj);

fail:
	return ret;
}

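/* Map the object into the kernel's address space (writecombine), creating
 * the mapping on first use.  vmap_count lets the shrinker tell whether it
 * is safe to vunmap the object; see the comment below.
 */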
static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	mutex_lock(&msm_obj->lock);

	if (WARN_ON(msm_obj->madv > madv)) {
		DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
			msm_obj->madv, madv);
		mutex_unlock(&msm_obj->lock);
		return ERR_PTR(-EBUSY);
	}

	/* increment vmap_count *before* vmap() call, so shrinker can
	 * check vmap_count (is_vunmapable()) outside of msm_obj->lock.
	 * This guarantees that we won't try to msm_gem_vunmap() this
	 * same object from within the vmap() call (while we already
	 * hold msm_obj->lock)
	 */
	msm_obj->vmap_count++;

	if (!msm_obj->vaddr) {
		struct page **pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}
		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
		if (msm_obj->vaddr == NULL) {
			ret = -ENOMEM;
			goto fail;
		}
	}

	mutex_unlock(&msm_obj->lock);
	return msm_obj->vaddr;

fail:
	msm_obj->vmap_count--;
	mutex_unlock(&msm_obj->lock);
	return ERR_PTR(ret);
}

void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{
	return get_vaddr(obj, MSM_MADV_WILLNEED);
}

/*
 * Don't use this!  It is for the very special case of dumping
 * submits from GPU hangs or faults, where the bo may already
 * be MSM_MADV_DONTNEED, but we know the buffer is still on the
 * active list.
 */
void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
{
	return get_vaddr(obj, __MSM_MADV_PURGED);
}

void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);
	WARN_ON(msm_obj->vmap_count < 1);
	msm_obj->vmap_count--;
	mutex_unlock(&msm_obj->lock);
}

/* Update madvise status, returns true if not purged, else
 * false or -errno.
 */
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	if (msm_obj->madv != __MSM_MADV_PURGED)
		msm_obj->madv = madv;

	madv = msm_obj->madv;

	mutex_unlock(&msm_obj->lock);

	return (madv != __MSM_MADV_PURGED);
}

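/* Drop the backing pages, kernel mapping and mmap offset of a purgeable
 * (MSM_MADV_DONTNEED) object and truncate the shmem backing file, so the
 * memory is handed back to the system immediately.
 */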
void msm_gem_purge(struct drm_gem_object *obj, enum msm_gem_lock subclass)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
	WARN_ON(!is_purgeable(msm_obj));
	WARN_ON(obj->import_attach);

	mutex_lock_nested(&msm_obj->lock, subclass);

	put_iova(obj);

	msm_gem_vunmap_locked(obj);

	put_pages(obj);

	msm_obj->madv = __MSM_MADV_PURGED;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
			0, (loff_t)-1);

	mutex_unlock(&msm_obj->lock);
}

static void msm_gem_vunmap_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
		return;

	vunmap(msm_obj->vaddr);
	msm_obj->vaddr = NULL;
}

void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock_nested(&msm_obj->lock, subclass);
	msm_gem_vunmap_locked(obj);
	mutex_unlock(&msm_obj->lock);
}

/* must be called before _move_to_active().. */
int msm_gem_sync_object(struct drm_gem_object *obj,
		struct msm_fence_context *fctx, bool exclusive)
{
	struct dma_resv_list *fobj;
	struct dma_fence *fence;
	int i, ret;

	fobj = dma_resv_get_list(obj->resv);
	if (!fobj || (fobj->shared_count == 0)) {
		fence = dma_resv_get_excl(obj->resv);
		/* don't need to wait on our own fences, since ring is fifo */
		if (fence && (fence->context != fctx->context)) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	if (!exclusive || !fobj)
		return 0;

	for (i = 0; i < fobj->shared_count; i++) {
		fence = rcu_dereference_protected(fobj->shared[i],
						dma_resv_held(obj->resv));
		if (fence->context != fctx->context) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	return 0;
}

void msm_gem_move_to_active(struct drm_gem_object *obj,
		struct msm_gpu *gpu, bool exclusive, struct dma_fence *fence)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
	msm_obj->gpu = gpu;
	if (exclusive)
		dma_resv_add_excl_fence(obj->resv, fence);
	else
		dma_resv_add_shared_fence(obj->resv, fence);
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &gpu->active_list);
}

void msm_gem_move_to_inactive(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	msm_obj->gpu = NULL;
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
}

int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
	bool write = !!(op & MSM_PREP_WRITE);
	unsigned long remain =
		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
	long ret;

	ret = dma_resv_wait_timeout_rcu(obj->resv, write,
						  true, remain);
	if (ret == 0)
		return remain == 0 ? -EBUSY : -ETIMEDOUT;
	else if (ret < 0)
		return ret;

	/* TODO cache maintenance */

	return 0;
}

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}

#ifdef CONFIG_DEBUG_FS
static void describe_fence(struct dma_fence *fence, const char *type,
		struct seq_file *m)
{
	if (!dma_fence_is_signaled(fence))
		seq_printf(m, "\t%9s: %s %s seq %llu\n", type,
				fence->ops->get_driver_name(fence),
				fence->ops->get_timeline_name(fence),
				fence->seqno);
}

void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct dma_resv *robj = obj->resv;
	struct dma_resv_list *fobj;
	struct dma_fence *fence;
	struct msm_gem_vma *vma;
	uint64_t off = drm_vma_node_start(&obj->vma_node);
	const char *madv;

	mutex_lock(&msm_obj->lock);

	switch (msm_obj->madv) {
	case __MSM_MADV_PURGED:
		madv = " purged";
		break;
	case MSM_MADV_DONTNEED:
		madv = " purgeable";
		break;
	case MSM_MADV_WILLNEED:
	default:
		madv = "";
		break;
	}

	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p",
			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
			obj->name, kref_read(&obj->refcount),
			off, msm_obj->vaddr);

	seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);

	if (!list_empty(&msm_obj->vmas)) {

		seq_puts(m, " vmas:");

		list_for_each_entry(vma, &msm_obj->vmas, list)
			seq_printf(m, " [%s: %08llx,%s,inuse=%d]",
				vma->aspace != NULL ? vma->aspace->name : NULL,
				vma->iova, vma->mapped ? "mapped" : "unmapped",
				vma->inuse);

		seq_puts(m, "\n");
	}

	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			describe_fence(fence, "Shared", m);
		}
	}

	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();

	mutex_unlock(&msm_obj->lock);
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_object *msm_obj;
	int count = 0;
	size_t size = 0;

	seq_puts(m, " flags id ref offset kaddr size madv name\n");
	list_for_each_entry(msm_obj, list, mm_list) {
		struct drm_gem_object *obj = &msm_obj->base;
		seq_puts(m, " ");
		msm_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

/* don't call directly!  Use drm_gem_object_put() and friends */
void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;

	if (llist_add(&msm_obj->freed, &priv->free_list))
		queue_work(priv->wq, &priv->free_work);
}

static void free_object(struct msm_gem_object *msm_obj)
{
	struct drm_gem_object *obj = &msm_obj->base;
	struct drm_device *dev = obj->dev;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* object should not be on active list: */
	WARN_ON(is_active(msm_obj));

	list_del(&msm_obj->mm_list);

	mutex_lock(&msm_obj->lock);

	put_iova(obj);

	if (obj->import_attach) {
		if (msm_obj->vaddr)
			dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		if (msm_obj->pages)
			kvfree(msm_obj->pages);

		drm_prime_gem_destroy(obj, msm_obj->sgt);
	} else {
		msm_gem_vunmap_locked(obj);
		put_pages(obj);
	}

	drm_gem_object_release(obj);

	mutex_unlock(&msm_obj->lock);
	kfree(msm_obj);
}

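/* Deferred free: msm_gem_free_object() only queues the object on
 * priv->free_list; the actual teardown runs here under struct_mutex,
 * backing off when a reschedule is needed.
 */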
void msm_gem_free_work(struct work_struct *work)
{
	struct msm_drm_private *priv =
		container_of(work, struct msm_drm_private, free_work);
	struct drm_device *dev = priv->dev;
	struct llist_node *freed;
	struct msm_gem_object *msm_obj, *next;

	while ((freed = llist_del_all(&priv->free_list))) {

		mutex_lock(&dev->struct_mutex);

		llist_for_each_entry_safe(msm_obj, next,
					  freed, freed)
			free_object(msm_obj);

		mutex_unlock(&dev->struct_mutex);

		if (need_resched())
			break;
	}
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle,
		char *name)
{
	struct drm_gem_object *obj;
	int ret;

	obj = msm_gem_new(dev, size, flags);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (name)
		msm_gem_object_set_name(obj, "%s", name);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(obj);

	return ret;
}

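/* Common setup for new objects: validate the caching flags, allocate the
 * msm_gem_object and put it on the inactive list.
 */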
Rob Clark | 05b8491 | 2013-09-28 11:28:35 -0400 | [diff] [blame] | 995 | static int msm_gem_new_impl(struct drm_device *dev, |
| 996 | uint32_t size, uint32_t flags, |
Sushmita Susheelendra | 0e08270 | 2017-06-13 16:52:54 -0600 | [diff] [blame] | 997 | struct drm_gem_object **obj, |
| 998 | bool struct_mutex_locked) |
Rob Clark | c8afe68 | 2013-06-26 12:44:06 -0400 | [diff] [blame] | 999 | { |
| 1000 | struct msm_drm_private *priv = dev->dev_private; |
| 1001 | struct msm_gem_object *msm_obj; |
Rob Clark | c8afe68 | 2013-06-26 12:44:06 -0400 | [diff] [blame] | 1002 | |
| 1003 | switch (flags & MSM_BO_CACHE_MASK) { |
| 1004 | case MSM_BO_UNCACHED: |
| 1005 | case MSM_BO_CACHED: |
| 1006 | case MSM_BO_WC: |
| 1007 | break; |
| 1008 | default: |
Mamta Shukla | 6a41da1 | 2018-10-20 23:19:26 +0530 | [diff] [blame] | 1009 | DRM_DEV_ERROR(dev->dev, "invalid cache flag: %x\n", |
Rob Clark | c8afe68 | 2013-06-26 12:44:06 -0400 | [diff] [blame] | 1010 | (flags & MSM_BO_CACHE_MASK)); |
Rob Clark | 05b8491 | 2013-09-28 11:28:35 -0400 | [diff] [blame] | 1011 | return -EINVAL; |
Rob Clark | c8afe68 | 2013-06-26 12:44:06 -0400 | [diff] [blame] | 1012 | } |
| 1013 | |
Rob Clark | 667ce33 | 2016-09-28 19:58:32 -0400 | [diff] [blame] | 1014 | msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL); |
Rob Clark | 05b8491 | 2013-09-28 11:28:35 -0400 | [diff] [blame] | 1015 | if (!msm_obj) |
| 1016 | return -ENOMEM; |
Rob Clark | c8afe68 | 2013-06-26 12:44:06 -0400 | [diff] [blame] | 1017 | |
Sushmita Susheelendra | 0e08270 | 2017-06-13 16:52:54 -0600 | [diff] [blame] | 1018 | mutex_init(&msm_obj->lock); |
| 1019 | |
Rob Clark | c8afe68 | 2013-06-26 12:44:06 -0400 | [diff] [blame] | 1020 | msm_obj->flags = flags; |
Rob Clark | 4cd33c4 | 2016-05-17 15:44:49 -0400 | [diff] [blame] | 1021 | msm_obj->madv = MSM_MADV_WILLNEED; |
Rob Clark | c8afe68 | 2013-06-26 12:44:06 -0400 | [diff] [blame] | 1022 | |
Rob Clark | 7198e6b | 2013-07-19 12:59:32 -0400 | [diff] [blame] | 1023 | INIT_LIST_HEAD(&msm_obj->submit_entry); |
Rob Clark | 4b85f7f | 2017-06-13 13:54:13 -0400 | [diff] [blame] | 1024 | INIT_LIST_HEAD(&msm_obj->vmas); |
| 1025 | |
Sushmita Susheelendra | 0e08270 | 2017-06-13 16:52:54 -0600 | [diff] [blame] | 1026 | if (struct_mutex_locked) { |
| 1027 | WARN_ON(!mutex_is_locked(&dev->struct_mutex)); |
| 1028 | list_add_tail(&msm_obj->mm_list, &priv->inactive_list); |
| 1029 | } else { |
| 1030 | mutex_lock(&dev->struct_mutex); |
| 1031 | list_add_tail(&msm_obj->mm_list, &priv->inactive_list); |
| 1032 | mutex_unlock(&dev->struct_mutex); |
| 1033 | } |
Rob Clark | c8afe68 | 2013-06-26 12:44:06 -0400 | [diff] [blame] | 1034 | |
Rob Clark | 05b8491 | 2013-09-28 11:28:35 -0400 | [diff] [blame] | 1035 | *obj = &msm_obj->base; |
| 1036 | |
| 1037 | return 0; |
| 1038 | } |
| 1039 | |
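| | /* |
| | * Worker for msm_gem_new()/msm_gem_new_locked(): picks between the two |
| | * backing-store flavors, shmem-backed pages when an MMU is available, |
| | * or a chunk of the VRAM carveout (with a fake "vma" holding its |
| | * physical address) when it is not. |
| | */ |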
Sushmita Susheelendra | 0e08270 | 2017-06-13 16:52:54 -0600 | [diff] [blame] | 1040 | static struct drm_gem_object *_msm_gem_new(struct drm_device *dev, |
| 1041 | uint32_t size, uint32_t flags, bool struct_mutex_locked) |
Rob Clark | 05b8491 | 2013-09-28 11:28:35 -0400 | [diff] [blame] | 1042 | { |
Rob Clark | f4839bd | 2017-06-13 11:50:05 -0400 | [diff] [blame] | 1043 | struct msm_drm_private *priv = dev->dev_private; |
Rob Clark | 871d812 | 2013-11-16 12:56:06 -0500 | [diff] [blame] | 1044 | struct drm_gem_object *obj = NULL; |
Rob Clark | f4839bd | 2017-06-13 11:50:05 -0400 | [diff] [blame] | 1045 | bool use_vram = false; |
Rob Clark | 05b8491 | 2013-09-28 11:28:35 -0400 | [diff] [blame] | 1046 | int ret; |
| 1047 | |
Rob Clark | 05b8491 | 2013-09-28 11:28:35 -0400 | [diff] [blame] | 1048 | size = PAGE_ALIGN(size); |
| 1049 | |
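| | /* No MMU means everything must come from the VRAM carveout; with |
| | * an MMU, only stolen/scanout buffers use it (and only if a |
| | * carveout actually exists). |
| | */ |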
Jonathan Marek | c2052a4 | 2018-11-14 17:08:04 -0500 | [diff] [blame] | 1050 | if (!msm_use_mmu(dev)) |
Rob Clark | f4839bd | 2017-06-13 11:50:05 -0400 | [diff] [blame] | 1051 | use_vram = true; |
Jonathan Marek | 86f46f2 | 2018-11-21 20:52:30 -0500 | [diff] [blame] | 1052 | else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size) |
Rob Clark | f4839bd | 2017-06-13 11:50:05 -0400 | [diff] [blame] | 1053 | use_vram = true; |
| 1054 | |
| 1055 | if (WARN_ON(use_vram && !priv->vram.size)) |
| 1056 | return ERR_PTR(-EINVAL); |
| 1057 | |
Jordan Crouse | 1a5dff5 | 2017-03-07 10:02:51 -0700 | [diff] [blame] | 1058 | /* Disallow zero-sized objects as they make the underlying |
| 1059 | * infrastructure grumpy |
| 1060 | */ |
| 1061 | if (size == 0) |
| 1062 | return ERR_PTR(-EINVAL); |
| 1063 | |
Daniel Vetter | 5ebeb02 | 2019-06-14 22:36:01 +0200 | [diff] [blame] | 1064 | ret = msm_gem_new_impl(dev, size, flags, &obj, struct_mutex_locked); |
Rob Clark | 05b8491 | 2013-09-28 11:28:35 -0400 | [diff] [blame] | 1065 | if (ret) |
| 1066 | goto fail; |
| 1067 | |
Rob Clark | f4839bd | 2017-06-13 11:50:05 -0400 | [diff] [blame] | 1068 | if (use_vram) { |
Rob Clark | 4b85f7f | 2017-06-13 13:54:13 -0400 | [diff] [blame] | 1069 | struct msm_gem_vma *vma; |
Rob Clark | f4839bd | 2017-06-13 11:50:05 -0400 | [diff] [blame] | 1070 | struct page **pages; |
Hans Verkuil | b3949a9 | 2017-07-30 14:42:36 +0200 | [diff] [blame] | 1071 | struct msm_gem_object *msm_obj = to_msm_bo(obj); |
| 1072 | |
| 1073 | mutex_lock(&msm_obj->lock); |
Rob Clark | f4839bd | 2017-06-13 11:50:05 -0400 | [diff] [blame] | 1074 | |
Rob Clark | 4b85f7f | 2017-06-13 13:54:13 -0400 | [diff] [blame] | 1075 | vma = add_vma(obj, NULL); |
Hans Verkuil | b3949a9 | 2017-07-30 14:42:36 +0200 | [diff] [blame] | 1076 | mutex_unlock(&msm_obj->lock); |
Rob Clark | 4b85f7f | 2017-06-13 13:54:13 -0400 | [diff] [blame] | 1077 | if (IS_ERR(vma)) { |
| 1078 | ret = PTR_ERR(vma); |
| 1079 | goto fail; |
| 1080 | } |
| 1081 | |
| 1082 | to_msm_bo(obj)->vram_node = &vma->node; |
| 1083 | |
Rob Clark | f4839bd | 2017-06-13 11:50:05 -0400 | [diff] [blame] | 1084 | drm_gem_private_object_init(dev, obj, size); |
| 1085 | |
Rob Clark | f4839bd | 2017-06-13 11:50:05 -0400 | [diff] [blame] | 1086 | pages = get_pages(obj); |
| 1087 | if (IS_ERR(pages)) { |
| 1088 | ret = PTR_ERR(pages); |
| 1089 | goto fail; |
| 1090 | } |
Rob Clark | 4b85f7f | 2017-06-13 13:54:13 -0400 | [diff] [blame] | 1091 | |
| 1092 | vma->iova = physaddr(obj); |
Rob Clark | f4839bd | 2017-06-13 11:50:05 -0400 | [diff] [blame] | 1093 | } else { |
Rob Clark | 871d812 | 2013-11-16 12:56:06 -0500 | [diff] [blame] | 1094 | ret = drm_gem_object_init(dev, obj, size); |
| 1095 | if (ret) |
| 1096 | goto fail; |
Lucas Stach | 0abdba4 | 2019-02-28 07:23:29 +0100 | [diff] [blame] | 1097 | /* |
| 1098 | * Our buffers are kept pinned, so allocating them from the |
| 1099 | * MOVABLE zone is a really bad idea, and conflicts with CMA. |
| 1100 | * See comments above new_inode() for why this is required _and_ |
| 1101 | * expected if you're going to pin these pages. |
| 1102 | */ |
| 1103 | mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER); |
Rob Clark | 871d812 | 2013-11-16 12:56:06 -0500 | [diff] [blame] | 1104 | } |
Rob Clark | 05b8491 | 2013-09-28 11:28:35 -0400 | [diff] [blame] | 1105 | |
| 1106 | return obj; |
| 1107 | |
| 1108 | fail: |
Steve Kowalik | dc9a9b3 | 2018-01-26 14:55:54 +1100 | [diff] [blame] | 1109 | drm_gem_object_put_unlocked(obj); |
Rob Clark | 05b8491 | 2013-09-28 11:28:35 -0400 | [diff] [blame] | 1110 | return ERR_PTR(ret); |
| 1111 | } |
| 1112 | |
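| | /* |
| | * Thin wrappers: msm_gem_new_locked() for callers that already hold |
| | * dev->struct_mutex, msm_gem_new() for everyone else. |
| | */ |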
Sushmita Susheelendra | 0e08270 | 2017-06-13 16:52:54 -0600 | [diff] [blame] | 1113 | struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev, |
| 1114 | uint32_t size, uint32_t flags) |
| 1115 | { |
| 1116 | return _msm_gem_new(dev, size, flags, true); |
| 1117 | } |
| 1118 | |
| 1119 | struct drm_gem_object *msm_gem_new(struct drm_device *dev, |
| 1120 | uint32_t size, uint32_t flags) |
| 1121 | { |
| 1122 | return _msm_gem_new(dev, size, flags, false); |
| 1123 | } |
| 1124 | |
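| | /* |
| | * dma-buf import: wrap an externally allocated sg_table in an msm BO. |
| | * The backing pages belong to the exporter, hence the "private" GEM |
| | * init (no shmem object of our own). |
| | */ |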
Rob Clark | 05b8491 | 2013-09-28 11:28:35 -0400 | [diff] [blame] | 1125 | struct drm_gem_object *msm_gem_import(struct drm_device *dev, |
Rob Clark | 79f0e20 | 2016-03-16 12:40:35 -0400 | [diff] [blame] | 1126 | struct dma_buf *dmabuf, struct sg_table *sgt) |
Rob Clark | 05b8491 | 2013-09-28 11:28:35 -0400 | [diff] [blame] | 1127 | { |
| 1128 | struct msm_gem_object *msm_obj; |
| 1129 | struct drm_gem_object *obj; |
Rob Clark | 79f0e20 | 2016-03-16 12:40:35 -0400 | [diff] [blame] | 1130 | uint32_t size; |
Rob Clark | 05b8491 | 2013-09-28 11:28:35 -0400 | [diff] [blame] | 1131 | int ret, npages; |
| 1132 | |
Rob Clark | 871d812 | 2013-11-16 12:56:06 -0500 | [diff] [blame] | 1133 | /* if we don't have an IOMMU, don't bother pretending we can import: */ |
Jonathan Marek | c2052a4 | 2018-11-14 17:08:04 -0500 | [diff] [blame] | 1134 | if (!msm_use_mmu(dev)) { |
Mamta Shukla | 6a41da1 | 2018-10-20 23:19:26 +0530 | [diff] [blame] | 1135 | DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n"); |
Rob Clark | 871d812 | 2013-11-16 12:56:06 -0500 | [diff] [blame] | 1136 | return ERR_PTR(-EINVAL); |
| 1137 | } |
| 1138 | |
Rob Clark | 79f0e20 | 2016-03-16 12:40:35 -0400 | [diff] [blame] | 1139 | size = PAGE_ALIGN(dmabuf->size); |
Rob Clark | 05b8491 | 2013-09-28 11:28:35 -0400 | [diff] [blame] | 1140 | |
Daniel Vetter | 5ebeb02 | 2019-06-14 22:36:01 +0200 | [diff] [blame] | 1141 | ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj, false); |
Rob Clark | 05b8491 | 2013-09-28 11:28:35 -0400 | [diff] [blame] | 1142 | if (ret) |
| 1143 | goto fail; |
| 1144 | |
| 1145 | drm_gem_private_object_init(dev, obj, size); |
| 1146 | |
| 1147 | npages = size / PAGE_SIZE; |
| 1148 | |
| 1149 | msm_obj = to_msm_bo(obj); |
Sushmita Susheelendra | 0e08270 | 2017-06-13 16:52:54 -0600 | [diff] [blame] | 1150 | mutex_lock(&msm_obj->lock); |
Rob Clark | 05b8491 | 2013-09-28 11:28:35 -0400 | [diff] [blame] | 1151 | msm_obj->sgt = sgt; |
Michal Hocko | 2098105 | 2017-05-17 14:23:12 +0200 | [diff] [blame] | 1152 | msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL); |
Rob Clark | 05b8491 | 2013-09-28 11:28:35 -0400 | [diff] [blame] | 1153 | if (!msm_obj->pages) { |
Sushmita Susheelendra | 0e08270 | 2017-06-13 16:52:54 -0600 | [diff] [blame] | 1154 | mutex_unlock(&msm_obj->lock); |
Rob Clark | 05b8491 | 2013-09-28 11:28:35 -0400 | [diff] [blame] | 1155 | ret = -ENOMEM; |
| 1156 | goto fail; |
| 1157 | } |
| 1158 | |
| 1159 | ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages); |
Sushmita Susheelendra | 0e08270 | 2017-06-13 16:52:54 -0600 | [diff] [blame] | 1160 | if (ret) { |
| 1161 | mutex_unlock(&msm_obj->lock); |
Rob Clark | 05b8491 | 2013-09-28 11:28:35 -0400 | [diff] [blame] | 1162 | goto fail; |
Sushmita Susheelendra | 0e08270 | 2017-06-13 16:52:54 -0600 | [diff] [blame] | 1163 | } |
Rob Clark | 05b8491 | 2013-09-28 11:28:35 -0400 | [diff] [blame] | 1164 | |
Sushmita Susheelendra | 0e08270 | 2017-06-13 16:52:54 -0600 | [diff] [blame] | 1165 | mutex_unlock(&msm_obj->lock); |
Rob Clark | c8afe68 | 2013-06-26 12:44:06 -0400 | [diff] [blame] | 1166 | return obj; |
| 1167 | |
| 1168 | fail: |
Steve Kowalik | dc9a9b3 | 2018-01-26 14:55:54 +1100 | [diff] [blame] | 1169 | drm_gem_object_put_unlocked(obj); |
Rob Clark | c8afe68 | 2013-06-26 12:44:06 -0400 | [diff] [blame] | 1170 | return ERR_PTR(ret); |
| 1171 | } |
Jordan Crouse | 8223286 | 2017-07-27 10:42:40 -0600 | [diff] [blame] | 1172 | |
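| | /* |
| | * Convenience helper for kernel-internal buffers: allocate a BO, |
| | * optionally pin it into @aspace (when @iova is non-NULL) and map it |
| | * into the kernel.  Returns the kernel vaddr; @bo is an optional |
| | * out-param for the GEM object itself. |
| | */ |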
| 1173 | static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size, |
| 1174 | uint32_t flags, struct msm_gem_address_space *aspace, |
| 1175 | struct drm_gem_object **bo, uint64_t *iova, bool locked) |
| 1176 | { |
| 1177 | void *vaddr; |
| 1178 | struct drm_gem_object *obj = _msm_gem_new(dev, size, flags, locked); |
| 1179 | int ret; |
| 1180 | |
| 1181 | if (IS_ERR(obj)) |
| 1182 | return ERR_CAST(obj); |
| 1183 | |
| 1184 | if (iova) { |
Jordan Crouse | 9fe041f | 2018-11-07 15:35:50 -0700 | [diff] [blame] | 1185 | ret = msm_gem_get_and_pin_iova(obj, aspace, iova); |
Jordan Crouse | 93f7abf | 2018-11-02 09:25:19 -0600 | [diff] [blame] | 1186 | if (ret) |
| 1187 | goto err; |
Jordan Crouse | 8223286 | 2017-07-27 10:42:40 -0600 | [diff] [blame] | 1188 | } |
| 1189 | |
| 1190 | vaddr = msm_gem_get_vaddr(obj); |
Wei Yongjun | c9811d0 | 2017-10-11 11:36:56 +0000 | [diff] [blame] | 1191 | if (IS_ERR(vaddr)) { |
Jordan Crouse | 7ad0e8c | 2018-11-07 15:35:51 -0700 | [diff] [blame] | 1192 | msm_gem_unpin_iova(obj, aspace); |
Jordan Crouse | 93f7abf | 2018-11-02 09:25:19 -0600 | [diff] [blame] | 1193 | ret = PTR_ERR(vaddr); |
| 1194 | goto err; |
Jordan Crouse | 8223286 | 2017-07-27 10:42:40 -0600 | [diff] [blame] | 1195 | } |
| 1196 | |
| 1197 | if (bo) |
| 1198 | *bo = obj; |
| 1199 | |
| 1200 | return vaddr; |
Jordan Crouse | 93f7abf | 2018-11-02 09:25:19 -0600 | [diff] [blame] | 1201 | err: |
| 1202 | if (locked) |
| 1203 | drm_gem_object_put(obj); |
| 1204 | else |
| 1205 | drm_gem_object_put_unlocked(obj); |
| 1206 | |
| 1207 | return ERR_PTR(ret); |
Jordan Crouse | 8223286 | 2017-07-27 10:42:40 -0600 | [diff] [blame] | 1209 | } |
| 1210 | |
| 1211 | void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size, |
| 1212 | uint32_t flags, struct msm_gem_address_space *aspace, |
| 1213 | struct drm_gem_object **bo, uint64_t *iova) |
| 1214 | { |
| 1215 | return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, false); |
| 1216 | } |
| 1217 | |
| 1218 | void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size, |
| 1219 | uint32_t flags, struct msm_gem_address_space *aspace, |
| 1220 | struct drm_gem_object **bo, uint64_t *iova) |
| 1221 | { |
| 1222 | return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, true); |
| 1223 | } |
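| | /* |
| | * Typical pairing with msm_gem_kernel_put() (sketch only, names |
| | * assumed): |
| | * |
| | *   struct drm_gem_object *bo; |
| | *   uint64_t iova; |
| | *   void *ptr = msm_gem_kernel_new(dev, size, MSM_BO_WC, |
| | *           aspace, &bo, &iova); |
| | *   if (IS_ERR(ptr)) |
| | *           return PTR_ERR(ptr); |
| | *   ... use ptr / iova ... |
| | *   msm_gem_kernel_put(bo, aspace, false); |
| | */ |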
Jordan Crouse | 1e29dff | 2018-11-07 15:35:46 -0700 | [diff] [blame] | 1224 | |
| 1225 | void msm_gem_kernel_put(struct drm_gem_object *bo, |
| 1226 | struct msm_gem_address_space *aspace, bool locked) |
| 1227 | { |
| 1228 | if (IS_ERR_OR_NULL(bo)) |
| 1229 | return; |
| 1230 | |
| 1231 | msm_gem_put_vaddr(bo); |
Jordan Crouse | 7ad0e8c | 2018-11-07 15:35:51 -0700 | [diff] [blame] | 1232 | msm_gem_unpin_iova(bo, aspace); |
Jordan Crouse | 1e29dff | 2018-11-07 15:35:46 -0700 | [diff] [blame] | 1233 | |
| 1234 | if (locked) |
| 1235 | drm_gem_object_put(bo); |
| 1236 | else |
| 1237 | drm_gem_object_put_unlocked(bo); |
| 1238 | } |
Jordan Crouse | 0815d77 | 2018-11-07 15:35:52 -0700 | [diff] [blame] | 1239 | |
| 1240 | void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...) |
| 1241 | { |
| 1242 | struct msm_gem_object *msm_obj = to_msm_bo(bo); |
| 1243 | va_list ap; |
| 1244 | |
| 1245 | if (!fmt) |
| 1246 | return; |
| 1247 | |
| 1248 | va_start(ap, fmt); |
| 1249 | vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap); |
| 1250 | va_end(ap); |
| 1251 | } |
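| | /* The name shows up in debug output for the BO, e.g. (sketch): |
| | * |
| | *   msm_gem_object_set_name(ring->bo, "ring%d", id); |
| | */ |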