// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * Copyright (c) 2012 David Airlie <airlied@linux.ie>
 * Copyright (c) 2013 David Herrmann <dh.herrmann@gmail.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#include <drm/drm_mm.h>
#include <drm/drm_vma_manager.h>

/**
 * DOC: vma offset manager
 *
 * The vma-manager is responsible for mapping arbitrary driver-dependent
 * memory regions into the linear user address-space. It provides offsets to
 * the caller, which can then be used on the address_space of the drm-device.
 * It makes sure regions do not overlap, sizes them appropriately and does not
 * confuse mm-core with inconsistent fake vm_pgoff fields.
 * Drivers shouldn't use this for object placement in VMEM. This manager should
 * only be used to manage mappings into linear user-space VMs.
 *
 * We use drm_mm as backend to manage object allocations. But it is highly
 * optimized for alloc/free calls, not lookups. Hence, we use an rb-tree to
 * speed up offset lookups.
 *
 * You must not use multiple offset managers on a single address_space.
 * Otherwise, mm-core will be unable to tear down memory mappings as the VM
 * will no longer be linear.
 *
 * This offset manager works on page-based addresses. That is, every argument
 * and return code (with the exception of drm_vma_node_offset_addr()) is given
 * in number of pages, not number of bytes. This means that object sizes and
 * offsets must always be page-aligned (as usual).
 * If you want to get a valid byte-based user-space address for a given offset,
 * please see drm_vma_node_offset_addr().
 *
 * In addition to offset management, the vma offset manager also handles access
 * management. For every open-file context that is allowed to access a given
 * node, you must call drm_vma_node_allow(). Otherwise, an mmap() call on this
 * open-file with the offset of the node will fail with -EACCES. To revoke
 * access again, use drm_vma_node_revoke(). However, the caller is responsible
 * for destroying already existing mappings, if required.
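 *
 * A minimal lifecycle sketch (illustrative only; my_dev and my_obj are
 * hypothetical driver structures embedding the manager and a
 * &struct drm_vma_offset_node, and the DRM_FILE_PAGE_OFFSET_* defaults come
 * from drm_vma_manager.h)::
 *
 *     drm_vma_offset_manager_init(&my_dev->vma_mgr, DRM_FILE_PAGE_OFFSET_START,
 *                                 DRM_FILE_PAGE_OFFSET_SIZE);
 *
 *     ret = drm_vma_offset_add(&my_dev->vma_mgr, &my_obj->vma_node,
 *                              my_obj->size >> PAGE_SHIFT);
 *     if (!ret)
 *         offset = drm_vma_node_offset_addr(&my_obj->vma_node);
 *
 *     drm_vma_offset_remove(&my_dev->vma_mgr, &my_obj->vma_node);
 *     drm_vma_offset_manager_destroy(&my_dev->vma_mgr);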
 */

/**
 * drm_vma_offset_manager_init - Initialize new offset-manager
 * @mgr: Manager object
 * @page_offset: Offset of available memory area (page-based)
 * @size: Size of available address space range (page-based)
 *
 * Initialize a new offset-manager. The offset and area size available for the
 * manager are given as @page_offset and @size. Both are interpreted as
 * page-numbers, not bytes.
 *
 * Adding/removing nodes from the manager is locked internally and protected
 * against concurrent access. However, node allocation and destruction are left
 * to the caller. While calling into the vma-manager, a given node must always
 * be guaranteed to be referenced.
 */
void drm_vma_offset_manager_init(struct drm_vma_offset_manager *mgr,
				 unsigned long page_offset, unsigned long size)
{
	rwlock_init(&mgr->vm_lock);
	drm_mm_init(&mgr->vm_addr_space_mm, page_offset, size);
}
EXPORT_SYMBOL(drm_vma_offset_manager_init);

/**
 * drm_vma_offset_manager_destroy() - Destroy offset manager
 * @mgr: Manager object
 *
 * Destroy an object manager which was previously created via
 * drm_vma_offset_manager_init(). The caller must remove all allocated nodes
 * before destroying the manager. Otherwise, drm_mm will refuse to free the
 * requested resources.
 *
 * The manager must not be accessed after this function is called.
 */
void drm_vma_offset_manager_destroy(struct drm_vma_offset_manager *mgr)
{
	drm_mm_takedown(&mgr->vm_addr_space_mm);
}
EXPORT_SYMBOL(drm_vma_offset_manager_destroy);

/**
 * drm_vma_offset_lookup_locked() - Find node in offset space
 * @mgr: Manager object
 * @start: Start address for object (page-based)
 * @pages: Size of object (page-based)
 *
 * Find a node given a start address and object size. This returns the _best_
 * match for the given node. That is, @start may point somewhere into a valid
 * region and the given node will be returned, as long as the node spans the
 * whole requested area (given the size in number of pages as @pages).
 *
 * Note that before lookup, the vma offset manager lookup lock must be acquired
 * with drm_vma_offset_lock_lookup(). See there for an example. This can then be
 * used to implement weakly referenced lookups using kref_get_unless_zero().
 *
 * Example (my_obj being a hypothetical driver structure embedding the node as
 * member vma_node and kref-counted via member ref)::
 *
 *     drm_vma_offset_lock_lookup(mgr);
 *     node = drm_vma_offset_lookup_locked(mgr, start, pages);
 *     if (node)
 *         kref_get_unless_zero(&container_of(node, struct my_obj,
 *                                            vma_node)->ref);
 *     drm_vma_offset_unlock_lookup(mgr);
 *
 * RETURNS:
 * Returns NULL if no suitable node can be found. Otherwise, the best match
 * is returned. It's the caller's responsibility to make sure the node doesn't
 * get destroyed before the caller can access it.
 */
struct drm_vma_offset_node *drm_vma_offset_lookup_locked(struct drm_vma_offset_manager *mgr,
							 unsigned long start,
							 unsigned long pages)
{
	struct drm_mm_node *node, *best;
	struct rb_node *iter;
	unsigned long offset;

	iter = mgr->vm_addr_space_mm.interval_tree.rb_root.rb_node;
	best = NULL;

	/* binary search for the node with the highest start address that is
	 * still <= @start */
	while (likely(iter)) {
		node = rb_entry(iter, struct drm_mm_node, rb);
		offset = node->start;
		if (start >= offset) {
			iter = iter->rb_right;
			best = node;
			if (start == offset)
				break;
		} else {
			iter = iter->rb_left;
		}
	}

	/* verify that the node spans the requested area */
	if (best) {
		offset = best->start + best->size;
		if (offset < start + pages)
			best = NULL;
	}

	if (!best)
		return NULL;

	return container_of(best, struct drm_vma_offset_node, vm_node);
}
EXPORT_SYMBOL(drm_vma_offset_lookup_locked);

/**
 * drm_vma_offset_add() - Add offset node to manager
 * @mgr: Manager object
 * @node: Node to be added
 * @pages: Allocation size visible to user-space (in number of pages)
 *
 * Add a node to the offset-manager. If the node was already added, this does
 * nothing and returns 0. @pages is the size of the object given in number of
 * pages.
 * After this call succeeds, you can access the offset of the node until it
 * is removed again.
 *
 * If this call fails, it is safe to retry the operation or call
 * drm_vma_offset_remove(), anyway. However, no cleanup is required in that
 * case.
 *
 * @pages is not required to be the same size as the underlying memory object
 * that you want to map. It only limits the size that user-space can map into
 * its address space.
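 *
 * A typical call-site sketch (my_obj being a hypothetical driver structure
 * embedding @node as member vma_node, and args a hypothetical ioctl argument
 * struct; the byte offset handed back is what user-space passes to mmap())::
 *
 *     ret = drm_vma_offset_add(mgr, &my_obj->vma_node,
 *                              my_obj->size >> PAGE_SHIFT);
 *     if (ret)
 *         return ret;
 *     args->offset = drm_vma_node_offset_addr(&my_obj->vma_node);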
 *
 * RETURNS:
 * 0 on success, negative error code on failure.
 */
int drm_vma_offset_add(struct drm_vma_offset_manager *mgr,
		       struct drm_vma_offset_node *node, unsigned long pages)
{
	int ret = 0;

	write_lock(&mgr->vm_lock);

	if (!drm_mm_node_allocated(&node->vm_node))
		ret = drm_mm_insert_node(&mgr->vm_addr_space_mm,
					 &node->vm_node, pages);

	write_unlock(&mgr->vm_lock);

	return ret;
}
EXPORT_SYMBOL(drm_vma_offset_add);

/**
 * drm_vma_offset_remove() - Remove offset node from manager
 * @mgr: Manager object
 * @node: Node to be removed
 *
 * Remove a node from the offset manager. If the node wasn't added before, this
 * does nothing. After this call returns, the offset and size will be 0 until a
 * new offset is allocated via drm_vma_offset_add() again. Helper functions like
 * drm_vma_node_start() and drm_vma_node_offset_addr() will return 0 if no
 * offset is allocated.
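 *
 * For instance, the documented reset can be observed directly after a removal
 * (illustrative sketch only, assuming @node was previously added)::
 *
 *     drm_vma_offset_remove(mgr, node);
 *     WARN_ON(drm_vma_node_start(node) != 0);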
 */
void drm_vma_offset_remove(struct drm_vma_offset_manager *mgr,
			   struct drm_vma_offset_node *node)
{
	write_lock(&mgr->vm_lock);

	if (drm_mm_node_allocated(&node->vm_node)) {
		drm_mm_remove_node(&node->vm_node);
		memset(&node->vm_node, 0, sizeof(node->vm_node));
	}

	write_unlock(&mgr->vm_lock);
}
EXPORT_SYMBOL(drm_vma_offset_remove);

/**
 * drm_vma_node_allow - Add open-file to list of allowed users
 * @node: Node to modify
 * @tag: Tag of file to add
 *
 * Add @tag to the list of allowed open-files for this node. If @tag is
 * already on this list, the ref-count is incremented.
 *
 * The list of allowed-users is preserved across drm_vma_offset_add() and
 * drm_vma_offset_remove() calls. You may even call it if the node is currently
 * not added to any offset-manager.
 *
 * You must remove all open-files the same number of times as you added them
 * before destroying the node. Otherwise, you will leak memory.
 *
 * This is locked against concurrent access internally.
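 *
 * A typical pattern grants access when a handle is created for an open-file
 * and drops it again when the handle is destroyed (sketch; my_obj is a
 * hypothetical driver structure embedding @node as member vma_node, and
 * file_priv the &struct drm_file of the caller)::
 *
 *     ret = drm_vma_node_allow(&my_obj->vma_node, file_priv);
 *     if (ret)
 *         return ret;
 *
 *     ...
 *
 *     drm_vma_node_revoke(&my_obj->vma_node, file_priv);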
 *
 * RETURNS:
 * 0 on success, negative error code on internal failure (out-of-mem)
 */
int drm_vma_node_allow(struct drm_vma_offset_node *node, struct drm_file *tag)
{
	struct rb_node **iter;
	struct rb_node *parent = NULL;
	struct drm_vma_offset_file *new, *entry;
	int ret = 0;

	/* Preallocate entry to avoid atomic allocations below. It is quite
	 * unlikely that an open-file is added twice to a single node so we
	 * don't optimize for this case. OOM is checked below only if the entry
	 * is actually used. */
	new = kmalloc(sizeof(*entry), GFP_KERNEL);

	write_lock(&node->vm_lock);

	iter = &node->vm_files.rb_node;

	while (likely(*iter)) {
		parent = *iter;
		entry = rb_entry(*iter, struct drm_vma_offset_file, vm_rb);

		if (tag == entry->vm_tag) {
			entry->vm_count++;
			goto unlock;
		} else if (tag > entry->vm_tag) {
			iter = &(*iter)->rb_right;
		} else {
			iter = &(*iter)->rb_left;
		}
	}

	if (!new) {
		ret = -ENOMEM;
		goto unlock;
	}

	new->vm_tag = tag;
	new->vm_count = 1;
	rb_link_node(&new->vm_rb, parent, iter);
	rb_insert_color(&new->vm_rb, &node->vm_files);
	new = NULL;

unlock:
	write_unlock(&node->vm_lock);
	kfree(new);
	return ret;
}
EXPORT_SYMBOL(drm_vma_node_allow);

/**
 * drm_vma_node_revoke - Remove open-file from list of allowed users
 * @node: Node to modify
 * @tag: Tag of file to remove
 *
 * Decrement the ref-count of @tag in the list of allowed open-files on @node.
 * If the ref-count drops to zero, remove @tag from the list. You must call
 * this once for every drm_vma_node_allow() on @tag.
 *
 * This is locked against concurrent access internally.
 *
 * If @tag is not on the list, nothing is done.
 */
void drm_vma_node_revoke(struct drm_vma_offset_node *node,
			 struct drm_file *tag)
{
	struct drm_vma_offset_file *entry;
	struct rb_node *iter;

	write_lock(&node->vm_lock);

	iter = node->vm_files.rb_node;
	while (likely(iter)) {
		entry = rb_entry(iter, struct drm_vma_offset_file, vm_rb);
		if (tag == entry->vm_tag) {
			if (!--entry->vm_count) {
				rb_erase(&entry->vm_rb, &node->vm_files);
				kfree(entry);
			}
			break;
		} else if (tag > entry->vm_tag) {
			iter = iter->rb_right;
		} else {
			iter = iter->rb_left;
		}
	}

	write_unlock(&node->vm_lock);
}
EXPORT_SYMBOL(drm_vma_node_revoke);

/**
 * drm_vma_node_is_allowed - Check whether an open-file is granted access
 * @node: Node to check
 * @tag: Tag of file to check
 *
 * Search the list in @node to check whether @tag is currently on the list of
 * allowed open-files (see drm_vma_node_allow()).
 *
 * This is locked against concurrent access internally.
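 *
 * A minimal access check in a driver's mmap() path might look like this
 * (sketch; my_obj is a hypothetical driver structure embedding @node as
 * member vma_node)::
 *
 *     if (!drm_vma_node_is_allowed(&my_obj->vma_node, file_priv))
 *         return -EACCES;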
 *
 * RETURNS:
 * true if @tag is on the list
 */
bool drm_vma_node_is_allowed(struct drm_vma_offset_node *node,
			     struct drm_file *tag)
{
	struct drm_vma_offset_file *entry;
	struct rb_node *iter;

	read_lock(&node->vm_lock);

	iter = node->vm_files.rb_node;
	while (likely(iter)) {
		entry = rb_entry(iter, struct drm_vma_offset_file, vm_rb);
		if (tag == entry->vm_tag)
			break;
		else if (tag > entry->vm_tag)
			iter = iter->rb_right;
		else
			iter = iter->rb_left;
	}

	read_unlock(&node->vm_lock);

	/* non-NULL iff @tag was found in the rb-tree */
	return iter;
}
EXPORT_SYMBOL(drm_vma_node_is_allowed);