Noralf Trønnes | 2194a63 | 2019-03-12 19:43:44 -0500 | [diff] [blame] | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
| 2 | |
| 3 | #ifndef __DRM_GEM_SHMEM_HELPER_H__ |
| 4 | #define __DRM_GEM_SHMEM_HELPER_H__ |
| 5 | |
| 6 | #include <linux/fs.h> |
| 7 | #include <linux/mm.h> |
| 8 | #include <linux/mutex.h> |
| 9 | |
| 10 | #include <drm/drm_file.h> |
| 11 | #include <drm/drm_gem.h> |
| 12 | #include <drm/drm_ioctl.h> |
| 13 | #include <drm/drm_prime.h> |
| 14 | |
| 15 | struct dma_buf_attachment; |
| 16 | struct drm_mode_create_dumb; |
| 17 | struct drm_printer; |
| 18 | struct sg_table; |
| 19 | |
/**
 * struct drm_gem_shmem_object - GEM object backed by shmem
 */
struct drm_gem_shmem_object {
	/**
	 * @base: Base GEM object
	 */
	struct drm_gem_object base;

	/**
	 * @pages_lock: Protects the page table and use count
	 */
	struct mutex pages_lock;

	/**
	 * @pages: Page table
	 */
	struct page **pages;

	/**
	 * @pages_use_count:
	 *
	 * Reference count on the pages table.
	 * The pages are put when the count reaches zero.
	 */
	unsigned int pages_use_count;

	/**
	 * @madv: State for madvise
	 *
	 * 0 is active/inuse.
	 * A negative value means the object is purged.
	 * Positive values are driver specific and not used by the helpers.
	 */
	int madv;

	/**
	 * @madv_list: List entry for madvise tracking
	 *
	 * Typically used by drivers to track purgeable objects
	 */
	struct list_head madv_list;

	/**
	 * @pages_mark_dirty_on_put:
	 *
	 * Mark pages as dirty when they are put.
	 */
	unsigned int pages_mark_dirty_on_put : 1;

	/**
	 * @pages_mark_accessed_on_put:
	 *
	 * Mark pages as accessed when they are put.
	 */
	unsigned int pages_mark_accessed_on_put : 1;

	/**
	 * @sgt: Scatter/gather table for imported PRIME buffers
	 */
	struct sg_table *sgt;

	/**
	 * @vmap_lock: Protects the vmap address and use count
	 */
	struct mutex vmap_lock;

	/**
	 * @vaddr: Kernel virtual address of the backing memory
	 */
	void *vaddr;

	/**
	 * @vmap_use_count:
	 *
	 * Reference count on the virtual address.
	 * The address is unmapped when the count reaches zero.
	 */
	unsigned int vmap_use_count;

	/**
	 * @map_wc: map object write-combined (instead of using shmem defaults).
	 */
	bool map_wc;
};
| 105 | |
/**
 * to_drm_gem_shmem_obj - Convert a &drm_gem_object into its containing
 *	shmem object
 * @obj: GEM object embedded as @base in a &struct drm_gem_shmem_object
 */
#define to_drm_gem_shmem_obj(obj) \
	container_of(obj, struct drm_gem_shmem_object, base)
| 108 | |
/* Object lifetime: allocation and final cleanup. */
struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size);
void drm_gem_shmem_free_object(struct drm_gem_object *obj);

/*
 * Backing-page, pin and kernel-mapping management; these pair up
 * (get/put, pin/unpin, vmap/vunmap) and maintain the use counts declared
 * in struct drm_gem_shmem_object.
 */
int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem);
void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem);
int drm_gem_shmem_pin(struct drm_gem_object *obj);
void drm_gem_shmem_unpin(struct drm_gem_object *obj);
int drm_gem_shmem_vmap(struct drm_gem_object *obj, struct dma_buf_map *map);
void drm_gem_shmem_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map);

/* Update the madvise state (see &drm_gem_shmem_object.madv). */
int drm_gem_shmem_madvise(struct drm_gem_object *obj, int madv);
| 120 | |
| 121 | static inline bool drm_gem_shmem_is_purgeable(struct drm_gem_shmem_object *shmem) |
| 122 | { |
| 123 | return (shmem->madv > 0) && |
| 124 | !shmem->vmap_use_count && shmem->sgt && |
| 125 | !shmem->base.dma_buf && !shmem->base.import_attach; |
| 126 | } |
| 127 | |
/*
 * Purge the backing storage of an object; the _locked variant presumably
 * expects the relevant lock already held by the caller — see the
 * definitions in drm_gem_shmem_helper.c for the exact locking contract.
 */
void drm_gem_shmem_purge_locked(struct drm_gem_object *obj);
bool drm_gem_shmem_purge(struct drm_gem_object *obj);

/* Object creation that also installs a userspace handle for @file_priv. */
struct drm_gem_shmem_object *
drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
				 struct drm_device *dev, size_t size,
				 uint32_t *handle);

/* &drm_driver.dumb_create implementation for shmem-backed devices. */
int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
			      struct drm_mode_create_dumb *args);

/* Map the object's backing pages into a userspace VMA. */
int drm_gem_shmem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);

/* Print shmem-specific object state into @p (debugfs helper). */
void drm_gem_shmem_print_info(struct drm_printer *p, unsigned int indent,
			      const struct drm_gem_object *obj);

/* PRIME import/export: scatter/gather table handling. */
struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_object *obj);
struct drm_gem_object *
drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
				    struct dma_buf_attachment *attach,
				    struct sg_table *sgt);

struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_object *obj);
| 151 | |
/**
 * DRM_GEM_SHMEM_DRIVER_OPS - Default shmem GEM operations
 *
 * This macro provides a shortcut for setting the shmem GEM operations in
 * the &drm_driver structure. Use it inside the driver's &drm_driver
 * initializer to wire up PRIME import/export, mmap and dumb-buffer
 * creation to the shmem helpers.
 */
#define DRM_GEM_SHMEM_DRIVER_OPS \
	.prime_handle_to_fd	= drm_gem_prime_handle_to_fd, \
	.prime_fd_to_handle	= drm_gem_prime_fd_to_handle, \
	.gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table, \
	.gem_prime_mmap		= drm_gem_prime_mmap, \
	.dumb_create		= drm_gem_shmem_dumb_create
| 164 | |
| 165 | #endif /* __DRM_GEM_SHMEM_HELPER_H__ */ |