/**************************************************************************
 *
 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#ifndef _VMWGFX_DRV_H_
#define _VMWGFX_DRV_H_

#include "vmwgfx_reg.h"
#include <drm/drmP.h>
#include <drm/vmwgfx_drm.h>
#include <drm/drm_hashtab.h>
#include <drm/drm_auth.h>
#include <linux/suspend.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_object.h>
#include <drm/ttm/ttm_lock.h>
#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_module.h>
#include "vmwgfx_fence.h"
#include <linux/sync_file.h>

#define VMWGFX_DRIVER_NAME "vmwgfx"
#define VMWGFX_DRIVER_DATE "20180322"
#define VMWGFX_DRIVER_MAJOR 2
#define VMWGFX_DRIVER_MINOR 14
#define VMWGFX_DRIVER_PATCHLEVEL 1
#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
#define VMWGFX_MAX_RELOCATIONS 2048
#define VMWGFX_MAX_VALIDATIONS 2048
#define VMWGFX_MAX_DISPLAYS 16
#define VMWGFX_CMD_BOUNCE_INIT_SIZE 32768
#define VMWGFX_ENABLE_SCREEN_TARGET_OTABLE 1

/*
 * Perhaps we should have sysfs entries for these.
 */
#define VMWGFX_NUM_GB_CONTEXT 256
#define VMWGFX_NUM_GB_SHADER 20000
#define VMWGFX_NUM_GB_SURFACE 32768
#define VMWGFX_NUM_GB_SCREEN_TARGET VMWGFX_MAX_DISPLAYS
#define VMWGFX_NUM_DXCONTEXT 256
#define VMWGFX_NUM_DXQUERY 512
#define VMWGFX_NUM_MOB (VMWGFX_NUM_GB_CONTEXT +\
			VMWGFX_NUM_GB_SHADER +\
			VMWGFX_NUM_GB_SURFACE +\
			VMWGFX_NUM_GB_SCREEN_TARGET)

#define VMW_PL_GMR (TTM_PL_PRIV + 0)
#define VMW_PL_FLAG_GMR (TTM_PL_FLAG_PRIV << 0)
#define VMW_PL_MOB (TTM_PL_PRIV + 1)
#define VMW_PL_FLAG_MOB (TTM_PL_FLAG_PRIV << 1)

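/*
 * Illustrative sketch only: the VMW_PL_* values above extend TTM's
 * private placement space, so a driver placement targeting GMR memory
 * could be built as below. The caching flag chosen here is an
 * assumption, not something this header mandates.
 *
 *	static const struct ttm_place gmr_place_example = {
 *		.fpfn = 0,
 *		.lpfn = 0,
 *		.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
 *	};
 */
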
#define VMW_RES_CONTEXT ttm_driver_type0
#define VMW_RES_SURFACE ttm_driver_type1
#define VMW_RES_STREAM ttm_driver_type2
#define VMW_RES_FENCE ttm_driver_type3
#define VMW_RES_SHADER ttm_driver_type4

struct vmw_fpriv {
	struct drm_master *locked_master;
	struct ttm_object_file *tfile;
	bool gb_aware;
};

struct vmw_buffer_object {
	struct ttm_buffer_object base;
	struct list_head res_list;
	s32 pin_count;
	/* Not ref-counted. Protected by binding_mutex */
	struct vmw_resource *dx_query_ctx;
	/* Protected by reservation */
	struct ttm_bo_kmap_obj map;
};

/**
 * struct vmw_validate_buffer - Carries validation info about buffers.
 *
 * @base: Validation info for TTM.
 * @hash: Hash entry for quick lookup of the TTM buffer object.
 * @validate_as_mob: Whether to validate this buffer as a MOB.
 *
 * This structure also contains driver private validation info
 * on top of the info needed by TTM.
 */
struct vmw_validate_buffer {
	struct ttm_validate_buffer base;
	struct drm_hash_item hash;
	bool validate_as_mob;
};

struct vmw_res_func;
struct vmw_resource {
	struct kref kref;
	struct vmw_private *dev_priv;
	int id;
	bool avail;
	unsigned long backup_size;
	bool res_dirty; /* Protected by backup buffer reserved */
	bool backup_dirty; /* Protected by backup buffer reserved */
	struct vmw_buffer_object *backup;
	unsigned long backup_offset;
	unsigned long pin_count; /* Protected by resource reserved */
	const struct vmw_res_func *func;
	struct list_head lru_head; /* Protected by the resource lock */
	struct list_head mob_head; /* Protected by @backup reserved */
	struct list_head binding_head; /* Protected by binding_mutex */
	void (*res_free) (struct vmw_resource *res);
	void (*hw_destroy) (struct vmw_resource *res);
};


/*
 * Resources that are managed using ioctls.
 */
enum vmw_res_type {
	vmw_res_context,
	vmw_res_surface,
	vmw_res_stream,
	vmw_res_shader,
	vmw_res_dx_context,
	vmw_res_cotable,
	vmw_res_view,
	vmw_res_max
};

/*
 * Resources that are managed using command streams.
 */
enum vmw_cmdbuf_res_type {
	vmw_cmdbuf_res_shader,
	vmw_cmdbuf_res_view
};

struct vmw_cmdbuf_res_manager;

struct vmw_cursor_snooper {
	size_t age;
	uint32_t *image;
};

struct vmw_framebuffer;
struct vmw_surface_offset;

struct vmw_surface {
	struct vmw_resource res;
	uint32_t flags;
	uint32_t format;
	uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES];
	struct drm_vmw_size base_size;
	struct drm_vmw_size *sizes;
	uint32_t num_sizes;
	bool scanout;
	uint32_t array_size;
	/* TODO so far just an extra pointer */
	struct vmw_cursor_snooper snooper;
	struct vmw_surface_offset *offsets;
	SVGA3dTextureFilter autogen_filter;
	uint32_t multisample_count;
	struct list_head view_list;
};

struct vmw_marker_queue {
	struct list_head head;
	u64 lag;
	u64 lag_time;
	spinlock_t lock;
};

struct vmw_fifo_state {
	unsigned long reserved_size;
	u32 *dynamic_buffer;
	u32 *static_buffer;
	unsigned long static_buffer_size;
	bool using_bounce_buffer;
	uint32_t capabilities;
	struct mutex fifo_mutex;
	struct rw_semaphore rwsem;
	struct vmw_marker_queue marker_queue;
	bool dx;
};

struct vmw_relocation {
	SVGAMobId *mob_loc;
	SVGAGuestPtr *location;
	uint32_t index;
};

/**
 * struct vmw_res_cache_entry - resource information cache entry
 *
 * @valid: Whether the entry is valid, which also implies that the execbuf
 * code holds a reference to the resource, and it's placed on the
 * validation list.
 * @handle: User-space handle of a resource.
 * @res: Non-ref-counted pointer to the resource.
 * @node: Pointer to the validation node of the resource.
 *
 * Used to avoid frequent repeated user-space handle lookups of the
 * same resource.
 */
struct vmw_res_cache_entry {
	bool valid;
	uint32_t handle;
	struct vmw_resource *res;
	struct vmw_resource_val_node *node;
};

/**
 * enum vmw_dma_map_mode - indicate how to perform TTM page DMA mappings.
 */
enum vmw_dma_map_mode {
	vmw_dma_phys,           /* Use physical page addresses */
	vmw_dma_alloc_coherent, /* Use TTM coherent pages */
	vmw_dma_map_populate,   /* Unmap from DMA just after unpopulate */
	vmw_dma_map_bind,       /* Unmap from DMA just before unbind */
	vmw_dma_map_max
};

/**
 * struct vmw_sg_table - Scatter/gather table for binding, with additional
 * device-specific information.
 *
 * @mode: The DMA mapping mode used for this scatter/gather table.
 * @pages: Array of page pointers backing the table.
 * @addrs: DMA addresses of the pages if coherent pages are used.
 * @sgt: Pointer to a struct sg_table with binding information
 * @num_regions: Number of regions with device-address contiguous pages
 * @num_pages: Total number of pages in the table.
 */
struct vmw_sg_table {
	enum vmw_dma_map_mode mode;
	struct page **pages;
	const dma_addr_t *addrs;
	struct sg_table *sgt;
	unsigned long num_regions;
	unsigned long num_pages;
};

/**
 * struct vmw_piter - Page iterator that iterates over a list of pages
 * and DMA addresses that could be either a scatter-gather list or
 * arrays
 *
 * @pages: Array of page pointers to the pages.
 * @addrs: DMA addresses to the pages if coherent pages are used.
 * @iter: Scatter-gather page iterator. Current position in SG list.
 * @i: Current position in arrays.
 * @num_pages: Number of pages total.
 * @next: Function to advance the iterator. Returns false if past the list
 * of pages, true otherwise.
 * @dma_address: Function to return the DMA address of the current page.
 * @page: Function to return a pointer to the current page.
 */
struct vmw_piter {
	struct page **pages;
	const dma_addr_t *addrs;
	struct sg_page_iter iter;
	unsigned long i;
	unsigned long num_pages;
	bool (*next)(struct vmw_piter *);
	dma_addr_t (*dma_address)(struct vmw_piter *);
	struct page *(*page)(struct vmw_piter *);
};

/*
 * enum vmw_display_unit_type - Describes the display unit
 */
enum vmw_display_unit_type {
	vmw_du_invalid = 0,
	vmw_du_legacy,
	vmw_du_screen_object,
	vmw_du_screen_target
};


struct vmw_sw_context {
	struct drm_open_hash res_ht;
	bool res_ht_initialized;
	bool kernel; /**< is the call made from the kernel */
	struct vmw_fpriv *fp;
	struct list_head validate_nodes;
	struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS];
	uint32_t cur_reloc;
	struct vmw_validate_buffer val_bufs[VMWGFX_MAX_VALIDATIONS];
	uint32_t cur_val_buf;
	uint32_t *cmd_bounce;
	uint32_t cmd_bounce_size;
	struct list_head resource_list;
	struct list_head ctx_resource_list; /* For contexts and cotables */
	struct vmw_buffer_object *cur_query_bo;
	struct list_head res_relocations;
	uint32_t *buf_start;
	struct vmw_res_cache_entry res_cache[vmw_res_max];
	struct vmw_resource *last_query_ctx;
	bool needs_post_query_barrier;
	struct vmw_resource *error_resource;
	struct vmw_ctx_binding_state *staged_bindings;
	bool staged_bindings_inuse;
	struct list_head staged_cmd_res;
	struct vmw_resource_val_node *dx_ctx_node;
	struct vmw_buffer_object *dx_query_mob;
	struct vmw_resource *dx_query_ctx;
	struct vmw_cmdbuf_res_manager *man;
};

struct vmw_legacy_display;
struct vmw_overlay;

struct vmw_master {
	struct ttm_lock lock;
};

struct vmw_vga_topology_state {
	uint32_t width;
	uint32_t height;
	uint32_t primary;
	uint32_t pos_x;
	uint32_t pos_y;
};


/*
 * struct vmw_otable - Guest Memory OBject table metadata
 *
 * @size: Size of the table (page-aligned).
 * @page_table: Pointer to a struct vmw_mob holding the page table.
 * @enabled: Whether the table is currently set up on the device.
 */
struct vmw_otable {
	unsigned long size;
	struct vmw_mob *page_table;
	bool enabled;
};

struct vmw_otable_batch {
	unsigned num_otables;
	struct vmw_otable *otables;
	struct vmw_resource *context;
	struct ttm_buffer_object *otable_bo;
};

enum {
	VMW_IRQTHREAD_FENCE,
	VMW_IRQTHREAD_CMDBUF,
	VMW_IRQTHREAD_MAX
};

struct vmw_private {
	struct ttm_bo_device bdev;
	struct ttm_bo_global_ref bo_global_ref;
	struct drm_global_reference mem_global_ref;

	struct vmw_fifo_state fifo;

	struct drm_device *dev;
	unsigned long vmw_chipset;
	unsigned int io_start;
	uint32_t vram_start;
	uint32_t vram_size;
	uint32_t prim_bb_mem;
	uint32_t mmio_start;
	uint32_t mmio_size;
	uint32_t fb_max_width;
	uint32_t fb_max_height;
	uint32_t texture_max_width;
	uint32_t texture_max_height;
	uint32_t stdu_max_width;
	uint32_t stdu_max_height;
	uint32_t initial_width;
	uint32_t initial_height;
	u32 *mmio_virt;
	uint32_t capabilities;
	uint32_t max_gmr_ids;
	uint32_t max_gmr_pages;
	uint32_t max_mob_pages;
	uint32_t max_mob_size;
	uint32_t memory_size;
	bool has_gmr;
	bool has_mob;
	spinlock_t hw_lock;
	spinlock_t cap_lock;
	bool has_dx;
	bool assume_16bpp;

	/*
	 * VGA registers.
	 */

	struct vmw_vga_topology_state vga_save[VMWGFX_MAX_DISPLAYS];
	uint32_t vga_width;
	uint32_t vga_height;
	uint32_t vga_bpp;
	uint32_t vga_bpl;
	uint32_t vga_pitchlock;

	uint32_t num_displays;

	/*
	 * requested_layout_mutex currently protects the GUI positioning
	 * state in the display units. With this use case the mutex is
	 * only taken during the layout ioctl and atomic check_modeset.
	 * Other display unit state could also be protected by this mutex,
	 * but that needs careful consideration.
	 */
	struct mutex requested_layout_mutex;

	/*
	 * Framebuffer info.
	 */

	void *fb_info;
	enum vmw_display_unit_type active_display_unit;
	struct vmw_legacy_display *ldu_priv;
	struct vmw_overlay *overlay_priv;
	struct drm_property *hotplug_mode_update_property;
	struct drm_property *implicit_placement_property;
	unsigned num_implicit;
	struct vmw_framebuffer *implicit_fb;
	struct mutex global_kms_state_mutex;
	spinlock_t cursor_lock;
	struct drm_atomic_state *suspend_state;

	/*
	 * Context and surface management.
	 */

	rwlock_t resource_lock;
	struct idr res_idr[vmw_res_max];
	/*
	 * Block lastclose from racing with firstopen.
	 */

	struct mutex init_mutex;

	/*
	 * A resource manager for kernel-only surfaces and
	 * contexts.
	 */

	struct ttm_object_device *tdev;

	/*
	 * Fencing and IRQs.
	 */

	atomic_t marker_seq;
	wait_queue_head_t fence_queue;
	wait_queue_head_t fifo_queue;
	spinlock_t waiter_lock;
	int fence_queue_waiters; /* Protected by waiter_lock */
	int goal_queue_waiters; /* Protected by waiter_lock */
	int cmdbuf_waiters; /* Protected by waiter_lock */
	int error_waiters; /* Protected by waiter_lock */
	int fifo_queue_waiters; /* Protected by waiter_lock */
	uint32_t last_read_seqno;
	struct vmw_fence_manager *fman;
	uint32_t irq_mask; /* Updates protected by waiter_lock */

	/*
	 * Device state
	 */

	uint32_t traces_state;
	uint32_t enable_state;
	uint32_t config_done_state;

	/**
	 * Execbuf
	 */
	/**
	 * Protected by the cmdbuf mutex.
	 */

	struct vmw_sw_context ctx;
	struct mutex cmdbuf_mutex;
	struct mutex binding_mutex;

	/**
	 * Operating mode.
	 */

	bool stealth;
	bool enable_fb;
	spinlock_t svga_lock;

	/**
	 * Master management.
	 */

	struct vmw_master *active_master;
	struct vmw_master fbdev_master;
	struct notifier_block pm_nb;
	bool refuse_hibernation;
	bool suspend_locked;

	struct mutex release_mutex;
	atomic_t num_fifo_resources;

	/*
	 * Replace this with an rwsem as soon as we have down_xx_interruptible()
	 */
	struct ttm_lock reservation_sem;

	/*
	 * Query processing. These members
	 * are protected by the cmdbuf mutex.
	 */

	struct vmw_buffer_object *dummy_query_bo;
	struct vmw_buffer_object *pinned_bo;
	uint32_t query_cid;
	uint32_t query_cid_valid;
	bool dummy_query_bo_pinned;

	/*
	 * Surface swapping. The "surface_lru" list is protected by the
	 * resource lock in order to be able to destroy a surface and take
	 * it off the lru atomically. "used_memory_size" is currently
	 * protected by the cmdbuf mutex for simplicity.
	 */

	struct list_head res_lru[vmw_res_max];
	uint32_t used_memory_size;

	/*
	 * DMA mapping stuff.
	 */
	enum vmw_dma_map_mode map_mode;

	/*
	 * Guest Backed stuff
	 */
	struct vmw_otable_batch otable_batch;

	struct vmw_cmdbuf_man *cman;
	DECLARE_BITMAP(irqthread_pending, VMW_IRQTHREAD_MAX);
};

static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
{
	return container_of(res, struct vmw_surface, res);
}

static inline struct vmw_private *vmw_priv(struct drm_device *dev)
{
	return (struct vmw_private *)dev->dev_private;
}

static inline struct vmw_fpriv *vmw_fpriv(struct drm_file *file_priv)
{
	return (struct vmw_fpriv *)file_priv->driver_priv;
}

static inline struct vmw_master *vmw_master(struct drm_master *master)
{
	return (struct vmw_master *) master->driver_priv;
}

/*
 * The locking here is fine-grained, so that it is performed once
 * for every read and write operation. This is of course costly, but we
 * don't perform much register access in the timing-critical paths anyway.
 * Instead we have the extra benefit of being sure that we don't forget
 * the hw lock around register accesses.
 */
static inline void vmw_write(struct vmw_private *dev_priv,
			     unsigned int offset, uint32_t value)
{
	spin_lock(&dev_priv->hw_lock);
	outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
	outl(value, dev_priv->io_start + VMWGFX_VALUE_PORT);
	spin_unlock(&dev_priv->hw_lock);
}

static inline uint32_t vmw_read(struct vmw_private *dev_priv,
				unsigned int offset)
{
	u32 val;

	spin_lock(&dev_priv->hw_lock);
	outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
	val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT);
	spin_unlock(&dev_priv->hw_lock);

	return val;
}
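
/*
 * Illustrative sketch only: a caller pairs the accessors above, for
 * instance to program one SVGA register and read back another. The
 * register names below come from the SVGA register definitions and are
 * used here purely as an example:
 *
 *	vmw_write(dev_priv, SVGA_REG_ENABLE, 1);
 *	caps = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);
 *
 * Each call takes and releases hw_lock internally, so no extra locking
 * is needed around a single access.
 */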

extern void vmw_svga_enable(struct vmw_private *dev_priv);
extern void vmw_svga_disable(struct vmw_private *dev_priv);


/**
 * GMR utilities - vmwgfx_gmr.c
 */

extern int vmw_gmr_bind(struct vmw_private *dev_priv,
			const struct vmw_sg_table *vsgt,
			unsigned long num_pages,
			int gmr_id);
extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id);

/**
 * Resource utilities - vmwgfx_resource.c
 */
struct vmw_user_resource_conv;

extern void vmw_resource_unreference(struct vmw_resource **p_res);
extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
extern struct vmw_resource *
vmw_resource_reference_unless_doomed(struct vmw_resource *res);
extern int vmw_resource_validate(struct vmw_resource *res);
extern int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
				bool no_backup);
extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
				  struct ttm_object_file *tfile,
				  uint32_t handle,
				  struct vmw_surface **out_surf,
				  struct vmw_buffer_object **out_buf);
extern int vmw_user_resource_lookup_handle(
	struct vmw_private *dev_priv,
	struct ttm_object_file *tfile,
	uint32_t handle,
	const struct vmw_user_resource_conv *converter,
	struct vmw_resource **p_res);
extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv);
extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv);
extern int vmw_user_stream_lookup(struct vmw_private *dev_priv,
				  struct ttm_object_file *tfile,
				  uint32_t *inout_id,
				  struct vmw_resource **out);
extern void vmw_resource_unreserve(struct vmw_resource *res,
				   bool switch_backup,
				   struct vmw_buffer_object *new_backup,
				   unsigned long new_backup_offset);
extern void vmw_query_move_notify(struct ttm_buffer_object *bo,
				  struct ttm_mem_reg *mem);
extern int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob);
extern void vmw_resource_evict_all(struct vmw_private *dev_priv);
extern void vmw_resource_unbind_list(struct vmw_buffer_object *vbo);

/**
 * Buffer object helper functions - vmwgfx_bo.c
 */
extern int vmw_bo_pin_in_placement(struct vmw_private *vmw_priv,
				   struct vmw_buffer_object *bo,
				   struct ttm_placement *placement,
				   bool interruptible);
extern int vmw_bo_pin_in_vram(struct vmw_private *dev_priv,
			      struct vmw_buffer_object *buf,
			      bool interruptible);
extern int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
				     struct vmw_buffer_object *buf,
				     bool interruptible);
extern int vmw_bo_pin_in_start_of_vram(struct vmw_private *vmw_priv,
				       struct vmw_buffer_object *bo,
				       bool interruptible);
extern int vmw_bo_unpin(struct vmw_private *vmw_priv,
			struct vmw_buffer_object *bo,
			bool interruptible);
extern void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *buf,
				 SVGAGuestPtr *ptr);
extern void vmw_bo_pin_reserved(struct vmw_buffer_object *bo, bool pin);
extern void vmw_bo_bo_free(struct ttm_buffer_object *bo);
extern int vmw_bo_init(struct vmw_private *dev_priv,
		       struct vmw_buffer_object *vmw_bo,
		       size_t size, struct ttm_placement *placement,
		       bool interruptible,
		       void (*bo_free)(struct ttm_buffer_object *bo));
extern int vmw_user_bo_verify_access(struct ttm_buffer_object *bo,
				     struct ttm_object_file *tfile);
extern int vmw_user_bo_alloc(struct vmw_private *dev_priv,
			     struct ttm_object_file *tfile,
			     uint32_t size,
			     bool shareable,
			     uint32_t *handle,
			     struct vmw_buffer_object **p_dma_buf,
			     struct ttm_base_object **p_base);
extern int vmw_user_bo_reference(struct ttm_object_file *tfile,
				 struct vmw_buffer_object *dma_buf,
				 uint32_t *handle);
extern int vmw_bo_alloc_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv);
extern int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv);
extern int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file_priv);
extern int vmw_user_bo_lookup(struct ttm_object_file *tfile,
			      uint32_t id, struct vmw_buffer_object **out,
			      struct ttm_base_object **base);
extern void vmw_bo_fence_single(struct ttm_buffer_object *bo,
				struct vmw_fence_obj *fence);
extern void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo);
extern void vmw_bo_unmap(struct vmw_buffer_object *vbo);
extern void vmw_bo_move_notify(struct ttm_buffer_object *bo,
			       struct ttm_mem_reg *mem);
extern void vmw_bo_swap_notify(struct ttm_buffer_object *bo);
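
/*
 * Illustrative sketch only (not a canonical call sequence from this
 * driver): creating a kernel buffer object with the helpers above and
 * pinning it in VRAM. Error unwinding is abbreviated, and the choice of
 * vmw_sys_placement for the initial placement is an assumption.
 *
 *	struct vmw_buffer_object *vbo = kzalloc(sizeof(*vbo), GFP_KERNEL);
 *	int ret;
 *
 *	if (!vbo)
 *		return -ENOMEM;
 *	ret = vmw_bo_init(dev_priv, vbo, PAGE_SIZE, &vmw_sys_placement,
 *			  true, &vmw_bo_bo_free);
 *	if (ret)
 *		return ret;
 *	ret = vmw_bo_pin_in_vram(dev_priv, vbo, true);
 */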

/**
 * Misc Ioctl functionality - vmwgfx_ioctl.c
 */

extern int vmw_getparam_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv);
extern int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
extern int vmw_present_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
extern int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
				      struct drm_file *file_priv);
extern __poll_t vmw_fops_poll(struct file *filp,
			      struct poll_table_struct *wait);
extern ssize_t vmw_fops_read(struct file *filp, char __user *buffer,
			     size_t count, loff_t *offset);

/**
 * Fifo utilities - vmwgfx_fifo.c
 */

extern int vmw_fifo_init(struct vmw_private *dev_priv,
			 struct vmw_fifo_state *fifo);
extern void vmw_fifo_release(struct vmw_private *dev_priv,
			     struct vmw_fifo_state *fifo);
extern void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes);
extern void *
vmw_fifo_reserve_dx(struct vmw_private *dev_priv, uint32_t bytes, int ctx_id);
extern void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes);
extern void vmw_fifo_commit_flush(struct vmw_private *dev_priv, uint32_t bytes);
extern int vmw_fifo_send_fence(struct vmw_private *dev_priv,
			       uint32_t *seqno);
extern void vmw_fifo_ping_host_locked(struct vmw_private *dev_priv,
				      uint32_t reason);
extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason);
extern bool vmw_fifo_have_3d(struct vmw_private *dev_priv);
extern bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv);
extern int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
				     uint32_t cid);
extern int vmw_fifo_flush(struct vmw_private *dev_priv,
			  bool interruptible);
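
/*
 * Sketch of the reserve/commit pattern implied by the declarations
 * above; the command struct shown is hypothetical and stands in for any
 * SVGA FIFO command:
 *
 *	struct {
 *		SVGA3dCmdHeader header;
 *		// hypothetical command body goes here
 *	} *cmd;
 *
 *	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
 *	if (unlikely(cmd == NULL))
 *		return -ENOMEM;	// FIFO reservation failed
 *	// ... fill in cmd->header and the command body ...
 *	vmw_fifo_commit(dev_priv, sizeof(*cmd));
 */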

/**
 * TTM glue - vmwgfx_ttm_glue.c
 */

extern int vmw_ttm_global_init(struct vmw_private *dev_priv);
extern void vmw_ttm_global_release(struct vmw_private *dev_priv);
extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);

/**
 * TTM buffer object driver - vmwgfx_ttm_buffer.c
 */

extern const size_t vmw_tt_size;
extern struct ttm_placement vmw_vram_placement;
extern struct ttm_placement vmw_vram_ne_placement;
extern struct ttm_placement vmw_vram_sys_placement;
extern struct ttm_placement vmw_vram_gmr_placement;
extern struct ttm_placement vmw_vram_gmr_ne_placement;
extern struct ttm_placement vmw_sys_placement;
extern struct ttm_placement vmw_sys_ne_placement;
extern struct ttm_placement vmw_evictable_placement;
extern struct ttm_placement vmw_srf_placement;
extern struct ttm_placement vmw_mob_placement;
extern struct ttm_placement vmw_mob_ne_placement;
extern struct ttm_placement vmw_nonfixed_placement;
extern struct ttm_bo_driver vmw_bo_driver;
extern int vmw_dma_quiescent(struct drm_device *dev);
extern int vmw_bo_map_dma(struct ttm_buffer_object *bo);
extern void vmw_bo_unmap_dma(struct ttm_buffer_object *bo);
extern const struct vmw_sg_table *
vmw_bo_sg_table(struct ttm_buffer_object *bo);
extern void vmw_piter_start(struct vmw_piter *viter,
			    const struct vmw_sg_table *vsgt,
			    unsigned long p_offs);

/**
 * vmw_piter_next - Advance the iterator one page.
 *
 * @viter: Pointer to the iterator to advance.
 *
 * Returns false if past the list of pages, true otherwise.
 */
static inline bool vmw_piter_next(struct vmw_piter *viter)
{
	return viter->next(viter);
}

/**
 * vmw_piter_dma_addr - Return the DMA address of the current page.
 *
 * @viter: Pointer to the iterator
 *
 * Returns the DMA address of the page pointed to by @viter.
 */
static inline dma_addr_t vmw_piter_dma_addr(struct vmw_piter *viter)
{
	return viter->dma_address(viter);
}

/**
 * vmw_piter_page - Return a pointer to the current page.
 *
 * @viter: Pointer to the iterator
 *
 * Returns a pointer to the page pointed to by @viter.
 */
static inline struct page *vmw_piter_page(struct vmw_piter *viter)
{
	return viter->page(viter);
}
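
/*
 * Illustrative sketch only: walking the pages of a buffer object with
 * the iterator above. The advance-before-access loop shape is an
 * assumption based on vmw_piter_next()'s return convention:
 *
 *	const struct vmw_sg_table *vsgt = vmw_bo_sg_table(bo);
 *	struct vmw_piter viter;
 *
 *	vmw_piter_start(&viter, vsgt, 0);
 *	while (vmw_piter_next(&viter)) {
 *		dma_addr_t addr = vmw_piter_dma_addr(&viter);
 *		// hand @addr to the device, one page at a time
 *	}
 */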

/**
 * Command submission - vmwgfx_execbuf.c
 */

extern int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
			     struct drm_file *file_priv, size_t size);
extern int vmw_execbuf_process(struct drm_file *file_priv,
			       struct vmw_private *dev_priv,
			       void __user *user_commands,
			       void *kernel_commands,
			       uint32_t command_size,
			       uint64_t throttle_us,
			       uint32_t dx_context_handle,
			       struct drm_vmw_fence_rep __user
			       *user_fence_rep,
			       struct vmw_fence_obj **out_fence,
			       uint32_t flags);
extern void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
					    struct vmw_fence_obj *fence);
extern void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv);

extern int vmw_execbuf_fence_commands(struct drm_file *file_priv,
				      struct vmw_private *dev_priv,
				      struct vmw_fence_obj **p_fence,
				      uint32_t *p_handle);
extern void vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
					struct vmw_fpriv *vmw_fp,
					int ret,
					struct drm_vmw_fence_rep __user
					*user_fence_rep,
					struct vmw_fence_obj *fence,
					uint32_t fence_handle,
					int32_t out_fence_fd,
					struct sync_file *sync_file);
extern int vmw_validate_single_buffer(struct vmw_private *dev_priv,
				      struct ttm_buffer_object *bo,
				      bool interruptible,
				      bool validate_as_mob);
bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd);

/**
 * IRQs and waiting - vmwgfx_irq.c
 */

extern int vmw_wait_seqno(struct vmw_private *dev_priv, bool lazy,
			  uint32_t seqno, bool interruptible,
			  unsigned long timeout);
extern int vmw_irq_install(struct drm_device *dev, int irq);
extern void vmw_irq_uninstall(struct drm_device *dev);
extern bool vmw_seqno_passed(struct vmw_private *dev_priv,
			     uint32_t seqno);
extern int vmw_fallback_wait(struct vmw_private *dev_priv,
			     bool lazy,
			     bool fifo_idle,
			     uint32_t seqno,
			     bool interruptible,
			     unsigned long timeout);
extern void vmw_update_seqno(struct vmw_private *dev_priv,
			     struct vmw_fifo_state *fifo_state);
extern void vmw_seqno_waiter_add(struct vmw_private *dev_priv);
extern void vmw_seqno_waiter_remove(struct vmw_private *dev_priv);
extern void vmw_goal_waiter_add(struct vmw_private *dev_priv);
extern void vmw_goal_waiter_remove(struct vmw_private *dev_priv);
extern void vmw_generic_waiter_add(struct vmw_private *dev_priv, u32 flag,
				   int *waiter_count);
extern void vmw_generic_waiter_remove(struct vmw_private *dev_priv,
				      u32 flag, int *waiter_count);

| 897 | /** |
Thomas Hellstrom | 6bcd8d3c | 2011-09-01 20:18:42 +0000 | [diff] [blame] | 898 | * Rudimentary fence-like objects currently used only for throttling - |
| 899 | * vmwgfx_marker.c |
Thomas Hellstrom | 1925d45 | 2010-05-28 11:21:57 +0200 | [diff] [blame] | 900 | */ |
| 901 | |
Thomas Hellstrom | 6bcd8d3c | 2011-09-01 20:18:42 +0000 | [diff] [blame] | 902 | extern void vmw_marker_queue_init(struct vmw_marker_queue *queue); |
| 903 | extern void vmw_marker_queue_takedown(struct vmw_marker_queue *queue); |
| 904 | extern int vmw_marker_push(struct vmw_marker_queue *queue, |
Sinclair Yeh | c8261a9 | 2015-06-26 01:23:42 -0700 | [diff] [blame] | 905 | uint32_t seqno); |
Thomas Hellstrom | 6bcd8d3c | 2011-09-01 20:18:42 +0000 | [diff] [blame] | 906 | extern int vmw_marker_pull(struct vmw_marker_queue *queue, |
Sinclair Yeh | c8261a9 | 2015-06-26 01:23:42 -0700 | [diff] [blame] | 907 | uint32_t signaled_seqno); |
Thomas Hellstrom | 1925d45 | 2010-05-28 11:21:57 +0200 | [diff] [blame] | 908 | extern int vmw_wait_lag(struct vmw_private *dev_priv, |
Thomas Hellstrom | 6bcd8d3c | 2011-09-01 20:18:42 +0000 | [diff] [blame] | 909 | struct vmw_marker_queue *queue, uint32_t us); |
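/*
 * Markers pair a submitted seqno with a timestamp: pushed at submission,
 * pulled as seqnos signal, letting vmw_wait_lag() block until the device
 * trails the CPU by no more than roughly @us. A hedged throttling sketch
 * (submit_commands() is a hypothetical helper, and marker_queue is assumed
 * to live in struct vmw_fifo_state):
 *
 *	ret = vmw_wait_lag(dev_priv, &fifo_state->marker_queue, 100000);
 *	if (ret)
 *		return ret;
 *	seqno = submit_commands(dev_priv);
 *	vmw_marker_push(&fifo_state->marker_queue, seqno);
 */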
Jakob Bornecrantz | fb1d973 | 2009-12-10 00:19:58 +0000 | [diff] [blame] | 910 | |
| 911 | /** |
| 912 | * Kernel framebuffer - vmwgfx_fb.c |
| 913 | */ |
| 914 | |
| 915 | int vmw_fb_init(struct vmw_private *vmw_priv); |
| 916 | int vmw_fb_close(struct vmw_private *dev_priv); |
| 917 | int vmw_fb_off(struct vmw_private *vmw_priv); |
| 918 | int vmw_fb_on(struct vmw_private *vmw_priv); |
| 919 | |
| 920 | /** |
| 921 | * Kernel modesetting - vmwgfx_kms.c |
| 922 | */ |
| 923 | |
| 924 | int vmw_kms_init(struct vmw_private *dev_priv); |
| 925 | int vmw_kms_close(struct vmw_private *dev_priv); |
| 926 | int vmw_kms_save_vga(struct vmw_private *vmw_priv); |
| 927 | int vmw_kms_restore_vga(struct vmw_private *vmw_priv); |
| 928 | int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data, |
| 929 | struct drm_file *file_priv); |
| 930 | void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv); |
| 931 | void vmw_kms_cursor_snoop(struct vmw_surface *srf, |
| 932 | struct ttm_object_file *tfile, |
| 933 | struct ttm_buffer_object *bo, |
| 934 | SVGA3dCmdHeader *header); |
Michel Dänzer | 0bef23f | 2011-08-31 07:42:50 +0000 | [diff] [blame] | 935 | int vmw_kms_write_svga(struct vmw_private *vmw_priv, |
| 936 | unsigned width, unsigned height, unsigned pitch, |
| 937 | unsigned bpp, unsigned depth); |
Thomas Hellstrom | 3a939a5 | 2010-10-05 12:43:03 +0200 | [diff] [blame] | 938 | void vmw_kms_idle_workqueues(struct vmw_master *vmaster); |
Thomas Hellstrom | e133e737 | 2010-10-05 12:43:04 +0200 | [diff] [blame] | 939 | bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv, |
| 940 | uint32_t pitch, |
| 941 | uint32_t height); |
Thierry Reding | 88e7271 | 2015-09-24 18:35:31 +0200 | [diff] [blame] | 942 | u32 vmw_get_vblank_counter(struct drm_device *dev, unsigned int pipe); |
| 943 | int vmw_enable_vblank(struct drm_device *dev, unsigned int pipe); |
| 944 | void vmw_disable_vblank(struct drm_device *dev, unsigned int pipe); |
Jakob Bornecrantz | 2fcd5a7 | 2011-10-04 20:13:26 +0200 | [diff] [blame] | 945 | int vmw_kms_present(struct vmw_private *dev_priv, |
| 946 | struct drm_file *file_priv, |
| 947 | struct vmw_framebuffer *vfb, |
| 948 | struct vmw_surface *surface, |
| 949 | uint32_t sid, int32_t destX, int32_t destY, |
| 950 | struct drm_vmw_rect *clips, |
| 951 | uint32_t num_clips); |
Thomas Hellstrom | cd2b89e | 2011-10-25 23:35:53 +0200 | [diff] [blame] | 952 | int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, |
| 953 | struct drm_file *file_priv); |
Thomas Hellstrom | 8fbf9d9 | 2015-11-26 19:45:16 +0100 | [diff] [blame] | 954 | void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv); |
Thomas Hellstrom | c3b9b16 | 2018-03-22 10:26:37 +0100 | [diff] [blame] | 955 | int vmw_kms_suspend(struct drm_device *dev); |
| 956 | int vmw_kms_resume(struct drm_device *dev); |
Thomas Hellstrom | 140bcaa | 2018-03-08 10:07:37 +0100 | [diff] [blame] | 957 | void vmw_kms_lost_device(struct drm_device *dev); |
Jakob Bornecrantz | fb1d973 | 2009-12-10 00:19:58 +0000 | [diff] [blame] | 958 | |
Dave Airlie | 5e1782d | 2012-08-28 01:53:54 +0000 | [diff] [blame] | 959 | int vmw_dumb_create(struct drm_file *file_priv, |
| 960 | struct drm_device *dev, |
| 961 | struct drm_mode_create_dumb *args); |
| 962 | |
| 963 | int vmw_dumb_map_offset(struct drm_file *file_priv, |
| 964 | struct drm_device *dev, uint32_t handle, |
| 965 | uint64_t *offset); |
| 966 | int vmw_dumb_destroy(struct drm_file *file_priv, |
| 967 | struct drm_device *dev, |
| 968 | uint32_t handle); |
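/*
 * These implement the standard DRM dumb-buffer interface and are meant to
 * be wired into struct drm_driver; a sketch of the hookup:
 *
 *	static struct drm_driver driver = {
 *		...
 *		.dumb_create = vmw_dumb_create,
 *		.dumb_map_offset = vmw_dumb_map_offset,
 *		.dumb_destroy = vmw_dumb_destroy,
 *	};
 */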
Thomas Hellstrom | 1a4b172 | 2015-06-26 02:03:53 -0700 | [diff] [blame] | 969 | extern int vmw_resource_pin(struct vmw_resource *res, bool interruptible); |
Thomas Hellstrom | ed93394 | 2015-03-02 23:26:06 -0800 | [diff] [blame] | 970 | extern void vmw_resource_unpin(struct vmw_resource *res); |
Thomas Hellstrom | d80efd5 | 2015-08-10 10:39:35 -0700 | [diff] [blame] | 971 | extern enum vmw_res_type vmw_res_type(const struct vmw_resource *res); |
Thomas Hellstrom | ed93394 | 2015-03-02 23:26:06 -0800 | [diff] [blame] | 972 | |
Jakob Bornecrantz | fb1d973 | 2009-12-10 00:19:58 +0000 | [diff] [blame] | 973 | /** |
| 974 | * Overlay control - vmwgfx_overlay.c |
| 975 | */ |
| 976 | |
| 977 | int vmw_overlay_init(struct vmw_private *dev_priv); |
| 978 | int vmw_overlay_close(struct vmw_private *dev_priv); |
| 979 | int vmw_overlay_ioctl(struct drm_device *dev, void *data, |
| 980 | struct drm_file *file_priv); |
| 981 | int vmw_overlay_stop_all(struct vmw_private *dev_priv); |
| 982 | int vmw_overlay_resume_all(struct vmw_private *dev_priv); |
| 983 | int vmw_overlay_pause_all(struct vmw_private *dev_priv); |
| 984 | int vmw_overlay_claim(struct vmw_private *dev_priv, uint32_t *out); |
| 985 | int vmw_overlay_unref(struct vmw_private *dev_priv, uint32_t stream_id); |
| 986 | int vmw_overlay_num_overlays(struct vmw_private *dev_priv); |
| 987 | int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv); |
| 988 | |
| 989 | /** |
Thomas Hellstrom | 135cba0 | 2010-10-26 21:21:47 +0200 | [diff] [blame] | 990 | * GMR ID manager |
| 991 | */ |
| 992 | |
| 993 | extern const struct ttm_mem_type_manager_func vmw_gmrid_manager_func; |
| 994 | |
| 995 | /** |
Thomas Hellstrom | 69977ff | 2013-11-13 01:50:46 -0800 | [diff] [blame] | 996 | * Prime - vmwgfx_prime.c |
| 997 | */ |
| 998 | |
| 999 | extern const struct dma_buf_ops vmw_prime_dmabuf_ops; |
| 1000 | extern int vmw_prime_fd_to_handle(struct drm_device *dev, |
| 1001 | struct drm_file *file_priv, |
| 1002 | int fd, u32 *handle); |
| 1003 | extern int vmw_prime_handle_to_fd(struct drm_device *dev, |
| 1004 | struct drm_file *file_priv, |
| 1005 | uint32_t handle, uint32_t flags, |
| 1006 | int *prime_fd); |
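/*
 * Likewise, the PRIME entry points plug into struct drm_driver so buffer
 * handles can be exported to and imported from dma-buf file descriptors:
 *
 *	.prime_fd_to_handle = vmw_prime_fd_to_handle,
 *	.prime_handle_to_fd = vmw_prime_handle_to_fd,
 */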
| 1007 | |
Thomas Hellstrom | 3530bdc | 2012-11-21 10:49:52 +0100 | [diff] [blame] | 1008 | /* |
| 1009 | * Memory object (MOB) management - vmwgfx_mob.c |
| 1010 | */ |
| 1011 | struct vmw_mob; |
| 1012 | extern int vmw_mob_bind(struct vmw_private *dev_priv, struct vmw_mob *mob, |
Thomas Hellstrom | 0fd53cf | 2013-10-24 13:27:38 -0700 | [diff] [blame] | 1013 | const struct vmw_sg_table *vsgt, |
| 1014 | unsigned long num_data_pages, int32_t mob_id); |
Thomas Hellstrom | 3530bdc | 2012-11-21 10:49:52 +0100 | [diff] [blame] | 1015 | extern void vmw_mob_unbind(struct vmw_private *dev_priv, |
| 1016 | struct vmw_mob *mob); |
| 1017 | extern void vmw_mob_destroy(struct vmw_mob *mob); |
| 1018 | extern struct vmw_mob *vmw_mob_create(unsigned long data_pages); |
| 1019 | extern int vmw_otables_setup(struct vmw_private *dev_priv); |
| 1020 | extern void vmw_otables_takedown(struct vmw_private *dev_priv); |
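/*
 * A MOB (memory object) describes guest-backed storage to the device. The
 * lifecycle implied by the declarations above, error handling trimmed
 * (@vsgt is assumed to already describe the backing pages):
 *
 *	struct vmw_mob *mob = vmw_mob_create(num_data_pages);
 *
 *	if (!mob)
 *		return -ENOMEM;
 *	ret = vmw_mob_bind(dev_priv, mob, vsgt, num_data_pages, mob_id);
 *	...
 *	vmw_mob_unbind(dev_priv, mob);
 *	vmw_mob_destroy(mob);
 */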
Thomas Hellstrom | 69977ff | 2013-11-13 01:50:46 -0800 | [diff] [blame] | 1021 | |
Thomas Hellstrom | c74c162 | 2012-11-21 12:10:26 +0100 | [diff] [blame] | 1022 | /* |
Thomas Hellstrom | 7086d09 | 2012-11-21 12:20:53 +0100 | [diff] [blame] | 1023 | * Context management - vmwgfx_context.c |
| 1024 | */ |
| 1025 | |
| 1026 | extern const struct vmw_user_resource_conv *user_context_converter; |
| 1027 | |
Thomas Hellstrom | 7086d09 | 2012-11-21 12:20:53 +0100 | [diff] [blame] | 1028 | extern int vmw_context_check(struct vmw_private *dev_priv, |
| 1029 | struct ttm_object_file *tfile, |
| 1030 | int id, |
| 1031 | struct vmw_resource **p_res); |
| 1032 | extern int vmw_context_define_ioctl(struct drm_device *dev, void *data, |
| 1033 | struct drm_file *file_priv); |
Thomas Hellstrom | d80efd5 | 2015-08-10 10:39:35 -0700 | [diff] [blame] | 1034 | extern int vmw_extended_context_define_ioctl(struct drm_device *dev, void *data, |
| 1035 | struct drm_file *file_priv); |
Thomas Hellstrom | 7086d09 | 2012-11-21 12:20:53 +0100 | [diff] [blame] | 1036 | extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data, |
| 1037 | struct drm_file *file_priv); |
Thomas Hellstrom | 30f82d81 | 2014-02-05 08:13:56 +0100 | [diff] [blame] | 1038 | extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx); |
Thomas Hellstrom | 18e4a46 | 2014-06-09 12:39:22 +0200 | [diff] [blame] | 1039 | extern struct vmw_cmdbuf_res_manager * |
| 1040 | vmw_context_res_man(struct vmw_resource *ctx); |
Thomas Hellstrom | d80efd5 | 2015-08-10 10:39:35 -0700 | [diff] [blame] | 1041 | extern struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx, |
| 1042 | SVGACOTableType cotable_type); |
| 1044 | struct vmw_ctx_binding_state; |
| 1045 | extern struct vmw_ctx_binding_state * |
| 1046 | vmw_context_binding_state(struct vmw_resource *ctx); |
| 1047 | extern void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx, |
| 1048 | bool readback); |
Sinclair Yeh | fd11a3c | 2015-08-10 10:56:15 -0700 | [diff] [blame] | 1049 | extern int vmw_context_bind_dx_query(struct vmw_resource *ctx_res, |
Thomas Hellstrom | f1d34bf | 2018-06-19 15:02:16 +0200 | [diff] [blame] | 1050 | struct vmw_buffer_object *mob); |
| 1051 | extern struct vmw_buffer_object * |
Sinclair Yeh | fd11a3c | 2015-08-10 10:56:15 -0700 | [diff] [blame] | 1052 | vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res); |
| 1053 | |
Thomas Hellstrom | d80efd5 | 2015-08-10 10:39:35 -0700 | [diff] [blame] | 1054 | |
Thomas Hellstrom | 7086d09 | 2012-11-21 12:20:53 +0100 | [diff] [blame] | 1055 | /* |
| 1056 | * Surface management - vmwgfx_surface.c |
| 1057 | */ |
| 1058 | |
| 1059 | extern const struct vmw_user_resource_conv *user_surface_converter; |
| 1060 | |
| 1061 | extern void vmw_surface_res_free(struct vmw_resource *res); |
| 1062 | extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data, |
| 1063 | struct drm_file *file_priv); |
| 1064 | extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data, |
| 1065 | struct drm_file *file_priv); |
| 1066 | extern int vmw_surface_reference_ioctl(struct drm_device *dev, void *data, |
| 1067 | struct drm_file *file_priv); |
| 1068 | extern int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data, |
| 1069 | struct drm_file *file_priv); |
| 1070 | extern int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data, |
| 1071 | struct drm_file *file_priv); |
| 1072 | extern int vmw_surface_check(struct vmw_private *dev_priv, |
| 1073 | struct ttm_object_file *tfile, |
| 1074 | uint32_t handle, int *id); |
| 1075 | extern int vmw_surface_validate(struct vmw_private *dev_priv, |
| 1076 | struct vmw_surface *srf); |
Sinclair Yeh | 233826a | 2015-03-05 01:06:13 -0800 | [diff] [blame] | 1077 | int vmw_surface_gb_priv_define(struct drm_device *dev, |
| 1078 | uint32_t user_accounting_size, |
| 1079 | uint32_t svga3d_flags, |
| 1080 | SVGA3dSurfaceFormat format, |
| 1081 | bool for_scanout, |
| 1082 | uint32_t num_mip_levels, |
| 1083 | uint32_t multisample_count, |
Thomas Hellstrom | d80efd5 | 2015-08-10 10:39:35 -0700 | [diff] [blame] | 1084 | uint32_t array_size, |
Sinclair Yeh | 233826a | 2015-03-05 01:06:13 -0800 | [diff] [blame] | 1085 | struct drm_vmw_size size, |
| 1086 | struct vmw_surface **srf_out); |
Thomas Hellstrom | 7086d09 | 2012-11-21 12:20:53 +0100 | [diff] [blame] | 1087 | |
| 1088 | /* |
Thomas Hellstrom | c74c162 | 2012-11-21 12:10:26 +0100 | [diff] [blame] | 1089 | * Shader management - vmwgfx_shader.c |
| 1090 | */ |
| 1091 | |
Thomas Hellstrom | 7086d09 | 2012-11-21 12:20:53 +0100 | [diff] [blame] | 1092 | extern const struct vmw_user_resource_conv *user_shader_converter; |
| 1093 | |
Thomas Hellstrom | c74c162 | 2012-11-21 12:10:26 +0100 | [diff] [blame] | 1094 | extern int vmw_shader_define_ioctl(struct drm_device *dev, void *data, |
| 1095 | struct drm_file *file_priv); |
| 1096 | extern int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data, |
| 1097 | struct drm_file *file_priv); |
Thomas Hellstrom | 18e4a46 | 2014-06-09 12:39:22 +0200 | [diff] [blame] | 1098 | extern int vmw_compat_shader_add(struct vmw_private *dev_priv, |
| 1099 | struct vmw_cmdbuf_res_manager *man, |
Thomas Hellstrom | d5bde95 | 2014-01-31 10:12:10 +0100 | [diff] [blame] | 1100 | u32 user_key, const void *bytecode, |
| 1101 | SVGA3dShaderType shader_type, |
| 1102 | size_t size, |
Thomas Hellstrom | d5bde95 | 2014-01-31 10:12:10 +0100 | [diff] [blame] | 1103 | struct list_head *list); |
Thomas Hellstrom | d80efd5 | 2015-08-10 10:39:35 -0700 | [diff] [blame] | 1104 | extern int vmw_shader_remove(struct vmw_cmdbuf_res_manager *man, |
| 1105 | u32 user_key, SVGA3dShaderType shader_type, |
| 1106 | struct list_head *list); |
| 1107 | extern int vmw_dx_shader_add(struct vmw_cmdbuf_res_manager *man, |
| 1108 | struct vmw_resource *ctx, |
| 1109 | u32 user_key, |
| 1110 | SVGA3dShaderType shader_type, |
| 1111 | struct list_head *list); |
| 1112 | extern void vmw_dx_shader_cotable_list_scrub(struct vmw_private *dev_priv, |
| 1113 | struct list_head *list, |
| 1114 | bool readback); |
| 1115 | |
Thomas Hellstrom | 18e4a46 | 2014-06-09 12:39:22 +0200 | [diff] [blame] | 1116 | extern struct vmw_resource * |
Thomas Hellstrom | d80efd5 | 2015-08-10 10:39:35 -0700 | [diff] [blame] | 1117 | vmw_shader_lookup(struct vmw_cmdbuf_res_manager *man, |
| 1118 | u32 user_key, SVGA3dShaderType shader_type); |
Thomas Hellstrom | 18e4a46 | 2014-06-09 12:39:22 +0200 | [diff] [blame] | 1119 | |
| 1120 | /* |
| 1121 | * Command buffer managed resources - vmwgfx_cmdbuf_res.c |
| 1122 | */ |
| 1123 | |
| 1124 | extern struct vmw_cmdbuf_res_manager * |
| 1125 | vmw_cmdbuf_res_man_create(struct vmw_private *dev_priv); |
| 1126 | extern void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man); |
| 1127 | extern size_t vmw_cmdbuf_res_man_size(void); |
| 1128 | extern struct vmw_resource * |
| 1129 | vmw_cmdbuf_res_lookup(struct vmw_cmdbuf_res_manager *man, |
| 1130 | enum vmw_cmdbuf_res_type res_type, |
| 1131 | u32 user_key); |
| 1132 | extern void vmw_cmdbuf_res_revert(struct list_head *list); |
| 1133 | extern void vmw_cmdbuf_res_commit(struct list_head *list); |
| 1134 | extern int vmw_cmdbuf_res_add(struct vmw_cmdbuf_res_manager *man, |
| 1135 | enum vmw_cmdbuf_res_type res_type, |
| 1136 | u32 user_key, |
| 1137 | struct vmw_resource *res, |
| 1138 | struct list_head *list); |
| 1139 | extern int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man, |
| 1140 | enum vmw_cmdbuf_res_type res_type, |
| 1141 | u32 user_key, |
Thomas Hellstrom | d80efd5 | 2015-08-10 10:39:35 -0700 | [diff] [blame] | 1142 | struct list_head *list, |
| 1143 | struct vmw_resource **res); |
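/*
 * Managed resources follow a transactional pattern: additions and removals
 * are staged on a caller-owned list, then committed if command submission
 * succeeds or reverted if it fails. Illustrative sketch (submit_commands()
 * and the vmw_cmdbuf_res_shader enumerator choice are assumptions):
 *
 *	struct list_head staged;
 *
 *	INIT_LIST_HEAD(&staged);
 *	ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_shader, user_key,
 *				 res, &staged);
 *	if (!ret)
 *		ret = submit_commands(dev_priv);
 *	if (ret)
 *		vmw_cmdbuf_res_revert(&staged);
 *	else
 *		vmw_cmdbuf_res_commit(&staged);
 */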
Thomas Hellstrom | d5bde95 | 2014-01-31 10:12:10 +0100 | [diff] [blame] | 1144 | |
Thomas Hellstrom | d80efd5 | 2015-08-10 10:39:35 -0700 | [diff] [blame] | 1145 | /* |
| 1146 | * COTable management - vmwgfx_cotable.c |
| 1147 | */ |
| 1148 | extern const SVGACOTableType vmw_cotable_scrub_order[]; |
| 1149 | extern struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv, |
| 1150 | struct vmw_resource *ctx, |
| 1151 | u32 type); |
| 1152 | extern int vmw_cotable_notify(struct vmw_resource *res, int id); |
| 1153 | extern int vmw_cotable_scrub(struct vmw_resource *res, bool readback); |
| 1154 | extern void vmw_cotable_add_resource(struct vmw_resource *ctx, |
| 1155 | struct list_head *head); |
Thomas Hellstrom | c74c162 | 2012-11-21 12:10:26 +0100 | [diff] [blame] | 1156 | |
Thomas Hellstrom | 3eab3d9 | 2015-06-25 11:57:56 -0700 | [diff] [blame] | 1157 | /* |
| 1158 | * Command buffer management - vmwgfx_cmdbuf.c |
| 1159 | */ |
| 1160 | struct vmw_cmdbuf_man; |
| 1161 | struct vmw_cmdbuf_header; |
| 1162 | |
| 1163 | extern struct vmw_cmdbuf_man * |
| 1164 | vmw_cmdbuf_man_create(struct vmw_private *dev_priv); |
| 1165 | extern int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man, |
| 1166 | size_t size, size_t default_size); |
| 1167 | extern void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man); |
| 1168 | extern void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man); |
| 1169 | extern int vmw_cmdbuf_idle(struct vmw_cmdbuf_man *man, bool interruptible, |
| 1170 | unsigned long timeout); |
| 1171 | extern void *vmw_cmdbuf_reserve(struct vmw_cmdbuf_man *man, size_t size, |
| 1172 | int ctx_id, bool interruptible, |
| 1173 | struct vmw_cmdbuf_header *header); |
| 1174 | extern void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size, |
| 1175 | struct vmw_cmdbuf_header *header, |
| 1176 | bool flush); |
Thomas Hellstrom | 3eab3d9 | 2015-06-25 11:57:56 -0700 | [diff] [blame] | 1177 | extern void *vmw_cmdbuf_alloc(struct vmw_cmdbuf_man *man, |
| 1178 | size_t size, bool interruptible, |
| 1179 | struct vmw_cmdbuf_header **p_header); |
| 1180 | extern void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header); |
| 1181 | extern int vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man, |
| 1182 | bool interruptible); |
Thomas Hellstrom | ef36990 | 2017-08-24 08:06:28 +0200 | [diff] [blame] | 1183 | extern void vmw_cmdbuf_irqthread(struct vmw_cmdbuf_man *man); |
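/*
 * Submission through the command buffer manager is a reserve/commit pair;
 * passing a NULL header is assumed here to mean "use the current buffer",
 * and SVGA3D_INVALID_ID stands in for "no context". Illustrative sketch:
 *
 *	void *cmd = vmw_cmdbuf_reserve(man, size, SVGA3D_INVALID_ID,
 *				       true, NULL);
 *
 *	if (IS_ERR(cmd))
 *		return PTR_ERR(cmd);
 *	... fill in @size bytes of device commands at @cmd ...
 *	vmw_cmdbuf_commit(man, size, NULL, false);
 */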
Thomas Hellstrom | bf6f036 | 2012-11-09 12:26:15 +0000 | [diff] [blame] | 1184 | |
Thomas Hellstrom | 79273e1 | 2018-01-16 09:33:27 +0100 | [diff] [blame] | 1185 | /* CPU blit utilities - vmwgfx_blit.c */ |
| 1186 | |
| 1187 | /** |
| 1188 | * struct vmw_diff_cpy - CPU blit information structure |
| 1189 | * |
| 1190 | * @rect: The output bounding box rectangle. |
| 1191 | * @line: The current line of the blit. |
| 1192 | * @line_offset: Offset of the current line segment. |
| 1193 | * @cpp: Bytes per pixel (granularity information). |
| 1194 | * @do_cpy: Which copy function to use (vmw_memcpy() or vmw_diff_memcpy()). |
| 1195 | */ |
| 1196 | struct vmw_diff_cpy { |
| 1197 | struct drm_rect rect; |
| 1198 | size_t line; |
| 1199 | size_t line_offset; |
| 1200 | int cpp; |
| 1201 | void (*do_cpy)(struct vmw_diff_cpy *diff, u8 *dest, const u8 *src, |
| 1202 | size_t n); |
| 1203 | }; |
| 1204 | |
| 1205 | #define VMW_CPU_BLIT_INITIALIZER { \ |
| 1206 | .do_cpy = vmw_memcpy, \ |
| 1207 | } |
| 1208 | |
| 1209 | #define VMW_CPU_BLIT_DIFF_INITIALIZER(_cpp) { \ |
| 1210 | .line = 0, \ |
| 1211 | .line_offset = 0, \ |
| 1212 | .rect = { .x1 = INT_MAX/2, \ |
| 1213 | .y1 = INT_MAX/2, \ |
| 1214 | .x2 = INT_MIN/2, \ |
| 1215 | .y2 = INT_MIN/2 \ |
| 1216 | }, \ |
| 1217 | .cpp = _cpp, \ |
| 1218 | .do_cpy = vmw_diff_memcpy, \ |
| 1219 | } |
| 1220 | |
| 1221 | void vmw_diff_memcpy(struct vmw_diff_cpy *diff, u8 *dest, const u8 *src, |
| 1222 | size_t n); |
| 1223 | |
| 1224 | void vmw_memcpy(struct vmw_diff_cpy *diff, u8 *dest, const u8 *src, size_t n); |
| 1225 | |
| 1226 | int vmw_bo_cpu_blit(struct ttm_buffer_object *dst, |
| 1227 | u32 dst_offset, u32 dst_stride, |
| 1228 | struct ttm_buffer_object *src, |
| 1229 | u32 src_offset, u32 src_stride, |
| 1230 | u32 w, u32 h, |
| 1231 | struct vmw_diff_cpy *diff); |
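/*
 * Hedged usage sketch: a diff-aware CPU blit that records the damaged
 * rectangle so only that region needs flushing afterwards (the 4 assumes
 * a 32bpp framebuffer; flush_damage() is a hypothetical helper):
 *
 *	struct vmw_diff_cpy diff = VMW_CPU_BLIT_DIFF_INITIALIZER(4);
 *
 *	ret = vmw_bo_cpu_blit(dst_bo, dst_offset, dst_stride,
 *			      src_bo, src_offset, src_stride,
 *			      width, height, &diff);
 *	if (ret == 0 && drm_rect_visible(&diff.rect))
 *		flush_damage(&diff.rect);
 */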
Jakob Bornecrantz | fb1d973 | 2009-12-10 00:19:58 +0000 | [diff] [blame] | 1232 | |
Thomas Hellstrom | 6ff67ae | 2018-06-21 09:39:21 +0200 | [diff] [blame^] | 1233 | /* Host messaging - vmwgfx_msg.c */ |
| 1234 | int vmw_host_get_guestinfo(const char *guest_info_param, |
| 1235 | char *buffer, size_t *length); |
| 1236 | int vmw_host_log(const char *log); |
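/*
 * Illustrative sketch of the host messaging helpers (the "ip" guestinfo
 * key is an example, not a documented contract):
 *
 *	char buf[64];
 *	size_t len = sizeof(buf);
 *
 *	if (vmw_host_get_guestinfo("ip", buf, &len) == 0)
 *		vmw_host_log("vmwgfx: queried guest ip");
 */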
| 1237 | |
Jakob Bornecrantz | fb1d973 | 2009-12-10 00:19:58 +0000 | [diff] [blame] | 1238 | /** |
| 1239 | * Inline helper functions |
| 1240 | */ |
| 1241 | |
| 1242 | static inline void vmw_surface_unreference(struct vmw_surface **srf) |
| 1243 | { |
| 1244 | struct vmw_surface *tmp_srf = *srf; |
| 1245 | struct vmw_resource *res = &tmp_srf->res; |
| 1246 | *srf = NULL; |
Thomas Hellstrom | bf6f036 | 2012-11-09 12:26:15 +0000 | [diff] [blame] | 1247 | |
| 1248 | vmw_resource_unreference(&res); |
| 1249 | } |
| 1250 | |
| 1251 | static inline struct vmw_surface *vmw_surface_reference(struct vmw_surface *srf) |
| 1252 | { |
Jakob Bornecrantz | fb1d973 | 2009-12-10 00:19:58 +0000 | [diff] [blame] | 1253 | (void) vmw_resource_reference(&srf->res); |
| 1254 | return srf; |
| 1255 | } |
| 1256 | |
Thomas Hellstrom | f1d34bf | 2018-06-19 15:02:16 +0200 | [diff] [blame] | 1257 | static inline void vmw_bo_unreference(struct vmw_buffer_object **buf) |
Jakob Bornecrantz | fb1d973 | 2009-12-10 00:19:58 +0000 | [diff] [blame] | 1258 | { |
Thomas Hellstrom | f1d34bf | 2018-06-19 15:02:16 +0200 | [diff] [blame] | 1259 | struct vmw_buffer_object *tmp_buf = *buf; |
Jakob Bornecrantz | fb1d973 | 2009-12-10 00:19:58 +0000 | [diff] [blame] | 1260 | |
| 1261 | *buf = NULL; |
| 1262 | if (tmp_buf != NULL) { |
| 1263 | struct ttm_buffer_object *bo = &tmp_buf->base; |
| 1264 | |
| 1265 | ttm_bo_unref(&bo); |
| 1266 | } |
| 1267 | } |
| 1268 | |
Thomas Hellstrom | f1d34bf | 2018-06-19 15:02:16 +0200 | [diff] [blame] | 1269 | static inline struct vmw_buffer_object * |
| 1270 | vmw_bo_reference(struct vmw_buffer_object *buf) |
Jakob Bornecrantz | fb1d973 | 2009-12-10 00:19:58 +0000 | [diff] [blame] | 1271 | { |
| 1272 | if (ttm_bo_reference(&buf->base)) |
| 1273 | return buf; |
| 1274 | return NULL; |
| 1275 | } |
| 1276 | |
Thomas Hellstrom | ae2a104 | 2011-09-01 20:18:44 +0000 | [diff] [blame] | 1277 | static inline struct ttm_mem_global *vmw_mem_glob(struct vmw_private *dev_priv) |
| 1278 | { |
| 1279 | return (struct ttm_mem_global *) dev_priv->mem_global_ref.object; |
| 1280 | } |
Thomas Hellstrom | 153b3d5 | 2015-06-25 10:47:43 -0700 | [diff] [blame] | 1281 | |
| 1282 | static inline void vmw_fifo_resource_inc(struct vmw_private *dev_priv) |
| 1283 | { |
| 1284 | atomic_inc(&dev_priv->num_fifo_resources); |
| 1285 | } |
| 1286 | |
| 1287 | static inline void vmw_fifo_resource_dec(struct vmw_private *dev_priv) |
| 1288 | { |
| 1289 | atomic_dec(&dev_priv->num_fifo_resources); |
| 1290 | } |
Thomas Hellstrom | b76ff5e | 2015-10-28 10:44:04 +0100 | [diff] [blame] | 1291 | |
| 1292 | /** |
| 1293 | * vmw_mmio_read - Perform an MMIO read from volatile memory |
| 1294 | * |
| 1295 | * @addr: The address to read from |
| 1296 | * |
| 1297 | * This function is intended to be equivalent to ioread32() on |
| 1298 | * memremap'd memory, but without byteswapping. |
| 1299 | */ |
| 1300 | static inline u32 vmw_mmio_read(u32 *addr) |
| 1301 | { |
| 1302 | return READ_ONCE(*addr); |
| 1303 | } |
| 1304 | |
| 1305 | /** |
| 1306 | * vmw_mmio_write - Perform an MMIO write to volatile memory |
| 1307 | * |
| 1308 | * @value: The value to write |
|      | * @addr: The address to write to |
| 1309 | * |
| 1310 | * This function is intended to be equivalent to iowrite32() on |
| 1311 | * memremap'd memory, but without byteswapping. |
| 1312 | */ |
| 1313 | static inline void vmw_mmio_write(u32 value, u32 *addr) |
| 1314 | { |
| 1315 | WRITE_ONCE(*addr, value); |
| 1316 | } |
Jakob Bornecrantz | fb1d973 | 2009-12-10 00:19:58 +0000 | [diff] [blame] | 1317 | #endif |