/**************************************************************************
 *
 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#ifndef _VMWGFX_DRV_H_
#define _VMWGFX_DRV_H_

#include "vmwgfx_reg.h"
#include <drm/drmP.h>
#include <drm/vmwgfx_drm.h>
#include <drm/drm_hashtab.h>
#include <drm/drm_auth.h>
#include <linux/suspend.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_object.h>
#include <drm/ttm/ttm_lock.h>
#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_module.h>
#include "vmwgfx_fence.h"
#include <linux/sync_file.h>

#define VMWGFX_DRIVER_NAME "vmwgfx"
#define VMWGFX_DRIVER_DATE "20180322"
#define VMWGFX_DRIVER_MAJOR 2
#define VMWGFX_DRIVER_MINOR 14
#define VMWGFX_DRIVER_PATCHLEVEL 1
#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
#define VMWGFX_MAX_RELOCATIONS 2048
#define VMWGFX_MAX_VALIDATIONS 2048
#define VMWGFX_MAX_DISPLAYS 16
#define VMWGFX_CMD_BOUNCE_INIT_SIZE 32768
#define VMWGFX_ENABLE_SCREEN_TARGET_OTABLE 1

/*
 * Perhaps we should have sysfs entries for these.
 */
#define VMWGFX_NUM_GB_CONTEXT 256
#define VMWGFX_NUM_GB_SHADER 20000
#define VMWGFX_NUM_GB_SURFACE 32768
#define VMWGFX_NUM_GB_SCREEN_TARGET VMWGFX_MAX_DISPLAYS
#define VMWGFX_NUM_DXCONTEXT 256
#define VMWGFX_NUM_DXQUERY 512
#define VMWGFX_NUM_MOB (VMWGFX_NUM_GB_CONTEXT +\
			VMWGFX_NUM_GB_SHADER +\
			VMWGFX_NUM_GB_SURFACE +\
			VMWGFX_NUM_GB_SCREEN_TARGET)

#define VMW_PL_GMR (TTM_PL_PRIV + 0)
#define VMW_PL_FLAG_GMR (TTM_PL_FLAG_PRIV << 0)
#define VMW_PL_MOB (TTM_PL_PRIV + 1)
#define VMW_PL_FLAG_MOB (TTM_PL_FLAG_PRIV << 1)

#define VMW_RES_CONTEXT ttm_driver_type0
#define VMW_RES_SURFACE ttm_driver_type1
#define VMW_RES_STREAM ttm_driver_type2
#define VMW_RES_FENCE ttm_driver_type3
#define VMW_RES_SHADER ttm_driver_type4

struct vmw_fpriv {
	struct drm_master *locked_master;
	struct ttm_object_file *tfile;
	bool gb_aware;
};

struct vmw_buffer_object {
	struct ttm_buffer_object base;
	struct list_head res_list;
	s32 pin_count;
	/* Not ref-counted. Protected by binding_mutex */
	struct vmw_resource *dx_query_ctx;
	/* Protected by reservation */
	struct ttm_bo_kmap_obj map;
};

/**
 * struct vmw_validate_buffer - Carries validation info about buffers.
 *
 * @base: Validation info for TTM.
 * @hash: Hash entry for quick lookup of the TTM buffer object.
 *
 * This structure also carries driver-private validation info on top of
 * the info needed by TTM.
 */
struct vmw_validate_buffer {
	struct ttm_validate_buffer base;
	struct drm_hash_item hash;
	bool validate_as_mob;
};

struct vmw_res_func;
struct vmw_resource {
	struct kref kref;
	struct vmw_private *dev_priv;
	int id;
	bool avail;
	unsigned long backup_size;
	bool res_dirty; /* Protected by backup buffer reserved */
	bool backup_dirty; /* Protected by backup buffer reserved */
	struct vmw_buffer_object *backup;
	unsigned long backup_offset;
	unsigned long pin_count; /* Protected by resource reserved */
	const struct vmw_res_func *func;
	struct list_head lru_head; /* Protected by the resource lock */
	struct list_head mob_head; /* Protected by @backup reserved */
	struct list_head binding_head; /* Protected by binding_mutex */
	void (*res_free) (struct vmw_resource *res);
	void (*hw_destroy) (struct vmw_resource *res);
};


/*
 * Resources that are managed using ioctls.
 */
enum vmw_res_type {
	vmw_res_context,
	vmw_res_surface,
	vmw_res_stream,
	vmw_res_shader,
	vmw_res_dx_context,
	vmw_res_cotable,
	vmw_res_view,
	vmw_res_max
};

/*
 * Resources that are managed using command streams.
 */
enum vmw_cmdbuf_res_type {
	vmw_cmdbuf_res_shader,
	vmw_cmdbuf_res_view
};

struct vmw_cmdbuf_res_manager;

struct vmw_cursor_snooper {
	size_t age;
	uint32_t *image;
};

struct vmw_framebuffer;
struct vmw_surface_offset;

struct vmw_surface {
	struct vmw_resource res;
	uint32_t flags;
	uint32_t format;
	uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES];
	struct drm_vmw_size base_size;
	struct drm_vmw_size *sizes;
	uint32_t num_sizes;
	bool scanout;
	uint32_t array_size;
	/* TODO: so far just an extra pointer */
	struct vmw_cursor_snooper snooper;
	struct vmw_surface_offset *offsets;
	SVGA3dTextureFilter autogen_filter;
	uint32_t multisample_count;
	struct list_head view_list;
};

struct vmw_marker_queue {
	struct list_head head;
	u64 lag;
	u64 lag_time;
	spinlock_t lock;
};

struct vmw_fifo_state {
	unsigned long reserved_size;
	u32 *dynamic_buffer;
	u32 *static_buffer;
	unsigned long static_buffer_size;
	bool using_bounce_buffer;
	uint32_t capabilities;
	struct mutex fifo_mutex;
	struct rw_semaphore rwsem;
	struct vmw_marker_queue marker_queue;
	bool dx;
};

struct vmw_relocation {
	SVGAMobId *mob_loc;
	SVGAGuestPtr *location;
	uint32_t index;
};

/**
 * struct vmw_res_cache_entry - resource information cache entry
 *
 * @valid: Whether the entry is valid, which also implies that the execbuf
 * code holds a reference to the resource, and it's placed on the
 * validation list.
 * @handle: User-space handle of a resource.
 * @res: Non-ref-counted pointer to the resource.
 *
 * Used to avoid frequent repeated user-space handle lookups of the
 * same resource.
 */
struct vmw_res_cache_entry {
	bool valid;
	uint32_t handle;
	struct vmw_resource *res;
	struct vmw_resource_val_node *node;
};

/**
 * enum vmw_dma_map_mode - indicate how to perform TTM page dma mappings.
 */
enum vmw_dma_map_mode {
	vmw_dma_phys,           /* Use physical page addresses */
	vmw_dma_alloc_coherent, /* Use TTM coherent pages */
	vmw_dma_map_populate,   /* Unmap from DMA just after unpopulate */
	vmw_dma_map_bind,       /* Unmap from DMA just before unbind */
	vmw_dma_map_max
};

/**
 * struct vmw_sg_table - Scatter/gather table for binding, with additional
 * device-specific information.
 *
 * @sgt: Pointer to a struct sg_table with binding information
 * @num_regions: Number of regions with device-address contiguous pages
 */
struct vmw_sg_table {
	enum vmw_dma_map_mode mode;
	struct page **pages;
	const dma_addr_t *addrs;
	struct sg_table *sgt;
	unsigned long num_regions;
	unsigned long num_pages;
};

/**
 * struct vmw_piter - Page iterator that iterates over a list of pages
 * and DMA addresses that could be either a scatter-gather list or
 * arrays
 *
 * @pages: Array of page pointers to the pages.
 * @addrs: DMA addresses to the pages if coherent pages are used.
 * @iter: Scatter-gather page iterator. Current position in SG list.
 * @i: Current position in arrays.
 * @num_pages: Number of pages total.
 * @next: Function to advance the iterator. Returns false if past the list
 * of pages, true otherwise.
 * @dma_address: Function to return the DMA address of the current page.
 */
struct vmw_piter {
	struct page **pages;
	const dma_addr_t *addrs;
	struct sg_page_iter iter;
	unsigned long i;
	unsigned long num_pages;
	bool (*next)(struct vmw_piter *);
	dma_addr_t (*dma_address)(struct vmw_piter *);
	struct page *(*page)(struct vmw_piter *);
};

/*
 * enum vmw_display_unit_type - Describes the display unit
 */
enum vmw_display_unit_type {
	vmw_du_invalid = 0,
	vmw_du_legacy,
	vmw_du_screen_object,
	vmw_du_screen_target
};


struct vmw_sw_context {
	struct drm_open_hash res_ht;
	bool res_ht_initialized;
Thomas Hellstrom922ade02011-10-04 20:13:17 +0200296 bool kernel; /**< is the called made from the kernel */
Thomas Hellstromd5bde952014-01-31 10:12:10 +0100297 struct vmw_fpriv *fp;
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000298 struct list_head validate_nodes;
299 struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS];
300 uint32_t cur_reloc;
Thomas Hellstromc0951b72012-11-20 12:19:35 +0000301 struct vmw_validate_buffer val_bufs[VMWGFX_MAX_VALIDATIONS];
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000302 uint32_t cur_val_buf;
Thomas Hellstrombe38ab62011-08-31 07:42:54 +0000303 uint32_t *cmd_bounce;
304 uint32_t cmd_bounce_size;
Thomas Hellstromf18c8842011-10-04 20:13:31 +0200305 struct list_head resource_list;
Thomas Hellstromd80efd52015-08-10 10:39:35 -0700306 struct list_head ctx_resource_list; /* For contexts and cotables */
Thomas Hellstromf1d34bf2018-06-19 15:02:16 +0200307 struct vmw_buffer_object *cur_query_bo;
Thomas Hellstromc0951b72012-11-20 12:19:35 +0000308 struct list_head res_relocations;
309 uint32_t *buf_start;
310 struct vmw_res_cache_entry res_cache[vmw_res_max];
311 struct vmw_resource *last_query_ctx;
312 bool needs_post_query_barrier;
313 struct vmw_resource *error_resource;
Thomas Hellstromd80efd52015-08-10 10:39:35 -0700314 struct vmw_ctx_binding_state *staged_bindings;
315 bool staged_bindings_inuse;
Thomas Hellstrom18e4a462014-06-09 12:39:22 +0200316 struct list_head staged_cmd_res;
Thomas Hellstromd80efd52015-08-10 10:39:35 -0700317 struct vmw_resource_val_node *dx_ctx_node;
Thomas Hellstromf1d34bf2018-06-19 15:02:16 +0200318 struct vmw_buffer_object *dx_query_mob;
Thomas Hellstromd80efd52015-08-10 10:39:35 -0700319 struct vmw_resource *dx_query_ctx;
320 struct vmw_cmdbuf_res_manager *man;
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000321};
322
323struct vmw_legacy_display;
324struct vmw_overlay;
325
326struct vmw_master {
327 struct ttm_lock lock;
328};
329
Thomas Hellstrom7c4f7782010-06-01 11:38:17 +0200330struct vmw_vga_topology_state {
331 uint32_t width;
332 uint32_t height;
333 uint32_t primary;
334 uint32_t pos_x;
335 uint32_t pos_y;
336};
337
Thomas Hellstromd80efd52015-08-10 10:39:35 -0700338
339/*
340 * struct vmw_otable - Guest Memory OBject table metadata
341 *
342 * @size: Size of the table (page-aligned).
343 * @page_table: Pointer to a struct vmw_mob holding the page table.
344 */
345struct vmw_otable {
346 unsigned long size;
347 struct vmw_mob *page_table;
348 bool enabled;
349};
350
351struct vmw_otable_batch {
352 unsigned num_otables;
353 struct vmw_otable *otables;
354 struct vmw_resource *context;
355 struct ttm_buffer_object *otable_bo;
356};
357
Thomas Hellstromef369902017-08-24 08:06:28 +0200358enum {
359 VMW_IRQTHREAD_FENCE,
360 VMW_IRQTHREAD_CMDBUF,
361 VMW_IRQTHREAD_MAX
362};
363
struct vmw_private {
	struct ttm_bo_device bdev;
	struct ttm_bo_global_ref bo_global_ref;
	struct drm_global_reference mem_global_ref;

	struct vmw_fifo_state fifo;

	struct drm_device *dev;
	unsigned long vmw_chipset;
	unsigned int io_start;
	uint32_t vram_start;
	uint32_t vram_size;
	uint32_t prim_bb_mem;
	uint32_t mmio_start;
	uint32_t mmio_size;
	uint32_t fb_max_width;
	uint32_t fb_max_height;
	uint32_t texture_max_width;
	uint32_t texture_max_height;
	uint32_t stdu_max_width;
	uint32_t stdu_max_height;
	uint32_t initial_width;
	uint32_t initial_height;
	u32 *mmio_virt;
	uint32_t capabilities;
	uint32_t max_gmr_ids;
	uint32_t max_gmr_pages;
	uint32_t max_mob_pages;
	uint32_t max_mob_size;
	uint32_t memory_size;
	bool has_gmr;
	bool has_mob;
	spinlock_t hw_lock;
	spinlock_t cap_lock;
	bool has_dx;
	bool assume_16bpp;

	/*
	 * VGA registers.
	 */

	struct vmw_vga_topology_state vga_save[VMWGFX_MAX_DISPLAYS];
	uint32_t vga_width;
	uint32_t vga_height;
	uint32_t vga_bpp;
	uint32_t vga_bpl;
	uint32_t vga_pitchlock;

	uint32_t num_displays;

	/*
	 * Currently requested_layout_mutex is used to protect the gui
	 * positioning state in the display units. With that use case this
	 * mutex is currently only taken during the layout ioctl and atomic
	 * check_modeset. Other display unit state can be protected with this
	 * mutex, but that needs careful consideration.
	 */
	struct mutex requested_layout_mutex;

	/*
	 * Framebuffer info.
	 */

	void *fb_info;
	enum vmw_display_unit_type active_display_unit;
	struct vmw_legacy_display *ldu_priv;
	struct vmw_overlay *overlay_priv;
	struct drm_property *hotplug_mode_update_property;
	struct drm_property *implicit_placement_property;
	unsigned num_implicit;
	struct vmw_framebuffer *implicit_fb;
	struct mutex global_kms_state_mutex;
	spinlock_t cursor_lock;
	struct drm_atomic_state *suspend_state;

	/*
	 * Context and surface management.
	 */

	rwlock_t resource_lock;
	struct idr res_idr[vmw_res_max];
	/*
	 * Block lastclose from racing with firstopen.
	 */

	struct mutex init_mutex;

	/*
	 * A resource manager for kernel-only surfaces and
	 * contexts.
	 */

	struct ttm_object_device *tdev;

	/*
	 * Fencing and IRQs.
	 */

	atomic_t marker_seq;
	wait_queue_head_t fence_queue;
	wait_queue_head_t fifo_queue;
	spinlock_t waiter_lock;
	int fence_queue_waiters; /* Protected by waiter_lock */
	int goal_queue_waiters; /* Protected by waiter_lock */
	int cmdbuf_waiters; /* Protected by waiter_lock */
	int error_waiters; /* Protected by waiter_lock */
	int fifo_queue_waiters; /* Protected by waiter_lock */
	uint32_t last_read_seqno;
	struct vmw_fence_manager *fman;
	uint32_t irq_mask; /* Updates protected by waiter_lock */

	/*
	 * Device state
	 */

	uint32_t traces_state;
	uint32_t enable_state;
	uint32_t config_done_state;

	/**
	 * Execbuf
	 */
	/**
	 * Protected by the cmdbuf mutex.
	 */

	struct vmw_sw_context ctx;
	struct mutex cmdbuf_mutex;
	struct mutex binding_mutex;

	/**
	 * Operating mode.
	 */

	bool stealth;
	bool enable_fb;
	spinlock_t svga_lock;

	/**
	 * Master management.
	 */

	struct vmw_master *active_master;
	struct vmw_master fbdev_master;
	struct notifier_block pm_nb;
	bool refuse_hibernation;
	bool suspend_locked;

	struct mutex release_mutex;
	atomic_t num_fifo_resources;

	/*
	 * Replace this with an rwsem as soon as we have down_xx_interruptible()
	 */
	struct ttm_lock reservation_sem;

	/*
	 * Query processing. These members
	 * are protected by the cmdbuf mutex.
	 */

	struct vmw_buffer_object *dummy_query_bo;
	struct vmw_buffer_object *pinned_bo;
	uint32_t query_cid;
	uint32_t query_cid_valid;
	bool dummy_query_bo_pinned;

	/*
	 * Surface swapping. The "surface_lru" list is protected by the
	 * resource lock in order to be able to destroy a surface and take
	 * it off the lru atomically. "used_memory_size" is currently
	 * protected by the cmdbuf mutex for simplicity.
	 */

	struct list_head res_lru[vmw_res_max];
	uint32_t used_memory_size;

	/*
	 * DMA mapping stuff.
	 */
	enum vmw_dma_map_mode map_mode;

	/*
	 * Guest Backed stuff
	 */
	struct vmw_otable_batch otable_batch;

	struct vmw_cmdbuf_man *cman;
	DECLARE_BITMAP(irqthread_pending, VMW_IRQTHREAD_MAX);
};

static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
{
	return container_of(res, struct vmw_surface, res);
}

static inline struct vmw_private *vmw_priv(struct drm_device *dev)
{
	return (struct vmw_private *)dev->dev_private;
}

static inline struct vmw_fpriv *vmw_fpriv(struct drm_file *file_priv)
{
	return (struct vmw_fpriv *)file_priv->driver_priv;
}

static inline struct vmw_master *vmw_master(struct drm_master *master)
{
	return (struct vmw_master *) master->driver_priv;
}

/*
 * The locking here is fine-grained, so that it is performed once
 * for every read- and write operation. This is of course costly, but we
 * don't perform much register access in the timing critical paths anyway.
 * Instead we have the extra benefit of being sure that we don't forget
 * the hw lock around register accesses.
 */
static inline void vmw_write(struct vmw_private *dev_priv,
			     unsigned int offset, uint32_t value)
{
	spin_lock(&dev_priv->hw_lock);
	outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
	outl(value, dev_priv->io_start + VMWGFX_VALUE_PORT);
	spin_unlock(&dev_priv->hw_lock);
}

static inline uint32_t vmw_read(struct vmw_private *dev_priv,
				unsigned int offset)
{
	u32 val;

	spin_lock(&dev_priv->hw_lock);
	outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
	val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT);
	spin_unlock(&dev_priv->hw_lock);

	return val;
}
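
/*
 * Illustrative sketch only (hypothetical helper, not part of the driver):
 * a read-modify-write of a device register through vmw_read()/vmw_write().
 * Note that the two accesses take the hw lock separately, so the sequence
 * is not atomic with respect to other register users.
 */
static inline void vmw_write_bits_example(struct vmw_private *dev_priv,
					  unsigned int offset, uint32_t bits)
{
	uint32_t val = vmw_read(dev_priv, offset);

	vmw_write(dev_priv, offset, val | bits);
}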

extern void vmw_svga_enable(struct vmw_private *dev_priv);
extern void vmw_svga_disable(struct vmw_private *dev_priv);


/**
 * GMR utilities - vmwgfx_gmr.c
 */

extern int vmw_gmr_bind(struct vmw_private *dev_priv,
			const struct vmw_sg_table *vsgt,
			unsigned long num_pages,
			int gmr_id);
extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id);

/**
 * Resource utilities - vmwgfx_resource.c
 */
struct vmw_user_resource_conv;

extern void vmw_resource_unreference(struct vmw_resource **p_res);
extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
extern struct vmw_resource *
vmw_resource_reference_unless_doomed(struct vmw_resource *res);
extern int vmw_resource_validate(struct vmw_resource *res);
extern int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
				bool no_backup);
extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
				  struct ttm_object_file *tfile,
				  uint32_t handle,
				  struct vmw_surface **out_surf,
				  struct vmw_buffer_object **out_buf);
extern int vmw_user_resource_lookup_handle(
	struct vmw_private *dev_priv,
	struct ttm_object_file *tfile,
	uint32_t handle,
	const struct vmw_user_resource_conv *converter,
	struct vmw_resource **p_res);
extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv);
extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv);
extern int vmw_user_stream_lookup(struct vmw_private *dev_priv,
				  struct ttm_object_file *tfile,
				  uint32_t *inout_id,
				  struct vmw_resource **out);
extern void vmw_resource_unreserve(struct vmw_resource *res,
				   bool switch_backup,
				   struct vmw_buffer_object *new_backup,
				   unsigned long new_backup_offset);
extern void vmw_query_move_notify(struct ttm_buffer_object *bo,
				  struct ttm_mem_reg *mem);
extern int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob);
extern void vmw_resource_evict_all(struct vmw_private *dev_priv);
extern void vmw_resource_unbind_list(struct vmw_buffer_object *vbo);

/**
 * Buffer object helper functions - vmwgfx_bo.c
 */
extern int vmw_bo_pin_in_placement(struct vmw_private *vmw_priv,
				   struct vmw_buffer_object *bo,
				   struct ttm_placement *placement,
				   bool interruptible);
extern int vmw_bo_pin_in_vram(struct vmw_private *dev_priv,
			      struct vmw_buffer_object *buf,
			      bool interruptible);
extern int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
				     struct vmw_buffer_object *buf,
				     bool interruptible);
extern int vmw_bo_pin_in_start_of_vram(struct vmw_private *vmw_priv,
				       struct vmw_buffer_object *bo,
				       bool interruptible);
extern int vmw_bo_unpin(struct vmw_private *vmw_priv,
			struct vmw_buffer_object *bo,
			bool interruptible);
extern void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *buf,
				 SVGAGuestPtr *ptr);
extern void vmw_bo_pin_reserved(struct vmw_buffer_object *bo, bool pin);
extern void vmw_bo_bo_free(struct ttm_buffer_object *bo);
extern int vmw_bo_init(struct vmw_private *dev_priv,
		       struct vmw_buffer_object *vmw_bo,
		       size_t size, struct ttm_placement *placement,
		       bool interruptible,
		       void (*bo_free)(struct ttm_buffer_object *bo));
extern int vmw_user_bo_verify_access(struct ttm_buffer_object *bo,
				     struct ttm_object_file *tfile);
extern int vmw_user_bo_alloc(struct vmw_private *dev_priv,
			     struct ttm_object_file *tfile,
			     uint32_t size,
			     bool shareable,
			     uint32_t *handle,
			     struct vmw_buffer_object **p_dma_buf,
			     struct ttm_base_object **p_base);
extern int vmw_user_bo_reference(struct ttm_object_file *tfile,
				 struct vmw_buffer_object *dma_buf,
				 uint32_t *handle);
extern int vmw_bo_alloc_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv);
extern int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv);
extern int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file_priv);
extern int vmw_user_bo_lookup(struct ttm_object_file *tfile,
			      uint32_t id, struct vmw_buffer_object **out,
			      struct ttm_base_object **base);
extern void vmw_bo_fence_single(struct ttm_buffer_object *bo,
				struct vmw_fence_obj *fence);
extern void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo);
extern void vmw_bo_unmap(struct vmw_buffer_object *vbo);
extern void vmw_bo_move_notify(struct ttm_buffer_object *bo,
			       struct ttm_mem_reg *mem);
extern void vmw_bo_swap_notify(struct ttm_buffer_object *bo);
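
/*
 * Illustrative sketch only (hypothetical helper, not part of the driver):
 * allocating and initializing a driver buffer object with vmw_bo_init().
 * Assumes kzalloc()/GFP_KERNEL are available in the including translation
 * unit (<linux/slab.h>); the placement is caller-supplied and error
 * handling is kept deliberately minimal.
 */
static inline struct vmw_buffer_object *
vmw_bo_create_example(struct vmw_private *dev_priv, size_t size,
		      struct ttm_placement *placement)
{
	struct vmw_buffer_object *vbo;
	int ret;

	vbo = kzalloc(sizeof(*vbo), GFP_KERNEL);
	if (!vbo)
		return NULL;

	/* vmw_bo_bo_free() frees the wrapper when the base object dies. */
	ret = vmw_bo_init(dev_priv, vbo, size, placement, true,
			  vmw_bo_bo_free);
	if (ret)
		return NULL;

	return vbo;
}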

/**
 * Misc Ioctl functionality - vmwgfx_ioctl.c
 */

extern int vmw_getparam_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv);
extern int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
extern int vmw_present_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
extern int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
				      struct drm_file *file_priv);
extern __poll_t vmw_fops_poll(struct file *filp,
			      struct poll_table_struct *wait);
extern ssize_t vmw_fops_read(struct file *filp, char __user *buffer,
			     size_t count, loff_t *offset);

/**
 * Fifo utilities - vmwgfx_fifo.c
 */

extern int vmw_fifo_init(struct vmw_private *dev_priv,
			 struct vmw_fifo_state *fifo);
extern void vmw_fifo_release(struct vmw_private *dev_priv,
			     struct vmw_fifo_state *fifo);
extern void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes);
extern void *
vmw_fifo_reserve_dx(struct vmw_private *dev_priv, uint32_t bytes, int ctx_id);
extern void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes);
extern void vmw_fifo_commit_flush(struct vmw_private *dev_priv, uint32_t bytes);
extern int vmw_fifo_send_fence(struct vmw_private *dev_priv,
			       uint32_t *seqno);
extern void vmw_fifo_ping_host_locked(struct vmw_private *, uint32_t reason);
extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason);
extern bool vmw_fifo_have_3d(struct vmw_private *dev_priv);
extern bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv);
extern int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
				     uint32_t cid);
extern int vmw_fifo_flush(struct vmw_private *dev_priv,
			  bool interruptible);
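
/*
 * Illustrative sketch only (hypothetical helper, not part of the driver):
 * the usual reserve/fill/commit pattern for emitting data through the FIFO.
 * A real caller would reserve space for one of the SVGA command structures
 * rather than a bare u32.
 */
static inline int vmw_fifo_emit_u32_example(struct vmw_private *dev_priv,
					    u32 value)
{
	u32 *cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));

	if (unlikely(cmd == NULL))
		return -ENOMEM;

	*cmd = value;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}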

/**
 * TTM glue - vmwgfx_ttm_glue.c
 */

extern int vmw_ttm_global_init(struct vmw_private *dev_priv);
extern void vmw_ttm_global_release(struct vmw_private *dev_priv);
extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);

/**
 * TTM buffer object driver - vmwgfx_ttm_buffer.c
 */

extern const size_t vmw_tt_size;
extern struct ttm_placement vmw_vram_placement;
extern struct ttm_placement vmw_vram_ne_placement;
extern struct ttm_placement vmw_vram_sys_placement;
extern struct ttm_placement vmw_vram_gmr_placement;
extern struct ttm_placement vmw_vram_gmr_ne_placement;
extern struct ttm_placement vmw_sys_placement;
extern struct ttm_placement vmw_sys_ne_placement;
extern struct ttm_placement vmw_evictable_placement;
extern struct ttm_placement vmw_srf_placement;
extern struct ttm_placement vmw_mob_placement;
extern struct ttm_placement vmw_mob_ne_placement;
extern struct ttm_placement vmw_nonfixed_placement;
extern struct ttm_bo_driver vmw_bo_driver;
extern int vmw_dma_quiescent(struct drm_device *dev);
extern int vmw_bo_map_dma(struct ttm_buffer_object *bo);
extern void vmw_bo_unmap_dma(struct ttm_buffer_object *bo);
extern const struct vmw_sg_table *
vmw_bo_sg_table(struct ttm_buffer_object *bo);
extern void vmw_piter_start(struct vmw_piter *viter,
			    const struct vmw_sg_table *vsgt,
			    unsigned long p_offs);

/**
 * vmw_piter_next - Advance the iterator one page.
 *
 * @viter: Pointer to the iterator to advance.
 *
 * Returns false if past the list of pages, true otherwise.
 */
static inline bool vmw_piter_next(struct vmw_piter *viter)
{
	return viter->next(viter);
}

/**
 * vmw_piter_dma_addr - Return the DMA address of the current page.
 *
 * @viter: Pointer to the iterator
 *
 * Returns the DMA address of the page pointed to by @viter.
 */
static inline dma_addr_t vmw_piter_dma_addr(struct vmw_piter *viter)
{
	return viter->dma_address(viter);
}

/**
 * vmw_piter_page - Return a pointer to the current page.
 *
 * @viter: Pointer to the iterator
 *
 * Returns a pointer to the page pointed to by @viter.
 */
static inline struct page *vmw_piter_page(struct vmw_piter *viter)
{
	return viter->page(viter);
}
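
/*
 * Illustrative sketch only (hypothetical helper, not part of the driver):
 * walking all pages of a bound buffer object with a vmw_piter, assuming
 * @vsgt came from vmw_bo_sg_table(). The iterator is advanced with
 * vmw_piter_next() before each page is touched, following the driver's
 * own usage pattern.
 */
static inline unsigned long
vmw_piter_count_example(const struct vmw_sg_table *vsgt)
{
	struct vmw_piter viter;
	unsigned long count = 0;

	vmw_piter_start(&viter, vsgt, 0);
	while (vmw_piter_next(&viter))
		++count;

	return count;
}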

/**
 * Command submission - vmwgfx_execbuf.c
 */

extern int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
			     struct drm_file *file_priv, size_t size);
extern int vmw_execbuf_process(struct drm_file *file_priv,
			       struct vmw_private *dev_priv,
			       void __user *user_commands,
			       void *kernel_commands,
			       uint32_t command_size,
			       uint64_t throttle_us,
			       uint32_t dx_context_handle,
			       struct drm_vmw_fence_rep __user
			       *user_fence_rep,
			       struct vmw_fence_obj **out_fence,
			       uint32_t flags);
extern void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
					    struct vmw_fence_obj *fence);
extern void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv);

extern int vmw_execbuf_fence_commands(struct drm_file *file_priv,
				      struct vmw_private *dev_priv,
				      struct vmw_fence_obj **p_fence,
				      uint32_t *p_handle);
extern void vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
					struct vmw_fpriv *vmw_fp,
					int ret,
					struct drm_vmw_fence_rep __user
					*user_fence_rep,
					struct vmw_fence_obj *fence,
					uint32_t fence_handle,
					int32_t out_fence_fd,
					struct sync_file *sync_file);
extern int vmw_validate_single_buffer(struct vmw_private *dev_priv,
				      struct ttm_buffer_object *bo,
				      bool interruptible,
				      bool validate_as_mob);
bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd);

/**
 * IRQs and waiting - vmwgfx_irq.c
 */

extern int vmw_wait_seqno(struct vmw_private *dev_priv, bool lazy,
			  uint32_t seqno, bool interruptible,
			  unsigned long timeout);
extern int vmw_irq_install(struct drm_device *dev, int irq);
extern void vmw_irq_uninstall(struct drm_device *dev);
extern bool vmw_seqno_passed(struct vmw_private *dev_priv,
			     uint32_t seqno);
extern int vmw_fallback_wait(struct vmw_private *dev_priv,
			     bool lazy,
			     bool fifo_idle,
			     uint32_t seqno,
			     bool interruptible,
			     unsigned long timeout);
extern void vmw_update_seqno(struct vmw_private *dev_priv,
			     struct vmw_fifo_state *fifo_state);
extern void vmw_seqno_waiter_add(struct vmw_private *dev_priv);
extern void vmw_seqno_waiter_remove(struct vmw_private *dev_priv);
extern void vmw_goal_waiter_add(struct vmw_private *dev_priv);
extern void vmw_goal_waiter_remove(struct vmw_private *dev_priv);
extern void vmw_generic_waiter_add(struct vmw_private *dev_priv, u32 flag,
				   int *waiter_count);
extern void vmw_generic_waiter_remove(struct vmw_private *dev_priv,
				      u32 flag, int *waiter_count);

/**
 * Rudimentary fence-like objects currently used only for throttling -
 * vmwgfx_marker.c
 */

extern void vmw_marker_queue_init(struct vmw_marker_queue *queue);
extern void vmw_marker_queue_takedown(struct vmw_marker_queue *queue);
extern int vmw_marker_push(struct vmw_marker_queue *queue,
			   uint32_t seqno);
extern int vmw_marker_pull(struct vmw_marker_queue *queue,
			   uint32_t signaled_seqno);
extern int vmw_wait_lag(struct vmw_private *dev_priv,
			struct vmw_marker_queue *queue, uint32_t us);

/**
 * Kernel framebuffer - vmwgfx_fb.c
 */

int vmw_fb_init(struct vmw_private *vmw_priv);
int vmw_fb_close(struct vmw_private *dev_priv);
int vmw_fb_off(struct vmw_private *vmw_priv);
int vmw_fb_on(struct vmw_private *vmw_priv);

/**
 * Kernel modesetting - vmwgfx_kms.c
 */

int vmw_kms_init(struct vmw_private *dev_priv);
int vmw_kms_close(struct vmw_private *dev_priv);
int vmw_kms_save_vga(struct vmw_private *vmw_priv);
int vmw_kms_restore_vga(struct vmw_private *vmw_priv);
int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv);
void vmw_kms_cursor_snoop(struct vmw_surface *srf,
			  struct ttm_object_file *tfile,
			  struct ttm_buffer_object *bo,
			  SVGA3dCmdHeader *header);
int vmw_kms_write_svga(struct vmw_private *vmw_priv,
		       unsigned width, unsigned height, unsigned pitch,
		       unsigned bpp, unsigned depth);
void vmw_kms_idle_workqueues(struct vmw_master *vmaster);
bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
				uint32_t pitch,
				uint32_t height);
u32 vmw_get_vblank_counter(struct drm_device *dev, unsigned int pipe);
int vmw_enable_vblank(struct drm_device *dev, unsigned int pipe);
void vmw_disable_vblank(struct drm_device *dev, unsigned int pipe);
int vmw_kms_present(struct vmw_private *dev_priv,
		    struct drm_file *file_priv,
		    struct vmw_framebuffer *vfb,
		    struct vmw_surface *surface,
		    uint32_t sid, int32_t destX, int32_t destY,
		    struct drm_vmw_rect *clips,
		    uint32_t num_clips);
int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv);
int vmw_kms_suspend(struct drm_device *dev);
int vmw_kms_resume(struct drm_device *dev);
void vmw_kms_lost_device(struct drm_device *dev);

int vmw_dumb_create(struct drm_file *file_priv,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args);

int vmw_dumb_map_offset(struct drm_file *file_priv,
			struct drm_device *dev, uint32_t handle,
			uint64_t *offset);
int vmw_dumb_destroy(struct drm_file *file_priv,
		     struct drm_device *dev,
		     uint32_t handle);
extern int vmw_resource_pin(struct vmw_resource *res, bool interruptible);
extern void vmw_resource_unpin(struct vmw_resource *res);
extern enum vmw_res_type vmw_res_type(const struct vmw_resource *res);

/**
 * Overlay control - vmwgfx_overlay.c
 */

int vmw_overlay_init(struct vmw_private *dev_priv);
int vmw_overlay_close(struct vmw_private *dev_priv);
int vmw_overlay_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv);
int vmw_overlay_stop_all(struct vmw_private *dev_priv);
int vmw_overlay_resume_all(struct vmw_private *dev_priv);
int vmw_overlay_pause_all(struct vmw_private *dev_priv);
int vmw_overlay_claim(struct vmw_private *dev_priv, uint32_t *out);
int vmw_overlay_unref(struct vmw_private *dev_priv, uint32_t stream_id);
int vmw_overlay_num_overlays(struct vmw_private *dev_priv);
int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv);

/**
 * GMR Id manager
 */

extern const struct ttm_mem_type_manager_func vmw_gmrid_manager_func;

/**
 * Prime - vmwgfx_prime.c
 */

extern const struct dma_buf_ops vmw_prime_dmabuf_ops;
extern int vmw_prime_fd_to_handle(struct drm_device *dev,
				  struct drm_file *file_priv,
				  int fd, u32 *handle);
extern int vmw_prime_handle_to_fd(struct drm_device *dev,
				  struct drm_file *file_priv,
				  uint32_t handle, uint32_t flags,
				  int *prime_fd);

/*
 * Memory object (MOB) management - vmwgfx_mob.c
 */
struct vmw_mob;
extern int vmw_mob_bind(struct vmw_private *dev_priv, struct vmw_mob *mob,
			const struct vmw_sg_table *vsgt,
			unsigned long num_data_pages, int32_t mob_id);
extern void vmw_mob_unbind(struct vmw_private *dev_priv,
			   struct vmw_mob *mob);
extern void vmw_mob_destroy(struct vmw_mob *mob);
extern struct vmw_mob *vmw_mob_create(unsigned long data_pages);
extern int vmw_otables_setup(struct vmw_private *dev_priv);
extern void vmw_otables_takedown(struct vmw_private *dev_priv);

/*
 * Context management - vmwgfx_context.c
 */

extern const struct vmw_user_resource_conv *user_context_converter;

extern int vmw_context_check(struct vmw_private *dev_priv,
			     struct ttm_object_file *tfile,
			     int id,
			     struct vmw_resource **p_res);
extern int vmw_context_define_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
extern int vmw_extended_context_define_ioctl(struct drm_device *dev, void *data,
					     struct drm_file *file_priv);
extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file_priv);
extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx);
extern struct vmw_cmdbuf_res_manager *
vmw_context_res_man(struct vmw_resource *ctx);
extern struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx,
						SVGACOTableType cotable_type);
extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx);
struct vmw_ctx_binding_state;
extern struct vmw_ctx_binding_state *
vmw_context_binding_state(struct vmw_resource *ctx);
extern void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx,
					  bool readback);
extern int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
				     struct vmw_buffer_object *mob);
extern struct vmw_buffer_object *
vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res);


/*
 * Surface management - vmwgfx_surface.c
 */

extern const struct vmw_user_resource_conv *user_surface_converter;

extern void vmw_surface_res_free(struct vmw_resource *res);
extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file_priv);
extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
extern int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *file_priv);
extern int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *file_priv);
extern int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
					  struct drm_file *file_priv);
extern int vmw_surface_check(struct vmw_private *dev_priv,
			     struct ttm_object_file *tfile,
			     uint32_t handle, int *id);
extern int vmw_surface_validate(struct vmw_private *dev_priv,
				struct vmw_surface *srf);
int vmw_surface_gb_priv_define(struct drm_device *dev,
			       uint32_t user_accounting_size,
			       uint32_t svga3d_flags,
			       SVGA3dSurfaceFormat format,
			       bool for_scanout,
			       uint32_t num_mip_levels,
			       uint32_t multisample_count,
			       uint32_t array_size,
			       struct drm_vmw_size size,
			       struct vmw_surface **srf_out);

/*
 * Shader management - vmwgfx_shader.c
 */

extern const struct vmw_user_resource_conv *user_shader_converter;

extern int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file_priv);
extern int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
extern int vmw_compat_shader_add(struct vmw_private *dev_priv,
				 struct vmw_cmdbuf_res_manager *man,
				 u32 user_key, const void *bytecode,
				 SVGA3dShaderType shader_type,
				 size_t size,
				 struct list_head *list);
extern int vmw_shader_remove(struct vmw_cmdbuf_res_manager *man,
			     u32 user_key, SVGA3dShaderType shader_type,
			     struct list_head *list);
extern int vmw_dx_shader_add(struct vmw_cmdbuf_res_manager *man,
			     struct vmw_resource *ctx,
			     u32 user_key,
			     SVGA3dShaderType shader_type,
			     struct list_head *list);
extern void vmw_dx_shader_cotable_list_scrub(struct vmw_private *dev_priv,
					     struct list_head *list,
					     bool readback);

extern struct vmw_resource *
vmw_shader_lookup(struct vmw_cmdbuf_res_manager *man,
		  u32 user_key, SVGA3dShaderType shader_type);

/*
 * Command buffer managed resources - vmwgfx_cmdbuf_res.c
 */

extern struct vmw_cmdbuf_res_manager *
vmw_cmdbuf_res_man_create(struct vmw_private *dev_priv);
extern void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man);
extern size_t vmw_cmdbuf_res_man_size(void);
extern struct vmw_resource *
vmw_cmdbuf_res_lookup(struct vmw_cmdbuf_res_manager *man,
		      enum vmw_cmdbuf_res_type res_type,
		      u32 user_key);
extern void vmw_cmdbuf_res_revert(struct list_head *list);
extern void vmw_cmdbuf_res_commit(struct list_head *list);
extern int vmw_cmdbuf_res_add(struct vmw_cmdbuf_res_manager *man,
			      enum vmw_cmdbuf_res_type res_type,
			      u32 user_key,
			      struct vmw_resource *res,
			      struct list_head *list);
extern int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man,
				 enum vmw_cmdbuf_res_type res_type,
				 u32 user_key,
				 struct list_head *list,
				 struct vmw_resource **res);

/*
 * COTable management - vmwgfx_cotable.c
 */
extern const SVGACOTableType vmw_cotable_scrub_order[];
extern struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv,
					      struct vmw_resource *ctx,
					      u32 type);
extern int vmw_cotable_notify(struct vmw_resource *res, int id);
extern int vmw_cotable_scrub(struct vmw_resource *res, bool readback);
extern void vmw_cotable_add_resource(struct vmw_resource *ctx,
				     struct list_head *head);

/*
 * Command buffer management - vmwgfx_cmdbuf.c
 */
struct vmw_cmdbuf_man;
struct vmw_cmdbuf_header;

extern struct vmw_cmdbuf_man *
vmw_cmdbuf_man_create(struct vmw_private *dev_priv);
extern int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man,
				    size_t size, size_t default_size);
extern void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man);
extern void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man);
extern int vmw_cmdbuf_idle(struct vmw_cmdbuf_man *man, bool interruptible,
			   unsigned long timeout);
extern void *vmw_cmdbuf_reserve(struct vmw_cmdbuf_man *man, size_t size,
				int ctx_id, bool interruptible,
				struct vmw_cmdbuf_header *header);
extern void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size,
			      struct vmw_cmdbuf_header *header,
			      bool flush);
extern void *vmw_cmdbuf_alloc(struct vmw_cmdbuf_man *man,
			      size_t size, bool interruptible,
			      struct vmw_cmdbuf_header **p_header);
extern void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header);
extern int vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man,
				bool interruptible);
extern void vmw_cmdbuf_irqthread(struct vmw_cmdbuf_man *man);

/* CPU blit utilities - vmwgfx_blit.c */

/**
 * struct vmw_diff_cpy - CPU blit information structure
 *
 * @rect: The output bounding box rectangle.
 * @line: The current line of the blit.
 * @line_offset: Offset of the current line segment.
 * @cpp: Bytes per pixel (granularity information).
 * @do_cpy: Which memcpy function to use.
 */
struct vmw_diff_cpy {
	struct drm_rect rect;
	size_t line;
	size_t line_offset;
	int cpp;
	void (*do_cpy)(struct vmw_diff_cpy *diff, u8 *dest, const u8 *src,
		       size_t n);
};

#define VMW_CPU_BLIT_INITIALIZER {	\
	.do_cpy = vmw_memcpy,		\
}

#define VMW_CPU_BLIT_DIFF_INITIALIZER(_cpp) {	\
	.line = 0,				\
	.line_offset = 0,			\
	.rect = { .x1 = INT_MAX/2,		\
		  .y1 = INT_MAX/2,		\
		  .x2 = INT_MIN/2,		\
		  .y2 = INT_MIN/2		\
	},					\
	.cpp = _cpp,				\
	.do_cpy = vmw_diff_memcpy,		\
}

void vmw_diff_memcpy(struct vmw_diff_cpy *diff, u8 *dest, const u8 *src,
		     size_t n);

void vmw_memcpy(struct vmw_diff_cpy *diff, u8 *dest, const u8 *src, size_t n);

int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
		    u32 dst_offset, u32 dst_stride,
		    struct ttm_buffer_object *src,
		    u32 src_offset, u32 src_stride,
		    u32 w, u32 h,
		    struct vmw_diff_cpy *diff);
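
/*
 * Illustrative sketch only (hypothetical helper, not part of the driver):
 * a plain full-copy CPU blit between two TTM buffer objects using the
 * non-diff initializer. Offsets, strides and dimensions are caller-supplied,
 * and both objects are assumed to be pinned or reserved by the caller.
 */
static inline int vmw_bo_cpu_blit_copy_example(struct ttm_buffer_object *dst,
					       struct ttm_buffer_object *src,
					       u32 stride, u32 w, u32 h)
{
	struct vmw_diff_cpy diff = VMW_CPU_BLIT_INITIALIZER;

	return vmw_bo_cpu_blit(dst, 0, stride, src, 0, stride, w, h, &diff);
}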

/* Host messaging - vmwgfx_msg.c: */
int vmw_host_get_guestinfo(const char *guest_info_param,
			   char *buffer, size_t *length);
int vmw_host_log(const char *log);

/**
 * Inline helper functions
 */

static inline void vmw_surface_unreference(struct vmw_surface **srf)
{
	struct vmw_surface *tmp_srf = *srf;
	struct vmw_resource *res = &tmp_srf->res;
	*srf = NULL;

	vmw_resource_unreference(&res);
}

static inline struct vmw_surface *vmw_surface_reference(struct vmw_surface *srf)
{
	(void) vmw_resource_reference(&srf->res);
	return srf;
}

static inline void vmw_bo_unreference(struct vmw_buffer_object **buf)
{
	struct vmw_buffer_object *tmp_buf = *buf;

	*buf = NULL;
	if (tmp_buf != NULL) {
		struct ttm_buffer_object *bo = &tmp_buf->base;

		ttm_bo_unref(&bo);
	}
}

static inline struct vmw_buffer_object *
vmw_bo_reference(struct vmw_buffer_object *buf)
{
	if (ttm_bo_reference(&buf->base))
		return buf;
	return NULL;
}

static inline struct ttm_mem_global *vmw_mem_glob(struct vmw_private *dev_priv)
{
	return (struct ttm_mem_global *) dev_priv->mem_global_ref.object;
}

static inline void vmw_fifo_resource_inc(struct vmw_private *dev_priv)
{
	atomic_inc(&dev_priv->num_fifo_resources);
}

static inline void vmw_fifo_resource_dec(struct vmw_private *dev_priv)
{
	atomic_dec(&dev_priv->num_fifo_resources);
}

/**
 * vmw_mmio_read - Perform an MMIO read from volatile memory
 *
 * @addr: The address to read from
 *
 * This function is intended to be equivalent to ioread32() on
 * memremap'd memory, but without byteswapping.
 */
static inline u32 vmw_mmio_read(u32 *addr)
{
	return READ_ONCE(*addr);
}

/**
 * vmw_mmio_write - Perform an MMIO write to volatile memory
 *
 * @value: The value to write
 * @addr: The address to write to
 *
 * This function is intended to be equivalent to iowrite32 on
 * memremap'd memory, but without byteswapping.
 */
static inline void vmw_mmio_write(u32 value, u32 *addr)
{
	WRITE_ONCE(*addr, value);
}
#endif