/**************************************************************************
 *
 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#ifndef _VMWGFX_DRV_H_
#define _VMWGFX_DRV_H_

#include "vmwgfx_reg.h"
#include <drm/drmP.h>
#include <drm/vmwgfx_drm.h>
#include <drm/drm_hashtab.h>
#include <drm/drm_auth.h>
#include <linux/suspend.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_object.h>
#include <drm/ttm/ttm_lock.h>
#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_module.h>
#include "vmwgfx_fence.h"
#include <linux/sync_file.h>

#define VMWGFX_DRIVER_NAME "vmwgfx"
#define VMWGFX_DRIVER_DATE "20180704"
#define VMWGFX_DRIVER_MAJOR 2
#define VMWGFX_DRIVER_MINOR 15
#define VMWGFX_DRIVER_PATCHLEVEL 0
#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
#define VMWGFX_MAX_RELOCATIONS 2048
#define VMWGFX_MAX_VALIDATIONS 2048
#define VMWGFX_MAX_DISPLAYS 16
#define VMWGFX_CMD_BOUNCE_INIT_SIZE 32768
#define VMWGFX_ENABLE_SCREEN_TARGET_OTABLE 1

/*
 * Perhaps we should have sysfs entries for these.
 */
#define VMWGFX_NUM_GB_CONTEXT 256
#define VMWGFX_NUM_GB_SHADER 20000
#define VMWGFX_NUM_GB_SURFACE 32768
#define VMWGFX_NUM_GB_SCREEN_TARGET VMWGFX_MAX_DISPLAYS
#define VMWGFX_NUM_DXCONTEXT 256
#define VMWGFX_NUM_DXQUERY 512
#define VMWGFX_NUM_MOB (VMWGFX_NUM_GB_CONTEXT +\
                        VMWGFX_NUM_GB_SHADER +\
                        VMWGFX_NUM_GB_SURFACE +\
                        VMWGFX_NUM_GB_SCREEN_TARGET)

#define VMW_PL_GMR (TTM_PL_PRIV + 0)
#define VMW_PL_FLAG_GMR (TTM_PL_FLAG_PRIV << 0)
#define VMW_PL_MOB (TTM_PL_PRIV + 1)
#define VMW_PL_FLAG_MOB (TTM_PL_FLAG_PRIV << 1)

#define VMW_RES_CONTEXT ttm_driver_type0
#define VMW_RES_SURFACE ttm_driver_type1
#define VMW_RES_STREAM ttm_driver_type2
#define VMW_RES_FENCE ttm_driver_type3
#define VMW_RES_SHADER ttm_driver_type4

struct vmw_fpriv {
        struct drm_master *locked_master;
        struct ttm_object_file *tfile;
        bool gb_aware; /* user-space is guest-backed aware */
};

struct vmw_buffer_object {
        struct ttm_buffer_object base;
        struct list_head res_list;
        s32 pin_count;
        /* Not ref-counted.  Protected by binding_mutex */
        struct vmw_resource *dx_query_ctx;
        /* Protected by reservation */
        struct ttm_bo_kmap_obj map;
};

/**
 * struct vmw_validate_buffer - Carries validation info about buffers.
 *
 * @base: Validation info for TTM.
 * @hash: Hash entry for quick lookup of the TTM buffer object.
 *
 * This structure also contains driver private validation info
 * on top of the info needed by TTM.
 */
struct vmw_validate_buffer {
        struct ttm_validate_buffer base;
        struct drm_hash_item hash;
        bool validate_as_mob;
};

struct vmw_res_func;
struct vmw_resource {
        struct kref kref;
        struct vmw_private *dev_priv;
        int id;
        bool avail;
        unsigned long backup_size;
        bool res_dirty; /* Protected by backup buffer reserved */
        bool backup_dirty; /* Protected by backup buffer reserved */
        struct vmw_buffer_object *backup;
        unsigned long backup_offset;
        unsigned long pin_count; /* Protected by resource reserved */
        const struct vmw_res_func *func;
        struct list_head lru_head; /* Protected by the resource lock */
        struct list_head mob_head; /* Protected by @backup reserved */
        struct list_head binding_head; /* Protected by binding_mutex */
        void (*res_free) (struct vmw_resource *res);
        void (*hw_destroy) (struct vmw_resource *res);
};


/*
 * Resources that are managed using ioctls.
 */
enum vmw_res_type {
        vmw_res_context,
        vmw_res_surface,
        vmw_res_stream,
        vmw_res_shader,
        vmw_res_dx_context,
        vmw_res_cotable,
        vmw_res_view,
        vmw_res_max
};

/*
 * Resources that are managed using command streams.
 */
enum vmw_cmdbuf_res_type {
        vmw_cmdbuf_res_shader,
        vmw_cmdbuf_res_view
};

struct vmw_cmdbuf_res_manager;

struct vmw_cursor_snooper {
        size_t age;
        uint32_t *image;
};

struct vmw_framebuffer;
struct vmw_surface_offset;

struct vmw_surface {
        struct vmw_resource res;
        SVGA3dSurfaceAllFlags flags;
        uint32_t format;
        uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES];
        struct drm_vmw_size base_size;
        struct drm_vmw_size *sizes;
        uint32_t num_sizes;
        bool scanout;
        uint32_t array_size;
        /* TODO so far just an extra pointer */
        struct vmw_cursor_snooper snooper;
        struct vmw_surface_offset *offsets;
        SVGA3dTextureFilter autogen_filter;
        uint32_t multisample_count;
        struct list_head view_list;
        SVGA3dMSPattern multisample_pattern;
        SVGA3dMSQualityLevel quality_level;
};

struct vmw_marker_queue {
        struct list_head head;
        u64 lag;
        u64 lag_time;
        spinlock_t lock;
};

struct vmw_fifo_state {
        unsigned long reserved_size;
        u32 *dynamic_buffer;
        u32 *static_buffer;
        unsigned long static_buffer_size;
        bool using_bounce_buffer;
        uint32_t capabilities;
        struct mutex fifo_mutex;
        struct rw_semaphore rwsem;
        struct vmw_marker_queue marker_queue;
        bool dx;
};

struct vmw_relocation {
        SVGAMobId *mob_loc;
        SVGAGuestPtr *location;
        uint32_t index;
};

/**
 * struct vmw_res_cache_entry - resource information cache entry
 *
 * @valid: Whether the entry is valid, which also implies that the execbuf
 * code holds a reference to the resource, and it's placed on the
 * validation list.
 * @handle: User-space handle of a resource.
 * @res: Non-ref-counted pointer to the resource.
 *
 * Used to avoid frequent repeated user-space handle lookups of the
 * same resource.
 */
struct vmw_res_cache_entry {
        bool valid;
        uint32_t handle;
        struct vmw_resource *res;
        struct vmw_resource_val_node *node;
};

/**
 * enum vmw_dma_map_mode - indicate how to perform TTM page dma mappings.
 */
enum vmw_dma_map_mode {
        vmw_dma_phys,           /* Use physical page addresses */
        vmw_dma_alloc_coherent, /* Use TTM coherent pages */
        vmw_dma_map_populate,   /* Unmap from DMA just after unpopulate */
        vmw_dma_map_bind,       /* Unmap from DMA just before unbind */
        vmw_dma_map_max
};

/**
 * struct vmw_sg_table - Scatter/gather table for binding, with additional
 * device-specific information.
 *
 * @sgt: Pointer to a struct sg_table with binding information
 * @num_regions: Number of regions with device-address contiguous pages
 */
struct vmw_sg_table {
        enum vmw_dma_map_mode mode;
        struct page **pages;
        const dma_addr_t *addrs;
        struct sg_table *sgt;
        unsigned long num_regions;
        unsigned long num_pages;
};

/**
 * struct vmw_piter - Page iterator that iterates over a list of pages
 * and DMA addresses that could be either a scatter-gather list or
 * arrays
 *
 * @pages: Array of page pointers to the pages.
 * @addrs: DMA addresses to the pages if coherent pages are used.
 * @iter: Scatter-gather page iterator. Current position in SG list.
 * @i: Current position in arrays.
 * @num_pages: Number of pages total.
 * @next: Function to advance the iterator. Returns false if past the list
 * of pages, true otherwise.
 * @dma_address: Function to return the DMA address of the current page.
 */
struct vmw_piter {
        struct page **pages;
        const dma_addr_t *addrs;
        struct sg_page_iter iter;
        unsigned long i;
        unsigned long num_pages;
        bool (*next)(struct vmw_piter *);
        dma_addr_t (*dma_address)(struct vmw_piter *);
        struct page *(*page)(struct vmw_piter *);
};

/*
 * enum vmw_display_unit_type - Describes the display unit
 */
enum vmw_display_unit_type {
        vmw_du_invalid = 0,
        vmw_du_legacy,
        vmw_du_screen_object,
        vmw_du_screen_target
};


struct vmw_sw_context{
        struct drm_open_hash res_ht;
        bool res_ht_initialized;
        bool kernel; /**< is the call made from the kernel */
        struct vmw_fpriv *fp;
        struct list_head validate_nodes;
        struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS];
        uint32_t cur_reloc;
        struct vmw_validate_buffer val_bufs[VMWGFX_MAX_VALIDATIONS];
        uint32_t cur_val_buf;
        uint32_t *cmd_bounce;
        uint32_t cmd_bounce_size;
        struct list_head resource_list;
        struct list_head ctx_resource_list; /* For contexts and cotables */
        struct vmw_buffer_object *cur_query_bo;
        struct list_head res_relocations;
        uint32_t *buf_start;
        struct vmw_res_cache_entry res_cache[vmw_res_max];
        struct vmw_resource *last_query_ctx;
        bool needs_post_query_barrier;
        struct vmw_resource *error_resource;
        struct vmw_ctx_binding_state *staged_bindings;
        bool staged_bindings_inuse;
        struct list_head staged_cmd_res;
        struct vmw_resource_val_node *dx_ctx_node;
        struct vmw_buffer_object *dx_query_mob;
        struct vmw_resource *dx_query_ctx;
        struct vmw_cmdbuf_res_manager *man;
};

struct vmw_legacy_display;
struct vmw_overlay;

struct vmw_master {
        struct ttm_lock lock;
};

struct vmw_vga_topology_state {
        uint32_t width;
        uint32_t height;
        uint32_t primary;
        uint32_t pos_x;
        uint32_t pos_y;
};


/*
 * struct vmw_otable - Guest Memory OBject table metadata
 *
 * @size: Size of the table (page-aligned).
 * @page_table: Pointer to a struct vmw_mob holding the page table.
 */
struct vmw_otable {
        unsigned long size;
        struct vmw_mob *page_table;
        bool enabled;
};

struct vmw_otable_batch {
        unsigned num_otables;
        struct vmw_otable *otables;
        struct vmw_resource *context;
        struct ttm_buffer_object *otable_bo;
};

enum {
        VMW_IRQTHREAD_FENCE,
        VMW_IRQTHREAD_CMDBUF,
        VMW_IRQTHREAD_MAX
};

struct vmw_private {
        struct ttm_bo_device bdev;
        struct ttm_bo_global_ref bo_global_ref;
        struct drm_global_reference mem_global_ref;

        struct vmw_fifo_state fifo;

        struct drm_device *dev;
        unsigned long vmw_chipset;
        unsigned int io_start;
        uint32_t vram_start;
        uint32_t vram_size;
        uint32_t prim_bb_mem;
        uint32_t mmio_start;
        uint32_t mmio_size;
        uint32_t fb_max_width;
        uint32_t fb_max_height;
        uint32_t texture_max_width;
        uint32_t texture_max_height;
        uint32_t stdu_max_width;
        uint32_t stdu_max_height;
        uint32_t initial_width;
        uint32_t initial_height;
        u32 *mmio_virt;
        uint32_t capabilities;
        uint32_t capabilities2;
        uint32_t max_gmr_ids;
        uint32_t max_gmr_pages;
        uint32_t max_mob_pages;
        uint32_t max_mob_size;
        uint32_t memory_size;
        bool has_gmr;
        bool has_mob;
        spinlock_t hw_lock;
        spinlock_t cap_lock;
        bool has_dx;
        bool assume_16bpp;
        bool has_sm4_1;

        /*
         * VGA registers.
         */

        struct vmw_vga_topology_state vga_save[VMWGFX_MAX_DISPLAYS];
        uint32_t vga_width;
        uint32_t vga_height;
        uint32_t vga_bpp;
        uint32_t vga_bpl;
        uint32_t vga_pitchlock;

        uint32_t num_displays;

        /*
         * Currently requested_layout_mutex is used to protect the gui
         * positioning state in the display unit. With that use case currently this
         * mutex is only taken during layout ioctl and atomic check_modeset.
         * Other display unit state can be protected with this mutex but that
         * needs careful consideration.
         */
        struct mutex requested_layout_mutex;

        /*
         * Framebuffer info.
         */

        void *fb_info;
        enum vmw_display_unit_type active_display_unit;
        struct vmw_legacy_display *ldu_priv;
        struct vmw_overlay *overlay_priv;
        struct drm_property *hotplug_mode_update_property;
        struct drm_property *implicit_placement_property;
        unsigned num_implicit;
        struct vmw_framebuffer *implicit_fb;
        struct mutex global_kms_state_mutex;
        spinlock_t cursor_lock;
        struct drm_atomic_state *suspend_state;

        /*
         * Context and surface management.
         */

        rwlock_t resource_lock;
        struct idr res_idr[vmw_res_max];
        /*
         * Block lastclose from racing with firstopen.
         */

        struct mutex init_mutex;

        /*
         * A resource manager for kernel-only surfaces and
         * contexts.
         */

        struct ttm_object_device *tdev;

        /*
         * Fencing and IRQs.
         */

        atomic_t marker_seq;
        wait_queue_head_t fence_queue;
        wait_queue_head_t fifo_queue;
        spinlock_t waiter_lock;
        int fence_queue_waiters; /* Protected by waiter_lock */
        int goal_queue_waiters; /* Protected by waiter_lock */
        int cmdbuf_waiters; /* Protected by waiter_lock */
        int error_waiters; /* Protected by waiter_lock */
        int fifo_queue_waiters; /* Protected by waiter_lock */
        uint32_t last_read_seqno;
        struct vmw_fence_manager *fman;
        uint32_t irq_mask; /* Updates protected by waiter_lock */

        /*
         * Device state
         */

        uint32_t traces_state;
        uint32_t enable_state;
        uint32_t config_done_state;

        /**
         * Execbuf
         */
        /**
         * Protected by the cmdbuf mutex.
         */

        struct vmw_sw_context ctx;
        struct mutex cmdbuf_mutex;
        struct mutex binding_mutex;

        /**
         * Operating mode.
         */

        bool stealth;
        bool enable_fb;
        spinlock_t svga_lock;

        /**
         * Master management.
         */

        struct vmw_master *active_master;
        struct vmw_master fbdev_master;
        struct notifier_block pm_nb;
        bool refuse_hibernation;
        bool suspend_locked;

        struct mutex release_mutex;
        atomic_t num_fifo_resources;

        /*
         * Replace this with an rwsem as soon as we have down_xx_interruptible()
         */
        struct ttm_lock reservation_sem;

        /*
         * Query processing. These members
         * are protected by the cmdbuf mutex.
         */

        struct vmw_buffer_object *dummy_query_bo;
        struct vmw_buffer_object *pinned_bo;
        uint32_t query_cid;
        uint32_t query_cid_valid;
        bool dummy_query_bo_pinned;

        /*
         * Surface swapping. The "surface_lru" list is protected by the
         * resource lock in order to be able to destroy a surface and take
         * it off the lru atomically. "used_memory_size" is currently
         * protected by the cmdbuf mutex for simplicity.
         */

        struct list_head res_lru[vmw_res_max];
        uint32_t used_memory_size;

        /*
         * DMA mapping stuff.
         */
        enum vmw_dma_map_mode map_mode;

        /*
         * Guest Backed stuff
         */
        struct vmw_otable_batch otable_batch;

        struct vmw_cmdbuf_man *cman;
        DECLARE_BITMAP(irqthread_pending, VMW_IRQTHREAD_MAX);
};

static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
{
        return container_of(res, struct vmw_surface, res);
}

static inline struct vmw_private *vmw_priv(struct drm_device *dev)
{
        return (struct vmw_private *)dev->dev_private;
}

static inline struct vmw_fpriv *vmw_fpriv(struct drm_file *file_priv)
{
        return (struct vmw_fpriv *)file_priv->driver_priv;
}

static inline struct vmw_master *vmw_master(struct drm_master *master)
{
        return (struct vmw_master *) master->driver_priv;
}

/*
 * The locking here is fine-grained, so that it is performed once
 * for every read- and write operation. This is of course costly, but we
 * don't perform much register access in the timing critical paths anyway.
 * Instead we have the extra benefit of being sure that we don't forget
 * the hw lock around register accesses.
 */
static inline void vmw_write(struct vmw_private *dev_priv,
                             unsigned int offset, uint32_t value)
{
        spin_lock(&dev_priv->hw_lock);
        outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
        outl(value, dev_priv->io_start + VMWGFX_VALUE_PORT);
        spin_unlock(&dev_priv->hw_lock);
}

static inline uint32_t vmw_read(struct vmw_private *dev_priv,
                                unsigned int offset)
{
        u32 val;

        spin_lock(&dev_priv->hw_lock);
        outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
        val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT);
        spin_unlock(&dev_priv->hw_lock);

        return val;
}

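/*
 * Usage sketch (editor's illustration, not part of the driver): because
 * vmw_write() and vmw_read() take dev_priv->hw_lock internally, a caller can
 * program a register and read it back without any extra locking. The
 * function name below is hypothetical; @offset would normally be one of the
 * SVGA register indices.
 */
static inline uint32_t vmw_write_readback_example(struct vmw_private *dev_priv,
                                                  unsigned int offset,
                                                  uint32_t value)
{
        vmw_write(dev_priv, offset, value);     /* Index/value port write. */
        return vmw_read(dev_priv, offset);      /* Read the register back. */
}
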
extern void vmw_svga_enable(struct vmw_private *dev_priv);
extern void vmw_svga_disable(struct vmw_private *dev_priv);


/**
 * GMR utilities - vmwgfx_gmr.c
 */

extern int vmw_gmr_bind(struct vmw_private *dev_priv,
                        const struct vmw_sg_table *vsgt,
                        unsigned long num_pages,
                        int gmr_id);
extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id);

/**
 * Resource utilities - vmwgfx_resource.c
 */
struct vmw_user_resource_conv;

extern void vmw_resource_unreference(struct vmw_resource **p_res);
extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
extern struct vmw_resource *
vmw_resource_reference_unless_doomed(struct vmw_resource *res);
extern int vmw_resource_validate(struct vmw_resource *res);
extern int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
                                bool no_backup);
extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
                                  struct ttm_object_file *tfile,
                                  uint32_t handle,
                                  struct vmw_surface **out_surf,
                                  struct vmw_buffer_object **out_buf);
extern int vmw_user_resource_lookup_handle(
        struct vmw_private *dev_priv,
        struct ttm_object_file *tfile,
        uint32_t handle,
        const struct vmw_user_resource_conv *converter,
        struct vmw_resource **p_res);
extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
                                  struct drm_file *file_priv);
extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
                                  struct drm_file *file_priv);
extern int vmw_user_stream_lookup(struct vmw_private *dev_priv,
                                  struct ttm_object_file *tfile,
                                  uint32_t *inout_id,
                                  struct vmw_resource **out);
extern void vmw_resource_unreserve(struct vmw_resource *res,
                                   bool switch_backup,
                                   struct vmw_buffer_object *new_backup,
                                   unsigned long new_backup_offset);
extern void vmw_query_move_notify(struct ttm_buffer_object *bo,
                                  struct ttm_mem_reg *mem);
extern int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob);
extern void vmw_resource_evict_all(struct vmw_private *dev_priv);
extern void vmw_resource_unbind_list(struct vmw_buffer_object *vbo);

/**
 * Buffer object helper functions - vmwgfx_bo.c
 */
extern int vmw_bo_pin_in_placement(struct vmw_private *vmw_priv,
                                   struct vmw_buffer_object *bo,
                                   struct ttm_placement *placement,
                                   bool interruptible);
extern int vmw_bo_pin_in_vram(struct vmw_private *dev_priv,
                              struct vmw_buffer_object *buf,
                              bool interruptible);
extern int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
                                     struct vmw_buffer_object *buf,
                                     bool interruptible);
extern int vmw_bo_pin_in_start_of_vram(struct vmw_private *vmw_priv,
                                       struct vmw_buffer_object *bo,
                                       bool interruptible);
extern int vmw_bo_unpin(struct vmw_private *vmw_priv,
                        struct vmw_buffer_object *bo,
                        bool interruptible);
extern void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *buf,
                                 SVGAGuestPtr *ptr);
extern void vmw_bo_pin_reserved(struct vmw_buffer_object *bo, bool pin);
extern void vmw_bo_bo_free(struct ttm_buffer_object *bo);
extern int vmw_bo_init(struct vmw_private *dev_priv,
                       struct vmw_buffer_object *vmw_bo,
                       size_t size, struct ttm_placement *placement,
                       bool interuptable,
                       void (*bo_free)(struct ttm_buffer_object *bo));
extern int vmw_user_bo_verify_access(struct ttm_buffer_object *bo,
                                     struct ttm_object_file *tfile);
extern int vmw_user_bo_alloc(struct vmw_private *dev_priv,
                             struct ttm_object_file *tfile,
                             uint32_t size,
                             bool shareable,
                             uint32_t *handle,
                             struct vmw_buffer_object **p_dma_buf,
                             struct ttm_base_object **p_base);
extern int vmw_user_bo_reference(struct ttm_object_file *tfile,
                                 struct vmw_buffer_object *dma_buf,
                                 uint32_t *handle);
extern int vmw_bo_alloc_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *file_priv);
extern int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *file_priv);
extern int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
                                     struct drm_file *file_priv);
extern int vmw_user_bo_lookup(struct ttm_object_file *tfile,
                              uint32_t id, struct vmw_buffer_object **out,
                              struct ttm_base_object **base);
extern void vmw_bo_fence_single(struct ttm_buffer_object *bo,
                                struct vmw_fence_obj *fence);
extern void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo);
extern void vmw_bo_unmap(struct vmw_buffer_object *vbo);
extern void vmw_bo_move_notify(struct ttm_buffer_object *bo,
                               struct ttm_mem_reg *mem);
extern void vmw_bo_swap_notify(struct ttm_buffer_object *bo);

/**
 * Misc Ioctl functionality - vmwgfx_ioctl.c
 */

extern int vmw_getparam_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *file_priv);
extern int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file_priv);
extern int vmw_present_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *file_priv);
extern int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
                                      struct drm_file *file_priv);
extern __poll_t vmw_fops_poll(struct file *filp,
                              struct poll_table_struct *wait);
extern ssize_t vmw_fops_read(struct file *filp, char __user *buffer,
                             size_t count, loff_t *offset);

/**
 * Fifo utilities - vmwgfx_fifo.c
 */

extern int vmw_fifo_init(struct vmw_private *dev_priv,
                         struct vmw_fifo_state *fifo);
extern void vmw_fifo_release(struct vmw_private *dev_priv,
                             struct vmw_fifo_state *fifo);
extern void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes);
extern void *
vmw_fifo_reserve_dx(struct vmw_private *dev_priv, uint32_t bytes, int ctx_id);
extern void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes);
extern void vmw_fifo_commit_flush(struct vmw_private *dev_priv, uint32_t bytes);
extern int vmw_fifo_send_fence(struct vmw_private *dev_priv,
                               uint32_t *seqno);
extern void vmw_fifo_ping_host_locked(struct vmw_private *, uint32_t reason);
extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason);
extern bool vmw_fifo_have_3d(struct vmw_private *dev_priv);
extern bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv);
extern int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
                                     uint32_t cid);
extern int vmw_fifo_flush(struct vmw_private *dev_priv,
                          bool interruptible);

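/*
 * Usage sketch (editor's illustration, not part of the driver): the usual
 * FIFO command pattern is reserve -> fill -> commit. The helper name below
 * is hypothetical; real callers reserve space for a specific SVGA command
 * structure rather than raw dwords.
 */
static inline int vmw_fifo_emit_dwords_example(struct vmw_private *dev_priv,
                                               const uint32_t *dwords,
                                               uint32_t count)
{
        uint32_t bytes = count * sizeof(uint32_t);
        uint32_t *cmd = vmw_fifo_reserve(dev_priv, bytes);

        if (unlikely(cmd == NULL))
                return -ENOMEM;         /* FIFO space could not be reserved. */

        memcpy(cmd, dwords, bytes);     /* Fill the reserved FIFO space. */
        vmw_fifo_commit(dev_priv, bytes);

        return 0;
}
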
/**
 * TTM glue - vmwgfx_ttm_glue.c
 */

extern int vmw_ttm_global_init(struct vmw_private *dev_priv);
extern void vmw_ttm_global_release(struct vmw_private *dev_priv);
extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);

/**
 * TTM buffer object driver - vmwgfx_ttm_buffer.c
 */

extern const size_t vmw_tt_size;
extern struct ttm_placement vmw_vram_placement;
extern struct ttm_placement vmw_vram_ne_placement;
extern struct ttm_placement vmw_vram_sys_placement;
extern struct ttm_placement vmw_vram_gmr_placement;
extern struct ttm_placement vmw_vram_gmr_ne_placement;
extern struct ttm_placement vmw_sys_placement;
extern struct ttm_placement vmw_sys_ne_placement;
extern struct ttm_placement vmw_evictable_placement;
extern struct ttm_placement vmw_srf_placement;
extern struct ttm_placement vmw_mob_placement;
extern struct ttm_placement vmw_mob_ne_placement;
extern struct ttm_placement vmw_nonfixed_placement;
extern struct ttm_bo_driver vmw_bo_driver;
extern int vmw_dma_quiescent(struct drm_device *dev);
extern int vmw_bo_map_dma(struct ttm_buffer_object *bo);
extern void vmw_bo_unmap_dma(struct ttm_buffer_object *bo);
extern const struct vmw_sg_table *
vmw_bo_sg_table(struct ttm_buffer_object *bo);
extern void vmw_piter_start(struct vmw_piter *viter,
                            const struct vmw_sg_table *vsgt,
                            unsigned long p_offs);

/**
 * vmw_piter_next - Advance the iterator one page.
 *
 * @viter: Pointer to the iterator to advance.
 *
 * Returns false if past the list of pages, true otherwise.
 */
static inline bool vmw_piter_next(struct vmw_piter *viter)
{
        return viter->next(viter);
}

/**
 * vmw_piter_dma_addr - Return the DMA address of the current page.
 *
 * @viter: Pointer to the iterator
 *
 * Returns the DMA address of the page pointed to by @viter.
 */
static inline dma_addr_t vmw_piter_dma_addr(struct vmw_piter *viter)
{
        return viter->dma_address(viter);
}

/**
 * vmw_piter_page - Return a pointer to the current page.
 *
 * @viter: Pointer to the iterator
 *
 * Returns a pointer to the page pointed to by @viter.
 */
static inline struct page *vmw_piter_page(struct vmw_piter *viter)
{
        return viter->page(viter);
}

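/*
 * Usage sketch (editor's illustration, not used by the driver): walk every
 * page of a bound buffer through the iterator above. The vsgt pointer would
 * typically come from vmw_bo_sg_table(); the function name is hypothetical.
 */
static inline unsigned long
vmw_piter_count_pages_example(const struct vmw_sg_table *vsgt)
{
        struct vmw_piter viter;
        unsigned long count = 0;

        vmw_piter_start(&viter, vsgt, 0);
        while (vmw_piter_next(&viter)) {
                dma_addr_t addr = vmw_piter_dma_addr(&viter);

                (void) addr;    /* A real user would map or program @addr. */
                count++;
        }

        return count;
}
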
/**
 * Command submission - vmwgfx_execbuf.c
 */

extern int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
                             struct drm_file *file_priv, size_t size);
extern int vmw_execbuf_process(struct drm_file *file_priv,
                               struct vmw_private *dev_priv,
                               void __user *user_commands,
                               void *kernel_commands,
                               uint32_t command_size,
                               uint64_t throttle_us,
                               uint32_t dx_context_handle,
                               struct drm_vmw_fence_rep __user
                               *user_fence_rep,
                               struct vmw_fence_obj **out_fence,
                               uint32_t flags);
extern void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
                                            struct vmw_fence_obj *fence);
extern void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv);

extern int vmw_execbuf_fence_commands(struct drm_file *file_priv,
                                      struct vmw_private *dev_priv,
                                      struct vmw_fence_obj **p_fence,
                                      uint32_t *p_handle);
extern void vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
                                        struct vmw_fpriv *vmw_fp,
                                        int ret,
                                        struct drm_vmw_fence_rep __user
                                        *user_fence_rep,
                                        struct vmw_fence_obj *fence,
                                        uint32_t fence_handle,
                                        int32_t out_fence_fd,
                                        struct sync_file *sync_file);
extern int vmw_validate_single_buffer(struct vmw_private *dev_priv,
                                      struct ttm_buffer_object *bo,
                                      bool interruptible,
                                      bool validate_as_mob);
bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd);

/**
 * IRQs and waiting - vmwgfx_irq.c
 */

extern int vmw_wait_seqno(struct vmw_private *dev_priv, bool lazy,
                          uint32_t seqno, bool interruptible,
                          unsigned long timeout);
extern int vmw_irq_install(struct drm_device *dev, int irq);
extern void vmw_irq_uninstall(struct drm_device *dev);
extern bool vmw_seqno_passed(struct vmw_private *dev_priv,
                             uint32_t seqno);
extern int vmw_fallback_wait(struct vmw_private *dev_priv,
                             bool lazy,
                             bool fifo_idle,
                             uint32_t seqno,
                             bool interruptible,
                             unsigned long timeout);
extern void vmw_update_seqno(struct vmw_private *dev_priv,
                             struct vmw_fifo_state *fifo_state);
extern void vmw_seqno_waiter_add(struct vmw_private *dev_priv);
extern void vmw_seqno_waiter_remove(struct vmw_private *dev_priv);
extern void vmw_goal_waiter_add(struct vmw_private *dev_priv);
extern void vmw_goal_waiter_remove(struct vmw_private *dev_priv);
extern void vmw_generic_waiter_add(struct vmw_private *dev_priv, u32 flag,
                                   int *waiter_count);
extern void vmw_generic_waiter_remove(struct vmw_private *dev_priv,
                                      u32 flag, int *waiter_count);

/**
 * Rudimentary fence-like objects currently used only for throttling -
 * vmwgfx_marker.c
 */

extern void vmw_marker_queue_init(struct vmw_marker_queue *queue);
extern void vmw_marker_queue_takedown(struct vmw_marker_queue *queue);
extern int vmw_marker_push(struct vmw_marker_queue *queue,
                           uint32_t seqno);
extern int vmw_marker_pull(struct vmw_marker_queue *queue,
                           uint32_t signaled_seqno);
extern int vmw_wait_lag(struct vmw_private *dev_priv,
                        struct vmw_marker_queue *queue, uint32_t us);

/**
 * Kernel framebuffer - vmwgfx_fb.c
 */

int vmw_fb_init(struct vmw_private *vmw_priv);
int vmw_fb_close(struct vmw_private *dev_priv);
int vmw_fb_off(struct vmw_private *vmw_priv);
int vmw_fb_on(struct vmw_private *vmw_priv);

/**
 * Kernel modesetting - vmwgfx_kms.c
 */

int vmw_kms_init(struct vmw_private *dev_priv);
int vmw_kms_close(struct vmw_private *dev_priv);
int vmw_kms_save_vga(struct vmw_private *vmw_priv);
int vmw_kms_restore_vga(struct vmw_private *vmw_priv);
int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file_priv);
void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv);
void vmw_kms_cursor_snoop(struct vmw_surface *srf,
                          struct ttm_object_file *tfile,
                          struct ttm_buffer_object *bo,
                          SVGA3dCmdHeader *header);
int vmw_kms_write_svga(struct vmw_private *vmw_priv,
                       unsigned width, unsigned height, unsigned pitch,
                       unsigned bpp, unsigned depth);
void vmw_kms_idle_workqueues(struct vmw_master *vmaster);
bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
                                uint32_t pitch,
                                uint32_t height);
u32 vmw_get_vblank_counter(struct drm_device *dev, unsigned int pipe);
int vmw_enable_vblank(struct drm_device *dev, unsigned int pipe);
void vmw_disable_vblank(struct drm_device *dev, unsigned int pipe);
int vmw_kms_present(struct vmw_private *dev_priv,
                    struct drm_file *file_priv,
                    struct vmw_framebuffer *vfb,
                    struct vmw_surface *surface,
                    uint32_t sid, int32_t destX, int32_t destY,
                    struct drm_vmw_rect *clips,
                    uint32_t num_clips);
int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file_priv);
void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv);
int vmw_kms_suspend(struct drm_device *dev);
int vmw_kms_resume(struct drm_device *dev);
void vmw_kms_lost_device(struct drm_device *dev);

int vmw_dumb_create(struct drm_file *file_priv,
                    struct drm_device *dev,
                    struct drm_mode_create_dumb *args);

int vmw_dumb_map_offset(struct drm_file *file_priv,
                        struct drm_device *dev, uint32_t handle,
                        uint64_t *offset);
int vmw_dumb_destroy(struct drm_file *file_priv,
                     struct drm_device *dev,
                     uint32_t handle);
extern int vmw_resource_pin(struct vmw_resource *res, bool interruptible);
extern void vmw_resource_unpin(struct vmw_resource *res);
extern enum vmw_res_type vmw_res_type(const struct vmw_resource *res);

/**
 * Overlay control - vmwgfx_overlay.c
 */

int vmw_overlay_init(struct vmw_private *dev_priv);
int vmw_overlay_close(struct vmw_private *dev_priv);
int vmw_overlay_ioctl(struct drm_device *dev, void *data,
                      struct drm_file *file_priv);
int vmw_overlay_stop_all(struct vmw_private *dev_priv);
int vmw_overlay_resume_all(struct vmw_private *dev_priv);
int vmw_overlay_pause_all(struct vmw_private *dev_priv);
int vmw_overlay_claim(struct vmw_private *dev_priv, uint32_t *out);
int vmw_overlay_unref(struct vmw_private *dev_priv, uint32_t stream_id);
int vmw_overlay_num_overlays(struct vmw_private *dev_priv);
int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv);

/**
 * GMR Id manager
 */

extern const struct ttm_mem_type_manager_func vmw_gmrid_manager_func;

/**
 * Prime - vmwgfx_prime.c
 */

extern const struct dma_buf_ops vmw_prime_dmabuf_ops;
extern int vmw_prime_fd_to_handle(struct drm_device *dev,
                                  struct drm_file *file_priv,
                                  int fd, u32 *handle);
extern int vmw_prime_handle_to_fd(struct drm_device *dev,
                                  struct drm_file *file_priv,
                                  uint32_t handle, uint32_t flags,
                                  int *prime_fd);

/*
 * MemoryOBject management - vmwgfx_mob.c
 */
struct vmw_mob;
extern int vmw_mob_bind(struct vmw_private *dev_priv, struct vmw_mob *mob,
                        const struct vmw_sg_table *vsgt,
                        unsigned long num_data_pages, int32_t mob_id);
extern void vmw_mob_unbind(struct vmw_private *dev_priv,
                           struct vmw_mob *mob);
extern void vmw_mob_destroy(struct vmw_mob *mob);
extern struct vmw_mob *vmw_mob_create(unsigned long data_pages);
extern int vmw_otables_setup(struct vmw_private *dev_priv);
extern void vmw_otables_takedown(struct vmw_private *dev_priv);

/*
 * Context management - vmwgfx_context.c
 */

extern const struct vmw_user_resource_conv *user_context_converter;

extern int vmw_context_check(struct vmw_private *dev_priv,
                             struct ttm_object_file *tfile,
                             int id,
                             struct vmw_resource **p_res);
extern int vmw_context_define_ioctl(struct drm_device *dev, void *data,
                                    struct drm_file *file_priv);
extern int vmw_extended_context_define_ioctl(struct drm_device *dev, void *data,
                                             struct drm_file *file_priv);
extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
                                     struct drm_file *file_priv);
extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx);
extern struct vmw_cmdbuf_res_manager *
vmw_context_res_man(struct vmw_resource *ctx);
extern struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx,
                                                SVGACOTableType cotable_type);
extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx);
struct vmw_ctx_binding_state;
extern struct vmw_ctx_binding_state *
vmw_context_binding_state(struct vmw_resource *ctx);
extern void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx,
                                          bool readback);
extern int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
                                     struct vmw_buffer_object *mob);
extern struct vmw_buffer_object *
vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res);


/*
 * Surface management - vmwgfx_surface.c
 */

extern const struct vmw_user_resource_conv *user_surface_converter;

extern void vmw_surface_res_free(struct vmw_resource *res);
extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
                                     struct drm_file *file_priv);
extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
                                    struct drm_file *file_priv);
extern int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
                                       struct drm_file *file_priv);
extern int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
                                       struct drm_file *file_priv);
extern int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
                                          struct drm_file *file_priv);
extern int vmw_surface_check(struct vmw_private *dev_priv,
                             struct ttm_object_file *tfile,
                             uint32_t handle, int *id);
extern int vmw_surface_validate(struct vmw_private *dev_priv,
                                struct vmw_surface *srf);
int vmw_surface_gb_priv_define(struct drm_device *dev,
                               uint32_t user_accounting_size,
                               SVGA3dSurfaceAllFlags svga3d_flags,
                               SVGA3dSurfaceFormat format,
                               bool for_scanout,
                               uint32_t num_mip_levels,
                               uint32_t multisample_count,
                               uint32_t array_size,
                               struct drm_vmw_size size,
                               SVGA3dMSPattern multisample_pattern,
                               SVGA3dMSQualityLevel quality_level,
                               struct vmw_surface **srf_out);
extern int vmw_gb_surface_define_ext_ioctl(struct drm_device *dev,
                                           void *data,
                                           struct drm_file *file_priv);
extern int vmw_gb_surface_reference_ext_ioctl(struct drm_device *dev,
                                              void *data,
                                              struct drm_file *file_priv);

/*
 * Shader management - vmwgfx_shader.c
 */

extern const struct vmw_user_resource_conv *user_shader_converter;

extern int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
                                   struct drm_file *file_priv);
extern int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
                                    struct drm_file *file_priv);
extern int vmw_compat_shader_add(struct vmw_private *dev_priv,
                                 struct vmw_cmdbuf_res_manager *man,
                                 u32 user_key, const void *bytecode,
                                 SVGA3dShaderType shader_type,
                                 size_t size,
                                 struct list_head *list);
extern int vmw_shader_remove(struct vmw_cmdbuf_res_manager *man,
                             u32 user_key, SVGA3dShaderType shader_type,
                             struct list_head *list);
extern int vmw_dx_shader_add(struct vmw_cmdbuf_res_manager *man,
                             struct vmw_resource *ctx,
                             u32 user_key,
                             SVGA3dShaderType shader_type,
                             struct list_head *list);
extern void vmw_dx_shader_cotable_list_scrub(struct vmw_private *dev_priv,
                                             struct list_head *list,
                                             bool readback);

extern struct vmw_resource *
vmw_shader_lookup(struct vmw_cmdbuf_res_manager *man,
                  u32 user_key, SVGA3dShaderType shader_type);

/*
 * Command buffer managed resources - vmwgfx_cmdbuf_res.c
 */

extern struct vmw_cmdbuf_res_manager *
vmw_cmdbuf_res_man_create(struct vmw_private *dev_priv);
extern void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man);
extern size_t vmw_cmdbuf_res_man_size(void);
extern struct vmw_resource *
vmw_cmdbuf_res_lookup(struct vmw_cmdbuf_res_manager *man,
                      enum vmw_cmdbuf_res_type res_type,
                      u32 user_key);
extern void vmw_cmdbuf_res_revert(struct list_head *list);
extern void vmw_cmdbuf_res_commit(struct list_head *list);
extern int vmw_cmdbuf_res_add(struct vmw_cmdbuf_res_manager *man,
                              enum vmw_cmdbuf_res_type res_type,
                              u32 user_key,
                              struct vmw_resource *res,
                              struct list_head *list);
extern int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man,
                                 enum vmw_cmdbuf_res_type res_type,
                                 u32 user_key,
                                 struct list_head *list,
                                 struct vmw_resource **res);

/*
 * COTable management - vmwgfx_cotable.c
 */
extern const SVGACOTableType vmw_cotable_scrub_order[];
extern struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv,
                                              struct vmw_resource *ctx,
                                              u32 type);
extern int vmw_cotable_notify(struct vmw_resource *res, int id);
extern int vmw_cotable_scrub(struct vmw_resource *res, bool readback);
extern void vmw_cotable_add_resource(struct vmw_resource *ctx,
                                     struct list_head *head);

/*
 * Command buffer management - vmwgfx_cmdbuf.c
 */
struct vmw_cmdbuf_man;
struct vmw_cmdbuf_header;

extern struct vmw_cmdbuf_man *
vmw_cmdbuf_man_create(struct vmw_private *dev_priv);
extern int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man,
                                    size_t size, size_t default_size);
extern void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man);
extern void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man);
extern int vmw_cmdbuf_idle(struct vmw_cmdbuf_man *man, bool interruptible,
                           unsigned long timeout);
extern void *vmw_cmdbuf_reserve(struct vmw_cmdbuf_man *man, size_t size,
                                int ctx_id, bool interruptible,
                                struct vmw_cmdbuf_header *header);
extern void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size,
                              struct vmw_cmdbuf_header *header,
                              bool flush);
extern void *vmw_cmdbuf_alloc(struct vmw_cmdbuf_man *man,
                              size_t size, bool interruptible,
                              struct vmw_cmdbuf_header **p_header);
extern void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header);
extern int vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man,
                                bool interruptible);
extern void vmw_cmdbuf_irqthread(struct vmw_cmdbuf_man *man);

/* CPU blit utilities - vmwgfx_blit.c */

/**
 * struct vmw_diff_cpy - CPU blit information structure
 *
 * @rect: The output bounding box rectangle.
 * @line: The current line of the blit.
 * @line_offset: Offset of the current line segment.
 * @cpp: Bytes per pixel (granularity information).
 * @do_cpy: Which memcpy function to use.
 */
struct vmw_diff_cpy {
        struct drm_rect rect;
        size_t line;
        size_t line_offset;
        int cpp;
        void (*do_cpy)(struct vmw_diff_cpy *diff, u8 *dest, const u8 *src,
                       size_t n);
};

#define VMW_CPU_BLIT_INITIALIZER {      \
        .do_cpy = vmw_memcpy,           \
}

#define VMW_CPU_BLIT_DIFF_INITIALIZER(_cpp) {     \
        .line = 0,                                \
        .line_offset = 0,                         \
        .rect = { .x1 = INT_MAX/2,                \
                  .y1 = INT_MAX/2,                \
                  .x2 = INT_MIN/2,                \
                  .y2 = INT_MIN/2                 \
        },                                        \
        .cpp = _cpp,                              \
        .do_cpy = vmw_diff_memcpy,                \
}

void vmw_diff_memcpy(struct vmw_diff_cpy *diff, u8 *dest, const u8 *src,
                     size_t n);

void vmw_memcpy(struct vmw_diff_cpy *diff, u8 *dest, const u8 *src, size_t n);

int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
                    u32 dst_offset, u32 dst_stride,
                    struct ttm_buffer_object *src,
                    u32 src_offset, u32 src_stride,
                    u32 w, u32 h,
                    struct vmw_diff_cpy *diff);

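/*
 * Usage sketch (editor's illustration, not part of the driver): a 4-byte-
 * per-pixel diff blit between two buffer objects. vmw_bo_cpu_blit() expects
 * both buffer objects to be pinned or reserved by the caller, takes the
 * width argument in bytes, and leaves the bounding box of the pixels that
 * actually changed in diff.rect. The function name is hypothetical.
 */
static inline int vmw_bo_cpu_blit_example(struct ttm_buffer_object *dst,
                                          struct ttm_buffer_object *src,
                                          u32 dst_stride, u32 src_stride,
                                          u32 width, u32 height)
{
        struct vmw_diff_cpy diff = VMW_CPU_BLIT_DIFF_INITIALIZER(4);

        return vmw_bo_cpu_blit(dst, 0, dst_stride, src, 0, src_stride,
                               width * 4, height, &diff);
}
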
/* Host messaging - vmwgfx_msg.c: */
int vmw_host_get_guestinfo(const char *guest_info_param,
                           char *buffer, size_t *length);
int vmw_host_log(const char *log);

/**
 * Inline helper functions
 */

static inline void vmw_surface_unreference(struct vmw_surface **srf)
{
        struct vmw_surface *tmp_srf = *srf;
        struct vmw_resource *res = &tmp_srf->res;
        *srf = NULL;

        vmw_resource_unreference(&res);
}

static inline struct vmw_surface *vmw_surface_reference(struct vmw_surface *srf)
{
        (void) vmw_resource_reference(&srf->res);
        return srf;
}

static inline void vmw_bo_unreference(struct vmw_buffer_object **buf)
{
        struct vmw_buffer_object *tmp_buf = *buf;

        *buf = NULL;
        if (tmp_buf != NULL) {
                struct ttm_buffer_object *bo = &tmp_buf->base;

                ttm_bo_unref(&bo);
        }
}

static inline struct vmw_buffer_object *
vmw_bo_reference(struct vmw_buffer_object *buf)
{
        if (ttm_bo_reference(&buf->base))
                return buf;
        return NULL;
}

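/*
 * Usage sketch (editor's illustration, not part of the driver): take an
 * extra reference on a buffer object around a short critical section and
 * drop it again. vmw_bo_unreference() also clears the caller's pointer.
 * The function name is hypothetical.
 */
static inline void vmw_bo_use_example(struct vmw_buffer_object *buf)
{
        struct vmw_buffer_object *ref = vmw_bo_reference(buf);

        if (ref == NULL)
                return;

        /* ... safely use ref->base here while the reference is held ... */

        vmw_bo_unreference(&ref);       /* Drops the reference, NULLs ref. */
}
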
static inline struct ttm_mem_global *vmw_mem_glob(struct vmw_private *dev_priv)
{
        return (struct ttm_mem_global *) dev_priv->mem_global_ref.object;
}

static inline void vmw_fifo_resource_inc(struct vmw_private *dev_priv)
{
        atomic_inc(&dev_priv->num_fifo_resources);
}

static inline void vmw_fifo_resource_dec(struct vmw_private *dev_priv)
{
        atomic_dec(&dev_priv->num_fifo_resources);
}

/**
 * vmw_mmio_read - Perform a MMIO read from volatile memory
 *
 * @addr: The address to read from
 *
 * This function is intended to be equivalent to ioread32() on
 * memremap'd memory, but without byteswapping.
 */
static inline u32 vmw_mmio_read(u32 *addr)
{
        return READ_ONCE(*addr);
}

/**
 * vmw_mmio_write - Perform a MMIO write to volatile memory
 *
 * @addr: The address to write to
 *
 * This function is intended to be equivalent to iowrite32 on
 * memremap'd memory, but without byteswapping.
 */
static inline void vmw_mmio_write(u32 value, u32 *addr)
{
        WRITE_ONCE(*addr, value);
}
#endif