/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#ifndef _VMWGFX_DRV_H_
#define _VMWGFX_DRV_H_

#include "vmwgfx_reg.h"
#include <drm/drmP.h>
#include <drm/vmwgfx_drm.h>
#include <drm/drm_hashtab.h>
#include <linux/suspend.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_object.h>
#include <drm/ttm/ttm_lock.h>
#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_module.h>
#include "vmwgfx_fence.h"

#define VMWGFX_DRIVER_DATE "20140704"
#define VMWGFX_DRIVER_MAJOR 2
#define VMWGFX_DRIVER_MINOR 6
#define VMWGFX_DRIVER_PATCHLEVEL 1
#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
#define VMWGFX_MAX_RELOCATIONS 2048
#define VMWGFX_MAX_VALIDATIONS 2048
#define VMWGFX_MAX_DISPLAYS 16
#define VMWGFX_CMD_BOUNCE_INIT_SIZE 32768
#define VMWGFX_ENABLE_SCREEN_TARGET_OTABLE 0

/*
 * Perhaps we should have sysfs entries for these.
 */
#define VMWGFX_NUM_GB_CONTEXT 256
#define VMWGFX_NUM_GB_SHADER 20000
#define VMWGFX_NUM_GB_SURFACE 32768
#define VMWGFX_NUM_GB_SCREEN_TARGET VMWGFX_MAX_DISPLAYS
#define VMWGFX_NUM_MOB (VMWGFX_NUM_GB_CONTEXT +\
			VMWGFX_NUM_GB_SHADER +\
			VMWGFX_NUM_GB_SURFACE +\
			VMWGFX_NUM_GB_SCREEN_TARGET)

#define VMW_PL_GMR TTM_PL_PRIV0
#define VMW_PL_FLAG_GMR TTM_PL_FLAG_PRIV0
#define VMW_PL_MOB TTM_PL_PRIV1
#define VMW_PL_FLAG_MOB TTM_PL_FLAG_PRIV1

#define VMW_RES_CONTEXT ttm_driver_type0
#define VMW_RES_SURFACE ttm_driver_type1
#define VMW_RES_STREAM ttm_driver_type2
#define VMW_RES_FENCE ttm_driver_type3
#define VMW_RES_SHADER ttm_driver_type4

struct vmw_fpriv {
	struct drm_master *locked_master;
	struct ttm_object_file *tfile;
	struct list_head fence_events;
	bool gb_aware;
};

struct vmw_dma_buffer {
	struct ttm_buffer_object base;
	struct list_head res_list;
};

/**
 * struct vmw_validate_buffer - Carries validation info about buffers.
 *
 * @base: Validation info for TTM.
 * @hash: Hash entry for quick lookup of the TTM buffer object.
 *
 * This structure also contains driver-private validation info
 * on top of the info needed by TTM.
 */
struct vmw_validate_buffer {
	struct ttm_validate_buffer base;
	struct drm_hash_item hash;
	bool validate_as_mob;
};

struct vmw_res_func;
struct vmw_resource {
	struct kref kref;
	struct vmw_private *dev_priv;
	int id;
	bool avail;
	unsigned long backup_size;
	bool res_dirty; /* Protected by backup buffer reserved */
	bool backup_dirty; /* Protected by backup buffer reserved */
	struct vmw_dma_buffer *backup;
	unsigned long backup_offset;
	const struct vmw_res_func *func;
	struct list_head lru_head; /* Protected by the resource lock */
	struct list_head mob_head; /* Protected by @backup reserved */
	struct list_head binding_head; /* Protected by binding_mutex */
	void (*res_free) (struct vmw_resource *res);
	void (*hw_destroy) (struct vmw_resource *res);
};


/*
 * Resources that are managed using ioctls.
 */
enum vmw_res_type {
	vmw_res_context,
	vmw_res_surface,
	vmw_res_stream,
	vmw_res_shader,
	vmw_res_max
};

/*
 * Resources that are managed using command streams.
 */
enum vmw_cmdbuf_res_type {
	vmw_cmdbuf_res_compat_shader
};

struct vmw_cmdbuf_res_manager;

struct vmw_cursor_snooper {
	struct drm_crtc *crtc;
	size_t age;
	uint32_t *image;
};

struct vmw_framebuffer;
struct vmw_surface_offset;

struct vmw_surface {
	struct vmw_resource res;
	uint32_t flags;
	uint32_t format;
	uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES];
	struct drm_vmw_size base_size;
	struct drm_vmw_size *sizes;
	uint32_t num_sizes;
	bool scanout;
	/* TODO so far just an extra pointer */
	struct vmw_cursor_snooper snooper;
	struct vmw_surface_offset *offsets;
	SVGA3dTextureFilter autogen_filter;
	uint32_t multisample_count;
};

struct vmw_marker_queue {
	struct list_head head;
	u64 lag;
	u64 lag_time;
	spinlock_t lock;
};

struct vmw_fifo_state {
	unsigned long reserved_size;
	__le32 *dynamic_buffer;
	__le32 *static_buffer;
	unsigned long static_buffer_size;
	bool using_bounce_buffer;
	uint32_t capabilities;
	struct mutex fifo_mutex;
	struct rw_semaphore rwsem;
	struct vmw_marker_queue marker_queue;
};

struct vmw_relocation {
	SVGAMobId *mob_loc;
	SVGAGuestPtr *location;
	uint32_t index;
};

/**
 * struct vmw_res_cache_entry - resource information cache entry
 *
 * @valid: Whether the entry is valid, which also implies that the execbuf
 * code holds a reference to the resource, and it's placed on the
 * validation list.
 * @handle: User-space handle of a resource.
 * @res: Non-ref-counted pointer to the resource.
 *
 * Used to avoid frequent repeated user-space handle lookups of the
 * same resource.
 */
struct vmw_res_cache_entry {
	bool valid;
	uint32_t handle;
	struct vmw_resource *res;
	struct vmw_resource_val_node *node;
};
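
/*
 * Illustrative sketch only (not part of the driver API): before doing a
 * full user-space handle lookup, execbuf-style code can consult a cache
 * entry like the one above and fall back to a full lookup on a miss.
 * The helper below is hypothetical.
 *
 *	static struct vmw_resource *
 *	example_cached_lookup(struct vmw_res_cache_entry *rcache, uint32_t handle)
 *	{
 *		if (rcache->valid && rcache->handle == handle)
 *			return rcache->res;
 *		return NULL;
 *	}
 */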

/**
 * enum vmw_dma_map_mode - indicate how to perform TTM page dma mappings.
 */
enum vmw_dma_map_mode {
	vmw_dma_phys,           /* Use physical page addresses */
	vmw_dma_alloc_coherent, /* Use TTM coherent pages */
	vmw_dma_map_populate,   /* Unmap from DMA just after unpopulate */
	vmw_dma_map_bind,       /* Unmap from DMA just before unbind */
	vmw_dma_map_max
};

/**
 * struct vmw_sg_table - Scatter/gather table for binding, with additional
 * device-specific information.
 *
 * @sgt: Pointer to a struct sg_table with binding information
 * @num_regions: Number of regions with device-address contiguous pages
 */
struct vmw_sg_table {
	enum vmw_dma_map_mode mode;
	struct page **pages;
	const dma_addr_t *addrs;
	struct sg_table *sgt;
	unsigned long num_regions;
	unsigned long num_pages;
};

/**
 * struct vmw_piter - Page iterator that iterates over a list of pages
 * and DMA addresses that could be either a scatter-gather list or
 * arrays
 *
 * @pages: Array of page pointers to the pages.
 * @addrs: DMA addresses to the pages if coherent pages are used.
 * @iter: Scatter-gather page iterator. Current position in SG list.
 * @i: Current position in arrays.
 * @num_pages: Number of pages total.
 * @next: Function to advance the iterator. Returns false if past the list
 * of pages, true otherwise.
 * @dma_address: Function to return the DMA address of the current page.
 */
struct vmw_piter {
	struct page **pages;
	const dma_addr_t *addrs;
	struct sg_page_iter iter;
	unsigned long i;
	unsigned long num_pages;
	bool (*next)(struct vmw_piter *);
	dma_addr_t (*dma_address)(struct vmw_piter *);
	struct page *(*page)(struct vmw_piter *);
};

/*
 * enum vmw_ctx_binding_type - abstract resource to context binding types
 */
enum vmw_ctx_binding_type {
	vmw_ctx_binding_shader,
	vmw_ctx_binding_rt,
	vmw_ctx_binding_tex,
	vmw_ctx_binding_max
};

/**
 * struct vmw_ctx_bindinfo - structure representing a single context binding
 *
 * @ctx: Pointer to the context structure. NULL means the binding is not
 * active.
 * @res: Non ref-counted pointer to the bound resource.
 * @bt: The binding type.
 * @i1: Union of information needed to unbind.
 */
struct vmw_ctx_bindinfo {
	struct vmw_resource *ctx;
	struct vmw_resource *res;
	enum vmw_ctx_binding_type bt;
	bool scrubbed;
	union {
		SVGA3dShaderType shader_type;
		SVGA3dRenderTargetType rt_type;
		uint32 texture_stage;
	} i1;
};

/**
 * struct vmw_ctx_binding - structure representing a single context binding
 * - suitable for tracking in a context
 *
 * @ctx_list: List head for context.
 * @res_list: List head for bound resource.
 * @bi: Binding info
 */
struct vmw_ctx_binding {
	struct list_head ctx_list;
	struct list_head res_list;
	struct vmw_ctx_bindinfo bi;
};


/**
 * struct vmw_ctx_binding_state - context binding state
 *
 * @list: linked list of individual bindings.
 * @render_targets: Render target bindings.
 * @texture_units: Texture units/samplers bindings.
 * @shaders: Shader bindings.
 *
 * Note that this structure also provides storage space for the individual
 * struct vmw_ctx_binding objects, so that no dynamic allocation is needed
 * for individual bindings.
 *
 */
struct vmw_ctx_binding_state {
	struct list_head list;
	struct vmw_ctx_binding render_targets[SVGA3D_RT_MAX];
	struct vmw_ctx_binding texture_units[SVGA3D_NUM_TEXTURE_UNITS];
	struct vmw_ctx_binding shaders[SVGA3D_SHADERTYPE_MAX];
};
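
/*
 * Illustrative sketch only: filling in a struct vmw_ctx_bindinfo and handing
 * it to vmw_context_binding_add() (declared further down in this file). The
 * context/surface resource pointers and the render-target type used here are
 * placeholders.
 *
 *	struct vmw_ctx_bindinfo bi;
 *
 *	bi.ctx = ctx_res;
 *	bi.res = surf_res;
 *	bi.bt = vmw_ctx_binding_rt;
 *	bi.i1.rt_type = rt_type;
 *	ret = vmw_context_binding_add(cbs, &bi);
 */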

struct vmw_sw_context{
	struct drm_open_hash res_ht;
	bool res_ht_initialized;
	bool kernel; /**< is the call made from the kernel */
	struct vmw_fpriv *fp;
	struct list_head validate_nodes;
	struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS];
	uint32_t cur_reloc;
	struct vmw_validate_buffer val_bufs[VMWGFX_MAX_VALIDATIONS];
	uint32_t cur_val_buf;
	uint32_t *cmd_bounce;
	uint32_t cmd_bounce_size;
	struct list_head resource_list;
	struct ttm_buffer_object *cur_query_bo;
	struct list_head res_relocations;
	uint32_t *buf_start;
	struct vmw_res_cache_entry res_cache[vmw_res_max];
	struct vmw_resource *last_query_ctx;
	bool needs_post_query_barrier;
	struct vmw_resource *error_resource;
	struct vmw_ctx_binding_state staged_bindings;
	struct list_head staged_cmd_res;
};

struct vmw_legacy_display;
struct vmw_overlay;

struct vmw_master {
	struct ttm_lock lock;
	struct mutex fb_surf_mutex;
	struct list_head fb_surf;
};

struct vmw_vga_topology_state {
	uint32_t width;
	uint32_t height;
	uint32_t primary;
	uint32_t pos_x;
	uint32_t pos_y;
};

struct vmw_private {
	struct ttm_bo_device bdev;
	struct ttm_bo_global_ref bo_global_ref;
	struct drm_global_reference mem_global_ref;

	struct vmw_fifo_state fifo;

	struct drm_device *dev;
	unsigned long vmw_chipset;
	unsigned int io_start;
	uint32_t vram_start;
	uint32_t vram_size;
	uint32_t prim_bb_mem;
	uint32_t mmio_start;
	uint32_t mmio_size;
	uint32_t fb_max_width;
	uint32_t fb_max_height;
	uint32_t initial_width;
	uint32_t initial_height;
	__le32 __iomem *mmio_virt;
	int mmio_mtrr;
	uint32_t capabilities;
	uint32_t max_gmr_ids;
	uint32_t max_gmr_pages;
	uint32_t max_mob_pages;
	uint32_t max_mob_size;
	uint32_t memory_size;
	bool has_gmr;
	bool has_mob;
	spinlock_t hw_lock;
	spinlock_t cap_lock;

	/*
	 * VGA registers.
	 */

	struct vmw_vga_topology_state vga_save[VMWGFX_MAX_DISPLAYS];
	uint32_t vga_width;
	uint32_t vga_height;
	uint32_t vga_bpp;
	uint32_t vga_bpl;
	uint32_t vga_pitchlock;

	uint32_t num_displays;

	/*
	 * Framebuffer info.
	 */

	void *fb_info;
	struct vmw_legacy_display *ldu_priv;
	struct vmw_screen_object_display *sou_priv;
	struct vmw_overlay *overlay_priv;

	/*
	 * Context and surface management.
	 */

	rwlock_t resource_lock;
	struct idr res_idr[vmw_res_max];
	/*
	 * Block lastclose from racing with firstopen.
	 */

	struct mutex init_mutex;

	/*
	 * A resource manager for kernel-only surfaces and
	 * contexts.
	 */

	struct ttm_object_device *tdev;

	/*
	 * Fencing and IRQs.
	 */

	atomic_t marker_seq;
	wait_queue_head_t fence_queue;
	wait_queue_head_t fifo_queue;
	spinlock_t waiter_lock;
	int fence_queue_waiters; /* Protected by waiter_lock */
	int goal_queue_waiters; /* Protected by waiter_lock */
	atomic_t fifo_queue_waiters;
	uint32_t last_read_seqno;
	spinlock_t irq_lock;
	struct vmw_fence_manager *fman;
	uint32_t irq_mask;

	/*
	 * Device state
	 */

	uint32_t traces_state;
	uint32_t enable_state;
	uint32_t config_done_state;

	/**
	 * Execbuf
	 */
	/**
	 * Protected by the cmdbuf mutex.
	 */

	struct vmw_sw_context ctx;
	struct mutex cmdbuf_mutex;
	struct mutex binding_mutex;

	/**
	 * Operating mode.
	 */

	bool stealth;
	bool enable_fb;

	/**
	 * Master management.
	 */

	struct vmw_master *active_master;
	struct vmw_master fbdev_master;
	struct notifier_block pm_nb;
	bool suspended;

	struct mutex release_mutex;
	uint32_t num_3d_resources;

	/*
	 * Replace this with an rwsem as soon as we have down_xx_interruptible()
	 */
	struct ttm_lock reservation_sem;

	/*
	 * Query processing. These members
	 * are protected by the cmdbuf mutex.
	 */

	struct ttm_buffer_object *dummy_query_bo;
	struct ttm_buffer_object *pinned_bo;
	uint32_t query_cid;
	uint32_t query_cid_valid;
	bool dummy_query_bo_pinned;

	/*
	 * Surface swapping. The "surface_lru" list is protected by the
	 * resource lock in order to be able to destroy a surface and take
	 * it off the lru atomically. "used_memory_size" is currently
	 * protected by the cmdbuf mutex for simplicity.
	 */

	struct list_head res_lru[vmw_res_max];
	uint32_t used_memory_size;

	/*
	 * DMA mapping stuff.
	 */
	enum vmw_dma_map_mode map_mode;

	/*
	 * Guest Backed stuff
	 */
	struct ttm_buffer_object *otable_bo;
	struct vmw_otable *otables;
};

static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
{
	return container_of(res, struct vmw_surface, res);
}

static inline struct vmw_private *vmw_priv(struct drm_device *dev)
{
	return (struct vmw_private *)dev->dev_private;
}

static inline struct vmw_fpriv *vmw_fpriv(struct drm_file *file_priv)
{
	return (struct vmw_fpriv *)file_priv->driver_priv;
}

static inline struct vmw_master *vmw_master(struct drm_master *master)
{
	return (struct vmw_master *) master->driver_priv;
}

/*
 * The locking here is fine-grained, so that it is performed once
 * for every read- and write operation. This is of course costly, but we
 * don't perform much register access in the timing critical paths anyway.
 * Instead we have the extra benefit of being sure that we don't forget
 * the hw lock around register accesses.
 */
static inline void vmw_write(struct vmw_private *dev_priv,
			     unsigned int offset, uint32_t value)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&dev_priv->hw_lock, irq_flags);
	outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
	outl(value, dev_priv->io_start + VMWGFX_VALUE_PORT);
	spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags);
}

static inline uint32_t vmw_read(struct vmw_private *dev_priv,
				unsigned int offset)
{
	unsigned long irq_flags;
	u32 val;

	spin_lock_irqsave(&dev_priv->hw_lock, irq_flags);
	outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
	val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT);
	spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags);

	return val;
}
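
/*
 * Usage sketch: reading and writing an SVGA device register through the
 * index/value port pair above. The SVGA_REG_* register indices are defined
 * in the SVGA device headers, not in this file.
 *
 *	uint32_t fb_width = vmw_read(dev_priv, SVGA_REG_WIDTH);
 *
 *	vmw_write(dev_priv, SVGA_REG_ENABLE, 1);
 */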

int vmw_3d_resource_inc(struct vmw_private *dev_priv, bool unhide_svga);
void vmw_3d_resource_dec(struct vmw_private *dev_priv, bool hide_svga);

/**
 * GMR utilities - vmwgfx_gmr.c
 */

extern int vmw_gmr_bind(struct vmw_private *dev_priv,
			const struct vmw_sg_table *vsgt,
			unsigned long num_pages,
			int gmr_id);
extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id);

/**
 * Resource utilities - vmwgfx_resource.c
 */
struct vmw_user_resource_conv;

extern void vmw_resource_unreference(struct vmw_resource **p_res);
extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
extern struct vmw_resource *
vmw_resource_reference_unless_doomed(struct vmw_resource *res);
extern int vmw_resource_validate(struct vmw_resource *res);
extern int vmw_resource_reserve(struct vmw_resource *res, bool no_backup);
extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
				  struct ttm_object_file *tfile,
				  uint32_t handle,
				  struct vmw_surface **out_surf,
				  struct vmw_dma_buffer **out_buf);
extern int vmw_user_resource_lookup_handle(
	struct vmw_private *dev_priv,
	struct ttm_object_file *tfile,
	uint32_t handle,
	const struct vmw_user_resource_conv *converter,
	struct vmw_resource **p_res);
extern void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo);
extern int vmw_dmabuf_init(struct vmw_private *dev_priv,
			   struct vmw_dma_buffer *vmw_bo,
			   size_t size, struct ttm_placement *placement,
			   bool interruptible,
			   void (*bo_free) (struct ttm_buffer_object *bo));
extern int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
					 struct ttm_object_file *tfile);
extern int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
				 struct ttm_object_file *tfile,
				 uint32_t size,
				 bool shareable,
				 uint32_t *handle,
				 struct vmw_dma_buffer **p_dma_buf);
extern int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
				     struct vmw_dma_buffer *dma_buf,
				     uint32_t *handle);
extern int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv);
extern int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv);
extern int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
					 struct drm_file *file_priv);
extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
					 uint32_t cur_validate_node);
extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo);
extern int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
				  uint32_t id, struct vmw_dma_buffer **out);
extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv);
extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv);
extern int vmw_user_stream_lookup(struct vmw_private *dev_priv,
				  struct ttm_object_file *tfile,
				  uint32_t *inout_id,
				  struct vmw_resource **out);
extern void vmw_resource_unreserve(struct vmw_resource *res,
				   struct vmw_dma_buffer *new_backup,
				   unsigned long new_backup_offset);
extern void vmw_resource_move_notify(struct ttm_buffer_object *bo,
				     struct ttm_mem_reg *mem);
extern void vmw_fence_single_bo(struct ttm_buffer_object *bo,
				struct vmw_fence_obj *fence);
extern void vmw_resource_evict_all(struct vmw_private *dev_priv);

/**
 * DMA buffer helper routines - vmwgfx_dmabuf.c
 */
extern int vmw_dmabuf_to_placement(struct vmw_private *vmw_priv,
				   struct vmw_dma_buffer *bo,
				   struct ttm_placement *placement,
				   bool interruptible);
extern int vmw_dmabuf_to_vram(struct vmw_private *dev_priv,
			      struct vmw_dma_buffer *buf,
			      bool pin, bool interruptible);
extern int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv,
				     struct vmw_dma_buffer *buf,
				     bool pin, bool interruptible);
extern int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
				       struct vmw_dma_buffer *bo,
				       bool pin, bool interruptible);
extern int vmw_dmabuf_unpin(struct vmw_private *vmw_priv,
			    struct vmw_dma_buffer *bo,
			    bool interruptible);
extern void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *buf,
				 SVGAGuestPtr *ptr);
extern void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin);

/**
 * Misc Ioctl functionality - vmwgfx_ioctl.c
 */

extern int vmw_getparam_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv);
extern int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
extern int vmw_present_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
extern int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
				      struct drm_file *file_priv);
extern unsigned int vmw_fops_poll(struct file *filp,
				  struct poll_table_struct *wait);
extern ssize_t vmw_fops_read(struct file *filp, char __user *buffer,
			     size_t count, loff_t *offset);

/**
 * Fifo utilities - vmwgfx_fifo.c
 */

extern int vmw_fifo_init(struct vmw_private *dev_priv,
			 struct vmw_fifo_state *fifo);
extern void vmw_fifo_release(struct vmw_private *dev_priv,
			     struct vmw_fifo_state *fifo);
extern void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes);
extern void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes);
extern int vmw_fifo_send_fence(struct vmw_private *dev_priv,
			       uint32_t *seqno);
extern void vmw_fifo_ping_host_locked(struct vmw_private *, uint32_t reason);
extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason);
extern bool vmw_fifo_have_3d(struct vmw_private *dev_priv);
extern bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv);
extern int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
				     uint32_t cid);
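
/*
 * Typical FIFO submission pattern (sketch only): reserve space in the
 * command FIFO, fill in the command, then commit exactly the number of
 * bytes that were reserved. The command buffer and size are placeholders.
 *
 *	void *fifo_mem = vmw_fifo_reserve(dev_priv, bytes);
 *
 *	if (unlikely(fifo_mem == NULL))
 *		return -ENOMEM;
 *	memcpy(fifo_mem, cmd, bytes);
 *	vmw_fifo_commit(dev_priv, bytes);
 */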

/**
 * TTM glue - vmwgfx_ttm_glue.c
 */

extern int vmw_ttm_global_init(struct vmw_private *dev_priv);
extern void vmw_ttm_global_release(struct vmw_private *dev_priv);
extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);

/**
 * TTM buffer object driver - vmwgfx_buffer.c
 */

extern const size_t vmw_tt_size;
extern struct ttm_placement vmw_vram_placement;
extern struct ttm_placement vmw_vram_ne_placement;
extern struct ttm_placement vmw_vram_sys_placement;
extern struct ttm_placement vmw_vram_gmr_placement;
extern struct ttm_placement vmw_vram_gmr_ne_placement;
extern struct ttm_placement vmw_sys_placement;
extern struct ttm_placement vmw_sys_ne_placement;
extern struct ttm_placement vmw_evictable_placement;
extern struct ttm_placement vmw_srf_placement;
extern struct ttm_placement vmw_mob_placement;
extern struct ttm_bo_driver vmw_bo_driver;
extern int vmw_dma_quiescent(struct drm_device *dev);
extern int vmw_bo_map_dma(struct ttm_buffer_object *bo);
extern void vmw_bo_unmap_dma(struct ttm_buffer_object *bo);
extern const struct vmw_sg_table *
vmw_bo_sg_table(struct ttm_buffer_object *bo);
extern void vmw_piter_start(struct vmw_piter *viter,
			    const struct vmw_sg_table *vsgt,
			    unsigned long p_offs);

/**
 * vmw_piter_next - Advance the iterator one page.
 *
 * @viter: Pointer to the iterator to advance.
 *
 * Returns false if past the list of pages, true otherwise.
 */
static inline bool vmw_piter_next(struct vmw_piter *viter)
{
	return viter->next(viter);
}

/**
 * vmw_piter_dma_addr - Return the DMA address of the current page.
 *
 * @viter: Pointer to the iterator
 *
 * Returns the DMA address of the page pointed to by @viter.
 */
static inline dma_addr_t vmw_piter_dma_addr(struct vmw_piter *viter)
{
	return viter->dma_address(viter);
}

/**
 * vmw_piter_page - Return a pointer to the current page.
 *
 * @viter: Pointer to the iterator
 *
 * Returns a pointer to the page pointed to by @viter.
 */
static inline struct page *vmw_piter_page(struct vmw_piter *viter)
{
	return viter->page(viter);
}
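
/*
 * Usage sketch: walking all pages of a buffer object's vmw_sg_table with the
 * page iterator. The buffer object "bo" is a placeholder; each successful
 * vmw_piter_next() call advances the iterator to a valid page.
 *
 *	const struct vmw_sg_table *vsgt = vmw_bo_sg_table(bo);
 *	struct vmw_piter viter;
 *
 *	vmw_piter_start(&viter, vsgt, 0);
 *	while (vmw_piter_next(&viter)) {
 *		dma_addr_t addr = vmw_piter_dma_addr(&viter);
 *	}
 */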

/**
 * Command submission - vmwgfx_execbuf.c
 */

extern int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
extern int vmw_execbuf_process(struct drm_file *file_priv,
			       struct vmw_private *dev_priv,
			       void __user *user_commands,
			       void *kernel_commands,
			       uint32_t command_size,
			       uint64_t throttle_us,
			       struct drm_vmw_fence_rep __user
			       *user_fence_rep,
			       struct vmw_fence_obj **out_fence);
extern void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
					    struct vmw_fence_obj *fence);
extern void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv);

extern int vmw_execbuf_fence_commands(struct drm_file *file_priv,
				      struct vmw_private *dev_priv,
				      struct vmw_fence_obj **p_fence,
				      uint32_t *p_handle);
extern void vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
					struct vmw_fpriv *vmw_fp,
					int ret,
					struct drm_vmw_fence_rep __user
					*user_fence_rep,
					struct vmw_fence_obj *fence,
					uint32_t fence_handle);

/**
 * IRQs and waiting - vmwgfx_irq.c
 */

extern irqreturn_t vmw_irq_handler(int irq, void *arg);
extern int vmw_wait_seqno(struct vmw_private *dev_priv, bool lazy,
			  uint32_t seqno, bool interruptible,
			  unsigned long timeout);
extern void vmw_irq_preinstall(struct drm_device *dev);
extern int vmw_irq_postinstall(struct drm_device *dev);
extern void vmw_irq_uninstall(struct drm_device *dev);
extern bool vmw_seqno_passed(struct vmw_private *dev_priv,
			     uint32_t seqno);
extern int vmw_fallback_wait(struct vmw_private *dev_priv,
			     bool lazy,
			     bool fifo_idle,
			     uint32_t seqno,
			     bool interruptible,
			     unsigned long timeout);
extern void vmw_update_seqno(struct vmw_private *dev_priv,
			     struct vmw_fifo_state *fifo_state);
extern void vmw_seqno_waiter_add(struct vmw_private *dev_priv);
extern void vmw_seqno_waiter_remove(struct vmw_private *dev_priv);
extern void vmw_goal_waiter_add(struct vmw_private *dev_priv);
extern void vmw_goal_waiter_remove(struct vmw_private *dev_priv);

/**
 * Rudimentary fence-like objects currently used only for throttling -
 * vmwgfx_marker.c
 */

extern void vmw_marker_queue_init(struct vmw_marker_queue *queue);
extern void vmw_marker_queue_takedown(struct vmw_marker_queue *queue);
extern int vmw_marker_push(struct vmw_marker_queue *queue,
			   uint32_t seqno);
extern int vmw_marker_pull(struct vmw_marker_queue *queue,
			   uint32_t signaled_seqno);
extern int vmw_wait_lag(struct vmw_private *dev_priv,
			struct vmw_marker_queue *queue, uint32_t us);
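
/*
 * Throttling sketch (hypothetical caller): a marker is pushed when a batch
 * is submitted and pulled as fences signal; vmw_wait_lag() then blocks the
 * submitter until the marker queue lag drops below the given number of
 * microseconds.
 *
 *	vmw_marker_push(&dev_priv->fifo.marker_queue, seqno);
 *
 *	ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue, throttle_us);
 */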

/**
 * Kernel framebuffer - vmwgfx_fb.c
 */

int vmw_fb_init(struct vmw_private *vmw_priv);
int vmw_fb_close(struct vmw_private *dev_priv);
int vmw_fb_off(struct vmw_private *vmw_priv);
int vmw_fb_on(struct vmw_private *vmw_priv);

/**
 * Kernel modesetting - vmwgfx_kms.c
 */

int vmw_kms_init(struct vmw_private *dev_priv);
int vmw_kms_close(struct vmw_private *dev_priv);
int vmw_kms_save_vga(struct vmw_private *vmw_priv);
int vmw_kms_restore_vga(struct vmw_private *vmw_priv);
int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv);
void vmw_kms_cursor_snoop(struct vmw_surface *srf,
			  struct ttm_object_file *tfile,
			  struct ttm_buffer_object *bo,
			  SVGA3dCmdHeader *header);
int vmw_kms_write_svga(struct vmw_private *vmw_priv,
		       unsigned width, unsigned height, unsigned pitch,
		       unsigned bpp, unsigned depth);
void vmw_kms_idle_workqueues(struct vmw_master *vmaster);
bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
				uint32_t pitch,
				uint32_t height);
u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc);
int vmw_enable_vblank(struct drm_device *dev, int crtc);
void vmw_disable_vblank(struct drm_device *dev, int crtc);
int vmw_kms_present(struct vmw_private *dev_priv,
		    struct drm_file *file_priv,
		    struct vmw_framebuffer *vfb,
		    struct vmw_surface *surface,
		    uint32_t sid, int32_t destX, int32_t destY,
		    struct drm_vmw_rect *clips,
		    uint32_t num_clips);
int vmw_kms_readback(struct vmw_private *dev_priv,
		     struct drm_file *file_priv,
		     struct vmw_framebuffer *vfb,
		     struct drm_vmw_fence_rep __user *user_fence_rep,
		     struct drm_vmw_rect *clips,
		     uint32_t num_clips);
int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);

int vmw_dumb_create(struct drm_file *file_priv,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args);

int vmw_dumb_map_offset(struct drm_file *file_priv,
			struct drm_device *dev, uint32_t handle,
			uint64_t *offset);
int vmw_dumb_destroy(struct drm_file *file_priv,
		     struct drm_device *dev,
		     uint32_t handle);
/**
 * Overlay control - vmwgfx_overlay.c
 */

int vmw_overlay_init(struct vmw_private *dev_priv);
int vmw_overlay_close(struct vmw_private *dev_priv);
int vmw_overlay_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv);
int vmw_overlay_stop_all(struct vmw_private *dev_priv);
int vmw_overlay_resume_all(struct vmw_private *dev_priv);
int vmw_overlay_pause_all(struct vmw_private *dev_priv);
int vmw_overlay_claim(struct vmw_private *dev_priv, uint32_t *out);
int vmw_overlay_unref(struct vmw_private *dev_priv, uint32_t stream_id);
int vmw_overlay_num_overlays(struct vmw_private *dev_priv);
int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv);

/**
 * GMR Id manager
 */

extern const struct ttm_mem_type_manager_func vmw_gmrid_manager_func;

/**
 * Prime - vmwgfx_prime.c
 */

extern const struct dma_buf_ops vmw_prime_dmabuf_ops;
extern int vmw_prime_fd_to_handle(struct drm_device *dev,
				  struct drm_file *file_priv,
				  int fd, u32 *handle);
extern int vmw_prime_handle_to_fd(struct drm_device *dev,
				  struct drm_file *file_priv,
				  uint32_t handle, uint32_t flags,
				  int *prime_fd);

/*
 * Memory object (MOB) management - vmwgfx_mob.c
 */
struct vmw_mob;
extern int vmw_mob_bind(struct vmw_private *dev_priv, struct vmw_mob *mob,
			const struct vmw_sg_table *vsgt,
			unsigned long num_data_pages, int32_t mob_id);
extern void vmw_mob_unbind(struct vmw_private *dev_priv,
			   struct vmw_mob *mob);
extern void vmw_mob_destroy(struct vmw_mob *mob);
extern struct vmw_mob *vmw_mob_create(unsigned long data_pages);
extern int vmw_otables_setup(struct vmw_private *dev_priv);
extern void vmw_otables_takedown(struct vmw_private *dev_priv);

/*
 * Context management - vmwgfx_context.c
 */

extern const struct vmw_user_resource_conv *user_context_converter;

extern struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv);

extern int vmw_context_check(struct vmw_private *dev_priv,
			     struct ttm_object_file *tfile,
			     int id,
			     struct vmw_resource **p_res);
extern int vmw_context_define_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file_priv);
extern int vmw_context_binding_add(struct vmw_ctx_binding_state *cbs,
				   const struct vmw_ctx_bindinfo *ci);
extern void
vmw_context_binding_state_transfer(struct vmw_resource *res,
				   struct vmw_ctx_binding_state *cbs);
extern void vmw_context_binding_res_list_kill(struct list_head *head);
extern void vmw_context_binding_res_list_scrub(struct list_head *head);
extern int vmw_context_rebind_all(struct vmw_resource *ctx);
extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx);
extern struct vmw_cmdbuf_res_manager *
vmw_context_res_man(struct vmw_resource *ctx);
/*
 * Surface management - vmwgfx_surface.c
 */

extern const struct vmw_user_resource_conv *user_surface_converter;

extern void vmw_surface_res_free(struct vmw_resource *res);
extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file_priv);
extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
extern int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *file_priv);
extern int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *file_priv);
extern int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
					  struct drm_file *file_priv);
extern int vmw_surface_check(struct vmw_private *dev_priv,
			     struct ttm_object_file *tfile,
			     uint32_t handle, int *id);
extern int vmw_surface_validate(struct vmw_private *dev_priv,
				struct vmw_surface *srf);

/*
 * Shader management - vmwgfx_shader.c
 */

extern const struct vmw_user_resource_conv *user_shader_converter;

extern int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file_priv);
extern int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
extern int vmw_compat_shader_add(struct vmw_private *dev_priv,
				 struct vmw_cmdbuf_res_manager *man,
				 u32 user_key, const void *bytecode,
				 SVGA3dShaderType shader_type,
				 size_t size,
				 struct list_head *list);
extern int vmw_compat_shader_remove(struct vmw_cmdbuf_res_manager *man,
				    u32 user_key, SVGA3dShaderType shader_type,
				    struct list_head *list);
extern struct vmw_resource *
vmw_compat_shader_lookup(struct vmw_cmdbuf_res_manager *man,
			 u32 user_key, SVGA3dShaderType shader_type);

/*
 * Command buffer managed resources - vmwgfx_cmdbuf_res.c
 */

extern struct vmw_cmdbuf_res_manager *
vmw_cmdbuf_res_man_create(struct vmw_private *dev_priv);
extern void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man);
extern size_t vmw_cmdbuf_res_man_size(void);
extern struct vmw_resource *
vmw_cmdbuf_res_lookup(struct vmw_cmdbuf_res_manager *man,
		      enum vmw_cmdbuf_res_type res_type,
		      u32 user_key);
extern void vmw_cmdbuf_res_revert(struct list_head *list);
extern void vmw_cmdbuf_res_commit(struct list_head *list);
extern int vmw_cmdbuf_res_add(struct vmw_cmdbuf_res_manager *man,
			      enum vmw_cmdbuf_res_type res_type,
			      u32 user_key,
			      struct vmw_resource *res,
			      struct list_head *list);
extern int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man,
				 enum vmw_cmdbuf_res_type res_type,
				 u32 user_key,
				 struct list_head *list);


/**
 * Inline helper functions
 */

static inline void vmw_surface_unreference(struct vmw_surface **srf)
{
	struct vmw_surface *tmp_srf = *srf;
	struct vmw_resource *res = &tmp_srf->res;
	*srf = NULL;

	vmw_resource_unreference(&res);
}

static inline struct vmw_surface *vmw_surface_reference(struct vmw_surface *srf)
{
	(void) vmw_resource_reference(&srf->res);
	return srf;
}

static inline void vmw_dmabuf_unreference(struct vmw_dma_buffer **buf)
{
	struct vmw_dma_buffer *tmp_buf = *buf;

	*buf = NULL;
	if (tmp_buf != NULL) {
		struct ttm_buffer_object *bo = &tmp_buf->base;

		ttm_bo_unref(&bo);
	}
}

static inline struct vmw_dma_buffer *vmw_dmabuf_reference(struct vmw_dma_buffer *buf)
{
	if (ttm_bo_reference(&buf->base))
		return buf;
	return NULL;
}

static inline struct ttm_mem_global *vmw_mem_glob(struct vmw_private *dev_priv)
{
	return (struct ttm_mem_global *) dev_priv->mem_global_ref.object;
}
#endif