/**************************************************************************
 *
 * Copyright © 2009-2016 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include <linux/module.h>
#include <linux/console.h>

#include <drm/drmP.h>
#include "vmwgfx_drv.h"
#include "vmwgfx_binding.h"
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_object.h>
#include <drm/ttm/ttm_module.h>
#include <linux/dma_remapping.h>

#define VMWGFX_DRIVER_NAME "vmwgfx"
#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
#define VMWGFX_CHIP_SVGAII 0
#define VMW_FB_RESERVATION 0

#define VMW_MIN_INITIAL_WIDTH 800
#define VMW_MIN_INITIAL_HEIGHT 600

#ifndef VMWGFX_GIT_VERSION
#define VMWGFX_GIT_VERSION "Unknown"
#endif

#define VMWGFX_REPO "In Tree"

/**
 * Fully encoded drm commands. Might move to vmw_drm.h
 */

#define DRM_IOCTL_VMW_GET_PARAM \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM, \
		 struct drm_vmw_getparam_arg)
#define DRM_IOCTL_VMW_ALLOC_DMABUF \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF, \
		 union drm_vmw_alloc_dmabuf_arg)
#define DRM_IOCTL_VMW_UNREF_DMABUF \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_DMABUF, \
		struct drm_vmw_unref_dmabuf_arg)
#define DRM_IOCTL_VMW_CURSOR_BYPASS \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CURSOR_BYPASS, \
		struct drm_vmw_cursor_bypass_arg)

#define DRM_IOCTL_VMW_CONTROL_STREAM \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CONTROL_STREAM, \
		struct drm_vmw_control_stream_arg)
#define DRM_IOCTL_VMW_CLAIM_STREAM \
	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CLAIM_STREAM, \
		struct drm_vmw_stream_arg)
#define DRM_IOCTL_VMW_UNREF_STREAM \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_STREAM, \
		struct drm_vmw_stream_arg)

#define DRM_IOCTL_VMW_CREATE_CONTEXT \
	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CREATE_CONTEXT, \
		struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_UNREF_CONTEXT \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_CONTEXT, \
		struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_CREATE_SURFACE \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SURFACE, \
		 union drm_vmw_surface_create_arg)
#define DRM_IOCTL_VMW_UNREF_SURFACE \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SURFACE, \
		struct drm_vmw_surface_arg)
#define DRM_IOCTL_VMW_REF_SURFACE \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_REF_SURFACE, \
		 union drm_vmw_surface_reference_arg)
#define DRM_IOCTL_VMW_EXECBUF \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF, \
		struct drm_vmw_execbuf_arg)
#define DRM_IOCTL_VMW_GET_3D_CAP \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_GET_3D_CAP, \
		struct drm_vmw_get_3d_cap_arg)
#define DRM_IOCTL_VMW_FENCE_WAIT \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT, \
		 struct drm_vmw_fence_wait_arg)
#define DRM_IOCTL_VMW_FENCE_SIGNALED \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_SIGNALED, \
		 struct drm_vmw_fence_signaled_arg)
#define DRM_IOCTL_VMW_FENCE_UNREF \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_UNREF, \
		struct drm_vmw_fence_arg)
#define DRM_IOCTL_VMW_FENCE_EVENT \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_EVENT, \
		struct drm_vmw_fence_event_arg)
#define DRM_IOCTL_VMW_PRESENT \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT, \
		struct drm_vmw_present_arg)
#define DRM_IOCTL_VMW_PRESENT_READBACK \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT_READBACK, \
		struct drm_vmw_present_readback_arg)
#define DRM_IOCTL_VMW_UPDATE_LAYOUT \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT, \
		struct drm_vmw_update_layout_arg)
#define DRM_IOCTL_VMW_CREATE_SHADER \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SHADER, \
		 struct drm_vmw_shader_create_arg)
#define DRM_IOCTL_VMW_UNREF_SHADER \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SHADER, \
		struct drm_vmw_shader_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_CREATE \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE, \
		 union drm_vmw_gb_surface_create_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_REF \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF, \
		 union drm_vmw_gb_surface_reference_arg)
#define DRM_IOCTL_VMW_SYNCCPU \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_SYNCCPU, \
		struct drm_vmw_synccpu_arg)
#define DRM_IOCTL_VMW_CREATE_EXTENDED_CONTEXT \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_EXTENDED_CONTEXT, \
		 struct drm_vmw_context_arg)

/**
 * The core DRM version of this macro doesn't account for
 * DRM_COMMAND_BASE.
 */

#define VMW_IOCTL_DEF(ioctl, func, flags) \
	[DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_IOCTL_##ioctl, flags, func}

/**
 * Ioctl definitions.
 */

static const struct drm_ioctl_desc vmw_ioctls[] = {
	VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
		      vmw_kms_cursor_bypass_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW),

	VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW),
	VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW),

	VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_EXECBUF, NULL, DRM_AUTH |
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_SIGNALED,
		      vmw_fence_obj_signaled_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_EVENT, vmw_fence_event_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),

	/* These allow direct access to the framebuffers; mark as master only. */
	VMW_IOCTL_DEF(VMW_PRESENT, vmw_present_ioctl,
		      DRM_MASTER | DRM_AUTH),
	VMW_IOCTL_DEF(VMW_PRESENT_READBACK,
		      vmw_present_readback_ioctl,
		      DRM_MASTER | DRM_AUTH),
	VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT,
		      vmw_kms_update_layout_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW),
	VMW_IOCTL_DEF(VMW_CREATE_SHADER,
		      vmw_shader_define_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_SHADER,
		      vmw_shader_destroy_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE,
		      vmw_gb_surface_define_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GB_SURFACE_REF,
		      vmw_gb_surface_reference_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_SYNCCPU,
		      vmw_user_dmabuf_synccpu_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_CREATE_EXTENDED_CONTEXT,
		      vmw_extended_context_define_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
};

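/*
 * Descriptive note (added): PCI match table. Vendor id 0x15ad is VMware;
 * device id 0x0405 is the virtual SVGA II adapter.
 */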
static struct pci_device_id vmw_pci_id_list[] = {
	{0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII},
	{0, 0, 0}
};
MODULE_DEVICE_TABLE(pci, vmw_pci_id_list);

static int enable_fbdev = IS_ENABLED(CONFIG_DRM_VMWGFX_FBCON);
static int vmw_force_iommu;
static int vmw_restrict_iommu;
static int vmw_force_coherent;
static int vmw_restrict_dma_mask;
static int vmw_assume_16bpp;

static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
static void vmw_master_init(struct vmw_master *);
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
			      void *ptr);

MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
module_param_named(enable_fbdev, enable_fbdev, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(force_dma_api, "Force using the DMA API for TTM pages");
module_param_named(force_dma_api, vmw_force_iommu, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages");
module_param_named(restrict_iommu, vmw_restrict_iommu, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages");
module_param_named(force_coherent, vmw_force_coherent, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU");
module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(assume_16bpp, "Assume 16-bpp when filtering modes");
module_param_named(assume_16bpp, vmw_assume_16bpp, int, 0600);

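/*
 * Usage sketch (added; example values are hypothetical): the parameters
 * above can be set at module load time, e.g.
 *
 *   modprobe vmwgfx enable_fbdev=1 force_dma_api=1
 *
 * Parameters registered with S_IRUSR | S_IWUSR are additionally exposed
 * as root-accessible files under /sys/module/vmwgfx/parameters/.
 */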
static void vmw_print_capabilities(uint32_t capabilities)
{
	DRM_INFO("Capabilities:\n");
	if (capabilities & SVGA_CAP_RECT_COPY)
		DRM_INFO("  Rect copy.\n");
	if (capabilities & SVGA_CAP_CURSOR)
		DRM_INFO("  Cursor.\n");
	if (capabilities & SVGA_CAP_CURSOR_BYPASS)
		DRM_INFO("  Cursor bypass.\n");
	if (capabilities & SVGA_CAP_CURSOR_BYPASS_2)
		DRM_INFO("  Cursor bypass 2.\n");
	if (capabilities & SVGA_CAP_8BIT_EMULATION)
		DRM_INFO("  8bit emulation.\n");
	if (capabilities & SVGA_CAP_ALPHA_CURSOR)
		DRM_INFO("  Alpha cursor.\n");
	if (capabilities & SVGA_CAP_3D)
		DRM_INFO("  3D.\n");
	if (capabilities & SVGA_CAP_EXTENDED_FIFO)
		DRM_INFO("  Extended Fifo.\n");
	if (capabilities & SVGA_CAP_MULTIMON)
		DRM_INFO("  Multimon.\n");
	if (capabilities & SVGA_CAP_PITCHLOCK)
		DRM_INFO("  Pitchlock.\n");
	if (capabilities & SVGA_CAP_IRQMASK)
		DRM_INFO("  Irq mask.\n");
	if (capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)
		DRM_INFO("  Display Topology.\n");
	if (capabilities & SVGA_CAP_GMR)
		DRM_INFO("  GMR.\n");
	if (capabilities & SVGA_CAP_TRACES)
		DRM_INFO("  Traces.\n");
	if (capabilities & SVGA_CAP_GMR2)
		DRM_INFO("  GMR2.\n");
	if (capabilities & SVGA_CAP_SCREEN_OBJECT_2)
		DRM_INFO("  Screen Object 2.\n");
	if (capabilities & SVGA_CAP_COMMAND_BUFFERS)
		DRM_INFO("  Command Buffers.\n");
	if (capabilities & SVGA_CAP_CMD_BUFFERS_2)
		DRM_INFO("  Command Buffers 2.\n");
	if (capabilities & SVGA_CAP_GBOBJECTS)
		DRM_INFO("  Guest Backed Resources.\n");
	if (capabilities & SVGA_CAP_DX)
		DRM_INFO("  DX Features.\n");
}

/**
 * vmw_dummy_query_bo_create - create a bo to hold a dummy query result
 *
 * @dev_priv: A device private structure.
 *
 * This function creates a small buffer object that holds the query
 * result for dummy queries emitted as query barriers.
 * The function will then map the first page and initialize a pending
 * occlusion query result structure. Finally, it will unmap the buffer.
 * No interruptible waits are done within this function.
 *
 * Returns an error if bo creation or initialization fails.
 */
static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
{
	int ret;
	struct vmw_dma_buffer *vbo;
	struct ttm_bo_kmap_obj map;
	volatile SVGA3dQueryResult *result;
	bool dummy;

	/*
	 * Create the vbo as pinned, so that a tryreserve will
	 * immediately succeed. This is because we're the only
	 * user of the bo currently.
	 */
	vbo = kzalloc(sizeof(*vbo), GFP_KERNEL);
	if (!vbo)
		return -ENOMEM;

	ret = vmw_dmabuf_init(dev_priv, vbo, PAGE_SIZE,
			      &vmw_sys_ne_placement, false,
			      &vmw_dmabuf_bo_free);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_reserve(&vbo->base, false, true, NULL);
	BUG_ON(ret != 0);
	vmw_bo_pin_reserved(vbo, true);

	ret = ttm_bo_kmap(&vbo->base, 0, 1, &map);
	if (likely(ret == 0)) {
		result = ttm_kmap_obj_virtual(&map, &dummy);
		result->totalSize = sizeof(*result);
		result->state = SVGA3D_QUERYSTATE_PENDING;
		result->result32 = 0xff;
		ttm_bo_kunmap(&map);
	}
	vmw_bo_pin_reserved(vbo, false);
	ttm_bo_unreserve(&vbo->base);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Dummy query buffer map failed.\n");
		vmw_dmabuf_unreference(&vbo);
	} else
		dev_priv->dummy_query_bo = vbo;

	return ret;
}

/**
 * vmw_request_device_late - Perform late device setup
 *
 * @dev_priv: Pointer to device private.
 *
 * This function performs setup of otables and enables large command
 * buffer submission. These tasks are split out to a separate function
 * because it reverts vmw_release_device_early and is intended to be used
 * by an error path in the hibernation code.
 */
static int vmw_request_device_late(struct vmw_private *dev_priv)
{
	int ret;

	if (dev_priv->has_mob) {
		ret = vmw_otables_setup(dev_priv);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Unable to initialize "
				  "guest Memory OBjects.\n");
			return ret;
		}
	}

	if (dev_priv->cman) {
		ret = vmw_cmdbuf_set_pool_size(dev_priv->cman,
					       256*4096, 2*4096);
		if (ret) {
			struct vmw_cmdbuf_man *man = dev_priv->cman;

			dev_priv->cman = NULL;
			vmw_cmdbuf_man_destroy(man);
		}
	}

	return 0;
}

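/**
 * vmw_request_device - Bring up device resources. (Summary added.)
 *
 * @dev_priv: Pointer to device private struct.
 *
 * Brings up the FIFO and fence manager and tries to create a command
 * buffer manager; if that fails, the driver falls back to FIFO command
 * submission and disables DX. It then performs late setup and creates
 * the dummy query bo.
 */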
static int vmw_request_device(struct vmw_private *dev_priv)
{
	int ret;

	ret = vmw_fifo_init(dev_priv, &dev_priv->fifo);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Unable to initialize FIFO.\n");
		return ret;
	}
	vmw_fence_fifo_up(dev_priv->fman);
	dev_priv->cman = vmw_cmdbuf_man_create(dev_priv);
	if (IS_ERR(dev_priv->cman)) {
		dev_priv->cman = NULL;
		dev_priv->has_dx = false;
	}

	ret = vmw_request_device_late(dev_priv);
	if (ret)
		goto out_no_mob;

	ret = vmw_dummy_query_bo_create(dev_priv);
	if (unlikely(ret != 0))
		goto out_no_query_bo;

	return 0;

out_no_query_bo:
	if (dev_priv->cman)
		vmw_cmdbuf_remove_pool(dev_priv->cman);
	if (dev_priv->has_mob) {
		(void) ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
		vmw_otables_takedown(dev_priv);
	}
	if (dev_priv->cman)
		vmw_cmdbuf_man_destroy(dev_priv->cman);
out_no_mob:
	vmw_fence_fifo_down(dev_priv->fman);
	vmw_fifo_release(dev_priv, &dev_priv->fifo);
	return ret;
}

/**
 * vmw_release_device_early - Early part of fifo takedown.
 *
 * @dev_priv: Pointer to device private struct.
 *
 * This is the first part of command submission takedown, to be called before
 * buffer management is taken down.
 */
static void vmw_release_device_early(struct vmw_private *dev_priv)
{
	/*
	 * Previous destructions should've released
	 * the pinned bo.
	 */

	BUG_ON(dev_priv->pinned_bo != NULL);

	vmw_dmabuf_unreference(&dev_priv->dummy_query_bo);
	if (dev_priv->cman)
		vmw_cmdbuf_remove_pool(dev_priv->cman);

	if (dev_priv->has_mob) {
		ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
		vmw_otables_takedown(dev_priv);
	}
}

/**
 * vmw_release_device_late - Late part of fifo takedown.
 *
 * @dev_priv: Pointer to device private struct.
 *
 * This is the last part of the command submission takedown, to be called when
 * command submission is no longer needed. It may wait on pending fences.
 */
static void vmw_release_device_late(struct vmw_private *dev_priv)
{
	vmw_fence_fifo_down(dev_priv->fman);
	if (dev_priv->cman)
		vmw_cmdbuf_man_destroy(dev_priv->cman);

	vmw_fifo_release(dev_priv, &dev_priv->fifo);
}

/**
 * Sets the initial_[width|height] fields on the given vmw_private.
 *
 * It does so by reading the SVGA_REG_[WIDTH|HEIGHT] registers and then
 * clamping the values to at least VMW_MIN_INITIAL_[WIDTH|HEIGHT]. If the
 * result exceeds the fb_max_[width|height] fields, the values are assumed
 * to be invalid and are reset to VMW_MIN_INITIAL_[WIDTH|HEIGHT].
 */
static void vmw_get_initial_size(struct vmw_private *dev_priv)
{
	uint32_t width;
	uint32_t height;

	width = vmw_read(dev_priv, SVGA_REG_WIDTH);
	height = vmw_read(dev_priv, SVGA_REG_HEIGHT);

	width = max_t(uint32_t, width, VMW_MIN_INITIAL_WIDTH);
	height = max_t(uint32_t, height, VMW_MIN_INITIAL_HEIGHT);

	if (width > dev_priv->fb_max_width ||
	    height > dev_priv->fb_max_height) {

		/*
		 * This is a host error and shouldn't occur.
		 */

		width = VMW_MIN_INITIAL_WIDTH;
		height = VMW_MIN_INITIAL_HEIGHT;
	}

	dev_priv->initial_width = width;
	dev_priv->initial_height = height;
}

/**
 * vmw_dma_select_mode - Determine how DMA mappings should be set up for this
 * system.
 *
 * @dev_priv: Pointer to a struct vmw_private
 *
 * This function tries to determine the IOMMU setup and what actions
 * need to be taken by the driver to make system pages visible to the
 * device.
 * If this function decides that DMA is not possible, it returns -EINVAL.
 * The driver may then try to disable features of the device that require
 * DMA.
 */
static int vmw_dma_select_mode(struct vmw_private *dev_priv)
{
	static const char *names[vmw_dma_map_max] = {
		[vmw_dma_phys] = "Using physical TTM page addresses.",
		[vmw_dma_alloc_coherent] = "Using coherent TTM pages.",
		[vmw_dma_map_populate] = "Keeping DMA mappings.",
		[vmw_dma_map_bind] = "Giving up DMA mappings early."};
#ifdef CONFIG_X86
	const struct dma_map_ops *dma_ops = get_dma_ops(dev_priv->dev->dev);

#ifdef CONFIG_INTEL_IOMMU
	if (intel_iommu_enabled) {
		dev_priv->map_mode = vmw_dma_map_populate;
		goto out_fixup;
	}
#endif

	if (!(vmw_force_iommu || vmw_force_coherent)) {
		dev_priv->map_mode = vmw_dma_phys;
		DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);
		return 0;
	}

	dev_priv->map_mode = vmw_dma_map_populate;

	if (dma_ops->sync_single_for_cpu)
		dev_priv->map_mode = vmw_dma_alloc_coherent;
#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl() == 0)
		dev_priv->map_mode = vmw_dma_map_populate;
#endif

#ifdef CONFIG_INTEL_IOMMU
out_fixup:
#endif
	if (dev_priv->map_mode == vmw_dma_map_populate &&
	    vmw_restrict_iommu)
		dev_priv->map_mode = vmw_dma_map_bind;

	if (vmw_force_coherent)
		dev_priv->map_mode = vmw_dma_alloc_coherent;

#if !defined(CONFIG_SWIOTLB) && !defined(CONFIG_INTEL_IOMMU)
	/*
	 * No coherent page pool
	 */
	if (dev_priv->map_mode == vmw_dma_alloc_coherent)
		return -EINVAL;
#endif

#else /* CONFIG_X86 */
	dev_priv->map_mode = vmw_dma_map_populate;
#endif /* CONFIG_X86 */

	DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);

	return 0;
}

/**
 * vmw_dma_masks - set required page- and dma masks
 *
 * @dev_priv: Pointer to a struct vmw_private
 *
 * With 32-bit we can only handle 32 bit PFNs. Optionally set that
 * restriction also for 64-bit systems.
 */
#ifdef CONFIG_INTEL_IOMMU
static int vmw_dma_masks(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	if (intel_iommu_enabled &&
	    (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) {
		DRM_INFO("Restricting DMA addresses to 44 bits.\n");
		return dma_set_mask(dev->dev, DMA_BIT_MASK(44));
	}
	return 0;
}
#else
static int vmw_dma_masks(struct vmw_private *dev_priv)
{
	return 0;
}
#endif

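/**
 * vmw_driver_load - Main driver initialization. (Summary added.)
 *
 * @dev: Pointer to the drm device.
 * @chipset: Chip id from the PCI device table.
 *
 * Allocates and initializes the device private structure, reads device
 * capabilities and sizes, selects a DMA mode, and sets up TTM, MMIO,
 * IRQs, fencing, KMS and (optionally) fbdev. Each failure path unwinds
 * all previously completed steps in reverse order.
 */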
static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
{
	struct vmw_private *dev_priv;
	int ret;
	uint32_t svga_id;
	enum vmw_res_type i;
	bool refuse_dma = false;
	char host_log[100] = {0};

	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
	if (unlikely(dev_priv == NULL)) {
		DRM_ERROR("Failed allocating a device private struct.\n");
		return -ENOMEM;
	}

	pci_set_master(dev->pdev);

	dev_priv->dev = dev;
	dev_priv->vmw_chipset = chipset;
	dev_priv->last_read_seqno = (uint32_t) -100;
	mutex_init(&dev_priv->cmdbuf_mutex);
	mutex_init(&dev_priv->release_mutex);
	mutex_init(&dev_priv->binding_mutex);
	mutex_init(&dev_priv->global_kms_state_mutex);
	rwlock_init(&dev_priv->resource_lock);
	ttm_lock_init(&dev_priv->reservation_sem);
	spin_lock_init(&dev_priv->hw_lock);
	spin_lock_init(&dev_priv->waiter_lock);
	spin_lock_init(&dev_priv->cap_lock);
	spin_lock_init(&dev_priv->svga_lock);

	for (i = vmw_res_context; i < vmw_res_max; ++i) {
		idr_init(&dev_priv->res_idr[i]);
		INIT_LIST_HEAD(&dev_priv->res_lru[i]);
	}

	mutex_init(&dev_priv->init_mutex);
	init_waitqueue_head(&dev_priv->fence_queue);
	init_waitqueue_head(&dev_priv->fifo_queue);
	dev_priv->fence_queue_waiters = 0;
	dev_priv->fifo_queue_waiters = 0;

	dev_priv->used_memory_size = 0;

	dev_priv->io_start = pci_resource_start(dev->pdev, 0);
	dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
	dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);

	dev_priv->assume_16bpp = !!vmw_assume_16bpp;

	dev_priv->enable_fb = enable_fbdev;

	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
	svga_id = vmw_read(dev_priv, SVGA_REG_ID);
	if (svga_id != SVGA_ID_2) {
		ret = -ENOSYS;
		DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id);
		goto out_err0;
	}

	dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);
	ret = vmw_dma_select_mode(dev_priv);
	if (unlikely(ret != 0)) {
		DRM_INFO("Restricting capabilities due to IOMMU setup.\n");
		refuse_dma = true;
	}

	dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
	dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
	dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
	dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);

	vmw_get_initial_size(dev_priv);

	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		dev_priv->max_gmr_ids =
			vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
		dev_priv->max_gmr_pages =
			vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES);
		dev_priv->memory_size =
			vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE);
		dev_priv->memory_size -= dev_priv->vram_size;
	} else {
		/*
		 * An arbitrary limit of 512MiB on surface
		 * memory. But all HWV8 hardware supports GMR2.
		 */
		dev_priv->memory_size = 512*1024*1024;
	}
	dev_priv->max_mob_pages = 0;
	dev_priv->max_mob_size = 0;
	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
		uint64_t mem_size =
			vmw_read(dev_priv,
				 SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB);

		/*
		 * Workaround for low memory 2D VMs to compensate for the
		 * allocation taken by fbdev
		 */
		if (!(dev_priv->capabilities & SVGA_CAP_3D))
			mem_size *= 2;

		dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
		dev_priv->prim_bb_mem =
			vmw_read(dev_priv,
				 SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM);
		dev_priv->max_mob_size =
			vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
		dev_priv->stdu_max_width =
			vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_WIDTH);
		dev_priv->stdu_max_height =
			vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_HEIGHT);

		vmw_write(dev_priv, SVGA_REG_DEV_CAP,
			  SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH);
		dev_priv->texture_max_width = vmw_read(dev_priv,
						       SVGA_REG_DEV_CAP);
		vmw_write(dev_priv, SVGA_REG_DEV_CAP,
			  SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT);
		dev_priv->texture_max_height = vmw_read(dev_priv,
							SVGA_REG_DEV_CAP);
	} else {
		dev_priv->texture_max_width = 8192;
		dev_priv->texture_max_height = 8192;
		dev_priv->prim_bb_mem = dev_priv->vram_size;
	}

	vmw_print_capabilities(dev_priv->capabilities);

	ret = vmw_dma_masks(dev_priv);
	if (unlikely(ret != 0))
		goto out_err0;

	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		DRM_INFO("Max GMR ids is %u\n",
			 (unsigned)dev_priv->max_gmr_ids);
		DRM_INFO("Max number of GMR pages is %u\n",
			 (unsigned)dev_priv->max_gmr_pages);
		DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n",
			 (unsigned)dev_priv->memory_size / 1024);
	}
	DRM_INFO("Maximum display memory size is %u kiB\n",
		 dev_priv->prim_bb_mem / 1024);
	DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
		 dev_priv->vram_start, dev_priv->vram_size / 1024);
	DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
		 dev_priv->mmio_start, dev_priv->mmio_size / 1024);

	ret = vmw_ttm_global_init(dev_priv);
	if (unlikely(ret != 0))
		goto out_err0;

	vmw_master_init(&dev_priv->fbdev_master);
	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
	dev_priv->active_master = &dev_priv->fbdev_master;

	dev_priv->mmio_virt = memremap(dev_priv->mmio_start,
				       dev_priv->mmio_size, MEMREMAP_WB);

	if (unlikely(dev_priv->mmio_virt == NULL)) {
		ret = -ENOMEM;
		DRM_ERROR("Failed mapping MMIO.\n");
		goto out_err3;
	}

	/* Need mmio memory to check for fifo pitchlock cap. */
	if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
	    !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) &&
	    !vmw_fifo_have_pitchlock(dev_priv)) {
		ret = -ENOSYS;
		DRM_ERROR("Hardware has no pitchlock\n");
		goto out_err4;
	}

	dev_priv->tdev = ttm_object_device_init
		(dev_priv->mem_global_ref.object, 12, &vmw_prime_dmabuf_ops);

	if (unlikely(dev_priv->tdev == NULL)) {
		DRM_ERROR("Unable to initialize TTM object management.\n");
		ret = -ENOMEM;
		goto out_err4;
	}

	dev->dev_private = dev_priv;

	ret = pci_request_regions(dev->pdev, "vmwgfx probe");
	dev_priv->stealth = (ret != 0);
	if (dev_priv->stealth) {
		/*
		 * Request at least the mmio PCI resource.
		 */

		DRM_INFO("It appears that vesafb is loaded. "
			 "Ignore the above error if any.\n");
		ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe");
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed reserving the SVGA MMIO resource.\n");
			goto out_no_device;
		}
	}

	if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
		ret = drm_irq_install(dev, dev->pdev->irq);
		if (ret != 0) {
			DRM_ERROR("Failed installing irq: %d\n", ret);
			goto out_no_irq;
		}
	}

	dev_priv->fman = vmw_fence_manager_init(dev_priv);
	if (unlikely(dev_priv->fman == NULL)) {
		ret = -ENOMEM;
		goto out_no_fman;
	}

	ret = ttm_bo_device_init(&dev_priv->bdev,
				 dev_priv->bo_global_ref.ref.object,
				 &vmw_bo_driver,
				 dev->anon_inode->i_mapping,
				 VMWGFX_FILE_PAGE_OFFSET,
				 false);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing TTM buffer object driver.\n");
		goto out_no_bdev;
	}

	/*
	 * Enable VRAM, but initially don't use it until SVGA is enabled and
	 * unhidden.
	 */
	ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
			     (dev_priv->vram_size >> PAGE_SHIFT));
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing memory manager for VRAM.\n");
		goto out_no_vram;
	}
	dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;

	dev_priv->has_gmr = true;
	if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
	    refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
					 VMW_PL_GMR) != 0) {
		DRM_INFO("No GMR memory available. "
			 "Graphics memory resources are very limited.\n");
		dev_priv->has_gmr = false;
	}

	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
		dev_priv->has_mob = true;
		if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB,
				   VMW_PL_MOB) != 0) {
			DRM_INFO("No MOB memory available. "
				 "3D will be disabled.\n");
			dev_priv->has_mob = false;
		}
	}

	if (dev_priv->has_mob) {
		spin_lock(&dev_priv->cap_lock);
		vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_DX);
		dev_priv->has_dx = !!vmw_read(dev_priv, SVGA_REG_DEV_CAP);
		spin_unlock(&dev_priv->cap_lock);
	}

	ret = vmw_kms_init(dev_priv);
	if (unlikely(ret != 0))
		goto out_no_kms;
	vmw_overlay_init(dev_priv);

	ret = vmw_request_device(dev_priv);
	if (ret)
		goto out_no_fifo;

	DRM_INFO("DX: %s\n", dev_priv->has_dx ? "yes." : "no.");

	snprintf(host_log, sizeof(host_log), "vmwgfx: %s-%s",
		 VMWGFX_REPO, VMWGFX_GIT_VERSION);
	vmw_host_log(host_log);

	memset(host_log, 0, sizeof(host_log));
	snprintf(host_log, sizeof(host_log), "vmwgfx: Module Version: %d.%d.%d",
		 VMWGFX_DRIVER_MAJOR, VMWGFX_DRIVER_MINOR,
		 VMWGFX_DRIVER_PATCHLEVEL);
	vmw_host_log(host_log);

	if (dev_priv->enable_fb) {
		vmw_fifo_resource_inc(dev_priv);
		vmw_svga_enable(dev_priv);
		vmw_fb_init(dev_priv);
	}

	dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
	register_pm_notifier(&dev_priv->pm_nb);

	return 0;

out_no_fifo:
	vmw_overlay_close(dev_priv);
	vmw_kms_close(dev_priv);
out_no_kms:
	if (dev_priv->has_mob)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
	if (dev_priv->has_gmr)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
out_no_vram:
	(void)ttm_bo_device_release(&dev_priv->bdev);
out_no_bdev:
	vmw_fence_manager_takedown(dev_priv->fman);
out_no_fman:
	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
		drm_irq_uninstall(dev_priv->dev);
out_no_irq:
	if (dev_priv->stealth)
		pci_release_region(dev->pdev, 2);
	else
		pci_release_regions(dev->pdev);
out_no_device:
	ttm_object_device_release(&dev_priv->tdev);
out_err4:
	memunmap(dev_priv->mmio_virt);
out_err3:
	vmw_ttm_global_release(dev_priv);
out_err0:
	for (i = vmw_res_context; i < vmw_res_max; ++i)
		idr_destroy(&dev_priv->res_idr[i]);

	if (dev_priv->ctx.staged_bindings)
		vmw_binding_state_free(dev_priv->ctx.staged_bindings);
	kfree(dev_priv);
	return ret;
}

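/**
 * vmw_driver_unload - Tear down the driver. (Summary added.)
 *
 * @dev: Pointer to the drm device.
 *
 * Releases the resources set up by vmw_driver_load, in roughly the
 * reverse order of their creation.
 */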
static void vmw_driver_unload(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	enum vmw_res_type i;

	unregister_pm_notifier(&dev_priv->pm_nb);

	if (dev_priv->ctx.res_ht_initialized)
		drm_ht_remove(&dev_priv->ctx.res_ht);
	vfree(dev_priv->ctx.cmd_bounce);
	if (dev_priv->enable_fb) {
		vmw_fb_off(dev_priv);
		vmw_fb_close(dev_priv);
		vmw_fifo_resource_dec(dev_priv);
		vmw_svga_disable(dev_priv);
	}

	vmw_kms_close(dev_priv);
	vmw_overlay_close(dev_priv);

	if (dev_priv->has_gmr)
		(void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);

	vmw_release_device_early(dev_priv);
	if (dev_priv->has_mob)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
	(void) ttm_bo_device_release(&dev_priv->bdev);
	vmw_release_device_late(dev_priv);
	vmw_fence_manager_takedown(dev_priv->fman);
	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
		drm_irq_uninstall(dev_priv->dev);
	if (dev_priv->stealth)
		pci_release_region(dev->pdev, 2);
	else
		pci_release_regions(dev->pdev);

	ttm_object_device_release(&dev_priv->tdev);
	memunmap(dev_priv->mmio_virt);
	if (dev_priv->ctx.staged_bindings)
		vmw_binding_state_free(dev_priv->ctx.staged_bindings);
	vmw_ttm_global_release(dev_priv);

	for (i = vmw_res_context; i < vmw_res_max; ++i)
		idr_destroy(&dev_priv->res_idr[i]);

	kfree(dev_priv);
}

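/**
 * vmw_postclose - Clean up per-file state on file close. (Summary added.)
 *
 * Unlocks and drops a locked master, if this file still holds one, and
 * releases the file's TTM object file.
 */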
static void vmw_postclose(struct drm_device *dev,
			  struct drm_file *file_priv)
{
	struct vmw_fpriv *vmw_fp;

	vmw_fp = vmw_fpriv(file_priv);

	if (vmw_fp->locked_master) {
		struct vmw_master *vmaster =
			vmw_master(vmw_fp->locked_master);

		ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
		ttm_vt_unlock(&vmaster->lock);
		drm_master_put(&vmw_fp->locked_master);
	}

	ttm_object_file_release(&vmw_fp->tfile);
	kfree(vmw_fp);
}

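/**
 * vmw_driver_open - Set up per-file state on open. (Summary added.)
 *
 * Allocates a struct vmw_fpriv together with its TTM object file and
 * hangs it off file_priv->driver_priv.
 */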
static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp;
	int ret = -ENOMEM;

	vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL);
	if (unlikely(vmw_fp == NULL))
		return ret;

	vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
	if (unlikely(vmw_fp->tfile == NULL))
		goto out_no_tfile;

	file_priv->driver_priv = vmw_fp;

	return 0;

out_no_tfile:
	kfree(vmw_fp);
	return ret;
}

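/**
 * vmw_master_check - Check whether an ioctl caller needs the master
 * lock. (Summary added.)
 *
 * Returns NULL when no master lock is needed (non-primary clients,
 * ioctls without DRM_AUTH, the current master, or dropped masters
 * calling render-allowed ioctls), a pointer to the read-locked master
 * when one is needed, or an ERR_PTR on failure.
 */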
static struct vmw_master *vmw_master_check(struct drm_device *dev,
					   struct drm_file *file_priv,
					   unsigned int flags)
{
	int ret;
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_master *vmaster;

	if (!drm_is_primary_client(file_priv) || !(flags & DRM_AUTH))
		return NULL;

	ret = mutex_lock_interruptible(&dev->master_mutex);
	if (unlikely(ret != 0))
		return ERR_PTR(-ERESTARTSYS);

	if (drm_is_current_master(file_priv)) {
		mutex_unlock(&dev->master_mutex);
		return NULL;
	}

	/*
	 * Check if we were previously master, but now dropped. In that
	 * case, allow at least render node functionality.
	 */
	if (vmw_fp->locked_master) {
		mutex_unlock(&dev->master_mutex);

		if (flags & DRM_RENDER_ALLOW)
			return NULL;

		DRM_ERROR("Dropped master trying to access ioctl that "
			  "requires authentication.\n");
		return ERR_PTR(-EACCES);
	}
	mutex_unlock(&dev->master_mutex);

	/*
	 * Take the TTM lock. Possibly sleep waiting for the authenticating
	 * master to become master again, or for a SIGTERM if the
	 * authenticating master exits.
	 */
	vmaster = vmw_master(file_priv->master);
	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		vmaster = ERR_PTR(ret);

	return vmaster;
}

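/**
 * vmw_generic_ioctl - Common ioctl dispatch for the native and compat
 * paths. (Summary added.)
 *
 * Validates driver-private ioctls (VMW_EXECBUF gets a special path that
 * tolerates a variable-size input argument), takes the master read lock
 * when vmw_master_check says one is needed, and forwards to @ioctl_func.
 */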
static long vmw_generic_ioctl(struct file *filp, unsigned int cmd,
			      unsigned long arg,
			      long (*ioctl_func)(struct file *, unsigned int,
						 unsigned long))
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_device *dev = file_priv->minor->dev;
	unsigned int nr = DRM_IOCTL_NR(cmd);
	struct vmw_master *vmaster;
	unsigned int flags;
	long ret;

	/*
	 * Do extra checking on driver private ioctls.
	 */

	if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
	    && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
		const struct drm_ioctl_desc *ioctl =
			&vmw_ioctls[nr - DRM_COMMAND_BASE];

		if (nr == DRM_COMMAND_BASE + DRM_VMW_EXECBUF) {
			ret = (long) drm_ioctl_permit(ioctl->flags, file_priv);
			if (unlikely(ret != 0))
				return ret;

			if (unlikely((cmd & (IOC_IN | IOC_OUT)) != IOC_IN))
				goto out_io_encoding;

			return (long) vmw_execbuf_ioctl(dev, arg, file_priv,
							_IOC_SIZE(cmd));
		}

		if (unlikely(ioctl->cmd != cmd))
			goto out_io_encoding;

		flags = ioctl->flags;
	} else if (!drm_ioctl_flags(nr, &flags))
		return -EINVAL;

	vmaster = vmw_master_check(dev, file_priv, flags);
	if (IS_ERR(vmaster)) {
		ret = PTR_ERR(vmaster);

		if (ret != -ERESTARTSYS)
			DRM_INFO("IOCTL ERROR Command %d, Error %ld.\n",
				 nr, ret);
		return ret;
	}

	ret = ioctl_func(filp, cmd, arg);
	if (vmaster)
		ttm_read_unlock(&vmaster->lock);

	return ret;

out_io_encoding:
	DRM_ERROR("Invalid command format, ioctl %d\n",
		  nr - DRM_COMMAND_BASE);

	return -EINVAL;
}

static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
			       unsigned long arg)
{
	return vmw_generic_ioctl(filp, cmd, arg, &drm_ioctl);
}

#ifdef CONFIG_COMPAT
static long vmw_compat_ioctl(struct file *filp, unsigned int cmd,
			     unsigned long arg)
{
	return vmw_generic_ioctl(filp, cmd, arg, &drm_compat_ioctl);
}
#endif

static void vmw_lastclose(struct drm_device *dev)
{
}

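/*
 * Descriptive note (added): master bookkeeping. Each struct vmw_master
 * wraps a TTM lock that is used to stall command submission across VT
 * switches and master drops.
 */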
1176static void vmw_master_init(struct vmw_master *vmaster)
1177{
1178 ttm_lock_init(&vmaster->lock);
1179}
1180
1181static int vmw_master_create(struct drm_device *dev,
1182 struct drm_master *master)
1183{
1184 struct vmw_master *vmaster;
1185
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00001186 vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL);
1187 if (unlikely(vmaster == NULL))
1188 return -ENOMEM;
1189
Thomas Hellstrom3a939a52010-10-05 12:43:03 +02001190 vmw_master_init(vmaster);
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00001191 ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
1192 master->driver_priv = vmaster;
1193
1194 return 0;
1195}
1196
1197static void vmw_master_destroy(struct drm_device *dev,
1198 struct drm_master *master)
1199{
1200 struct vmw_master *vmaster = vmw_master(master);
1201
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00001202 master->driver_priv = NULL;
1203 kfree(vmaster);
1204}
1205
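/*
 * Master handling: when a new DRM master takes over, vmw_master_set()
 * transfers the TTM lock from the previously active master (normally
 * the fbdev master) to the new one, and vmw_master_drop() later parks
 * the device back on dev_priv->fbdev_master so the built-in
 * framebuffer can take over again.
 */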
static int vmw_master_set(struct drm_device *dev,
			  struct drm_file *file_priv,
			  bool from_open)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_master *active = dev_priv->active_master;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret = 0;

	if (active) {
		BUG_ON(active != &dev_priv->fbdev_master);
		ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
		if (unlikely(ret != 0))
			return ret;

		ttm_lock_set_kill(&active->lock, true, SIGTERM);
		dev_priv->active_master = NULL;
	}

	ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
	if (!from_open) {
		ttm_vt_unlock(&vmaster->lock);
		BUG_ON(vmw_fp->locked_master != file_priv->master);
		drm_master_put(&vmw_fp->locked_master);
	}

	dev_priv->active_master = vmaster;
	drm_sysfs_hotplug_event(dev);

	return 0;
}

static void vmw_master_drop(struct drm_device *dev,
			    struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

	/*
	 * Make sure the master doesn't disappear while we have
	 * it locked.
	 */

	vmw_fp->locked_master = drm_master_get(file_priv->master);
	ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
	vmw_kms_legacy_hotspot_clear(dev_priv);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Unable to lock TTM at VT switch.\n");
		drm_master_put(&vmw_fp->locked_master);
	}

	ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);

	if (!dev_priv->enable_fb)
		vmw_svga_disable(dev_priv);

	dev_priv->active_master = &dev_priv->fbdev_master;
	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
	ttm_vt_unlock(&dev_priv->fbdev_master.lock);

	if (dev_priv->enable_fb)
		vmw_fb_on(dev_priv);
}

/**
 * __vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 * Needs the reservation sem to be held in non-exclusive mode.
 */
static void __vmw_svga_enable(struct vmw_private *dev_priv)
{
	spin_lock(&dev_priv->svga_lock);
	if (!dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
		vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE);
		/* use_type on the VRAM manager doubles as the "SVGA on" flag. */
		dev_priv->bdev.man[TTM_PL_VRAM].use_type = true;
	}
	spin_unlock(&dev_priv->svga_lock);
}

/**
 * vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 */
void vmw_svga_enable(struct vmw_private *dev_priv)
{
	(void) ttm_read_lock(&dev_priv->reservation_sem, false);
	__vmw_svga_enable(dev_priv);
	ttm_read_unlock(&dev_priv->reservation_sem);
}

/**
 * __vmw_svga_disable - Disable SVGA mode and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 * Needs the reservation sem to be held in exclusive mode.
 * Will not empty VRAM. VRAM must be emptied by caller.
 */
static void __vmw_svga_disable(struct vmw_private *dev_priv)
{
	spin_lock(&dev_priv->svga_lock);
	if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
		dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
		vmw_write(dev_priv, SVGA_REG_ENABLE,
			  SVGA_REG_ENABLE_HIDE |
			  SVGA_REG_ENABLE_ENABLE);
	}
	spin_unlock(&dev_priv->svga_lock);
}

/**
 * vmw_svga_disable - Disable SVGA mode and use of VRAM. Keep the fifo
 * running.
 *
 * @dev_priv: Pointer to device private struct.
 * Will empty VRAM.
 */
void vmw_svga_disable(struct vmw_private *dev_priv)
{
	ttm_write_lock(&dev_priv->reservation_sem, false);
	spin_lock(&dev_priv->svga_lock);
	if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
		dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
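		/*
		 * ttm_bo_evict_mm() can sleep, so the svga_lock spinlock
		 * must be dropped before evicting VRAM; the write-held
		 * reservation_sem keeps vmw_svga_enable() out meanwhile.
		 */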
		spin_unlock(&dev_priv->svga_lock);
		if (ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM))
			DRM_ERROR("Failed evicting VRAM buffers.\n");
		vmw_write(dev_priv, SVGA_REG_ENABLE,
			  SVGA_REG_ENABLE_HIDE |
			  SVGA_REG_ENABLE_ENABLE);
	} else
		spin_unlock(&dev_priv->svga_lock);
	ttm_write_unlock(&dev_priv->reservation_sem);
}

static void vmw_remove(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	pci_disable_device(pdev);
	drm_put_dev(dev);
}

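/*
 * PM notifier, called around creation and restoration of a hibernation
 * image. PM_HIBERNATION_PREPARE idles the device and moves all buffer
 * contents to swappable memory so that no device-resident state ends
 * up in the image; PM_POST_HIBERNATION / PM_POST_RESTORE undo this.
 */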
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
			      void *ptr)
{
	struct vmw_private *dev_priv =
		container_of(nb, struct vmw_private, pm_nb);

	switch (val) {
	case PM_HIBERNATION_PREPARE:
		if (dev_priv->enable_fb)
			vmw_fb_off(dev_priv);
		ttm_suspend_lock(&dev_priv->reservation_sem);

		/*
		 * This empties VRAM and unbinds all GMR bindings.
		 * Buffer contents are moved to swappable memory.
		 */
		vmw_execbuf_release_pinned_bo(dev_priv);
		vmw_resource_evict_all(dev_priv);
		vmw_release_device_early(dev_priv);
		ttm_bo_swapout_all(&dev_priv->bdev);
		vmw_fence_fifo_down(dev_priv->fman);
		break;
	case PM_POST_HIBERNATION:
	case PM_POST_RESTORE:
		vmw_fence_fifo_up(dev_priv->fman);
		ttm_suspend_unlock(&dev_priv->reservation_sem);
		if (dev_priv->enable_fb)
			vmw_fb_on(dev_priv);
		break;
	case PM_RESTORE_PREPARE:
		break;
	default:
		break;
	}
	return 0;
}

static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);

	if (dev_priv->refuse_hibernation)
		return -EBUSY;

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
	return 0;
}

static int vmw_pci_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	return pci_enable_device(pdev);
}

static int vmw_pm_suspend(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct pm_message dummy;

	dummy.event = 0;

	return vmw_pci_suspend(pdev, dummy);
}

static int vmw_pm_resume(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);

	return vmw_pci_resume(pdev);
}

static int vmw_pm_freeze(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);

	dev_priv->suspended = true;
	if (dev_priv->enable_fb)
		vmw_fifo_resource_dec(dev_priv);

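	/*
	 * Hibernation cannot proceed while userspace still holds FIFO
	 * resources (active 3D contexts and the like); in that case,
	 * roll the refcount back and re-request the device.
	 */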
	if (atomic_read(&dev_priv->num_fifo_resources) != 0) {
		DRM_ERROR("Can't hibernate while 3D resources are active.\n");
		if (dev_priv->enable_fb)
			vmw_fifo_resource_inc(dev_priv);
		WARN_ON(vmw_request_device_late(dev_priv));
		dev_priv->suspended = false;
		return -EBUSY;
	}

	if (dev_priv->enable_fb)
		__vmw_svga_disable(dev_priv);

	vmw_release_device_late(dev_priv);

	return 0;
}

static int vmw_pm_restore(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);
	int ret;

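	/*
	 * Device register state does not survive hibernation, so redo
	 * the SVGA_REG_ID handshake to put the device back into a known
	 * operating mode before touching anything else.
	 */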
	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
	(void) vmw_read(dev_priv, SVGA_REG_ID);

	if (dev_priv->enable_fb)
		vmw_fifo_resource_inc(dev_priv);

	ret = vmw_request_device(dev_priv);
	if (ret)
		return ret;

	if (dev_priv->enable_fb)
		__vmw_svga_enable(dev_priv);

	dev_priv->suspended = false;

	return 0;
}

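/*
 * Hibernation goes through the freeze/thaw/restore callbacks above,
 * while plain suspend only does the PCI power-state dance in
 * vmw_pci_suspend()/vmw_pci_resume(), on the assumption that device
 * state survives suspend-to-RAM.
 */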
static const struct dev_pm_ops vmw_pm_ops = {
	.freeze = vmw_pm_freeze,
	.thaw = vmw_pm_restore,
	.restore = vmw_pm_restore,
	.suspend = vmw_pm_suspend,
	.resume = vmw_pm_resume,
};

static const struct file_operations vmwgfx_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = vmw_unlocked_ioctl,
	.mmap = vmw_mmap,
	.poll = vmw_fops_poll,
	.read = vmw_fops_read,
#if defined(CONFIG_COMPAT)
	.compat_ioctl = vmw_compat_ioctl,
#endif
	.llseek = noop_llseek,
};

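/*
 * DRIVER_RENDER exposes a render node so unprivileged clients can use
 * the render ioctls without holding DRM-master rights, and
 * DRIVER_PRIME enables the dma-buf handle<->fd conversions wired up
 * below.
 */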
static struct drm_driver driver = {
	.driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
	DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER,
	.load = vmw_driver_load,
	.unload = vmw_driver_unload,
	.lastclose = vmw_lastclose,
	.irq_preinstall = vmw_irq_preinstall,
	.irq_postinstall = vmw_irq_postinstall,
	.irq_uninstall = vmw_irq_uninstall,
	.irq_handler = vmw_irq_handler,
	.get_vblank_counter = vmw_get_vblank_counter,
	.enable_vblank = vmw_enable_vblank,
	.disable_vblank = vmw_disable_vblank,
	.ioctls = vmw_ioctls,
	.num_ioctls = ARRAY_SIZE(vmw_ioctls),
	.master_create = vmw_master_create,
	.master_destroy = vmw_master_destroy,
	.master_set = vmw_master_set,
	.master_drop = vmw_master_drop,
	.open = vmw_driver_open,
	.postclose = vmw_postclose,
	.set_busid = drm_pci_set_busid,

	.dumb_create = vmw_dumb_create,
	.dumb_map_offset = vmw_dumb_map_offset,
	.dumb_destroy = vmw_dumb_destroy,

	.prime_fd_to_handle = vmw_prime_fd_to_handle,
	.prime_handle_to_fd = vmw_prime_handle_to_fd,

	.fops = &vmwgfx_driver_fops,
	.name = VMWGFX_DRIVER_NAME,
	.desc = VMWGFX_DRIVER_DESC,
	.date = VMWGFX_DRIVER_DATE,
	.major = VMWGFX_DRIVER_MAJOR,
	.minor = VMWGFX_DRIVER_MINOR,
	.patchlevel = VMWGFX_DRIVER_PATCHLEVEL
};

static struct pci_driver vmw_pci_driver = {
	.name = VMWGFX_DRIVER_NAME,
	.id_table = vmw_pci_id_list,
	.probe = vmw_probe,
	.remove = vmw_remove,
	.driver = {
		.pm = &vmw_pm_ops
	}
};

static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	return drm_get_pci_dev(pdev, ent, &driver);
}

static int __init vmwgfx_init(void)
{
	int ret;

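	/*
	 * vgacon_text_force() is true when the user booted with
	 * "nomodeset": bail out rather than take over the display.
	 */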
	if (vgacon_text_force())
		return -EINVAL;

	ret = drm_pci_init(&driver, &vmw_pci_driver);
	if (ret)
		DRM_ERROR("Failed initializing DRM.\n");
	return ret;
}

static void __exit vmwgfx_exit(void)
{
	drm_pci_exit(&driver, &vmw_pci_driver);
}

module_init(vmwgfx_init);
module_exit(vmwgfx_exit);

MODULE_AUTHOR("VMware Inc. and others");
MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device");
MODULE_LICENSE("GPL and additional rights");
MODULE_VERSION(__stringify(VMWGFX_DRIVER_MAJOR) "."
	       __stringify(VMWGFX_DRIVER_MINOR) "."
	       __stringify(VMWGFX_DRIVER_PATCHLEVEL) "."
	       "0");