// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2016 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <linux/console.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/pci.h>

#include <drm/drm_drv.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_sysfs.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_placement.h>

#include "ttm_object.h"
#include "vmwgfx_binding.h"
#include "vmwgfx_drv.h"

#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
#define VMWGFX_CHIP_SVGAII 0
#define VMW_FB_RESERVATION 0

#define VMW_MIN_INITIAL_WIDTH 800
#define VMW_MIN_INITIAL_HEIGHT 600

#ifndef VMWGFX_GIT_VERSION
#define VMWGFX_GIT_VERSION "Unknown"
#endif

#define VMWGFX_REPO "In Tree"

#define VMWGFX_VALIDATION_MEM_GRAN (16*PAGE_SIZE)


/**
 * Fully encoded drm commands. Might move to vmw_drm.h
 */

#define DRM_IOCTL_VMW_GET_PARAM					\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM,		\
		 struct drm_vmw_getparam_arg)
#define DRM_IOCTL_VMW_ALLOC_DMABUF				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF,	\
		union drm_vmw_alloc_dmabuf_arg)
#define DRM_IOCTL_VMW_UNREF_DMABUF				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_DMABUF,	\
		struct drm_vmw_unref_dmabuf_arg)
#define DRM_IOCTL_VMW_CURSOR_BYPASS				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CURSOR_BYPASS,	\
		 struct drm_vmw_cursor_bypass_arg)

#define DRM_IOCTL_VMW_CONTROL_STREAM				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CONTROL_STREAM,	\
		 struct drm_vmw_control_stream_arg)
#define DRM_IOCTL_VMW_CLAIM_STREAM				\
	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CLAIM_STREAM,	\
		 struct drm_vmw_stream_arg)
#define DRM_IOCTL_VMW_UNREF_STREAM				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_STREAM,	\
		 struct drm_vmw_stream_arg)

#define DRM_IOCTL_VMW_CREATE_CONTEXT				\
	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CREATE_CONTEXT,	\
		struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_UNREF_CONTEXT				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_CONTEXT,	\
		struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_CREATE_SURFACE				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SURFACE,	\
		 union drm_vmw_surface_create_arg)
#define DRM_IOCTL_VMW_UNREF_SURFACE				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SURFACE,	\
		 struct drm_vmw_surface_arg)
#define DRM_IOCTL_VMW_REF_SURFACE				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_REF_SURFACE,	\
		 union drm_vmw_surface_reference_arg)
#define DRM_IOCTL_VMW_EXECBUF					\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF,		\
		struct drm_vmw_execbuf_arg)
#define DRM_IOCTL_VMW_GET_3D_CAP				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_GET_3D_CAP,		\
		struct drm_vmw_get_3d_cap_arg)
#define DRM_IOCTL_VMW_FENCE_WAIT				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT,		\
		 struct drm_vmw_fence_wait_arg)
#define DRM_IOCTL_VMW_FENCE_SIGNALED				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_SIGNALED,	\
		 struct drm_vmw_fence_signaled_arg)
#define DRM_IOCTL_VMW_FENCE_UNREF				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_UNREF,		\
		 struct drm_vmw_fence_arg)
#define DRM_IOCTL_VMW_FENCE_EVENT				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_EVENT,		\
		 struct drm_vmw_fence_event_arg)
#define DRM_IOCTL_VMW_PRESENT					\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT,		\
		 struct drm_vmw_present_arg)
#define DRM_IOCTL_VMW_PRESENT_READBACK				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT_READBACK,	\
		 struct drm_vmw_present_readback_arg)
#define DRM_IOCTL_VMW_UPDATE_LAYOUT				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT,	\
		 struct drm_vmw_update_layout_arg)
#define DRM_IOCTL_VMW_CREATE_SHADER				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SHADER,	\
		 struct drm_vmw_shader_create_arg)
#define DRM_IOCTL_VMW_UNREF_SHADER				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SHADER,	\
		 struct drm_vmw_shader_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_CREATE				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE,	\
		 union drm_vmw_gb_surface_create_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_REF				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF,	\
		 union drm_vmw_gb_surface_reference_arg)
#define DRM_IOCTL_VMW_SYNCCPU					\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_SYNCCPU,		\
		 struct drm_vmw_synccpu_arg)
#define DRM_IOCTL_VMW_CREATE_EXTENDED_CONTEXT			\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_EXTENDED_CONTEXT,	\
		struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_CREATE_EXT				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE_EXT,	\
		union drm_vmw_gb_surface_create_ext_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_REF_EXT				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF_EXT,	\
		union drm_vmw_gb_surface_reference_ext_arg)
#define DRM_IOCTL_VMW_MSG						\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_MSG,			\
		struct drm_vmw_msg_arg)

/**
 * The core DRM version of this macro doesn't account for
 * DRM_COMMAND_BASE.
 */

#define VMW_IOCTL_DEF(ioctl, func, flags) \
	[DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_IOCTL_##ioctl, flags, func}
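
/*
 * For example, VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl, DRM_RENDER_ALLOW)
 * expands to a designated initializer at array index
 * DRM_IOCTL_NR(DRM_IOCTL_VMW_GET_PARAM) - DRM_COMMAND_BASE, i.e. the
 * driver-private ioctl number, so the vmw_ioctls[] table below can be
 * indexed directly with (nr - DRM_COMMAND_BASE), as vmw_generic_ioctl() does.
 */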

/**
 * Ioctl definitions.
 */

static const struct drm_ioctl_desc vmw_ioctls[] = {
	VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_bo_alloc_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_bo_unref_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
		      vmw_kms_cursor_bypass_ioctl,
		      DRM_MASTER),

	VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl,
		      DRM_MASTER),
	VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
		      DRM_MASTER),
	VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
		      DRM_MASTER),

	VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_SIGNALED,
		      vmw_fence_obj_signaled_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_EVENT, vmw_fence_event_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl,
		      DRM_RENDER_ALLOW),

	/* These allow direct access to the framebuffers, so mark as master only */
	VMW_IOCTL_DEF(VMW_PRESENT, vmw_present_ioctl,
		      DRM_MASTER | DRM_AUTH),
	VMW_IOCTL_DEF(VMW_PRESENT_READBACK,
		      vmw_present_readback_ioctl,
		      DRM_MASTER | DRM_AUTH),
	/*
	 * The permissions of the below ioctl are overridden in
	 * vmw_generic_ioctl(). We require either
	 * DRM_MASTER or capable(CAP_SYS_ADMIN).
	 */
	VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT,
		      vmw_kms_update_layout_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_CREATE_SHADER,
		      vmw_shader_define_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_SHADER,
		      vmw_shader_destroy_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE,
		      vmw_gb_surface_define_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GB_SURFACE_REF,
		      vmw_gb_surface_reference_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_SYNCCPU,
		      vmw_user_bo_synccpu_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_CREATE_EXTENDED_CONTEXT,
		      vmw_extended_context_define_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE_EXT,
		      vmw_gb_surface_define_ext_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GB_SURFACE_REF_EXT,
		      vmw_gb_surface_reference_ext_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_MSG,
		      vmw_msg_ioctl,
		      DRM_RENDER_ALLOW),
};
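
/*
 * A note on the flags used above (see include/drm/drm_ioctl.h):
 * DRM_RENDER_ALLOW exposes an ioctl on render nodes, DRM_MASTER restricts
 * it to the current DRM master, and DRM_AUTH requires an authenticated
 * client.
 */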

static const struct pci_device_id vmw_pci_id_list[] = {
	{0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII},
	{0, 0, 0}
};
MODULE_DEVICE_TABLE(pci, vmw_pci_id_list);

static int enable_fbdev = IS_ENABLED(CONFIG_DRM_VMWGFX_FBCON);
static int vmw_force_iommu;
static int vmw_restrict_iommu;
static int vmw_force_coherent;
static int vmw_restrict_dma_mask;
static int vmw_assume_16bpp;

static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
			      void *ptr);

MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
module_param_named(enable_fbdev, enable_fbdev, int, 0600);
MODULE_PARM_DESC(force_dma_api, "Force using the DMA API for TTM pages");
module_param_named(force_dma_api, vmw_force_iommu, int, 0600);
MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages");
module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600);
MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages");
module_param_named(force_coherent, vmw_force_coherent, int, 0600);
MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU");
module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600);
MODULE_PARM_DESC(assume_16bpp, "Assume 16-bpp when filtering modes");
module_param_named(assume_16bpp, vmw_assume_16bpp, int, 0600);
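
/*
 * These parameters can be given at module load time, e.g.
 * "modprobe vmwgfx force_coherent=1", or changed afterwards through
 * /sys/module/vmwgfx/parameters/ (the 0600 mode above makes them
 * root read/write).
 */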


static void vmw_print_capabilities2(uint32_t capabilities2)
{
	DRM_INFO("Capabilities2:\n");
	if (capabilities2 & SVGA_CAP2_GROW_OTABLE)
		DRM_INFO("  Grow oTable.\n");
	if (capabilities2 & SVGA_CAP2_INTRA_SURFACE_COPY)
		DRM_INFO("  IntraSurface copy.\n");
}

static void vmw_print_capabilities(uint32_t capabilities)
{
	DRM_INFO("Capabilities:\n");
	if (capabilities & SVGA_CAP_RECT_COPY)
		DRM_INFO("  Rect copy.\n");
	if (capabilities & SVGA_CAP_CURSOR)
		DRM_INFO("  Cursor.\n");
	if (capabilities & SVGA_CAP_CURSOR_BYPASS)
		DRM_INFO("  Cursor bypass.\n");
	if (capabilities & SVGA_CAP_CURSOR_BYPASS_2)
		DRM_INFO("  Cursor bypass 2.\n");
	if (capabilities & SVGA_CAP_8BIT_EMULATION)
		DRM_INFO("  8bit emulation.\n");
	if (capabilities & SVGA_CAP_ALPHA_CURSOR)
		DRM_INFO("  Alpha cursor.\n");
	if (capabilities & SVGA_CAP_3D)
		DRM_INFO("  3D.\n");
	if (capabilities & SVGA_CAP_EXTENDED_FIFO)
		DRM_INFO("  Extended Fifo.\n");
	if (capabilities & SVGA_CAP_MULTIMON)
		DRM_INFO("  Multimon.\n");
	if (capabilities & SVGA_CAP_PITCHLOCK)
		DRM_INFO("  Pitchlock.\n");
	if (capabilities & SVGA_CAP_IRQMASK)
		DRM_INFO("  Irq mask.\n");
	if (capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)
		DRM_INFO("  Display Topology.\n");
	if (capabilities & SVGA_CAP_GMR)
		DRM_INFO("  GMR.\n");
	if (capabilities & SVGA_CAP_TRACES)
		DRM_INFO("  Traces.\n");
	if (capabilities & SVGA_CAP_GMR2)
		DRM_INFO("  GMR2.\n");
	if (capabilities & SVGA_CAP_SCREEN_OBJECT_2)
		DRM_INFO("  Screen Object 2.\n");
	if (capabilities & SVGA_CAP_COMMAND_BUFFERS)
		DRM_INFO("  Command Buffers.\n");
	if (capabilities & SVGA_CAP_CMD_BUFFERS_2)
		DRM_INFO("  Command Buffers 2.\n");
	if (capabilities & SVGA_CAP_GBOBJECTS)
		DRM_INFO("  Guest Backed Resources.\n");
	if (capabilities & SVGA_CAP_DX)
		DRM_INFO("  DX Features.\n");
	if (capabilities & SVGA_CAP_HP_CMD_QUEUE)
		DRM_INFO("  HP Command Queue.\n");
}

/**
 * vmw_dummy_query_bo_create - create a bo to hold a dummy query result
 *
 * @dev_priv: A device private structure.
 *
 * This function creates a small buffer object that holds the query
 * result for dummy queries emitted as query barriers.
 * The function will then map the first page and initialize a pending
 * occlusion query result structure. Finally, it will unmap the buffer.
 * No interruptible waits are done within this function.
 *
 * Returns an error if bo creation or initialization fails.
 */
static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
{
	int ret;
	struct vmw_buffer_object *vbo;
	struct ttm_bo_kmap_obj map;
	volatile SVGA3dQueryResult *result;
	bool dummy;

	/*
	 * Create the vbo as pinned, so that a tryreserve will
	 * immediately succeed. This is because we're the only
	 * user of the bo currently.
	 */
	vbo = kzalloc(sizeof(*vbo), GFP_KERNEL);
	if (!vbo)
		return -ENOMEM;

	ret = vmw_bo_init(dev_priv, vbo, PAGE_SIZE,
			  &vmw_sys_ne_placement, false,
			  &vmw_bo_bo_free);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_reserve(&vbo->base, false, true, NULL);
	BUG_ON(ret != 0);
	vmw_bo_pin_reserved(vbo, true);

	ret = ttm_bo_kmap(&vbo->base, 0, 1, &map);
	if (likely(ret == 0)) {
		result = ttm_kmap_obj_virtual(&map, &dummy);
		result->totalSize = sizeof(*result);
		result->state = SVGA3D_QUERYSTATE_PENDING;
		result->result32 = 0xff;
		ttm_bo_kunmap(&map);
	}
	vmw_bo_pin_reserved(vbo, false);
	ttm_bo_unreserve(&vbo->base);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Dummy query buffer map failed.\n");
		vmw_bo_unreference(&vbo);
	} else
		dev_priv->dummy_query_bo = vbo;

	return ret;
}
400
Thomas Hellstrom153b3d52015-06-25 10:47:43 -0700401/**
402 * vmw_request_device_late - Perform late device setup
403 *
404 * @dev_priv: Pointer to device private.
405 *
406 * This function performs setup of otables and enables large command
407 * buffer submission. These tasks are split out to a separate function
408 * because it reverts vmw_release_device_early and is intended to be used
409 * by an error path in the hibernation code.
410 */
411static int vmw_request_device_late(struct vmw_private *dev_priv)
412{
413 int ret;
414
415 if (dev_priv->has_mob) {
416 ret = vmw_otables_setup(dev_priv);
417 if (unlikely(ret != 0)) {
418 DRM_ERROR("Unable to initialize "
419 "guest Memory OBjects.\n");
420 return ret;
421 }
422 }
423
Thomas Hellstrom3eab3d92015-06-25 11:57:56 -0700424 if (dev_priv->cman) {
425 ret = vmw_cmdbuf_set_pool_size(dev_priv->cman,
426 256*4096, 2*4096);
427 if (ret) {
428 struct vmw_cmdbuf_man *man = dev_priv->cman;
429
430 dev_priv->cman = NULL;
431 vmw_cmdbuf_man_destroy(man);
432 }
433 }
434
Thomas Hellstrom153b3d52015-06-25 10:47:43 -0700435 return 0;
436}
437
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000438static int vmw_request_device(struct vmw_private *dev_priv)
439{
440 int ret;
441
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000442 ret = vmw_fifo_init(dev_priv, &dev_priv->fifo);
443 if (unlikely(ret != 0)) {
444 DRM_ERROR("Unable to initialize FIFO.\n");
445 return ret;
446 }
Thomas Hellstromae2a1042011-09-01 20:18:44 +0000447 vmw_fence_fifo_up(dev_priv->fman);
Thomas Hellstrom3eab3d92015-06-25 11:57:56 -0700448 dev_priv->cman = vmw_cmdbuf_man_create(dev_priv);
Thomas Hellstromd80efd52015-08-10 10:39:35 -0700449 if (IS_ERR(dev_priv->cman)) {
Thomas Hellstrom3eab3d92015-06-25 11:57:56 -0700450 dev_priv->cman = NULL;
Thomas Hellstromd80efd52015-08-10 10:39:35 -0700451 dev_priv->has_dx = false;
Thomas Hellstrom3530bdc2012-11-21 10:49:52 +0100452 }
Thomas Hellstrom153b3d52015-06-25 10:47:43 -0700453
454 ret = vmw_request_device_late(dev_priv);
455 if (ret)
456 goto out_no_mob;
457
Thomas Hellstrome2fa3a72011-10-04 20:13:30 +0200458 ret = vmw_dummy_query_bo_create(dev_priv);
459 if (unlikely(ret != 0))
460 goto out_no_query_bo;
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000461
462 return 0;
Thomas Hellstrome2fa3a72011-10-04 20:13:30 +0200463
464out_no_query_bo:
Thomas Hellstrom3eab3d92015-06-25 11:57:56 -0700465 if (dev_priv->cman)
466 vmw_cmdbuf_remove_pool(dev_priv->cman);
Thomas Hellstrom153b3d52015-06-25 10:47:43 -0700467 if (dev_priv->has_mob) {
468 (void) ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
Thomas Hellstrom3530bdc2012-11-21 10:49:52 +0100469 vmw_otables_takedown(dev_priv);
Thomas Hellstrom153b3d52015-06-25 10:47:43 -0700470 }
Thomas Hellstrom3eab3d92015-06-25 11:57:56 -0700471 if (dev_priv->cman)
472 vmw_cmdbuf_man_destroy(dev_priv->cman);
Thomas Hellstrom3530bdc2012-11-21 10:49:52 +0100473out_no_mob:
Thomas Hellstrome2fa3a72011-10-04 20:13:30 +0200474 vmw_fence_fifo_down(dev_priv->fman);
475 vmw_fifo_release(dev_priv, &dev_priv->fifo);
476 return ret;
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000477}
478
Thomas Hellstrom153b3d52015-06-25 10:47:43 -0700479/**
480 * vmw_release_device_early - Early part of fifo takedown.
481 *
482 * @dev_priv: Pointer to device private struct.
483 *
484 * This is the first part of command submission takedown, to be called before
485 * buffer management is taken down.
486 */
487static void vmw_release_device_early(struct vmw_private *dev_priv)
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000488{
Thomas Hellstrome2fa3a72011-10-04 20:13:30 +0200489 /*
490 * Previous destructions should've released
491 * the pinned bo.
492 */
493
494 BUG_ON(dev_priv->pinned_bo != NULL);
495
Thomas Hellstromf1d34bf2018-06-19 15:02:16 +0200496 vmw_bo_unreference(&dev_priv->dummy_query_bo);
Thomas Hellstrom3eab3d92015-06-25 11:57:56 -0700497 if (dev_priv->cman)
498 vmw_cmdbuf_remove_pool(dev_priv->cman);
499
Thomas Hellstrom153b3d52015-06-25 10:47:43 -0700500 if (dev_priv->has_mob) {
501 ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
Thomas Hellstrom3530bdc2012-11-21 10:49:52 +0100502 vmw_otables_takedown(dev_priv);
Thomas Hellstrom30c78bb2010-10-01 10:21:48 +0200503 }
Thomas Hellstrom30c78bb2010-10-01 10:21:48 +0200504}
505
Thomas Hellstrom05730b32011-08-31 07:42:52 +0000506/**
Thomas Hellstrom153b3d52015-06-25 10:47:43 -0700507 * vmw_release_device_late - Late part of fifo takedown.
508 *
509 * @dev_priv: Pointer to device private struct.
510 *
511 * This is the last part of the command submission takedown, to be called when
512 * command submission is no longer needed. It may wait on pending fences.
Thomas Hellstrom05730b32011-08-31 07:42:52 +0000513 */
Thomas Hellstrom153b3d52015-06-25 10:47:43 -0700514static void vmw_release_device_late(struct vmw_private *dev_priv)
Thomas Hellstrom30c78bb2010-10-01 10:21:48 +0200515{
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000516 vmw_fence_fifo_down(dev_priv->fman);
Thomas Hellstrom3eab3d92015-06-25 11:57:56 -0700517 if (dev_priv->cman)
518 vmw_cmdbuf_man_destroy(dev_priv->cman);
Thomas Hellstrom30c78bb2010-10-01 10:21:48 +0200519
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000520 vmw_fifo_release(dev_priv, &dev_priv->fifo);
Thomas Hellstrom30c78bb2010-10-01 10:21:48 +0200521}
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000522
/**
 * vmw_get_initial_size - Set the initial_[width|height] fields on the
 * given vmw_private.
 *
 * It does so by reading the SVGA_REG_[WIDTH|HEIGHT] registers and then
 * clamping the values up to VMW_MIN_INITIAL_[WIDTH|HEIGHT]. If the
 * resulting values exceed fb_max_[width|height], which indicates a host
 * error, they are reset to VMW_MIN_INITIAL_[WIDTH|HEIGHT].
 */
static void vmw_get_initial_size(struct vmw_private *dev_priv)
{
	uint32_t width;
	uint32_t height;

	width = vmw_read(dev_priv, SVGA_REG_WIDTH);
	height = vmw_read(dev_priv, SVGA_REG_HEIGHT);

	width = max_t(uint32_t, width, VMW_MIN_INITIAL_WIDTH);
	height = max_t(uint32_t, height, VMW_MIN_INITIAL_HEIGHT);

	if (width > dev_priv->fb_max_width ||
	    height > dev_priv->fb_max_height) {

		/*
		 * This is a host error and shouldn't occur.
		 */

		width = VMW_MIN_INITIAL_WIDTH;
		height = VMW_MIN_INITIAL_HEIGHT;
	}

	dev_priv->initial_width = width;
	dev_priv->initial_height = height;
}

/**
 * vmw_dma_select_mode - Determine how DMA mappings should be set up for this
 * system.
 *
 * @dev_priv: Pointer to a struct vmw_private
 *
 * This function tries to determine what actions need to be taken by the
 * driver to make system pages visible to the device.
 * If this function decides that DMA is not possible, it returns -EINVAL.
 * The driver may then try to disable features of the device that require
 * DMA.
 */
static int vmw_dma_select_mode(struct vmw_private *dev_priv)
{
	static const char *names[vmw_dma_map_max] = {
		[vmw_dma_phys] = "Using physical TTM page addresses.",
		[vmw_dma_alloc_coherent] = "Using coherent TTM pages.",
		[vmw_dma_map_populate] = "Caching DMA mappings.",
		[vmw_dma_map_bind] = "Giving up DMA mappings early."};

	if (vmw_force_coherent)
		dev_priv->map_mode = vmw_dma_alloc_coherent;
	else if (vmw_restrict_iommu)
		dev_priv->map_mode = vmw_dma_map_bind;
	else
		dev_priv->map_mode = vmw_dma_map_populate;

	if (!IS_ENABLED(CONFIG_DRM_TTM_DMA_PAGE_POOL) &&
	    (dev_priv->map_mode == vmw_dma_alloc_coherent))
		return -EINVAL;

	DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);
	return 0;
}

/**
 * vmw_dma_masks - set required page- and dma masks
 *
 * @dev_priv: Pointer to a struct vmw_private.
 *
 * With 32-bit we can only handle 32 bit PFNs. Optionally set that
 * restriction also for 64-bit systems.
 */
static int vmw_dma_masks(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	int ret = 0;

	ret = dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64));
	if (dev_priv->map_mode != vmw_dma_phys &&
	    (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) {
		DRM_INFO("Restricting DMA addresses to 44 bits.\n");
		return dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(44));
	}

	return ret;
}
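
/*
 * The 44-bit mask above follows from the 32-bit PFN limit mentioned in the
 * kerneldoc: a 32-bit page frame number plus the 12-bit in-page offset
 * (4096-byte pages) gives 32 + 12 = 44 addressable bits.
 */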

static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
{
	struct vmw_private *dev_priv;
	int ret;
	uint32_t svga_id;
	enum vmw_res_type i;
	bool refuse_dma = false;
	char host_log[100] = {0};

	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
	if (unlikely(!dev_priv)) {
		DRM_ERROR("Failed allocating a device private struct.\n");
		return -ENOMEM;
	}

	pci_set_master(dev->pdev);

	dev_priv->dev = dev;
	dev_priv->vmw_chipset = chipset;
	dev_priv->last_read_seqno = (uint32_t) -100;
	mutex_init(&dev_priv->cmdbuf_mutex);
	mutex_init(&dev_priv->release_mutex);
	mutex_init(&dev_priv->binding_mutex);
	mutex_init(&dev_priv->global_kms_state_mutex);
	ttm_lock_init(&dev_priv->reservation_sem);
	spin_lock_init(&dev_priv->resource_lock);
	spin_lock_init(&dev_priv->hw_lock);
	spin_lock_init(&dev_priv->waiter_lock);
	spin_lock_init(&dev_priv->cap_lock);
	spin_lock_init(&dev_priv->svga_lock);
	spin_lock_init(&dev_priv->cursor_lock);

	for (i = vmw_res_context; i < vmw_res_max; ++i) {
		idr_init(&dev_priv->res_idr[i]);
		INIT_LIST_HEAD(&dev_priv->res_lru[i]);
	}

	init_waitqueue_head(&dev_priv->fence_queue);
	init_waitqueue_head(&dev_priv->fifo_queue);
	dev_priv->fence_queue_waiters = 0;
	dev_priv->fifo_queue_waiters = 0;

	dev_priv->used_memory_size = 0;

	dev_priv->io_start = pci_resource_start(dev->pdev, 0);
	dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
	dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);

	dev_priv->assume_16bpp = !!vmw_assume_16bpp;

	dev_priv->enable_fb = enable_fbdev;

	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
	svga_id = vmw_read(dev_priv, SVGA_REG_ID);
	if (svga_id != SVGA_ID_2) {
		ret = -ENOSYS;
		DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id);
		goto out_err0;
	}

	dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);

	if (dev_priv->capabilities & SVGA_CAP_CAP2_REGISTER) {
		dev_priv->capabilities2 = vmw_read(dev_priv, SVGA_REG_CAP2);
	}


	ret = vmw_dma_select_mode(dev_priv);
	if (unlikely(ret != 0)) {
		DRM_INFO("Restricting capabilities due to IOMMU setup.\n");
		refuse_dma = true;
	}

	dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
	dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
	dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
	dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);

	vmw_get_initial_size(dev_priv);

	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		dev_priv->max_gmr_ids =
			vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
		dev_priv->max_gmr_pages =
			vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES);
		dev_priv->memory_size =
			vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE);
		dev_priv->memory_size -= dev_priv->vram_size;
	} else {
		/*
		 * An arbitrary limit of 512MiB on surface
		 * memory. But all HWV8 hardware supports GMR2.
		 */
		dev_priv->memory_size = 512*1024*1024;
	}
	dev_priv->max_mob_pages = 0;
	dev_priv->max_mob_size = 0;
	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
		uint64_t mem_size =
			vmw_read(dev_priv,
				 SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB);

		/*
		 * Workaround for low memory 2D VMs to compensate for the
		 * allocation taken by fbdev
		 */
		if (!(dev_priv->capabilities & SVGA_CAP_3D))
			mem_size *= 3;

		dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
		dev_priv->prim_bb_mem =
			vmw_read(dev_priv,
				 SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM);
		dev_priv->max_mob_size =
			vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
		dev_priv->stdu_max_width =
			vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_WIDTH);
		dev_priv->stdu_max_height =
			vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_HEIGHT);

		vmw_write(dev_priv, SVGA_REG_DEV_CAP,
			  SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH);
		dev_priv->texture_max_width = vmw_read(dev_priv,
						       SVGA_REG_DEV_CAP);
		vmw_write(dev_priv, SVGA_REG_DEV_CAP,
			  SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT);
		dev_priv->texture_max_height = vmw_read(dev_priv,
							SVGA_REG_DEV_CAP);
	} else {
		dev_priv->texture_max_width = 8192;
		dev_priv->texture_max_height = 8192;
		dev_priv->prim_bb_mem = dev_priv->vram_size;
	}

	vmw_print_capabilities(dev_priv->capabilities);
	if (dev_priv->capabilities & SVGA_CAP_CAP2_REGISTER)
		vmw_print_capabilities2(dev_priv->capabilities2);

	ret = vmw_dma_masks(dev_priv);
	if (unlikely(ret != 0))
		goto out_err0;

	dma_set_max_seg_size(dev->dev, min_t(unsigned int, U32_MAX & PAGE_MASK,
					     SCATTERLIST_MAX_SEGMENT));

	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		DRM_INFO("Max GMR ids is %u\n",
			 (unsigned)dev_priv->max_gmr_ids);
		DRM_INFO("Max number of GMR pages is %u\n",
			 (unsigned)dev_priv->max_gmr_pages);
		DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n",
			 (unsigned)dev_priv->memory_size / 1024);
	}
	DRM_INFO("Maximum display memory size is %u kiB\n",
		 dev_priv->prim_bb_mem / 1024);
	DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
		 dev_priv->vram_start, dev_priv->vram_size / 1024);
	DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
		 dev_priv->mmio_start, dev_priv->mmio_size / 1024);

	dev_priv->mmio_virt = memremap(dev_priv->mmio_start,
				       dev_priv->mmio_size, MEMREMAP_WB);

	if (unlikely(dev_priv->mmio_virt == NULL)) {
		ret = -ENOMEM;
		DRM_ERROR("Failed mapping MMIO.\n");
		goto out_err0;
	}

	/* Need mmio memory to check for fifo pitchlock cap. */
	if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
	    !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) &&
	    !vmw_fifo_have_pitchlock(dev_priv)) {
		ret = -ENOSYS;
		DRM_ERROR("Hardware has no pitchlock\n");
		goto out_err4;
	}

	dev_priv->tdev = ttm_object_device_init(&ttm_mem_glob, 12,
						&vmw_prime_dmabuf_ops);

	if (unlikely(dev_priv->tdev == NULL)) {
		DRM_ERROR("Unable to initialize TTM object management.\n");
		ret = -ENOMEM;
		goto out_err4;
	}

	dev->dev_private = dev_priv;

	ret = pci_request_regions(dev->pdev, "vmwgfx probe");
	dev_priv->stealth = (ret != 0);
	if (dev_priv->stealth) {
		/**
		 * Request at least the mmio PCI resource.
		 */

		DRM_INFO("It appears like vesafb is loaded. "
			 "Ignore above error if any.\n");
		ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe");
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed reserving the SVGA MMIO resource.\n");
			goto out_no_device;
		}
	}

	if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
		ret = vmw_irq_install(dev, dev->pdev->irq);
		if (ret != 0) {
			DRM_ERROR("Failed installing irq: %d\n", ret);
			goto out_no_irq;
		}
	}

	dev_priv->fman = vmw_fence_manager_init(dev_priv);
	if (unlikely(dev_priv->fman == NULL)) {
		ret = -ENOMEM;
		goto out_no_fman;
	}

	drm_vma_offset_manager_init(&dev_priv->vma_manager,
				    DRM_FILE_PAGE_OFFSET_START,
				    DRM_FILE_PAGE_OFFSET_SIZE);
	ret = ttm_bo_device_init(&dev_priv->bdev,
				 &vmw_bo_driver,
				 dev->anon_inode->i_mapping,
				 &dev_priv->vma_manager,
				 false);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing TTM buffer object driver.\n");
		goto out_no_bdev;
	}

	/*
	 * Enable VRAM, but initially don't use it until SVGA is enabled and
	 * unhidden.
	 */
	ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
			     (dev_priv->vram_size >> PAGE_SHIFT));
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing memory manager for VRAM.\n");
		goto out_no_vram;
	}
	dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;

	dev_priv->has_gmr = true;
	if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
	    refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
					 VMW_PL_GMR) != 0) {
		DRM_INFO("No GMR memory available. "
			 "Graphics memory resources are very limited.\n");
		dev_priv->has_gmr = false;
	}

	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
		dev_priv->has_mob = true;
		if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB,
				   VMW_PL_MOB) != 0) {
			DRM_INFO("No MOB memory available. "
				 "3D will be disabled.\n");
			dev_priv->has_mob = false;
		}
	}

	if (dev_priv->has_mob) {
		spin_lock(&dev_priv->cap_lock);
		vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_DXCONTEXT);
		dev_priv->has_dx = !!vmw_read(dev_priv, SVGA_REG_DEV_CAP);
		spin_unlock(&dev_priv->cap_lock);
	}

	vmw_validation_mem_init_ttm(dev_priv, VMWGFX_VALIDATION_MEM_GRAN);
	ret = vmw_kms_init(dev_priv);
	if (unlikely(ret != 0))
		goto out_no_kms;
	vmw_overlay_init(dev_priv);

	ret = vmw_request_device(dev_priv);
	if (ret)
		goto out_no_fifo;

	if (dev_priv->has_dx) {
		/*
		 * SVGA_CAP2_DX2 (DefineGBSurface_v3) is needed for SM4_1
		 * support
		 */
		if ((dev_priv->capabilities2 & SVGA_CAP2_DX2) != 0) {
			vmw_write(dev_priv, SVGA_REG_DEV_CAP,
				  SVGA3D_DEVCAP_SM41);
			dev_priv->has_sm4_1 = vmw_read(dev_priv,
						       SVGA_REG_DEV_CAP);
		}
	}

	DRM_INFO("DX: %s\n", dev_priv->has_dx ? "yes." : "no.");
	DRM_INFO("Atomic: %s\n", (dev->driver->driver_features & DRIVER_ATOMIC)
		 ? "yes." : "no.");
	DRM_INFO("SM4_1: %s\n", dev_priv->has_sm4_1 ? "yes." : "no.");

	snprintf(host_log, sizeof(host_log), "vmwgfx: %s-%s",
		 VMWGFX_REPO, VMWGFX_GIT_VERSION);
	vmw_host_log(host_log);

	memset(host_log, 0, sizeof(host_log));
	snprintf(host_log, sizeof(host_log), "vmwgfx: Module Version: %d.%d.%d",
		 VMWGFX_DRIVER_MAJOR, VMWGFX_DRIVER_MINOR,
		 VMWGFX_DRIVER_PATCHLEVEL);
	vmw_host_log(host_log);

	if (dev_priv->enable_fb) {
		vmw_fifo_resource_inc(dev_priv);
		vmw_svga_enable(dev_priv);
		vmw_fb_init(dev_priv);
	}

	dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
	register_pm_notifier(&dev_priv->pm_nb);

	return 0;

out_no_fifo:
	vmw_overlay_close(dev_priv);
	vmw_kms_close(dev_priv);
out_no_kms:
	if (dev_priv->has_mob)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
	if (dev_priv->has_gmr)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
out_no_vram:
	(void)ttm_bo_device_release(&dev_priv->bdev);
out_no_bdev:
	vmw_fence_manager_takedown(dev_priv->fman);
out_no_fman:
	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
		vmw_irq_uninstall(dev_priv->dev);
out_no_irq:
	if (dev_priv->stealth)
		pci_release_region(dev->pdev, 2);
	else
		pci_release_regions(dev->pdev);
out_no_device:
	ttm_object_device_release(&dev_priv->tdev);
out_err4:
	memunmap(dev_priv->mmio_virt);
out_err0:
	for (i = vmw_res_context; i < vmw_res_max; ++i)
		idr_destroy(&dev_priv->res_idr[i]);

	if (dev_priv->ctx.staged_bindings)
		vmw_binding_state_free(dev_priv->ctx.staged_bindings);
	kfree(dev_priv);
	return ret;
}
969
Gabriel Krisman Bertazi11b3c202017-01-06 15:57:31 -0200970static void vmw_driver_unload(struct drm_device *dev)
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000971{
972 struct vmw_private *dev_priv = vmw_priv(dev);
Thomas Hellstromc0951b72012-11-20 12:19:35 +0000973 enum vmw_res_type i;
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000974
Thomas Hellstromd9f36a02010-01-13 22:28:43 +0100975 unregister_pm_notifier(&dev_priv->pm_nb);
976
Thomas Hellstromc0951b72012-11-20 12:19:35 +0000977 if (dev_priv->ctx.res_ht_initialized)
978 drm_ht_remove(&dev_priv->ctx.res_ht);
Markus Elfringa3a1a662014-11-19 17:50:19 +0100979 vfree(dev_priv->ctx.cmd_bounce);
Thomas Hellstrom30c78bb2010-10-01 10:21:48 +0200980 if (dev_priv->enable_fb) {
Sinclair Yeh05c95012015-08-11 22:53:39 -0700981 vmw_fb_off(dev_priv);
Thomas Hellstrom30c78bb2010-10-01 10:21:48 +0200982 vmw_fb_close(dev_priv);
Thomas Hellstrom153b3d52015-06-25 10:47:43 -0700983 vmw_fifo_resource_dec(dev_priv);
984 vmw_svga_disable(dev_priv);
Thomas Hellstrom30c78bb2010-10-01 10:21:48 +0200985 }
Thomas Hellstrom153b3d52015-06-25 10:47:43 -0700986
Thomas Hellstromf2d12b82010-02-15 14:45:22 +0000987 vmw_kms_close(dev_priv);
988 vmw_overlay_close(dev_priv);
Thomas Hellstrom34583902015-03-05 02:33:24 -0800989
Thomas Hellstrom34583902015-03-05 02:33:24 -0800990 if (dev_priv->has_gmr)
991 (void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
992 (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
993
Thomas Hellstrom153b3d52015-06-25 10:47:43 -0700994 vmw_release_device_early(dev_priv);
995 if (dev_priv->has_mob)
996 (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
997 (void) ttm_bo_device_release(&dev_priv->bdev);
Gerd Hoffmann293f86b2019-09-05 09:05:08 +0200998 drm_vma_offset_manager_destroy(&dev_priv->vma_manager);
Thomas Hellstrom153b3d52015-06-25 10:47:43 -0700999 vmw_release_device_late(dev_priv);
Thomas Hellstromae2a1042011-09-01 20:18:44 +00001000 vmw_fence_manager_takedown(dev_priv->fman);
Thomas Hellstrom506ff752012-11-09 12:26:14 +00001001 if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
Thomas Hellstrome3001732017-08-24 08:06:27 +02001002 vmw_irq_uninstall(dev_priv->dev);
Thomas Hellstromf2d12b82010-02-15 14:45:22 +00001003 if (dev_priv->stealth)
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00001004 pci_release_region(dev->pdev, 2);
Thomas Hellstromf2d12b82010-02-15 14:45:22 +00001005 else
1006 pci_release_regions(dev->pdev);
1007
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00001008 ttm_object_device_release(&dev_priv->tdev);
Thomas Hellstromb76ff5e2015-10-28 10:44:04 +01001009 memunmap(dev_priv->mmio_virt);
Thomas Hellstromd80efd52015-08-10 10:39:35 -07001010 if (dev_priv->ctx.staged_bindings)
1011 vmw_binding_state_free(dev_priv->ctx.staged_bindings);
Thomas Hellstromc0951b72012-11-20 12:19:35 +00001012
1013 for (i = vmw_res_context; i < vmw_res_max; ++i)
1014 idr_destroy(&dev_priv->res_idr[i]);
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00001015
1016 kfree(dev_priv);
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00001017}

static void vmw_postclose(struct drm_device *dev,
			  struct drm_file *file_priv)
{
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);

	ttm_object_file_release(&vmw_fp->tfile);
	kfree(vmw_fp);
}

static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp;
	int ret = -ENOMEM;

	vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL);
	if (unlikely(!vmw_fp))
		return ret;

	vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
	if (unlikely(vmw_fp->tfile == NULL))
		goto out_no_tfile;

	file_priv->driver_priv = vmw_fp;

	return 0;

out_no_tfile:
	kfree(vmw_fp);
	return ret;
}
1050
Thomas Hellstrom64190bd2014-02-27 12:56:08 +01001051static long vmw_generic_ioctl(struct file *filp, unsigned int cmd,
1052 unsigned long arg,
1053 long (*ioctl_func)(struct file *, unsigned int,
1054 unsigned long))
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00001055{
1056 struct drm_file *file_priv = filp->private_data;
1057 struct drm_device *dev = file_priv->minor->dev;
1058 unsigned int nr = DRM_IOCTL_NR(cmd);
Thomas Hellstrom64190bd2014-02-27 12:56:08 +01001059 unsigned int flags;
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00001060
1061 /*
Thomas Hellstrome1f78002009-12-08 12:57:51 +01001062 * Do extra checking on driver private ioctls.
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00001063 */
1064
1065 if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
1066 && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
Rob Clarkbaa70942013-08-02 13:27:49 -04001067 const struct drm_ioctl_desc *ioctl =
Thomas Hellstrom64190bd2014-02-27 12:56:08 +01001068 &vmw_ioctls[nr - DRM_COMMAND_BASE];
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00001069
Thomas Hellstromd80efd52015-08-10 10:39:35 -07001070 if (nr == DRM_COMMAND_BASE + DRM_VMW_EXECBUF) {
Emil Velikovcbfbe472019-05-22 17:41:17 +01001071 return ioctl_func(filp, cmd, arg);
Thomas Hellstrom31788ca2017-02-21 17:42:27 +07001072 } else if (nr == DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT) {
1073 if (!drm_is_current_master(file_priv) &&
1074 !capable(CAP_SYS_ADMIN))
1075 return -EACCES;
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00001076 }
Thomas Hellstromd80efd52015-08-10 10:39:35 -07001077
1078 if (unlikely(ioctl->cmd != cmd))
1079 goto out_io_encoding;
1080
Thomas Hellstrom64190bd2014-02-27 12:56:08 +01001081 flags = ioctl->flags;
1082 } else if (!drm_ioctl_flags(nr, &flags))
1083 return -EINVAL;
1084
Thomas Hellstrom9c84aeb2019-05-28 08:08:55 +02001085 return ioctl_func(filp, cmd, arg);
Thomas Hellstromd80efd52015-08-10 10:39:35 -07001086
1087out_io_encoding:
1088 DRM_ERROR("Invalid command format, ioctl %d\n",
1089 nr - DRM_COMMAND_BASE);
1090
1091 return -EINVAL;
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00001092}
1093
Thomas Hellstrom64190bd2014-02-27 12:56:08 +01001094static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
1095 unsigned long arg)
1096{
1097 return vmw_generic_ioctl(filp, cmd, arg, &drm_ioctl);
1098}
1099
#ifdef CONFIG_COMPAT
static long vmw_compat_ioctl(struct file *filp, unsigned int cmd,
			     unsigned long arg)
{
	return vmw_generic_ioctl(filp, cmd, arg, &drm_compat_ioctl);
}
#endif

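/**
 * vmw_master_set - Called by the DRM core when a file becomes master.
 *
 * @dev: Pointer to the drm device.
 * @file_priv: The file private of the new master.
 * @from_open: True if the master is set as part of a device open.
 */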
static int vmw_master_set(struct drm_device *dev,
			  struct drm_file *file_priv,
			  bool from_open)
{
	/*
	 * Inform a new master that the layout may have changed while
	 * it was gone.
	 */
	if (!from_open)
		drm_sysfs_hotplug_event(dev);

	return 0;
}

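/**
 * vmw_master_drop - Called by the DRM core when a file drops master.
 *
 * @dev: Pointer to the drm device.
 * @file_priv: The file private of the master being dropped.
 *
 * Clears legacy cursor hotspot state and, unless fbdev emulation keeps
 * the device in use, disables SVGA mode until a new master appears.
 */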
static void vmw_master_drop(struct drm_device *dev,
			    struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);

	vmw_kms_legacy_hotspot_clear(dev_priv);
	if (!dev_priv->enable_fb)
		vmw_svga_disable(dev_priv);
}

/**
 * __vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 *
 * Needs the reservation sem to be held in non-exclusive mode.
 */
static void __vmw_svga_enable(struct vmw_private *dev_priv)
{
	spin_lock(&dev_priv->svga_lock);
	if (!dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
		vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE);
		dev_priv->bdev.man[TTM_PL_VRAM].use_type = true;
	}
	spin_unlock(&dev_priv->svga_lock);
}

/**
 * vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 */
void vmw_svga_enable(struct vmw_private *dev_priv)
{
	(void) ttm_read_lock(&dev_priv->reservation_sem, false);
	__vmw_svga_enable(dev_priv);
	ttm_read_unlock(&dev_priv->reservation_sem);
}

/**
 * __vmw_svga_disable - Disable SVGA mode and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 *
 * Needs the reservation sem to be held in exclusive mode.
 * Will not empty VRAM. VRAM must be emptied by caller.
 */
static void __vmw_svga_disable(struct vmw_private *dev_priv)
{
	spin_lock(&dev_priv->svga_lock);
	if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
		dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
		vmw_write(dev_priv, SVGA_REG_ENABLE,
			  SVGA_REG_ENABLE_HIDE |
			  SVGA_REG_ENABLE_ENABLE);
	}
	spin_unlock(&dev_priv->svga_lock);
}

/**
 * vmw_svga_disable - Disable SVGA mode and use of VRAM, keeping the
 * FIFO running.
 *
 * @dev_priv: Pointer to device private struct.
 *
 * Will empty VRAM.
 */
void vmw_svga_disable(struct vmw_private *dev_priv)
{
	/*
	 * Disabling SVGA will turn off device modesetting capabilities, so
	 * notify KMS about that so that it doesn't cache atomic state that
	 * isn't valid anymore, for example crtcs turned on.
	 * Strictly we'd want to do this under the SVGA lock (or an SVGA
	 * mutex), but vmw_kms_lost_device() takes the reservation sem and
	 * thus we'd end up with a lock order reversal. Hence, a master may
	 * actually perform a new modeset just after we call
	 * vmw_kms_lost_device() and race with vmw_svga_disable(), but that
	 * should at worst cause atomic KMS state to be inconsistent with the
	 * device, causing modesetting problems.
	 */
	vmw_kms_lost_device(dev_priv->dev);
	ttm_write_lock(&dev_priv->reservation_sem, false);
	spin_lock(&dev_priv->svga_lock);
	if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
		dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
		spin_unlock(&dev_priv->svga_lock);
		if (ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM))
			DRM_ERROR("Failed evicting VRAM buffers.\n");
		vmw_write(dev_priv, SVGA_REG_ENABLE,
			  SVGA_REG_ENABLE_HIDE |
			  SVGA_REG_ENABLE_ENABLE);
	} else
		spin_unlock(&dev_priv->svga_lock);
	ttm_write_unlock(&dev_priv->reservation_sem);
}

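/**
 * vmw_remove - PCI remove callback; tears down the device in reverse
 * probe order.
 *
 * @pdev: The PCI device being removed.
 */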
static void vmw_remove(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	drm_dev_unregister(dev);
	vmw_driver_unload(dev);
	drm_dev_put(dev);
	pci_disable_device(pdev);
}

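/**
 * vmwgfx_pm_notifier - PM notifier callback, invoked around hibernation.
 *
 * @nb: The notifier block embedded in struct vmw_private.
 * @val: The PM event, for example PM_HIBERNATION_PREPARE.
 * @ptr: Unused.
 *
 * Takes the reservation sem in write mode before user-space is frozen,
 * and releases it again once hibernation has completed or been aborted.
 */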
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
			      void *ptr)
{
	struct vmw_private *dev_priv =
		container_of(nb, struct vmw_private, pm_nb);

	switch (val) {
	case PM_HIBERNATION_PREPARE:
		/*
		 * Take the reservation sem in write mode, which will make sure
		 * there are no other processes holding a buffer object
		 * reservation, meaning we should be able to evict all buffer
		 * objects if needed.
		 * Once user-space processes have been frozen, we can release
		 * the lock again.
		 */
		ttm_suspend_lock(&dev_priv->reservation_sem);
		dev_priv->suspend_locked = true;
		break;
	case PM_POST_HIBERNATION:
	case PM_POST_RESTORE:
		if (READ_ONCE(dev_priv->suspend_locked)) {
			dev_priv->suspend_locked = false;
			ttm_suspend_unlock(&dev_priv->reservation_sem);
		}
		break;
	default:
		break;
	}
	return 0;
}

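/*
 * Legacy-style PCI suspend: save PCI state and put the device in D3hot,
 * unless the driver has flagged that hibernation must be refused.
 */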
static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);

	if (dev_priv->refuse_hibernation)
		return -EBUSY;

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
	return 0;
}

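/* Legacy-style PCI resume: back to D0, restore state, re-enable. */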
static int vmw_pci_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	return pci_enable_device(pdev);
}

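/*
 * dev_pm_ops wrappers around the PCI suspend/resume helpers above.
 * The pm_message_t argument is unused, so a dummy value is passed.
 */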
static int vmw_pm_suspend(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct pm_message dummy;

	dummy.event = 0;

	return vmw_pci_suspend(pdev, dummy);
}

static int vmw_pm_resume(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);

	return vmw_pci_resume(pdev);
}

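/**
 * vmw_pm_freeze - Prepare the device for hibernation.
 *
 * @kdev: The struct device embedded in the PCI device.
 *
 * Suspends modesetting and fbdev emulation, evicts all resources and
 * buffer objects and shuts down SVGA mode. Fails with -EBUSY and rolls
 * back if 3D resources are still active.
 */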
static int vmw_pm_freeze(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);
	int ret;

	/*
	 * Unlock for vmw_kms_suspend.
	 * No user-space processes should be running now.
	 */
	ttm_suspend_unlock(&dev_priv->reservation_sem);
	ret = vmw_kms_suspend(dev_priv->dev);
	if (ret) {
		ttm_suspend_lock(&dev_priv->reservation_sem);
		DRM_ERROR("Failed to freeze modesetting.\n");
		return ret;
	}
	if (dev_priv->enable_fb)
		vmw_fb_off(dev_priv);

	ttm_suspend_lock(&dev_priv->reservation_sem);
	vmw_execbuf_release_pinned_bo(dev_priv);
	vmw_resource_evict_all(dev_priv);
	vmw_release_device_early(dev_priv);
	ttm_bo_swapout_all(&dev_priv->bdev);
	if (dev_priv->enable_fb)
		vmw_fifo_resource_dec(dev_priv);
	if (atomic_read(&dev_priv->num_fifo_resources) != 0) {
		DRM_ERROR("Can't hibernate while 3D resources are active.\n");
		if (dev_priv->enable_fb)
			vmw_fifo_resource_inc(dev_priv);
		WARN_ON(vmw_request_device_late(dev_priv));
		dev_priv->suspend_locked = false;
		ttm_suspend_unlock(&dev_priv->reservation_sem);
		if (dev_priv->suspend_state)
			vmw_kms_resume(dev);
		if (dev_priv->enable_fb)
			vmw_fb_on(dev_priv);
		return -EBUSY;
	}

	vmw_fence_fifo_down(dev_priv->fman);
	__vmw_svga_disable(dev_priv);

	vmw_release_device_late(dev_priv);
	return 0;
}

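/**
 * vmw_pm_restore - Bring the device back up after hibernation or thaw.
 *
 * @kdev: The struct device embedded in the PCI device.
 *
 * Re-negotiates the SVGA device version, reacquires device resources,
 * re-enables SVGA mode and resumes modesetting and fbdev emulation.
 */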
static int vmw_pm_restore(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);
	int ret;

	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
	(void) vmw_read(dev_priv, SVGA_REG_ID);

	if (dev_priv->enable_fb)
		vmw_fifo_resource_inc(dev_priv);

	ret = vmw_request_device(dev_priv);
	if (ret)
		return ret;

	if (dev_priv->enable_fb)
		__vmw_svga_enable(dev_priv);

	vmw_fence_fifo_up(dev_priv->fman);
	dev_priv->suspend_locked = false;
	ttm_suspend_unlock(&dev_priv->reservation_sem);
	if (dev_priv->suspend_state)
		vmw_kms_resume(dev_priv->dev);

	if (dev_priv->enable_fb)
		vmw_fb_on(dev_priv);

	return 0;
}

static const struct dev_pm_ops vmw_pm_ops = {
	.freeze = vmw_pm_freeze,
	.thaw = vmw_pm_restore,
	.restore = vmw_pm_restore,
	.suspend = vmw_pm_suspend,
	.resume = vmw_pm_resume,
};

static const struct file_operations vmwgfx_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = vmw_unlocked_ioctl,
	.mmap = vmw_mmap,
	.poll = vmw_fops_poll,
	.read = vmw_fops_read,
#if defined(CONFIG_COMPAT)
	.compat_ioctl = vmw_compat_ioctl,
#endif
	.llseek = noop_llseek,
};

static struct drm_driver driver = {
	.driver_features =
	    DRIVER_MODESET | DRIVER_RENDER | DRIVER_ATOMIC,
	.get_vblank_counter = vmw_get_vblank_counter,
	.enable_vblank = vmw_enable_vblank,
	.disable_vblank = vmw_disable_vblank,
	.ioctls = vmw_ioctls,
	.num_ioctls = ARRAY_SIZE(vmw_ioctls),
	.master_set = vmw_master_set,
	.master_drop = vmw_master_drop,
	.open = vmw_driver_open,
	.postclose = vmw_postclose,

	.dumb_create = vmw_dumb_create,
	.dumb_map_offset = vmw_dumb_map_offset,
	.dumb_destroy = vmw_dumb_destroy,

	.prime_fd_to_handle = vmw_prime_fd_to_handle,
	.prime_handle_to_fd = vmw_prime_handle_to_fd,

	.fops = &vmwgfx_driver_fops,
	.name = VMWGFX_DRIVER_NAME,
	.desc = VMWGFX_DRIVER_DESC,
	.date = VMWGFX_DRIVER_DATE,
	.major = VMWGFX_DRIVER_MAJOR,
	.minor = VMWGFX_DRIVER_MINOR,
	.patchlevel = VMWGFX_DRIVER_PATCHLEVEL
};

static struct pci_driver vmw_pci_driver = {
	.name = VMWGFX_DRIVER_NAME,
	.id_table = vmw_pci_id_list,
	.probe = vmw_probe,
	.remove = vmw_remove,
	.driver = {
		.pm = &vmw_pm_ops
	}
};

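/**
 * vmw_probe - PCI probe callback; brings up a vmwgfx device.
 *
 * @pdev: The PCI device to probe.
 * @ent: The matching entry in vmw_pci_id_list.
 *
 * Enables the PCI device, allocates and loads the drm device and
 * finally registers it, unwinding each step on failure.
 */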
static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct drm_device *dev;
	int ret;

	ret = pci_enable_device(pdev);
	if (ret)
		return ret;

	dev = drm_dev_alloc(&driver, &pdev->dev);
	if (IS_ERR(dev)) {
		ret = PTR_ERR(dev);
		goto err_pci_disable_device;
	}

	dev->pdev = pdev;
	pci_set_drvdata(pdev, dev);

	ret = vmw_driver_load(dev, ent->driver_data);
	if (ret)
		goto err_drm_dev_put;

	ret = drm_dev_register(dev, ent->driver_data);
	if (ret)
		goto err_vmw_driver_unload;

	return 0;

err_vmw_driver_unload:
	vmw_driver_unload(dev);
err_drm_dev_put:
	drm_dev_put(dev);
err_pci_disable_device:
	pci_disable_device(pdev);
	return ret;
}

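/*
 * Module init: bail out if the VGA console has forced text mode (for
 * example via the "nomodeset" kernel parameter), then register the
 * PCI driver.
 */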
static int __init vmwgfx_init(void)
{
	int ret;

	if (vgacon_text_force())
		return -EINVAL;

	ret = pci_register_driver(&vmw_pci_driver);
	if (ret)
		DRM_ERROR("Failed initializing DRM.\n");
	return ret;
}

static void __exit vmwgfx_exit(void)
{
	pci_unregister_driver(&vmw_pci_driver);
}

module_init(vmwgfx_init);
module_exit(vmwgfx_exit);

MODULE_AUTHOR("VMware Inc. and others");
MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device");
MODULE_LICENSE("GPL and additional rights");
MODULE_VERSION(__stringify(VMWGFX_DRIVER_MAJOR) "."
	       __stringify(VMWGFX_DRIVER_MINOR) "."
	       __stringify(VMWGFX_DRIVER_PATCHLEVEL) "."
	       "0");