/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include <linux/module.h>

#include "drmP.h"
#include "vmwgfx_drv.h"
#include "ttm/ttm_placement.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_object.h"
#include "ttm/ttm_module.h"

#define VMWGFX_DRIVER_NAME "vmwgfx"
#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
#define VMWGFX_CHIP_SVGAII 0
#define VMW_FB_RESERVATION 0

#define VMW_MIN_INITIAL_WIDTH 800
#define VMW_MIN_INITIAL_HEIGHT 600


/**
 * Fully encoded drm commands. Might move to vmw_drm.h
 */

#define DRM_IOCTL_VMW_GET_PARAM \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM, \
                 struct drm_vmw_getparam_arg)
#define DRM_IOCTL_VMW_ALLOC_DMABUF \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF, \
                 union drm_vmw_alloc_dmabuf_arg)
#define DRM_IOCTL_VMW_UNREF_DMABUF \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_DMABUF, \
                struct drm_vmw_unref_dmabuf_arg)
#define DRM_IOCTL_VMW_CURSOR_BYPASS \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CURSOR_BYPASS, \
                struct drm_vmw_cursor_bypass_arg)

#define DRM_IOCTL_VMW_CONTROL_STREAM \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CONTROL_STREAM, \
                struct drm_vmw_control_stream_arg)
#define DRM_IOCTL_VMW_CLAIM_STREAM \
        DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CLAIM_STREAM, \
                struct drm_vmw_stream_arg)
#define DRM_IOCTL_VMW_UNREF_STREAM \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_STREAM, \
                struct drm_vmw_stream_arg)

#define DRM_IOCTL_VMW_CREATE_CONTEXT \
        DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CREATE_CONTEXT, \
                struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_UNREF_CONTEXT \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_CONTEXT, \
                struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_CREATE_SURFACE \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SURFACE, \
                 union drm_vmw_surface_create_arg)
#define DRM_IOCTL_VMW_UNREF_SURFACE \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SURFACE, \
                struct drm_vmw_surface_arg)
#define DRM_IOCTL_VMW_REF_SURFACE \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_REF_SURFACE, \
                 union drm_vmw_surface_reference_arg)
#define DRM_IOCTL_VMW_EXECBUF \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF, \
                struct drm_vmw_execbuf_arg)
#define DRM_IOCTL_VMW_GET_3D_CAP \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_GET_3D_CAP, \
                struct drm_vmw_get_3d_cap_arg)
#define DRM_IOCTL_VMW_FENCE_WAIT \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT, \
                 struct drm_vmw_fence_wait_arg)
#define DRM_IOCTL_VMW_FENCE_SIGNALED \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_SIGNALED, \
                 struct drm_vmw_fence_signaled_arg)
#define DRM_IOCTL_VMW_FENCE_UNREF \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_UNREF, \
                struct drm_vmw_fence_arg)
#define DRM_IOCTL_VMW_FENCE_EVENT \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_EVENT, \
                struct drm_vmw_fence_event_arg)
#define DRM_IOCTL_VMW_PRESENT \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT, \
                struct drm_vmw_present_arg)
#define DRM_IOCTL_VMW_PRESENT_READBACK \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT_READBACK, \
                struct drm_vmw_present_readback_arg)
#define DRM_IOCTL_VMW_UPDATE_LAYOUT \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT, \
                struct drm_vmw_update_layout_arg)

/**
 * The core DRM version of this macro doesn't account for
 * DRM_COMMAND_BASE.
 */

#define VMW_IOCTL_DEF(ioctl, func, flags) \
        [DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_##ioctl, flags, func, DRM_IOCTL_##ioctl}
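
/*
 * Illustrative note: VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl, flags)
 * places its descriptor at index
 * DRM_IOCTL_NR(DRM_IOCTL_VMW_GET_PARAM) - DRM_COMMAND_BASE of vmw_ioctls[]
 * and records the fully encoded DRM_IOCTL_VMW_GET_PARAM in cmd_drv, which
 * vmw_unlocked_ioctl() later checks against the command passed by userspace.
 */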

/**
 * Ioctl definitions.
 */

static struct drm_ioctl_desc vmw_ioctls[] = {
        VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
                      vmw_kms_cursor_bypass_ioctl,
                      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),

        VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl,
                      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
                      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
                      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),

        VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_FENCE_SIGNALED,
                      vmw_fence_obj_signaled_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_FENCE_EVENT,
                      vmw_fence_event_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),

        /* These ioctls allow direct access to the framebuffers; mark them master only. */
        VMW_IOCTL_DEF(VMW_PRESENT, vmw_present_ioctl,
                      DRM_MASTER | DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_PRESENT_READBACK,
                      vmw_present_readback_ioctl,
                      DRM_MASTER | DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT,
                      vmw_kms_update_layout_ioctl,
                      DRM_MASTER | DRM_UNLOCKED),
};

static struct pci_device_id vmw_pci_id_list[] = {
        {0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII},
        {0, 0, 0}
};
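/* Vendor 0x15ad is the VMware PCI vendor ID; device 0x0405 is the SVGA II
 * adapter this driver binds to. */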

static int enable_fbdev;

static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
static void vmw_master_init(struct vmw_master *);
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
                              void *ptr);

MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
module_param_named(enable_fbdev, enable_fbdev, int, 0600);
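/* Usage note (illustrative): fbdev emulation is off by default (enable_fbdev
 * is zero-initialized); loading with e.g. "modprobe vmwgfx enable_fbdev=1"
 * turns it on, and the 0600 permission also exposes the knob under
 * /sys/module/vmwgfx/parameters/. */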

static void vmw_print_capabilities(uint32_t capabilities)
{
        DRM_INFO("Capabilities:\n");
        if (capabilities & SVGA_CAP_RECT_COPY)
                DRM_INFO(" Rect copy.\n");
        if (capabilities & SVGA_CAP_CURSOR)
                DRM_INFO(" Cursor.\n");
        if (capabilities & SVGA_CAP_CURSOR_BYPASS)
                DRM_INFO(" Cursor bypass.\n");
        if (capabilities & SVGA_CAP_CURSOR_BYPASS_2)
                DRM_INFO(" Cursor bypass 2.\n");
        if (capabilities & SVGA_CAP_8BIT_EMULATION)
                DRM_INFO(" 8bit emulation.\n");
        if (capabilities & SVGA_CAP_ALPHA_CURSOR)
                DRM_INFO(" Alpha cursor.\n");
        if (capabilities & SVGA_CAP_3D)
                DRM_INFO(" 3D.\n");
        if (capabilities & SVGA_CAP_EXTENDED_FIFO)
                DRM_INFO(" Extended Fifo.\n");
        if (capabilities & SVGA_CAP_MULTIMON)
                DRM_INFO(" Multimon.\n");
        if (capabilities & SVGA_CAP_PITCHLOCK)
                DRM_INFO(" Pitchlock.\n");
        if (capabilities & SVGA_CAP_IRQMASK)
                DRM_INFO(" Irq mask.\n");
        if (capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)
                DRM_INFO(" Display Topology.\n");
        if (capabilities & SVGA_CAP_GMR)
                DRM_INFO(" GMR.\n");
        if (capabilities & SVGA_CAP_TRACES)
                DRM_INFO(" Traces.\n");
        if (capabilities & SVGA_CAP_GMR2)
                DRM_INFO(" GMR2.\n");
        if (capabilities & SVGA_CAP_SCREEN_OBJECT_2)
                DRM_INFO(" Screen Object 2.\n");
}


/**
 * vmw_dummy_query_bo_prepare - Initialize a query result structure at
 * the start of a buffer object.
 *
 * @dev_priv: The device private structure.
 *
 * This function will idle the buffer using an uninterruptible wait, then
 * map the first page and initialize a pending occlusion query result
 * structure. Finally it will unmap the buffer.
 *
 * TODO: Since we're only mapping a single page, we should optimize the map
 * to use kmap_atomic / iomap_atomic.
 */
static void vmw_dummy_query_bo_prepare(struct vmw_private *dev_priv)
{
        struct ttm_bo_kmap_obj map;
        volatile SVGA3dQueryResult *result;
        bool dummy;
        int ret;
        struct ttm_bo_device *bdev = &dev_priv->bdev;
        struct ttm_buffer_object *bo = dev_priv->dummy_query_bo;

        ttm_bo_reserve(bo, false, false, false, 0);
        spin_lock(&bdev->fence_lock);
        ret = ttm_bo_wait(bo, false, false, false);
        spin_unlock(&bdev->fence_lock);
        if (unlikely(ret != 0))
                (void) vmw_fallback_wait(dev_priv, false, true, 0, false,
                                         10*HZ);

        ret = ttm_bo_kmap(bo, 0, 1, &map);
        if (likely(ret == 0)) {
                result = ttm_kmap_obj_virtual(&map, &dummy);
                result->totalSize = sizeof(*result);
                result->state = SVGA3D_QUERYSTATE_PENDING;
                result->result32 = 0xff;
                ttm_bo_kunmap(&map);
        } else
                DRM_ERROR("Dummy query buffer map failed.\n");
        ttm_bo_unreserve(bo);
}


/**
 * vmw_dummy_query_bo_create - create a bo to hold a dummy query result
 *
 * @dev_priv: A device private structure.
 *
 * This function creates a small buffer object that holds the query
 * result for dummy queries emitted as query barriers.
 * No interruptible waits are done within this function.
 *
 * Returns an error if bo creation fails.
 */
static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
{
        return ttm_bo_create(&dev_priv->bdev,
                             PAGE_SIZE,
                             ttm_bo_type_device,
                             &vmw_vram_sys_placement,
                             0, 0, false, NULL,
                             &dev_priv->dummy_query_bo);
}


static int vmw_request_device(struct vmw_private *dev_priv)
{
        int ret;

        ret = vmw_fifo_init(dev_priv, &dev_priv->fifo);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Unable to initialize FIFO.\n");
                return ret;
        }
        vmw_fence_fifo_up(dev_priv->fman);
        ret = vmw_dummy_query_bo_create(dev_priv);
        if (unlikely(ret != 0))
                goto out_no_query_bo;
        vmw_dummy_query_bo_prepare(dev_priv);

        return 0;

out_no_query_bo:
        vmw_fence_fifo_down(dev_priv->fman);
        vmw_fifo_release(dev_priv, &dev_priv->fifo);
        return ret;
}

static void vmw_release_device(struct vmw_private *dev_priv)
{
        /*
         * Previous destructions should've released
         * the pinned bo.
         */

        BUG_ON(dev_priv->pinned_bo != NULL);

        ttm_bo_unref(&dev_priv->dummy_query_bo);
        vmw_fence_fifo_down(dev_priv->fman);
        vmw_fifo_release(dev_priv, &dev_priv->fifo);
}

/**
 * Increase the 3d resource refcount.
 * If the count was previously zero, initialize the fifo, switching to svga
 * mode. Note that the master holds a ref as well, and may request an
 * explicit switch to svga mode if fb is not running, using @unhide_svga.
 */
int vmw_3d_resource_inc(struct vmw_private *dev_priv,
                        bool unhide_svga)
{
        int ret = 0;

        mutex_lock(&dev_priv->release_mutex);
        if (unlikely(dev_priv->num_3d_resources++ == 0)) {
                ret = vmw_request_device(dev_priv);
                if (unlikely(ret != 0))
                        --dev_priv->num_3d_resources;
        } else if (unhide_svga) {
                mutex_lock(&dev_priv->hw_mutex);
                vmw_write(dev_priv, SVGA_REG_ENABLE,
                          vmw_read(dev_priv, SVGA_REG_ENABLE) &
                          ~SVGA_REG_ENABLE_HIDE);
                mutex_unlock(&dev_priv->hw_mutex);
        }

        mutex_unlock(&dev_priv->release_mutex);
        return ret;
}

/**
 * Decrease the 3d resource refcount.
 * If the count reaches zero, disable the fifo, switching to vga mode.
 * Note that the master holds a refcount as well, and may request an
 * explicit switch to vga mode when it releases its refcount to account
 * for the situation of an X server vt switch to VGA with 3d resources
 * active.
 */
void vmw_3d_resource_dec(struct vmw_private *dev_priv,
                         bool hide_svga)
{
        int32_t n3d;

        mutex_lock(&dev_priv->release_mutex);
        if (unlikely(--dev_priv->num_3d_resources == 0))
                vmw_release_device(dev_priv);
        else if (hide_svga) {
                mutex_lock(&dev_priv->hw_mutex);
                vmw_write(dev_priv, SVGA_REG_ENABLE,
                          vmw_read(dev_priv, SVGA_REG_ENABLE) |
                          SVGA_REG_ENABLE_HIDE);
                mutex_unlock(&dev_priv->hw_mutex);
        }

        n3d = (int32_t) dev_priv->num_3d_resources;
        mutex_unlock(&dev_priv->release_mutex);

        BUG_ON(n3d < 0);
}
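
/*
 * Typical pairing (illustrative): code that needs the device in SVGA mode
 * brackets its work with vmw_3d_resource_inc()/vmw_3d_resource_dec(), so the
 * FIFO is only brought up while at least one 3D user holds a reference.
 */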

/**
 * Sets the initial_[width|height] fields on the given vmw_private.
 *
 * It does so by reading SVGA_REG_[WIDTH|HEIGHT] and clamping the values
 * up to VMW_MIN_INITIAL_[WIDTH|HEIGHT]. If the result exceeds
 * fb_max_[width|height], which indicates a host error, both fields fall
 * back to VMW_MIN_INITIAL_[WIDTH|HEIGHT].
 */
static void vmw_get_initial_size(struct vmw_private *dev_priv)
{
        uint32_t width;
        uint32_t height;

        width = vmw_read(dev_priv, SVGA_REG_WIDTH);
        height = vmw_read(dev_priv, SVGA_REG_HEIGHT);

        width = max_t(uint32_t, width, VMW_MIN_INITIAL_WIDTH);
        height = max_t(uint32_t, height, VMW_MIN_INITIAL_HEIGHT);

        if (width > dev_priv->fb_max_width ||
            height > dev_priv->fb_max_height) {

                /*
                 * This is a host error and shouldn't occur.
                 */

                width = VMW_MIN_INITIAL_WIDTH;
                height = VMW_MIN_INITIAL_HEIGHT;
        }

        dev_priv->initial_width = width;
        dev_priv->initial_height = height;
}
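
/*
 * Example (illustrative): a host-reported 640x480 is clamped up to 800x600,
 * while a reported size larger than fb_max_width/fb_max_height falls back to
 * VMW_MIN_INITIAL_WIDTH x VMW_MIN_INITIAL_HEIGHT.
 */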

static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
{
        struct vmw_private *dev_priv;
        int ret;
        uint32_t svga_id;

        dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
        if (unlikely(dev_priv == NULL)) {
                DRM_ERROR("Failed allocating a device private struct.\n");
                return -ENOMEM;
        }
        memset(dev_priv, 0, sizeof(*dev_priv));

        dev_priv->dev = dev;
        dev_priv->vmw_chipset = chipset;
        dev_priv->last_read_seqno = (uint32_t) -100;
        mutex_init(&dev_priv->hw_mutex);
        mutex_init(&dev_priv->cmdbuf_mutex);
        mutex_init(&dev_priv->release_mutex);
        rwlock_init(&dev_priv->resource_lock);
        idr_init(&dev_priv->context_idr);
        idr_init(&dev_priv->surface_idr);
        idr_init(&dev_priv->stream_idr);
        mutex_init(&dev_priv->init_mutex);
        init_waitqueue_head(&dev_priv->fence_queue);
        init_waitqueue_head(&dev_priv->fifo_queue);
        dev_priv->fence_queue_waiters = 0;
        atomic_set(&dev_priv->fifo_queue_waiters, 0);
        INIT_LIST_HEAD(&dev_priv->surface_lru);
        dev_priv->used_memory_size = 0;

        dev_priv->io_start = pci_resource_start(dev->pdev, 0);
        dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
        dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);

        dev_priv->enable_fb = enable_fbdev;

        mutex_lock(&dev_priv->hw_mutex);

        vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
        svga_id = vmw_read(dev_priv, SVGA_REG_ID);
        if (svga_id != SVGA_ID_2) {
                ret = -ENOSYS;
                DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id);
                mutex_unlock(&dev_priv->hw_mutex);
                goto out_err0;
        }

        dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);

        dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
        dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
        dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
        dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);

        vmw_get_initial_size(dev_priv);

        if (dev_priv->capabilities & SVGA_CAP_GMR) {
                dev_priv->max_gmr_descriptors =
                        vmw_read(dev_priv,
                                 SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH);
                dev_priv->max_gmr_ids =
                        vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
        }
        if (dev_priv->capabilities & SVGA_CAP_GMR2) {
                dev_priv->max_gmr_pages =
                        vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES);
                dev_priv->memory_size =
                        vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE);
                dev_priv->memory_size -= dev_priv->vram_size;
        } else {
                /*
                 * An arbitrary limit of 512MiB on surface
                 * memory. But all HWV8 hardware supports GMR2.
                 */
                dev_priv->memory_size = 512*1024*1024;
        }

        mutex_unlock(&dev_priv->hw_mutex);

        vmw_print_capabilities(dev_priv->capabilities);

        if (dev_priv->capabilities & SVGA_CAP_GMR) {
                DRM_INFO("Max GMR ids is %u\n",
                         (unsigned)dev_priv->max_gmr_ids);
                DRM_INFO("Max GMR descriptors is %u\n",
                         (unsigned)dev_priv->max_gmr_descriptors);
        }
        if (dev_priv->capabilities & SVGA_CAP_GMR2) {
                DRM_INFO("Max number of GMR pages is %u\n",
                         (unsigned)dev_priv->max_gmr_pages);
                DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n",
                         (unsigned)dev_priv->memory_size / 1024);
        }
        DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
                 dev_priv->vram_start, dev_priv->vram_size / 1024);
        DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
                 dev_priv->mmio_start, dev_priv->mmio_size / 1024);

        ret = vmw_ttm_global_init(dev_priv);
        if (unlikely(ret != 0))
                goto out_err0;


        vmw_master_init(&dev_priv->fbdev_master);
        ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
        dev_priv->active_master = &dev_priv->fbdev_master;


        ret = ttm_bo_device_init(&dev_priv->bdev,
                                 dev_priv->bo_global_ref.ref.object,
                                 &vmw_bo_driver, VMWGFX_FILE_PAGE_OFFSET,
                                 false);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed initializing TTM buffer object driver.\n");
                goto out_err1;
        }

        ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
                             (dev_priv->vram_size >> PAGE_SHIFT));
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed initializing memory manager for VRAM.\n");
                goto out_err2;
        }

        dev_priv->has_gmr = true;
        if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
                           dev_priv->max_gmr_ids) != 0) {
                DRM_INFO("No GMR memory available. "
                         "Graphics memory resources are very limited.\n");
                dev_priv->has_gmr = false;
        }

        dev_priv->mmio_mtrr = drm_mtrr_add(dev_priv->mmio_start,
                                           dev_priv->mmio_size, DRM_MTRR_WC);

        dev_priv->mmio_virt = ioremap_wc(dev_priv->mmio_start,
                                         dev_priv->mmio_size);

        if (unlikely(dev_priv->mmio_virt == NULL)) {
                ret = -ENOMEM;
                DRM_ERROR("Failed mapping MMIO.\n");
                goto out_err3;
        }

        /* Need mmio memory to check for fifo pitchlock cap. */
        if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
            !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) &&
            !vmw_fifo_have_pitchlock(dev_priv)) {
                ret = -ENOSYS;
                DRM_ERROR("Hardware has no pitchlock\n");
                goto out_err4;
        }

        dev_priv->tdev = ttm_object_device_init
                (dev_priv->mem_global_ref.object, 12);

        if (unlikely(dev_priv->tdev == NULL)) {
                DRM_ERROR("Unable to initialize TTM object management.\n");
                ret = -ENOMEM;
                goto out_err4;
        }

        dev->dev_private = dev_priv;

        ret = pci_request_regions(dev->pdev, "vmwgfx probe");
        dev_priv->stealth = (ret != 0);
        if (dev_priv->stealth) {
                /**
                 * Request at least the mmio PCI resource.
                 */

                DRM_INFO("It appears like vesafb is loaded. "
                         "Ignore above error if any.\n");
                ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe");
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Failed reserving the SVGA MMIO resource.\n");
                        goto out_no_device;
                }
        }

        dev_priv->fman = vmw_fence_manager_init(dev_priv);
        if (unlikely(dev_priv->fman == NULL))
                goto out_no_fman;

        /* Need to start the fifo to check if we can do screen objects */
        ret = vmw_3d_resource_inc(dev_priv, true);
        if (unlikely(ret != 0))
                goto out_no_fifo;
        vmw_kms_save_vga(dev_priv);

        /* Start kms and overlay systems, needs fifo. */
        ret = vmw_kms_init(dev_priv);
        if (unlikely(ret != 0))
                goto out_no_kms;
        vmw_overlay_init(dev_priv);

        /* 3D Depends on Screen Objects being used. */
        DRM_INFO("Detected %sdevice 3D availability.\n",
                 vmw_fifo_have_3d(dev_priv) ?
                 "" : "no ");

        /* We might be done with the fifo now */
        if (dev_priv->enable_fb) {
                vmw_fb_init(dev_priv);
        } else {
                vmw_kms_restore_vga(dev_priv);
                vmw_3d_resource_dec(dev_priv, true);
        }

        if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
                ret = drm_irq_install(dev);
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Failed installing irq: %d\n", ret);
                        goto out_no_irq;
                }
        }

        dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
        register_pm_notifier(&dev_priv->pm_nb);

        return 0;

out_no_irq:
        if (dev_priv->enable_fb)
                vmw_fb_close(dev_priv);
        vmw_overlay_close(dev_priv);
        vmw_kms_close(dev_priv);
out_no_kms:
        /* We still have a 3D resource reference held */
        if (dev_priv->enable_fb) {
                vmw_kms_restore_vga(dev_priv);
                vmw_3d_resource_dec(dev_priv, false);
        }
out_no_fifo:
        vmw_fence_manager_takedown(dev_priv->fman);
out_no_fman:
        if (dev_priv->stealth)
                pci_release_region(dev->pdev, 2);
        else
                pci_release_regions(dev->pdev);
out_no_device:
        ttm_object_device_release(&dev_priv->tdev);
out_err4:
        iounmap(dev_priv->mmio_virt);
out_err3:
        drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start,
                     dev_priv->mmio_size, DRM_MTRR_WC);
        if (dev_priv->has_gmr)
                (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
        (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
out_err2:
        (void)ttm_bo_device_release(&dev_priv->bdev);
out_err1:
        vmw_ttm_global_release(dev_priv);
out_err0:
        idr_destroy(&dev_priv->surface_idr);
        idr_destroy(&dev_priv->context_idr);
        idr_destroy(&dev_priv->stream_idr);
        kfree(dev_priv);
        return ret;
}

static int vmw_driver_unload(struct drm_device *dev)
{
        struct vmw_private *dev_priv = vmw_priv(dev);

        unregister_pm_notifier(&dev_priv->pm_nb);

        if (dev_priv->ctx.cmd_bounce)
                vfree(dev_priv->ctx.cmd_bounce);
        if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
                drm_irq_uninstall(dev_priv->dev);
        if (dev_priv->enable_fb) {
                vmw_fb_close(dev_priv);
                vmw_kms_restore_vga(dev_priv);
                vmw_3d_resource_dec(dev_priv, false);
        }
        vmw_kms_close(dev_priv);
        vmw_overlay_close(dev_priv);
        vmw_fence_manager_takedown(dev_priv->fman);
        if (dev_priv->stealth)
                pci_release_region(dev->pdev, 2);
        else
                pci_release_regions(dev->pdev);

        ttm_object_device_release(&dev_priv->tdev);
        iounmap(dev_priv->mmio_virt);
        drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start,
                     dev_priv->mmio_size, DRM_MTRR_WC);
        if (dev_priv->has_gmr)
                (void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
        (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
        (void)ttm_bo_device_release(&dev_priv->bdev);
        vmw_ttm_global_release(dev_priv);
        idr_destroy(&dev_priv->surface_idr);
        idr_destroy(&dev_priv->context_idr);
        idr_destroy(&dev_priv->stream_idr);

        kfree(dev_priv);

        return 0;
}

static void vmw_preclose(struct drm_device *dev,
                         struct drm_file *file_priv)
{
        struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
        struct vmw_private *dev_priv = vmw_priv(dev);

        vmw_event_fence_fpriv_gone(dev_priv->fman, &vmw_fp->fence_events);
}

static void vmw_postclose(struct drm_device *dev,
                          struct drm_file *file_priv)
{
        struct vmw_fpriv *vmw_fp;

        vmw_fp = vmw_fpriv(file_priv);
        ttm_object_file_release(&vmw_fp->tfile);
        if (vmw_fp->locked_master)
                drm_master_put(&vmw_fp->locked_master);
        kfree(vmw_fp);
}

static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_fpriv *vmw_fp;
        int ret = -ENOMEM;

        vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL);
        if (unlikely(vmw_fp == NULL))
                return ret;

        INIT_LIST_HEAD(&vmw_fp->fence_events);
        vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
        if (unlikely(vmw_fp->tfile == NULL))
                goto out_no_tfile;

        file_priv->driver_priv = vmw_fp;

        if (unlikely(dev_priv->bdev.dev_mapping == NULL))
                dev_priv->bdev.dev_mapping =
                        file_priv->filp->f_path.dentry->d_inode->i_mapping;

        return 0;

out_no_tfile:
        kfree(vmw_fp);
        return ret;
}

static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
                               unsigned long arg)
{
        struct drm_file *file_priv = filp->private_data;
        struct drm_device *dev = file_priv->minor->dev;
        unsigned int nr = DRM_IOCTL_NR(cmd);

        /*
         * Do extra checking on driver private ioctls.
         */

        if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
            && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
                struct drm_ioctl_desc *ioctl =
                        &vmw_ioctls[nr - DRM_COMMAND_BASE];

                if (unlikely(ioctl->cmd_drv != cmd)) {
                        DRM_ERROR("Invalid command format, ioctl %d\n",
                                  nr - DRM_COMMAND_BASE);
                        return -EINVAL;
                }
        }

        return drm_ioctl(filp, cmd, arg);
}
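
/*
 * The cmd_drv check above compares the fully encoded command, including the
 * size and direction bits baked into the DRM_IOCTL_VMW_* definitions, with
 * what userspace passed in, so calls made with a mismatched argument layout
 * are rejected before drm_ioctl() dispatches them.
 */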

static int vmw_firstopen(struct drm_device *dev)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        dev_priv->is_opened = true;

        return 0;
}

static void vmw_lastclose(struct drm_device *dev)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct drm_crtc *crtc;
        struct drm_mode_set set;
        int ret;

        /**
         * Do nothing on the lastclose call from drm_unload.
         */

        if (!dev_priv->is_opened)
                return;

        dev_priv->is_opened = false;
        set.x = 0;
        set.y = 0;
        set.fb = NULL;
        set.mode = NULL;
        set.connectors = NULL;
        set.num_connectors = 0;

        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                set.crtc = crtc;
                ret = crtc->funcs->set_config(&set);
                WARN_ON(ret != 0);
        }

}

static void vmw_master_init(struct vmw_master *vmaster)
{
        ttm_lock_init(&vmaster->lock);
        INIT_LIST_HEAD(&vmaster->fb_surf);
        mutex_init(&vmaster->fb_surf_mutex);
}

static int vmw_master_create(struct drm_device *dev,
                             struct drm_master *master)
{
        struct vmw_master *vmaster;

        vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL);
        if (unlikely(vmaster == NULL))
                return -ENOMEM;

        vmw_master_init(vmaster);
        ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
        master->driver_priv = vmaster;

        return 0;
}

static void vmw_master_destroy(struct drm_device *dev,
                               struct drm_master *master)
{
        struct vmw_master *vmaster = vmw_master(master);

        master->driver_priv = NULL;
        kfree(vmaster);
}


static int vmw_master_set(struct drm_device *dev,
                          struct drm_file *file_priv,
                          bool from_open)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
        struct vmw_master *active = dev_priv->active_master;
        struct vmw_master *vmaster = vmw_master(file_priv->master);
        int ret = 0;

        if (!dev_priv->enable_fb) {
                ret = vmw_3d_resource_inc(dev_priv, true);
                if (unlikely(ret != 0))
                        return ret;
                vmw_kms_save_vga(dev_priv);
                mutex_lock(&dev_priv->hw_mutex);
                vmw_write(dev_priv, SVGA_REG_TRACES, 0);
                mutex_unlock(&dev_priv->hw_mutex);
        }

        if (active) {
                BUG_ON(active != &dev_priv->fbdev_master);
                ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
                if (unlikely(ret != 0))
                        goto out_no_active_lock;

                ttm_lock_set_kill(&active->lock, true, SIGTERM);
                ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Unable to clean VRAM on "
                                  "master drop.\n");
                }

                dev_priv->active_master = NULL;
        }

        ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
        if (!from_open) {
                ttm_vt_unlock(&vmaster->lock);
                BUG_ON(vmw_fp->locked_master != file_priv->master);
                drm_master_put(&vmw_fp->locked_master);
        }

        dev_priv->active_master = vmaster;

        return 0;

out_no_active_lock:
        if (!dev_priv->enable_fb) {
                mutex_lock(&dev_priv->hw_mutex);
                vmw_write(dev_priv, SVGA_REG_TRACES, 1);
                mutex_unlock(&dev_priv->hw_mutex);
                vmw_kms_restore_vga(dev_priv);
                vmw_3d_resource_dec(dev_priv, true);
        }
        return ret;
}

static void vmw_master_drop(struct drm_device *dev,
                            struct drm_file *file_priv,
                            bool from_release)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
        struct vmw_master *vmaster = vmw_master(file_priv->master);
        int ret;

        /**
         * Make sure the master doesn't disappear while we have
         * it locked.
         */

        vmw_fp->locked_master = drm_master_get(file_priv->master);
        ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
        vmw_execbuf_release_pinned_bo(dev_priv, false, 0);

        if (unlikely((ret != 0))) {
                DRM_ERROR("Unable to lock TTM at VT switch.\n");
                drm_master_put(&vmw_fp->locked_master);
        }

        ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);

        if (!dev_priv->enable_fb) {
                ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
                if (unlikely(ret != 0))
                        DRM_ERROR("Unable to clean VRAM on master drop.\n");
                mutex_lock(&dev_priv->hw_mutex);
                vmw_write(dev_priv, SVGA_REG_TRACES, 1);
                mutex_unlock(&dev_priv->hw_mutex);
                vmw_kms_restore_vga(dev_priv);
                vmw_3d_resource_dec(dev_priv, true);
        }

        dev_priv->active_master = &dev_priv->fbdev_master;
        ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
        ttm_vt_unlock(&dev_priv->fbdev_master.lock);

        if (dev_priv->enable_fb)
                vmw_fb_on(dev_priv);
}


static void vmw_remove(struct pci_dev *pdev)
{
        struct drm_device *dev = pci_get_drvdata(pdev);

        drm_put_dev(dev);
}

static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
                              void *ptr)
{
        struct vmw_private *dev_priv =
                container_of(nb, struct vmw_private, pm_nb);
        struct vmw_master *vmaster = dev_priv->active_master;

        switch (val) {
        case PM_HIBERNATION_PREPARE:
        case PM_SUSPEND_PREPARE:
                ttm_suspend_lock(&vmaster->lock);

                /**
                 * This empties VRAM and unbinds all GMR bindings.
                 * Buffer contents are moved to swappable memory.
                 */
                vmw_execbuf_release_pinned_bo(dev_priv, false, 0);
                ttm_bo_swapout_all(&dev_priv->bdev);

                break;
        case PM_POST_HIBERNATION:
        case PM_POST_SUSPEND:
        case PM_POST_RESTORE:
                ttm_suspend_unlock(&vmaster->lock);

                break;
        case PM_RESTORE_PREPARE:
                break;
        default:
                break;
        }
        return 0;
}

/**
 * These might not be needed with the virtual SVGA device.
 */

static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct drm_device *dev = pci_get_drvdata(pdev);
        struct vmw_private *dev_priv = vmw_priv(dev);

        if (dev_priv->num_3d_resources != 0) {
                DRM_INFO("Can't suspend or hibernate "
                         "while 3D resources are active.\n");
                return -EBUSY;
        }

        pci_save_state(pdev);
        pci_disable_device(pdev);
        pci_set_power_state(pdev, PCI_D3hot);
        return 0;
}

static int vmw_pci_resume(struct pci_dev *pdev)
{
        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);
        return pci_enable_device(pdev);
}

static int vmw_pm_suspend(struct device *kdev)
{
        struct pci_dev *pdev = to_pci_dev(kdev);
        struct pm_message dummy;

        dummy.event = 0;

        return vmw_pci_suspend(pdev, dummy);
}

static int vmw_pm_resume(struct device *kdev)
{
        struct pci_dev *pdev = to_pci_dev(kdev);

        return vmw_pci_resume(pdev);
}

static int vmw_pm_prepare(struct device *kdev)
{
        struct pci_dev *pdev = to_pci_dev(kdev);
        struct drm_device *dev = pci_get_drvdata(pdev);
        struct vmw_private *dev_priv = vmw_priv(dev);

        /**
         * Release 3d reference held by fbdev and potentially
         * stop fifo.
         */
        dev_priv->suspended = true;
        if (dev_priv->enable_fb)
                vmw_3d_resource_dec(dev_priv, true);

        if (dev_priv->num_3d_resources != 0) {

                DRM_INFO("Can't suspend or hibernate "
                         "while 3D resources are active.\n");

                if (dev_priv->enable_fb)
                        vmw_3d_resource_inc(dev_priv, true);
                dev_priv->suspended = false;
                return -EBUSY;
        }

        return 0;
}

static void vmw_pm_complete(struct device *kdev)
{
        struct pci_dev *pdev = to_pci_dev(kdev);
        struct drm_device *dev = pci_get_drvdata(pdev);
        struct vmw_private *dev_priv = vmw_priv(dev);

        /**
         * Reclaim 3d reference held by fbdev and potentially
         * start fifo.
         */
        if (dev_priv->enable_fb)
                vmw_3d_resource_inc(dev_priv, false);

        dev_priv->suspended = false;
}

static const struct dev_pm_ops vmw_pm_ops = {
        .prepare = vmw_pm_prepare,
        .complete = vmw_pm_complete,
        .suspend = vmw_pm_suspend,
        .resume = vmw_pm_resume,
};
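
/*
 * Note on the split: prepare/complete drop and reclaim the fbdev 3D resource
 * reference around the suspend cycle, while suspend/resume only perform the
 * standard PCI save/restore and refuse to suspend while 3D resources remain
 * active.
 */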

static const struct file_operations vmwgfx_driver_fops = {
        .owner = THIS_MODULE,
        .open = drm_open,
        .release = drm_release,
        .unlocked_ioctl = vmw_unlocked_ioctl,
        .mmap = vmw_mmap,
        .poll = vmw_fops_poll,
        .read = vmw_fops_read,
        .fasync = drm_fasync,
#if defined(CONFIG_COMPAT)
        .compat_ioctl = drm_compat_ioctl,
#endif
        .llseek = noop_llseek,
};

static struct drm_driver driver = {
        .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
        DRIVER_MODESET,
        .load = vmw_driver_load,
        .unload = vmw_driver_unload,
        .firstopen = vmw_firstopen,
        .lastclose = vmw_lastclose,
        .irq_preinstall = vmw_irq_preinstall,
        .irq_postinstall = vmw_irq_postinstall,
        .irq_uninstall = vmw_irq_uninstall,
        .irq_handler = vmw_irq_handler,
        .get_vblank_counter = vmw_get_vblank_counter,
        .enable_vblank = vmw_enable_vblank,
        .disable_vblank = vmw_disable_vblank,
        .reclaim_buffers_locked = NULL,
        .ioctls = vmw_ioctls,
        .num_ioctls = DRM_ARRAY_SIZE(vmw_ioctls),
        .dma_quiescent = NULL, /*vmw_dma_quiescent, */
        .master_create = vmw_master_create,
        .master_destroy = vmw_master_destroy,
        .master_set = vmw_master_set,
        .master_drop = vmw_master_drop,
        .open = vmw_driver_open,
        .preclose = vmw_preclose,
        .postclose = vmw_postclose,
        .fops = &vmwgfx_driver_fops,
        .name = VMWGFX_DRIVER_NAME,
        .desc = VMWGFX_DRIVER_DESC,
        .date = VMWGFX_DRIVER_DATE,
        .major = VMWGFX_DRIVER_MAJOR,
        .minor = VMWGFX_DRIVER_MINOR,
        .patchlevel = VMWGFX_DRIVER_PATCHLEVEL
};

static struct pci_driver vmw_pci_driver = {
        .name = VMWGFX_DRIVER_NAME,
        .id_table = vmw_pci_id_list,
        .probe = vmw_probe,
        .remove = vmw_remove,
        .driver = {
                .pm = &vmw_pm_ops
        }
};

static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        return drm_get_pci_dev(pdev, ent, &driver);
}

static int __init vmwgfx_init(void)
{
        int ret;
        ret = drm_pci_init(&driver, &vmw_pci_driver);
        if (ret)
                DRM_ERROR("Failed initializing DRM.\n");
        return ret;
}

static void __exit vmwgfx_exit(void)
{
        drm_pci_exit(&driver, &vmw_pci_driver);
}

module_init(vmwgfx_init);
module_exit(vmwgfx_exit);

MODULE_AUTHOR("VMware Inc. and others");
MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device");
MODULE_LICENSE("GPL and additional rights");
MODULE_VERSION(__stringify(VMWGFX_DRIVER_MAJOR) "."
               __stringify(VMWGFX_DRIVER_MINOR) "."
               __stringify(VMWGFX_DRIVER_PATCHLEVEL) "."
               "0");