/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "drmP.h"
#include "vmwgfx_drv.h"
#include "ttm/ttm_placement.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_object.h"
#include "ttm/ttm_module.h"

#define VMWGFX_DRIVER_NAME "vmwgfx"
#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
#define VMWGFX_CHIP_SVGAII 0
#define VMW_FB_RESERVATION 0

/**
 * Fully encoded drm commands. Might move to vmw_drm.h
 */

#define DRM_IOCTL_VMW_GET_PARAM \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM, \
                 struct drm_vmw_getparam_arg)
#define DRM_IOCTL_VMW_ALLOC_DMABUF \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF, \
                 union drm_vmw_alloc_dmabuf_arg)
#define DRM_IOCTL_VMW_UNREF_DMABUF \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_DMABUF, \
                struct drm_vmw_unref_dmabuf_arg)
#define DRM_IOCTL_VMW_CURSOR_BYPASS \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CURSOR_BYPASS, \
                struct drm_vmw_cursor_bypass_arg)

#define DRM_IOCTL_VMW_CONTROL_STREAM \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CONTROL_STREAM, \
                struct drm_vmw_control_stream_arg)
#define DRM_IOCTL_VMW_CLAIM_STREAM \
        DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CLAIM_STREAM, \
                struct drm_vmw_stream_arg)
#define DRM_IOCTL_VMW_UNREF_STREAM \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_STREAM, \
                struct drm_vmw_stream_arg)

#define DRM_IOCTL_VMW_CREATE_CONTEXT \
        DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CREATE_CONTEXT, \
                struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_UNREF_CONTEXT \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_CONTEXT, \
                struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_CREATE_SURFACE \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SURFACE, \
                 union drm_vmw_surface_create_arg)
#define DRM_IOCTL_VMW_UNREF_SURFACE \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SURFACE, \
                struct drm_vmw_surface_arg)
#define DRM_IOCTL_VMW_REF_SURFACE \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_REF_SURFACE, \
                 union drm_vmw_surface_reference_arg)
#define DRM_IOCTL_VMW_EXECBUF \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF, \
                struct drm_vmw_execbuf_arg)
#define DRM_IOCTL_VMW_GET_3D_CAP \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_GET_3D_CAP, \
                struct drm_vmw_get_3d_cap_arg)
#define DRM_IOCTL_VMW_FENCE_WAIT \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT, \
                 struct drm_vmw_fence_wait_arg)
#define DRM_IOCTL_VMW_FENCE_SIGNALED \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_SIGNALED, \
                 struct drm_vmw_fence_signaled_arg)
#define DRM_IOCTL_VMW_FENCE_UNREF \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_UNREF, \
                struct drm_vmw_fence_arg)
#define DRM_IOCTL_VMW_FENCE_EVENT \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_EVENT, \
                struct drm_vmw_fence_event_arg)
#define DRM_IOCTL_VMW_PRESENT \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT, \
                struct drm_vmw_present_arg)
#define DRM_IOCTL_VMW_PRESENT_READBACK \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT_READBACK, \
                struct drm_vmw_present_readback_arg)
#define DRM_IOCTL_VMW_UPDATE_LAYOUT \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT, \
                struct drm_vmw_update_layout_arg)

/**
 * The core DRM version of this macro doesn't account for
 * DRM_COMMAND_BASE.
 */

#define VMW_IOCTL_DEF(ioctl, func, flags) \
        [DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_##ioctl, flags, func, DRM_IOCTL_##ioctl}

/**
 * Ioctl definitions.
 */

static struct drm_ioctl_desc vmw_ioctls[] = {
        VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
                      vmw_kms_cursor_bypass_ioctl,
                      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),

        VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl,
                      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
                      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
                      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),

        VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_FENCE_SIGNALED,
                      vmw_fence_obj_signaled_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_FENCE_EVENT,
                      vmw_fence_event_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),

        /* These allow direct access to the framebuffers; mark as master only. */
        VMW_IOCTL_DEF(VMW_PRESENT, vmw_present_ioctl,
                      DRM_MASTER | DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_PRESENT_READBACK,
                      vmw_present_readback_ioctl,
                      DRM_MASTER | DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT,
                      vmw_kms_update_layout_ioctl,
                      DRM_MASTER | DRM_UNLOCKED),
};

static struct pci_device_id vmw_pci_id_list[] = {
        {0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII},
        {0, 0, 0}
};

static int enable_fbdev;

static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
static void vmw_master_init(struct vmw_master *);
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
                              void *ptr);

MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
module_param_named(enable_fbdev, enable_fbdev, int, 0600);

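/**
 * vmw_print_capabilities - Log the SVGA device capability bits.
 *
 * @capabilities: The capability mask read from SVGA_REG_CAPABILITIES.
 *
 * Emits one DRM_INFO line for each capability the host device advertises.
 */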
static void vmw_print_capabilities(uint32_t capabilities)
{
        DRM_INFO("Capabilities:\n");
        if (capabilities & SVGA_CAP_RECT_COPY)
                DRM_INFO(" Rect copy.\n");
        if (capabilities & SVGA_CAP_CURSOR)
                DRM_INFO(" Cursor.\n");
        if (capabilities & SVGA_CAP_CURSOR_BYPASS)
                DRM_INFO(" Cursor bypass.\n");
        if (capabilities & SVGA_CAP_CURSOR_BYPASS_2)
                DRM_INFO(" Cursor bypass 2.\n");
        if (capabilities & SVGA_CAP_8BIT_EMULATION)
                DRM_INFO(" 8bit emulation.\n");
        if (capabilities & SVGA_CAP_ALPHA_CURSOR)
                DRM_INFO(" Alpha cursor.\n");
        if (capabilities & SVGA_CAP_3D)
                DRM_INFO(" 3D.\n");
        if (capabilities & SVGA_CAP_EXTENDED_FIFO)
                DRM_INFO(" Extended Fifo.\n");
        if (capabilities & SVGA_CAP_MULTIMON)
                DRM_INFO(" Multimon.\n");
        if (capabilities & SVGA_CAP_PITCHLOCK)
                DRM_INFO(" Pitchlock.\n");
        if (capabilities & SVGA_CAP_IRQMASK)
                DRM_INFO(" Irq mask.\n");
        if (capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)
                DRM_INFO(" Display Topology.\n");
        if (capabilities & SVGA_CAP_GMR)
                DRM_INFO(" GMR.\n");
        if (capabilities & SVGA_CAP_TRACES)
                DRM_INFO(" Traces.\n");
        if (capabilities & SVGA_CAP_GMR2)
                DRM_INFO(" GMR2.\n");
        if (capabilities & SVGA_CAP_SCREEN_OBJECT_2)
                DRM_INFO(" Screen Object 2.\n");
}

/**
 * vmw_dummy_query_bo_prepare - Initialize a query result structure at
 * the start of a buffer object.
 *
 * @dev_priv: The device private structure.
 *
 * This function will idle the buffer using an uninterruptible wait, then
 * map the first page and initialize a pending occlusion query result
 * structure. Finally, it will unmap the buffer.
 *
 * TODO: Since we're only mapping a single page, we should optimize the map
 * to use kmap_atomic / iomap_atomic.
 */
static void vmw_dummy_query_bo_prepare(struct vmw_private *dev_priv)
{
        struct ttm_bo_kmap_obj map;
        volatile SVGA3dQueryResult *result;
        bool dummy;
        int ret;
        struct ttm_bo_device *bdev = &dev_priv->bdev;
        struct ttm_buffer_object *bo = dev_priv->dummy_query_bo;

        ttm_bo_reserve(bo, false, false, false, 0);
        spin_lock(&bdev->fence_lock);
        ret = ttm_bo_wait(bo, false, false, false);
        spin_unlock(&bdev->fence_lock);
        if (unlikely(ret != 0))
                (void) vmw_fallback_wait(dev_priv, false, true, 0, false,
                                         10*HZ);

        ret = ttm_bo_kmap(bo, 0, 1, &map);
        if (likely(ret == 0)) {
                result = ttm_kmap_obj_virtual(&map, &dummy);
                result->totalSize = sizeof(*result);
                result->state = SVGA3D_QUERYSTATE_PENDING;
                result->result32 = 0xff;
                ttm_bo_kunmap(&map);
        } else
                DRM_ERROR("Dummy query buffer map failed.\n");
        ttm_bo_unreserve(bo);
}

/**
 * vmw_dummy_query_bo_create - create a bo to hold a dummy query result
 *
 * @dev_priv: A device private structure.
 *
 * This function creates a small buffer object that holds the query
 * result for dummy queries emitted as query barriers.
 * No interruptible waits are done within this function.
 *
 * Returns an error if bo creation fails.
 */
static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
{
        return ttm_bo_create(&dev_priv->bdev,
                             PAGE_SIZE,
                             ttm_bo_type_device,
                             &vmw_vram_sys_placement,
                             0, 0, false, NULL,
                             &dev_priv->dummy_query_bo);
}

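/**
 * vmw_request_device - Bring the device up for 3D operation.
 *
 * @dev_priv: The device private structure.
 *
 * Initializes the FIFO, enables fence processing and creates and prepares
 * the dummy query buffer object, undoing the first two steps if the
 * buffer object cannot be created.
 */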
static int vmw_request_device(struct vmw_private *dev_priv)
{
        int ret;

        ret = vmw_fifo_init(dev_priv, &dev_priv->fifo);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Unable to initialize FIFO.\n");
                return ret;
        }
        vmw_fence_fifo_up(dev_priv->fman);
        ret = vmw_dummy_query_bo_create(dev_priv);
        if (unlikely(ret != 0))
                goto out_no_query_bo;
        vmw_dummy_query_bo_prepare(dev_priv);

        return 0;

out_no_query_bo:
        vmw_fence_fifo_down(dev_priv->fman);
        vmw_fifo_release(dev_priv, &dev_priv->fifo);
        return ret;
}

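/**
 * vmw_release_device - Release the resources acquired in vmw_request_device().
 *
 * @dev_priv: The device private structure.
 */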
static void vmw_release_device(struct vmw_private *dev_priv)
{
        /*
         * Previous destructions should have released
         * the pinned bo.
         */

        BUG_ON(dev_priv->pinned_bo != NULL);

        ttm_bo_unref(&dev_priv->dummy_query_bo);
        vmw_fence_fifo_down(dev_priv->fman);
        vmw_fifo_release(dev_priv, &dev_priv->fifo);
}

/**
 * Increase the 3d resource refcount.
 * If the count was previously zero, initialize the fifo, switching to svga
 * mode. Note that the master holds a ref as well, and may request an
 * explicit switch to svga mode if fb is not running, using @unhide_svga.
 */
int vmw_3d_resource_inc(struct vmw_private *dev_priv,
                        bool unhide_svga)
{
        int ret = 0;

        mutex_lock(&dev_priv->release_mutex);
        if (unlikely(dev_priv->num_3d_resources++ == 0)) {
                ret = vmw_request_device(dev_priv);
                if (unlikely(ret != 0))
                        --dev_priv->num_3d_resources;
        } else if (unhide_svga) {
                mutex_lock(&dev_priv->hw_mutex);
                vmw_write(dev_priv, SVGA_REG_ENABLE,
                          vmw_read(dev_priv, SVGA_REG_ENABLE) &
                          ~SVGA_REG_ENABLE_HIDE);
                mutex_unlock(&dev_priv->hw_mutex);
        }

        mutex_unlock(&dev_priv->release_mutex);
        return ret;
}

/**
 * Decrease the 3d resource refcount.
 * If the count reaches zero, disable the fifo, switching to vga mode.
 * Note that the master holds a refcount as well, and may request an
 * explicit switch to vga mode when it releases its refcount to account
 * for the situation of an X server vt switch to VGA with 3d resources
 * active.
 */
void vmw_3d_resource_dec(struct vmw_private *dev_priv,
                         bool hide_svga)
{
        int32_t n3d;

        mutex_lock(&dev_priv->release_mutex);
        if (unlikely(--dev_priv->num_3d_resources == 0))
                vmw_release_device(dev_priv);
        else if (hide_svga) {
                mutex_lock(&dev_priv->hw_mutex);
                vmw_write(dev_priv, SVGA_REG_ENABLE,
                          vmw_read(dev_priv, SVGA_REG_ENABLE) |
                          SVGA_REG_ENABLE_HIDE);
                mutex_unlock(&dev_priv->hw_mutex);
        }

        n3d = (int32_t) dev_priv->num_3d_resources;
        mutex_unlock(&dev_priv->release_mutex);

        BUG_ON(n3d < 0);
}

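/**
 * vmw_driver_load - Set up the driver at device load time.
 *
 * @dev: Pointer to the drm device.
 * @chipset: Chip id from vmw_pci_id_list.
 *
 * Verifies the SVGA hardware version, reads the device capabilities,
 * and initializes TTM, the MMIO mapping, the fence manager, KMS, the
 * overlay system and, when enable_fbdev is set, the fbdev layer.
 */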
static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
{
        struct vmw_private *dev_priv;
        int ret;
        uint32_t svga_id;

        dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
        if (unlikely(dev_priv == NULL)) {
                DRM_ERROR("Failed allocating a device private struct.\n");
                return -ENOMEM;
        }
        memset(dev_priv, 0, sizeof(*dev_priv));

        dev_priv->dev = dev;
        dev_priv->vmw_chipset = chipset;
        dev_priv->last_read_seqno = (uint32_t) -100;
        mutex_init(&dev_priv->hw_mutex);
        mutex_init(&dev_priv->cmdbuf_mutex);
        mutex_init(&dev_priv->release_mutex);
        rwlock_init(&dev_priv->resource_lock);
        idr_init(&dev_priv->context_idr);
        idr_init(&dev_priv->surface_idr);
        idr_init(&dev_priv->stream_idr);
        mutex_init(&dev_priv->init_mutex);
        init_waitqueue_head(&dev_priv->fence_queue);
        init_waitqueue_head(&dev_priv->fifo_queue);
        dev_priv->fence_queue_waiters = 0;
        atomic_set(&dev_priv->fifo_queue_waiters, 0);
        INIT_LIST_HEAD(&dev_priv->surface_lru);
        dev_priv->used_memory_size = 0;

        dev_priv->io_start = pci_resource_start(dev->pdev, 0);
        dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
        dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);

        dev_priv->enable_fb = enable_fbdev;

        mutex_lock(&dev_priv->hw_mutex);

        vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
        svga_id = vmw_read(dev_priv, SVGA_REG_ID);
        if (svga_id != SVGA_ID_2) {
                ret = -ENOSYS;
                DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id);
                mutex_unlock(&dev_priv->hw_mutex);
                goto out_err0;
        }

        dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);

        dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
        dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
        dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
        dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);
        if (dev_priv->capabilities & SVGA_CAP_GMR) {
                dev_priv->max_gmr_descriptors =
                        vmw_read(dev_priv,
                                 SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH);
                dev_priv->max_gmr_ids =
                        vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
        }
        if (dev_priv->capabilities & SVGA_CAP_GMR2) {
                dev_priv->max_gmr_pages =
                        vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES);
                dev_priv->memory_size =
                        vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE);
                dev_priv->memory_size -= dev_priv->vram_size;
        } else {
                /*
                 * An arbitrary limit of 512MiB on surface
                 * memory. But all HWV8 hardware supports GMR2.
                 */
                dev_priv->memory_size = 512*1024*1024;
        }

        mutex_unlock(&dev_priv->hw_mutex);

        vmw_print_capabilities(dev_priv->capabilities);

        if (dev_priv->capabilities & SVGA_CAP_GMR) {
                DRM_INFO("Max GMR ids is %u\n",
                         (unsigned)dev_priv->max_gmr_ids);
                DRM_INFO("Max GMR descriptors is %u\n",
                         (unsigned)dev_priv->max_gmr_descriptors);
        }
        if (dev_priv->capabilities & SVGA_CAP_GMR2) {
                DRM_INFO("Max number of GMR pages is %u\n",
                         (unsigned)dev_priv->max_gmr_pages);
                DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n",
                         (unsigned)dev_priv->memory_size / 1024);
        }
        DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
                 dev_priv->vram_start, dev_priv->vram_size / 1024);
        DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
                 dev_priv->mmio_start, dev_priv->mmio_size / 1024);

        ret = vmw_ttm_global_init(dev_priv);
        if (unlikely(ret != 0))
                goto out_err0;

        vmw_master_init(&dev_priv->fbdev_master);
        ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
        dev_priv->active_master = &dev_priv->fbdev_master;

        ret = ttm_bo_device_init(&dev_priv->bdev,
                                 dev_priv->bo_global_ref.ref.object,
                                 &vmw_bo_driver, VMWGFX_FILE_PAGE_OFFSET,
                                 false);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed initializing TTM buffer object driver.\n");
                goto out_err1;
        }

        ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
                             (dev_priv->vram_size >> PAGE_SHIFT));
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed initializing memory manager for VRAM.\n");
                goto out_err2;
        }

        dev_priv->has_gmr = true;
        if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
                           dev_priv->max_gmr_ids) != 0) {
                DRM_INFO("No GMR memory available. "
                         "Graphics memory resources are very limited.\n");
                dev_priv->has_gmr = false;
        }

        dev_priv->mmio_mtrr = drm_mtrr_add(dev_priv->mmio_start,
                                           dev_priv->mmio_size, DRM_MTRR_WC);

        dev_priv->mmio_virt = ioremap_wc(dev_priv->mmio_start,
                                         dev_priv->mmio_size);

        if (unlikely(dev_priv->mmio_virt == NULL)) {
                ret = -ENOMEM;
                DRM_ERROR("Failed mapping MMIO.\n");
                goto out_err3;
        }

        /* Need mmio memory to check for fifo pitchlock cap. */
        if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
            !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) &&
            !vmw_fifo_have_pitchlock(dev_priv)) {
                ret = -ENOSYS;
                DRM_ERROR("Hardware has no pitchlock\n");
                goto out_err4;
        }

        dev_priv->tdev = ttm_object_device_init
                (dev_priv->mem_global_ref.object, 12);

        if (unlikely(dev_priv->tdev == NULL)) {
                DRM_ERROR("Unable to initialize TTM object management.\n");
                ret = -ENOMEM;
                goto out_err4;
        }

        dev->dev_private = dev_priv;

        ret = pci_request_regions(dev->pdev, "vmwgfx probe");
        dev_priv->stealth = (ret != 0);
        if (dev_priv->stealth) {
                /**
                 * Request at least the mmio PCI resource.
                 */

                DRM_INFO("It appears that vesafb is loaded. "
                         "Ignore the above error if any.\n");
                ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe");
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Failed reserving the SVGA MMIO resource.\n");
                        goto out_no_device;
                }
        }

        dev_priv->fman = vmw_fence_manager_init(dev_priv);
        if (unlikely(dev_priv->fman == NULL))
                goto out_no_fman;

        /* Need to start the fifo to check if we can do screen objects */
        ret = vmw_3d_resource_inc(dev_priv, true);
        if (unlikely(ret != 0))
                goto out_no_fifo;
        vmw_kms_save_vga(dev_priv);

        /* Start kms and overlay systems, needs fifo. */
        ret = vmw_kms_init(dev_priv);
        if (unlikely(ret != 0))
                goto out_no_kms;
        vmw_overlay_init(dev_priv);

        /* 3D depends on Screen Objects being used. */
        DRM_INFO("Detected %sdevice 3D availability.\n",
                 vmw_fifo_have_3d(dev_priv) ?
                 "" : "no ");

        /* We might be done with the fifo now */
        if (dev_priv->enable_fb) {
                vmw_fb_init(dev_priv);
        } else {
                vmw_kms_restore_vga(dev_priv);
                vmw_3d_resource_dec(dev_priv, true);
        }

        if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
                ret = drm_irq_install(dev);
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Failed installing irq: %d\n", ret);
                        goto out_no_irq;
                }
        }

        dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
        register_pm_notifier(&dev_priv->pm_nb);

        return 0;

out_no_irq:
        if (dev_priv->enable_fb)
                vmw_fb_close(dev_priv);
        vmw_overlay_close(dev_priv);
        vmw_kms_close(dev_priv);
out_no_kms:
        /* We still have a 3D resource reference held */
        if (dev_priv->enable_fb) {
                vmw_kms_restore_vga(dev_priv);
                vmw_3d_resource_dec(dev_priv, false);
        }
out_no_fifo:
        vmw_fence_manager_takedown(dev_priv->fman);
out_no_fman:
        if (dev_priv->stealth)
                pci_release_region(dev->pdev, 2);
        else
                pci_release_regions(dev->pdev);
out_no_device:
        ttm_object_device_release(&dev_priv->tdev);
out_err4:
        iounmap(dev_priv->mmio_virt);
out_err3:
        drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start,
                     dev_priv->mmio_size, DRM_MTRR_WC);
        if (dev_priv->has_gmr)
                (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
        (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
out_err2:
        (void)ttm_bo_device_release(&dev_priv->bdev);
out_err1:
        vmw_ttm_global_release(dev_priv);
out_err0:
        idr_destroy(&dev_priv->surface_idr);
        idr_destroy(&dev_priv->context_idr);
        idr_destroy(&dev_priv->stream_idr);
        kfree(dev_priv);
        return ret;
}

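/**
 * vmw_driver_unload - Tear down the driver at device unload time.
 *
 * @dev: Pointer to the drm device.
 *
 * Releases the resources acquired in vmw_driver_load() in roughly
 * reverse order.
 */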
static int vmw_driver_unload(struct drm_device *dev)
{
        struct vmw_private *dev_priv = vmw_priv(dev);

        unregister_pm_notifier(&dev_priv->pm_nb);

        if (dev_priv->ctx.cmd_bounce)
                vfree(dev_priv->ctx.cmd_bounce);
        if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
                drm_irq_uninstall(dev_priv->dev);
        if (dev_priv->enable_fb) {
                vmw_fb_close(dev_priv);
                vmw_kms_restore_vga(dev_priv);
                vmw_3d_resource_dec(dev_priv, false);
        }
        vmw_kms_close(dev_priv);
        vmw_overlay_close(dev_priv);
        vmw_fence_manager_takedown(dev_priv->fman);
        if (dev_priv->stealth)
                pci_release_region(dev->pdev, 2);
        else
                pci_release_regions(dev->pdev);

        ttm_object_device_release(&dev_priv->tdev);
        iounmap(dev_priv->mmio_virt);
        drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start,
                     dev_priv->mmio_size, DRM_MTRR_WC);
        if (dev_priv->has_gmr)
                (void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
        (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
        (void)ttm_bo_device_release(&dev_priv->bdev);
        vmw_ttm_global_release(dev_priv);
        idr_destroy(&dev_priv->surface_idr);
        idr_destroy(&dev_priv->context_idr);
        idr_destroy(&dev_priv->stream_idr);

        kfree(dev_priv);

        return 0;
}

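/**
 * vmw_postclose - Release per-file driver state when a file is closed.
 *
 * @dev: Pointer to the drm device.
 * @file_priv: The file being closed.
 */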
static void vmw_postclose(struct drm_device *dev,
                          struct drm_file *file_priv)
{
        struct vmw_fpriv *vmw_fp;

        vmw_fp = vmw_fpriv(file_priv);
        ttm_object_file_release(&vmw_fp->tfile);
        if (vmw_fp->locked_master)
                drm_master_put(&vmw_fp->locked_master);
        kfree(vmw_fp);
}

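/**
 * vmw_driver_open - Set up per-file driver state when a file is opened.
 *
 * @dev: Pointer to the drm device.
 * @file_priv: The file being opened.
 *
 * Allocates a struct vmw_fpriv with its TTM object file, and lazily sets
 * up the buffer object device address space mapping on the first open.
 */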
static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_fpriv *vmw_fp;
        int ret = -ENOMEM;

        vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL);
        if (unlikely(vmw_fp == NULL))
                return ret;

        vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
        if (unlikely(vmw_fp->tfile == NULL))
                goto out_no_tfile;

        file_priv->driver_priv = vmw_fp;

        if (unlikely(dev_priv->bdev.dev_mapping == NULL))
                dev_priv->bdev.dev_mapping =
                        file_priv->filp->f_path.dentry->d_inode->i_mapping;

        return 0;

out_no_tfile:
        kfree(vmw_fp);
        return ret;
}

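/**
 * vmw_unlocked_ioctl - Driver ioctl entry point.
 *
 * @filp: The file the ioctl arrived on.
 * @cmd: The fully encoded ioctl command.
 * @arg: The ioctl argument.
 *
 * Checks that a driver-private ioctl is called with the command encoding
 * it was registered with before handing it off to drm_ioctl().
 */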
static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
                               unsigned long arg)
{
        struct drm_file *file_priv = filp->private_data;
        struct drm_device *dev = file_priv->minor->dev;
        unsigned int nr = DRM_IOCTL_NR(cmd);

        /*
         * Do extra checking on driver private ioctls.
         */

        if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
            && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
                struct drm_ioctl_desc *ioctl =
                        &vmw_ioctls[nr - DRM_COMMAND_BASE];

                if (unlikely(ioctl->cmd_drv != cmd)) {
                        DRM_ERROR("Invalid command format, ioctl %d\n",
                                  nr - DRM_COMMAND_BASE);
                        return -EINVAL;
                }
        }

        return drm_ioctl(filp, cmd, arg);
}

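/**
 * vmw_firstopen - Flag the device as opened.
 *
 * @dev: Pointer to the drm device.
 *
 * The is_opened flag lets vmw_lastclose() ignore the lastclose call
 * issued from driver unload.
 */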
static int vmw_firstopen(struct drm_device *dev)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        dev_priv->is_opened = true;

        return 0;
}

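/**
 * vmw_lastclose - Reset the mode on all crtcs when the last file is closed.
 *
 * @dev: Pointer to the drm device.
 */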
static void vmw_lastclose(struct drm_device *dev)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct drm_crtc *crtc;
        struct drm_mode_set set;
        int ret;

        /**
         * Do nothing on the lastclose call from drm_unload.
         */

        if (!dev_priv->is_opened)
                return;

        dev_priv->is_opened = false;
        set.x = 0;
        set.y = 0;
        set.fb = NULL;
        set.mode = NULL;
        set.connectors = NULL;
        set.num_connectors = 0;

        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                set.crtc = crtc;
                ret = crtc->funcs->set_config(&set);
                WARN_ON(ret != 0);
        }
}

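/**
 * vmw_master_init - Initialize a struct vmw_master.
 *
 * @vmaster: The master structure to initialize.
 */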
static void vmw_master_init(struct vmw_master *vmaster)
{
        ttm_lock_init(&vmaster->lock);
        INIT_LIST_HEAD(&vmaster->fb_surf);
        mutex_init(&vmaster->fb_surf_mutex);
}

static int vmw_master_create(struct drm_device *dev,
                             struct drm_master *master)
{
        struct vmw_master *vmaster;

        vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL);
        if (unlikely(vmaster == NULL))
                return -ENOMEM;

        vmw_master_init(vmaster);
        ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
        master->driver_priv = vmaster;

        return 0;
}

static void vmw_master_destroy(struct drm_device *dev,
                               struct drm_master *master)
{
        struct vmw_master *vmaster = vmw_master(master);

        master->driver_priv = NULL;
        kfree(vmaster);
}

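/**
 * vmw_master_set - Make the master associated with @file_priv the active one.
 *
 * @dev: Pointer to the drm device.
 * @file_priv: The file whose master becomes active.
 * @from_open: True when called as part of a drm open.
 *
 * If fbdev is disabled, takes a 3D resource reference and stops register
 * traces; the previously active (fbdev) master is locked down and VRAM
 * is evicted before the new master takes over.
 */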
static int vmw_master_set(struct drm_device *dev,
                          struct drm_file *file_priv,
                          bool from_open)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
        struct vmw_master *active = dev_priv->active_master;
        struct vmw_master *vmaster = vmw_master(file_priv->master);
        int ret = 0;

        if (!dev_priv->enable_fb) {
                ret = vmw_3d_resource_inc(dev_priv, true);
                if (unlikely(ret != 0))
                        return ret;
                vmw_kms_save_vga(dev_priv);
                mutex_lock(&dev_priv->hw_mutex);
                vmw_write(dev_priv, SVGA_REG_TRACES, 0);
                mutex_unlock(&dev_priv->hw_mutex);
        }

        if (active) {
                BUG_ON(active != &dev_priv->fbdev_master);
                ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
                if (unlikely(ret != 0))
                        goto out_no_active_lock;

                ttm_lock_set_kill(&active->lock, true, SIGTERM);
                ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Unable to clean VRAM on "
                                  "master drop.\n");
                }

                dev_priv->active_master = NULL;
        }

        ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
        if (!from_open) {
                ttm_vt_unlock(&vmaster->lock);
                BUG_ON(vmw_fp->locked_master != file_priv->master);
                drm_master_put(&vmw_fp->locked_master);
        }

        dev_priv->active_master = vmaster;

        return 0;

out_no_active_lock:
        if (!dev_priv->enable_fb) {
                mutex_lock(&dev_priv->hw_mutex);
                vmw_write(dev_priv, SVGA_REG_TRACES, 1);
                mutex_unlock(&dev_priv->hw_mutex);
                vmw_kms_restore_vga(dev_priv);
                vmw_3d_resource_dec(dev_priv, true);
        }
        return ret;
}

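/**
 * vmw_master_drop - Drop the currently active master.
 *
 * @dev: Pointer to the drm device.
 * @file_priv: The file whose master is dropped.
 * @from_release: True when called from drm file release.
 *
 * Locks the dropped master, releases the pinned query buffer object and
 * falls back to the fbdev master, re-enabling fbdev or VGA as appropriate.
 */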
static void vmw_master_drop(struct drm_device *dev,
                            struct drm_file *file_priv,
                            bool from_release)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
        struct vmw_master *vmaster = vmw_master(file_priv->master);
        int ret;

        /**
         * Make sure the master doesn't disappear while we have
         * it locked.
         */

        vmw_fp->locked_master = drm_master_get(file_priv->master);
        ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
        vmw_execbuf_release_pinned_bo(dev_priv, false, 0);

        if (unlikely((ret != 0))) {
                DRM_ERROR("Unable to lock TTM at VT switch.\n");
                drm_master_put(&vmw_fp->locked_master);
        }

        ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);

        if (!dev_priv->enable_fb) {
                ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
                if (unlikely(ret != 0))
                        DRM_ERROR("Unable to clean VRAM on master drop.\n");
                mutex_lock(&dev_priv->hw_mutex);
                vmw_write(dev_priv, SVGA_REG_TRACES, 1);
                mutex_unlock(&dev_priv->hw_mutex);
                vmw_kms_restore_vga(dev_priv);
                vmw_3d_resource_dec(dev_priv, true);
        }

        dev_priv->active_master = &dev_priv->fbdev_master;
        ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
        ttm_vt_unlock(&dev_priv->fbdev_master.lock);

        if (dev_priv->enable_fb)
                vmw_fb_on(dev_priv);
}

static void vmw_remove(struct pci_dev *pdev)
{
        struct drm_device *dev = pci_get_drvdata(pdev);

        drm_put_dev(dev);
}

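/**
 * vmwgfx_pm_notifier - Power management notifier callback.
 *
 * @nb: The notifier block embedded in struct vmw_private.
 * @val: The power management event.
 * @ptr: Unused.
 *
 * Takes the TTM suspend lock and swaps out all buffer objects before
 * suspend or hibernation, and releases the lock again afterwards.
 */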
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
                              void *ptr)
{
        struct vmw_private *dev_priv =
                container_of(nb, struct vmw_private, pm_nb);
        struct vmw_master *vmaster = dev_priv->active_master;

        switch (val) {
        case PM_HIBERNATION_PREPARE:
        case PM_SUSPEND_PREPARE:
                ttm_suspend_lock(&vmaster->lock);

                /**
                 * This empties VRAM and unbinds all GMR bindings.
                 * Buffer contents are moved to swappable memory.
                 */
                vmw_execbuf_release_pinned_bo(dev_priv, false, 0);
                ttm_bo_swapout_all(&dev_priv->bdev);

                break;
        case PM_POST_HIBERNATION:
        case PM_POST_SUSPEND:
        case PM_POST_RESTORE:
                ttm_suspend_unlock(&vmaster->lock);

                break;
        case PM_RESTORE_PREPARE:
                break;
        default:
                break;
        }
        return 0;
}

/**
 * These might not be needed with the virtual SVGA device.
 */

static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct drm_device *dev = pci_get_drvdata(pdev);
        struct vmw_private *dev_priv = vmw_priv(dev);

        if (dev_priv->num_3d_resources != 0) {
                DRM_INFO("Can't suspend or hibernate "
                         "while 3D resources are active.\n");
                return -EBUSY;
        }

        pci_save_state(pdev);
        pci_disable_device(pdev);
        pci_set_power_state(pdev, PCI_D3hot);
        return 0;
}

static int vmw_pci_resume(struct pci_dev *pdev)
{
        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);
        return pci_enable_device(pdev);
}

static int vmw_pm_suspend(struct device *kdev)
{
        struct pci_dev *pdev = to_pci_dev(kdev);
        struct pm_message dummy;

        dummy.event = 0;

        return vmw_pci_suspend(pdev, dummy);
}

static int vmw_pm_resume(struct device *kdev)
{
        struct pci_dev *pdev = to_pci_dev(kdev);

        return vmw_pci_resume(pdev);
}

static int vmw_pm_prepare(struct device *kdev)
{
        struct pci_dev *pdev = to_pci_dev(kdev);
        struct drm_device *dev = pci_get_drvdata(pdev);
        struct vmw_private *dev_priv = vmw_priv(dev);

        /**
         * Release 3d reference held by fbdev and potentially
         * stop fifo.
         */
        dev_priv->suspended = true;
        if (dev_priv->enable_fb)
                vmw_3d_resource_dec(dev_priv, true);

        if (dev_priv->num_3d_resources != 0) {

                DRM_INFO("Can't suspend or hibernate "
                         "while 3D resources are active.\n");

                if (dev_priv->enable_fb)
                        vmw_3d_resource_inc(dev_priv, true);
                dev_priv->suspended = false;
                return -EBUSY;
        }

        return 0;
}

static void vmw_pm_complete(struct device *kdev)
{
        struct pci_dev *pdev = to_pci_dev(kdev);
        struct drm_device *dev = pci_get_drvdata(pdev);
        struct vmw_private *dev_priv = vmw_priv(dev);

        /**
         * Reclaim 3d reference held by fbdev and potentially
         * start fifo.
         */
        if (dev_priv->enable_fb)
                vmw_3d_resource_inc(dev_priv, false);

        dev_priv->suspended = false;
}

static const struct dev_pm_ops vmw_pm_ops = {
        .prepare = vmw_pm_prepare,
        .complete = vmw_pm_complete,
        .suspend = vmw_pm_suspend,
        .resume = vmw_pm_resume,
};

static struct drm_driver driver = {
        .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
        DRIVER_MODESET,
        .load = vmw_driver_load,
        .unload = vmw_driver_unload,
        .firstopen = vmw_firstopen,
        .lastclose = vmw_lastclose,
        .irq_preinstall = vmw_irq_preinstall,
        .irq_postinstall = vmw_irq_postinstall,
        .irq_uninstall = vmw_irq_uninstall,
        .irq_handler = vmw_irq_handler,
        .get_vblank_counter = vmw_get_vblank_counter,
        .enable_vblank = vmw_enable_vblank,
        .disable_vblank = vmw_disable_vblank,
        .reclaim_buffers_locked = NULL,
        .ioctls = vmw_ioctls,
        .num_ioctls = DRM_ARRAY_SIZE(vmw_ioctls),
        .dma_quiescent = NULL, /*vmw_dma_quiescent, */
        .master_create = vmw_master_create,
        .master_destroy = vmw_master_destroy,
        .master_set = vmw_master_set,
        .master_drop = vmw_master_drop,
        .open = vmw_driver_open,
        .postclose = vmw_postclose,
        .fops = {
                .owner = THIS_MODULE,
                .open = drm_open,
                .release = drm_release,
                .unlocked_ioctl = vmw_unlocked_ioctl,
                .mmap = vmw_mmap,
                .poll = vmw_fops_poll,
                .read = vmw_fops_read,
                .fasync = drm_fasync,
#if defined(CONFIG_COMPAT)
                .compat_ioctl = drm_compat_ioctl,
#endif
                .llseek = noop_llseek,
        },
        .name = VMWGFX_DRIVER_NAME,
        .desc = VMWGFX_DRIVER_DESC,
        .date = VMWGFX_DRIVER_DATE,
        .major = VMWGFX_DRIVER_MAJOR,
        .minor = VMWGFX_DRIVER_MINOR,
        .patchlevel = VMWGFX_DRIVER_PATCHLEVEL
};

static struct pci_driver vmw_pci_driver = {
        .name = VMWGFX_DRIVER_NAME,
        .id_table = vmw_pci_id_list,
        .probe = vmw_probe,
        .remove = vmw_remove,
        .driver = {
                .pm = &vmw_pm_ops
        }
};

static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        return drm_get_pci_dev(pdev, ent, &driver);
}

static int __init vmwgfx_init(void)
{
        int ret;
        ret = drm_pci_init(&driver, &vmw_pci_driver);
        if (ret)
                DRM_ERROR("Failed initializing DRM.\n");
        return ret;
}

static void __exit vmwgfx_exit(void)
{
        drm_pci_exit(&driver, &vmw_pci_driver);
}

module_init(vmwgfx_init);
module_exit(vmwgfx_exit);

MODULE_AUTHOR("VMware Inc. and others");
MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device");
MODULE_LICENSE("GPL and additional rights");
MODULE_VERSION(__stringify(VMWGFX_DRIVER_MAJOR) "."
               __stringify(VMWGFX_DRIVER_MINOR) "."
               __stringify(VMWGFX_DRIVER_PATCHLEVEL) "."
               "0");