// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2016 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <linux/console.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/pci.h>

#include <drm/drm_drv.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_sysfs.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_placement.h>

#include "ttm_object.h"
#include "vmwgfx_binding.h"
#include "vmwgfx_drv.h"

#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
#define VMWGFX_CHIP_SVGAII 0
#define VMW_FB_RESERVATION 0

#define VMW_MIN_INITIAL_WIDTH 800
#define VMW_MIN_INITIAL_HEIGHT 600

#ifndef VMWGFX_GIT_VERSION
#define VMWGFX_GIT_VERSION "Unknown"
#endif

#define VMWGFX_REPO "In Tree"

#define VMWGFX_VALIDATION_MEM_GRAN (16*PAGE_SIZE)


/**
 * Fully encoded drm commands. Might move to vmw_drm.h
 */

#define DRM_IOCTL_VMW_GET_PARAM \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM, \
                 struct drm_vmw_getparam_arg)
#define DRM_IOCTL_VMW_ALLOC_DMABUF \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF, \
                 union drm_vmw_alloc_dmabuf_arg)
#define DRM_IOCTL_VMW_UNREF_DMABUF \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_DMABUF, \
                struct drm_vmw_unref_dmabuf_arg)
#define DRM_IOCTL_VMW_CURSOR_BYPASS \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CURSOR_BYPASS, \
                struct drm_vmw_cursor_bypass_arg)

#define DRM_IOCTL_VMW_CONTROL_STREAM \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CONTROL_STREAM, \
                struct drm_vmw_control_stream_arg)
#define DRM_IOCTL_VMW_CLAIM_STREAM \
        DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CLAIM_STREAM, \
                struct drm_vmw_stream_arg)
#define DRM_IOCTL_VMW_UNREF_STREAM \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_STREAM, \
                struct drm_vmw_stream_arg)

#define DRM_IOCTL_VMW_CREATE_CONTEXT \
        DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CREATE_CONTEXT, \
                struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_UNREF_CONTEXT \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_CONTEXT, \
                struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_CREATE_SURFACE \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SURFACE, \
                 union drm_vmw_surface_create_arg)
#define DRM_IOCTL_VMW_UNREF_SURFACE \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SURFACE, \
                struct drm_vmw_surface_arg)
#define DRM_IOCTL_VMW_REF_SURFACE \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_REF_SURFACE, \
                 union drm_vmw_surface_reference_arg)
#define DRM_IOCTL_VMW_EXECBUF \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF, \
                struct drm_vmw_execbuf_arg)
#define DRM_IOCTL_VMW_GET_3D_CAP \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_GET_3D_CAP, \
                struct drm_vmw_get_3d_cap_arg)
#define DRM_IOCTL_VMW_FENCE_WAIT \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT, \
                 struct drm_vmw_fence_wait_arg)
#define DRM_IOCTL_VMW_FENCE_SIGNALED \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_SIGNALED, \
                 struct drm_vmw_fence_signaled_arg)
#define DRM_IOCTL_VMW_FENCE_UNREF \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_UNREF, \
                struct drm_vmw_fence_arg)
#define DRM_IOCTL_VMW_FENCE_EVENT \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_EVENT, \
                struct drm_vmw_fence_event_arg)
#define DRM_IOCTL_VMW_PRESENT \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT, \
                struct drm_vmw_present_arg)
#define DRM_IOCTL_VMW_PRESENT_READBACK \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT_READBACK, \
                struct drm_vmw_present_readback_arg)
#define DRM_IOCTL_VMW_UPDATE_LAYOUT \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT, \
                struct drm_vmw_update_layout_arg)
#define DRM_IOCTL_VMW_CREATE_SHADER \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SHADER, \
                 struct drm_vmw_shader_create_arg)
#define DRM_IOCTL_VMW_UNREF_SHADER \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SHADER, \
                struct drm_vmw_shader_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_CREATE \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE, \
                 union drm_vmw_gb_surface_create_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_REF \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF, \
                 union drm_vmw_gb_surface_reference_arg)
#define DRM_IOCTL_VMW_SYNCCPU \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_SYNCCPU, \
                struct drm_vmw_synccpu_arg)
#define DRM_IOCTL_VMW_CREATE_EXTENDED_CONTEXT \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_EXTENDED_CONTEXT, \
                 struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_CREATE_EXT \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE_EXT, \
                 union drm_vmw_gb_surface_create_ext_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_REF_EXT \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF_EXT, \
                 union drm_vmw_gb_surface_reference_ext_arg)
#define DRM_IOCTL_VMW_MSG \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_MSG, \
                 struct drm_vmw_msg_arg)

/**
 * The core DRM version of this macro doesn't account for
 * DRM_COMMAND_BASE.
 */

#define VMW_IOCTL_DEF(ioctl, func, flags) \
        [DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_IOCTL_##ioctl, flags, func}

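/*
 * For illustration: given the macro above, an entry such as
 *
 *   VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl, DRM_RENDER_ALLOW)
 *
 * expands to a designated initializer at index
 * DRM_IOCTL_NR(DRM_IOCTL_VMW_GET_PARAM) - DRM_COMMAND_BASE, i.e. the table
 * slot is the driver-private ioctl number rather than the absolute DRM
 * ioctl number.
 */
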
/**
 * Ioctl definitions.
 */

static const struct drm_ioctl_desc vmw_ioctls[] = {
        VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
                      DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_bo_alloc_ioctl,
                      DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_bo_unref_ioctl,
                      DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
                      vmw_kms_cursor_bypass_ioctl,
                      DRM_MASTER),

        VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl,
                      DRM_MASTER),
        VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
                      DRM_MASTER),
        VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
                      DRM_MASTER),

        VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
                      DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
                      DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
                      DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
                      DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
                      DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl,
                      DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
                      DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_FENCE_SIGNALED,
                      vmw_fence_obj_signaled_ioctl,
                      DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl,
                      DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_FENCE_EVENT, vmw_fence_event_ioctl,
                      DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl,
                      DRM_RENDER_ALLOW),

        /* These allow direct access to the framebuffers; mark as master only. */
        VMW_IOCTL_DEF(VMW_PRESENT, vmw_present_ioctl,
                      DRM_MASTER | DRM_AUTH),
        VMW_IOCTL_DEF(VMW_PRESENT_READBACK,
                      vmw_present_readback_ioctl,
                      DRM_MASTER | DRM_AUTH),
        /*
         * The permissions of the below ioctl are overridden in
         * vmw_generic_ioctl(). We require either
         * DRM_MASTER or capable(CAP_SYS_ADMIN).
         */
        VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT,
                      vmw_kms_update_layout_ioctl,
                      DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_CREATE_SHADER,
                      vmw_shader_define_ioctl,
                      DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_UNREF_SHADER,
                      vmw_shader_destroy_ioctl,
                      DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE,
                      vmw_gb_surface_define_ioctl,
                      DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_GB_SURFACE_REF,
                      vmw_gb_surface_reference_ioctl,
                      DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_SYNCCPU,
                      vmw_user_bo_synccpu_ioctl,
                      DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_CREATE_EXTENDED_CONTEXT,
                      vmw_extended_context_define_ioctl,
                      DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE_EXT,
                      vmw_gb_surface_define_ext_ioctl,
                      DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_GB_SURFACE_REF_EXT,
                      vmw_gb_surface_reference_ext_ioctl,
                      DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_MSG,
                      vmw_msg_ioctl,
                      DRM_RENDER_ALLOW),
};

static const struct pci_device_id vmw_pci_id_list[] = {
        {0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII},
        {0, 0, 0}
};
MODULE_DEVICE_TABLE(pci, vmw_pci_id_list);

static int enable_fbdev = IS_ENABLED(CONFIG_DRM_VMWGFX_FBCON);
static int vmw_force_iommu;
static int vmw_restrict_iommu;
static int vmw_force_coherent;
static int vmw_restrict_dma_mask;
static int vmw_assume_16bpp;

static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
                              void *ptr);

MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
module_param_named(enable_fbdev, enable_fbdev, int, 0600);
MODULE_PARM_DESC(force_dma_api, "Force using the DMA API for TTM pages");
module_param_named(force_dma_api, vmw_force_iommu, int, 0600);
MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages");
module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600);
MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages");
module_param_named(force_coherent, vmw_force_coherent, int, 0600);
MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU");
module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600);
MODULE_PARM_DESC(assume_16bpp, "Assume 16-bpp when filtering modes");
module_param_named(assume_16bpp, vmw_assume_16bpp, int, 0600);

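/*
 * Usage note (illustrative): the module_param_named() calls above register
 * these knobs under the module name, so they can be set at load time, for
 * example:
 *
 *   modprobe vmwgfx force_coherent=1 restrict_dma_mask=1
 *
 * With permissions 0600 they also appear under
 * /sys/module/vmwgfx/parameters/, although the driver generally only
 * consults them during initialization.
 */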

static void vmw_print_capabilities2(uint32_t capabilities2)
{
        DRM_INFO("Capabilities2:\n");
        if (capabilities2 & SVGA_CAP2_GROW_OTABLE)
                DRM_INFO(" Grow oTable.\n");
        if (capabilities2 & SVGA_CAP2_INTRA_SURFACE_COPY)
                DRM_INFO(" IntraSurface copy.\n");
}

static void vmw_print_capabilities(uint32_t capabilities)
{
        DRM_INFO("Capabilities:\n");
        if (capabilities & SVGA_CAP_RECT_COPY)
                DRM_INFO(" Rect copy.\n");
        if (capabilities & SVGA_CAP_CURSOR)
                DRM_INFO(" Cursor.\n");
        if (capabilities & SVGA_CAP_CURSOR_BYPASS)
                DRM_INFO(" Cursor bypass.\n");
        if (capabilities & SVGA_CAP_CURSOR_BYPASS_2)
                DRM_INFO(" Cursor bypass 2.\n");
        if (capabilities & SVGA_CAP_8BIT_EMULATION)
                DRM_INFO(" 8bit emulation.\n");
        if (capabilities & SVGA_CAP_ALPHA_CURSOR)
                DRM_INFO(" Alpha cursor.\n");
        if (capabilities & SVGA_CAP_3D)
                DRM_INFO(" 3D.\n");
        if (capabilities & SVGA_CAP_EXTENDED_FIFO)
                DRM_INFO(" Extended Fifo.\n");
        if (capabilities & SVGA_CAP_MULTIMON)
                DRM_INFO(" Multimon.\n");
        if (capabilities & SVGA_CAP_PITCHLOCK)
                DRM_INFO(" Pitchlock.\n");
        if (capabilities & SVGA_CAP_IRQMASK)
                DRM_INFO(" Irq mask.\n");
        if (capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)
                DRM_INFO(" Display Topology.\n");
        if (capabilities & SVGA_CAP_GMR)
                DRM_INFO(" GMR.\n");
        if (capabilities & SVGA_CAP_TRACES)
                DRM_INFO(" Traces.\n");
        if (capabilities & SVGA_CAP_GMR2)
                DRM_INFO(" GMR2.\n");
        if (capabilities & SVGA_CAP_SCREEN_OBJECT_2)
                DRM_INFO(" Screen Object 2.\n");
        if (capabilities & SVGA_CAP_COMMAND_BUFFERS)
                DRM_INFO(" Command Buffers.\n");
        if (capabilities & SVGA_CAP_CMD_BUFFERS_2)
                DRM_INFO(" Command Buffers 2.\n");
        if (capabilities & SVGA_CAP_GBOBJECTS)
                DRM_INFO(" Guest Backed Resources.\n");
        if (capabilities & SVGA_CAP_DX)
                DRM_INFO(" DX Features.\n");
        if (capabilities & SVGA_CAP_HP_CMD_QUEUE)
                DRM_INFO(" HP Command Queue.\n");
}

/**
 * vmw_dummy_query_bo_create - create a bo to hold a dummy query result
 *
 * @dev_priv: A device private structure.
 *
 * This function creates a small buffer object that holds the query
 * result for dummy queries emitted as query barriers.
 * The function will then map the first page and initialize a pending
 * occlusion query result structure. Finally, it will unmap the buffer.
 * No interruptible waits are done within this function.
 *
 * Returns an error if bo creation or initialization fails.
 */
static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
{
        int ret;
        struct vmw_buffer_object *vbo;
        struct ttm_bo_kmap_obj map;
        volatile SVGA3dQueryResult *result;
        bool dummy;

        /*
         * Create the vbo as pinned, so that a tryreserve will
         * immediately succeed. This is because we're the only
         * user of the bo currently.
         */
        vbo = kzalloc(sizeof(*vbo), GFP_KERNEL);
        if (!vbo)
                return -ENOMEM;

        ret = vmw_bo_init(dev_priv, vbo, PAGE_SIZE,
                          &vmw_sys_ne_placement, false,
                          &vmw_bo_bo_free);
        if (unlikely(ret != 0))
                return ret;

        ret = ttm_bo_reserve(&vbo->base, false, true, NULL);
        BUG_ON(ret != 0);
        vmw_bo_pin_reserved(vbo, true);

        ret = ttm_bo_kmap(&vbo->base, 0, 1, &map);
        if (likely(ret == 0)) {
                result = ttm_kmap_obj_virtual(&map, &dummy);
                result->totalSize = sizeof(*result);
                result->state = SVGA3D_QUERYSTATE_PENDING;
                result->result32 = 0xff;
                ttm_bo_kunmap(&map);
        }
        vmw_bo_pin_reserved(vbo, false);
        ttm_bo_unreserve(&vbo->base);

        if (unlikely(ret != 0)) {
                DRM_ERROR("Dummy query buffer map failed.\n");
                vmw_bo_unreference(&vbo);
        } else
                dev_priv->dummy_query_bo = vbo;

        return ret;
}

/**
 * vmw_request_device_late - Perform late device setup
 *
 * @dev_priv: Pointer to device private.
 *
 * This function performs setup of otables and enables large command
 * buffer submission. These tasks are split out to a separate function
 * because it reverts vmw_release_device_early and is intended to be used
 * by an error path in the hibernation code.
 */
static int vmw_request_device_late(struct vmw_private *dev_priv)
{
        int ret;

        if (dev_priv->has_mob) {
                ret = vmw_otables_setup(dev_priv);
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Unable to initialize "
                                  "guest Memory OBjects.\n");
                        return ret;
                }
        }

        if (dev_priv->cman) {
                ret = vmw_cmdbuf_set_pool_size(dev_priv->cman,
                                               256*4096, 2*4096);
                if (ret) {
                        struct vmw_cmdbuf_man *man = dev_priv->cman;

                        dev_priv->cman = NULL;
                        vmw_cmdbuf_man_destroy(man);
                }
        }

        return 0;
}

static int vmw_request_device(struct vmw_private *dev_priv)
{
        int ret;

        ret = vmw_fifo_init(dev_priv, &dev_priv->fifo);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Unable to initialize FIFO.\n");
                return ret;
        }
        vmw_fence_fifo_up(dev_priv->fman);
        dev_priv->cman = vmw_cmdbuf_man_create(dev_priv);
        if (IS_ERR(dev_priv->cman)) {
                dev_priv->cman = NULL;
                dev_priv->has_dx = false;
        }

        ret = vmw_request_device_late(dev_priv);
        if (ret)
                goto out_no_mob;

        ret = vmw_dummy_query_bo_create(dev_priv);
        if (unlikely(ret != 0))
                goto out_no_query_bo;

        return 0;

out_no_query_bo:
        if (dev_priv->cman)
                vmw_cmdbuf_remove_pool(dev_priv->cman);
        if (dev_priv->has_mob) {
                (void) ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
                vmw_otables_takedown(dev_priv);
        }
        if (dev_priv->cman)
                vmw_cmdbuf_man_destroy(dev_priv->cman);
out_no_mob:
        vmw_fence_fifo_down(dev_priv->fman);
        vmw_fifo_release(dev_priv, &dev_priv->fifo);
        return ret;
}
| 478 | |
Thomas Hellstrom | 153b3d5 | 2015-06-25 10:47:43 -0700 | [diff] [blame] | 479 | /** |
| 480 | * vmw_release_device_early - Early part of fifo takedown. |
| 481 | * |
| 482 | * @dev_priv: Pointer to device private struct. |
| 483 | * |
| 484 | * This is the first part of command submission takedown, to be called before |
| 485 | * buffer management is taken down. |
| 486 | */ |
| 487 | static void vmw_release_device_early(struct vmw_private *dev_priv) |
Jakob Bornecrantz | fb1d973 | 2009-12-10 00:19:58 +0000 | [diff] [blame] | 488 | { |
Thomas Hellstrom | e2fa3a7 | 2011-10-04 20:13:30 +0200 | [diff] [blame] | 489 | /* |
| 490 | * Previous destructions should've released |
| 491 | * the pinned bo. |
| 492 | */ |
| 493 | |
| 494 | BUG_ON(dev_priv->pinned_bo != NULL); |
| 495 | |
Thomas Hellstrom | f1d34bf | 2018-06-19 15:02:16 +0200 | [diff] [blame] | 496 | vmw_bo_unreference(&dev_priv->dummy_query_bo); |
Thomas Hellstrom | 3eab3d9 | 2015-06-25 11:57:56 -0700 | [diff] [blame] | 497 | if (dev_priv->cman) |
| 498 | vmw_cmdbuf_remove_pool(dev_priv->cman); |
| 499 | |
Thomas Hellstrom | 153b3d5 | 2015-06-25 10:47:43 -0700 | [diff] [blame] | 500 | if (dev_priv->has_mob) { |
| 501 | ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB); |
Thomas Hellstrom | 3530bdc | 2012-11-21 10:49:52 +0100 | [diff] [blame] | 502 | vmw_otables_takedown(dev_priv); |
Thomas Hellstrom | 30c78bb | 2010-10-01 10:21:48 +0200 | [diff] [blame] | 503 | } |
Thomas Hellstrom | 30c78bb | 2010-10-01 10:21:48 +0200 | [diff] [blame] | 504 | } |
| 505 | |
Thomas Hellstrom | 05730b3 | 2011-08-31 07:42:52 +0000 | [diff] [blame] | 506 | /** |
Thomas Hellstrom | 153b3d5 | 2015-06-25 10:47:43 -0700 | [diff] [blame] | 507 | * vmw_release_device_late - Late part of fifo takedown. |
| 508 | * |
| 509 | * @dev_priv: Pointer to device private struct. |
| 510 | * |
| 511 | * This is the last part of the command submission takedown, to be called when |
| 512 | * command submission is no longer needed. It may wait on pending fences. |
Thomas Hellstrom | 05730b3 | 2011-08-31 07:42:52 +0000 | [diff] [blame] | 513 | */ |
Thomas Hellstrom | 153b3d5 | 2015-06-25 10:47:43 -0700 | [diff] [blame] | 514 | static void vmw_release_device_late(struct vmw_private *dev_priv) |
Thomas Hellstrom | 30c78bb | 2010-10-01 10:21:48 +0200 | [diff] [blame] | 515 | { |
Jakob Bornecrantz | fb1d973 | 2009-12-10 00:19:58 +0000 | [diff] [blame] | 516 | vmw_fence_fifo_down(dev_priv->fman); |
Thomas Hellstrom | 3eab3d9 | 2015-06-25 11:57:56 -0700 | [diff] [blame] | 517 | if (dev_priv->cman) |
| 518 | vmw_cmdbuf_man_destroy(dev_priv->cman); |
Thomas Hellstrom | 30c78bb | 2010-10-01 10:21:48 +0200 | [diff] [blame] | 519 | |
Jakob Bornecrantz | fb1d973 | 2009-12-10 00:19:58 +0000 | [diff] [blame] | 520 | vmw_fifo_release(dev_priv, &dev_priv->fifo); |
Thomas Hellstrom | 30c78bb | 2010-10-01 10:21:48 +0200 | [diff] [blame] | 521 | } |
Jakob Bornecrantz | fb1d973 | 2009-12-10 00:19:58 +0000 | [diff] [blame] | 522 | |
/**
 * Sets the initial_[width|height] fields on the given vmw_private.
 *
 * It does so by reading the SVGA_REG_[WIDTH|HEIGHT] registers and then
 * clamping the values against VMW_MIN_INITIAL_[WIDTH|HEIGHT] and the
 * fb_max_[width|height] fields.
 * If the values appear to be invalid, they are reset to
 * VMW_MIN_INITIAL_[WIDTH|HEIGHT].
 */
static void vmw_get_initial_size(struct vmw_private *dev_priv)
{
        uint32_t width;
        uint32_t height;

        width = vmw_read(dev_priv, SVGA_REG_WIDTH);
        height = vmw_read(dev_priv, SVGA_REG_HEIGHT);

        width = max_t(uint32_t, width, VMW_MIN_INITIAL_WIDTH);
        height = max_t(uint32_t, height, VMW_MIN_INITIAL_HEIGHT);

        if (width > dev_priv->fb_max_width ||
            height > dev_priv->fb_max_height) {

                /*
                 * This is a host error and shouldn't occur.
                 */

                width = VMW_MIN_INITIAL_WIDTH;
                height = VMW_MIN_INITIAL_HEIGHT;
        }

        dev_priv->initial_width = width;
        dev_priv->initial_height = height;
}

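/*
 * Worked example (illustrative): if the host reports 640x480 through
 * SVGA_REG_WIDTH/HEIGHT, vmw_get_initial_size() raises the values to the
 * 800x600 minimum; if the host reports values larger than
 * fb_max_[width|height], both values are reset to the 800x600 minimum.
 */
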
/**
 * vmw_dma_select_mode - Determine how DMA mappings should be set up for this
 * system.
 *
 * @dev_priv: Pointer to a struct vmw_private
 *
 * This function tries to determine what actions need to be taken by the
 * driver to make system pages visible to the device.
 * If this function decides that DMA is not possible, it returns -EINVAL.
 * The driver may then try to disable features of the device that require
 * DMA.
 */
static int vmw_dma_select_mode(struct vmw_private *dev_priv)
{
        static const char *names[vmw_dma_map_max] = {
                [vmw_dma_phys] = "Using physical TTM page addresses.",
                [vmw_dma_alloc_coherent] = "Using coherent TTM pages.",
                [vmw_dma_map_populate] = "Caching DMA mappings.",
                [vmw_dma_map_bind] = "Giving up DMA mappings early."};

        if (vmw_force_coherent)
                dev_priv->map_mode = vmw_dma_alloc_coherent;
        else if (vmw_restrict_iommu)
                dev_priv->map_mode = vmw_dma_map_bind;
        else
                dev_priv->map_mode = vmw_dma_map_populate;

        if (!IS_ENABLED(CONFIG_DRM_TTM_DMA_PAGE_POOL) &&
            (dev_priv->map_mode == vmw_dma_alloc_coherent))
                return -EINVAL;

        DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);
        return 0;
}

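/*
 * Note (summary of the selection above): force_coherent takes precedence
 * over restrict_iommu, which takes precedence over the default of caching
 * DMA mappings (vmw_dma_map_populate). The coherent mode is only usable
 * when the kernel is built with CONFIG_DRM_TTM_DMA_PAGE_POOL.
 */
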
/**
 * vmw_dma_masks - set required page- and dma masks
 *
 * @dev_priv: Pointer to a struct vmw_private.
 *
 * With 32-bit we can only handle 32 bit PFNs. Optionally set that
 * restriction also for 64-bit systems.
 */
static int vmw_dma_masks(struct vmw_private *dev_priv)
{
        struct drm_device *dev = dev_priv->dev;
        int ret = 0;

        ret = dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64));
        if (dev_priv->map_mode != vmw_dma_phys &&
            (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) {
                DRM_INFO("Restricting DMA addresses to 44 bits.\n");
                return dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(44));
        }

        return ret;
}

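/*
 * Note (illustrative): DMA_BIT_MASK(44) is (1ULL << 44) - 1, i.e. device
 * addresses below 16 TiB. That restriction is applied on 32-bit kernels
 * (sizeof(unsigned long) == 4) or when the restrict_dma_mask module
 * parameter is set, unless pages are mapped by physical address
 * (vmw_dma_phys).
 */
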
static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
{
        struct vmw_private *dev_priv;
        int ret;
        uint32_t svga_id;
        enum vmw_res_type i;
        bool refuse_dma = false;
        char host_log[100] = {0};

        dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
        if (unlikely(!dev_priv)) {
                DRM_ERROR("Failed allocating a device private struct.\n");
                return -ENOMEM;
        }

        pci_set_master(dev->pdev);

        dev_priv->dev = dev;
        dev_priv->vmw_chipset = chipset;
        dev_priv->last_read_seqno = (uint32_t) -100;
        mutex_init(&dev_priv->cmdbuf_mutex);
        mutex_init(&dev_priv->release_mutex);
        mutex_init(&dev_priv->binding_mutex);
        mutex_init(&dev_priv->global_kms_state_mutex);
        ttm_lock_init(&dev_priv->reservation_sem);
        spin_lock_init(&dev_priv->resource_lock);
        spin_lock_init(&dev_priv->hw_lock);
        spin_lock_init(&dev_priv->waiter_lock);
        spin_lock_init(&dev_priv->cap_lock);
        spin_lock_init(&dev_priv->svga_lock);
        spin_lock_init(&dev_priv->cursor_lock);

        for (i = vmw_res_context; i < vmw_res_max; ++i) {
                idr_init(&dev_priv->res_idr[i]);
                INIT_LIST_HEAD(&dev_priv->res_lru[i]);
        }

        init_waitqueue_head(&dev_priv->fence_queue);
        init_waitqueue_head(&dev_priv->fifo_queue);
        dev_priv->fence_queue_waiters = 0;
        dev_priv->fifo_queue_waiters = 0;

        dev_priv->used_memory_size = 0;

        dev_priv->io_start = pci_resource_start(dev->pdev, 0);
        dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
        dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);

        dev_priv->assume_16bpp = !!vmw_assume_16bpp;

        dev_priv->enable_fb = enable_fbdev;

        vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
        svga_id = vmw_read(dev_priv, SVGA_REG_ID);
        if (svga_id != SVGA_ID_2) {
                ret = -ENOSYS;
                DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id);
                goto out_err0;
        }

        dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);

        if (dev_priv->capabilities & SVGA_CAP_CAP2_REGISTER) {
                dev_priv->capabilities2 = vmw_read(dev_priv, SVGA_REG_CAP2);
        }


        ret = vmw_dma_select_mode(dev_priv);
        if (unlikely(ret != 0)) {
                DRM_INFO("Restricting capabilities due to IOMMU setup.\n");
                refuse_dma = true;
        }

        dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
        dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
        dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
        dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);

        vmw_get_initial_size(dev_priv);

        if (dev_priv->capabilities & SVGA_CAP_GMR2) {
                dev_priv->max_gmr_ids =
                        vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
                dev_priv->max_gmr_pages =
                        vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES);
                dev_priv->memory_size =
                        vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE);
                dev_priv->memory_size -= dev_priv->vram_size;
        } else {
                /*
                 * An arbitrary limit of 512MiB on surface
                 * memory. But all HWV8 hardware supports GMR2.
                 */
                dev_priv->memory_size = 512*1024*1024;
        }
        dev_priv->max_mob_pages = 0;
        dev_priv->max_mob_size = 0;
        if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
                uint64_t mem_size =
                        vmw_read(dev_priv,
                                 SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB);

                /*
                 * Workaround for low memory 2D VMs to compensate for the
                 * allocation taken by fbdev
                 */
                if (!(dev_priv->capabilities & SVGA_CAP_3D))
                        mem_size *= 3;

                dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
                dev_priv->prim_bb_mem =
                        vmw_read(dev_priv,
                                 SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM);
                dev_priv->max_mob_size =
                        vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
                dev_priv->stdu_max_width =
                        vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_WIDTH);
                dev_priv->stdu_max_height =
                        vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_HEIGHT);

                vmw_write(dev_priv, SVGA_REG_DEV_CAP,
                          SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH);
                dev_priv->texture_max_width = vmw_read(dev_priv,
                                                       SVGA_REG_DEV_CAP);
                vmw_write(dev_priv, SVGA_REG_DEV_CAP,
                          SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT);
                dev_priv->texture_max_height = vmw_read(dev_priv,
                                                        SVGA_REG_DEV_CAP);
        } else {
                dev_priv->texture_max_width = 8192;
                dev_priv->texture_max_height = 8192;
                dev_priv->prim_bb_mem = dev_priv->vram_size;
        }

        vmw_print_capabilities(dev_priv->capabilities);
        if (dev_priv->capabilities & SVGA_CAP_CAP2_REGISTER)
                vmw_print_capabilities2(dev_priv->capabilities2);

Thomas Hellstrom | 0d00c48 | 2014-01-15 20:19:53 +0100 | [diff] [blame] | 754 | ret = vmw_dma_masks(dev_priv); |
Thomas Hellstrom | 496eb6f | 2015-01-14 02:33:39 -0800 | [diff] [blame] | 755 | if (unlikely(ret != 0)) |
Thomas Hellstrom | 0d00c48 | 2014-01-15 20:19:53 +0100 | [diff] [blame] | 756 | goto out_err0; |
| 757 | |
Qian Cai | 3991689 | 2019-06-03 16:44:15 -0400 | [diff] [blame] | 758 | dma_set_max_seg_size(dev->dev, min_t(unsigned int, U32_MAX & PAGE_MASK, |
| 759 | SCATTERLIST_MAX_SEGMENT)); |
| 760 | |
Thomas Hellstrom | 0d00c48 | 2014-01-15 20:19:53 +0100 | [diff] [blame] | 761 | if (dev_priv->capabilities & SVGA_CAP_GMR2) { |
Jakob Bornecrantz | fb1d973 | 2009-12-10 00:19:58 +0000 | [diff] [blame] | 762 | DRM_INFO("Max GMR ids is %u\n", |
| 763 | (unsigned)dev_priv->max_gmr_ids); |
Thomas Hellstrom | fb17f18 | 2011-08-31 07:42:53 +0000 | [diff] [blame] | 764 | DRM_INFO("Max number of GMR pages is %u\n", |
| 765 | (unsigned)dev_priv->max_gmr_pages); |
Thomas Hellstrom | 5bb39e8 | 2011-10-04 20:13:33 +0200 | [diff] [blame] | 766 | DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n", |
| 767 | (unsigned)dev_priv->memory_size / 1024); |
Thomas Hellstrom | fb17f18 | 2011-08-31 07:42:53 +0000 | [diff] [blame] | 768 | } |
Thomas Hellstrom | bc2d650 | 2012-11-21 10:32:36 +0100 | [diff] [blame] | 769 | DRM_INFO("Maximum display memory size is %u kiB\n", |
| 770 | dev_priv->prim_bb_mem / 1024); |
Jakob Bornecrantz | fb1d973 | 2009-12-10 00:19:58 +0000 | [diff] [blame] | 771 | DRM_INFO("VRAM at 0x%08x size is %u kiB\n", |
| 772 | dev_priv->vram_start, dev_priv->vram_size / 1024); |
| 773 | DRM_INFO("MMIO at 0x%08x size is %u kiB\n", |
| 774 | dev_priv->mmio_start, dev_priv->mmio_size / 1024); |
| 775 | |
Thomas Hellstrom | b76ff5e | 2015-10-28 10:44:04 +0100 | [diff] [blame] | 776 | dev_priv->mmio_virt = memremap(dev_priv->mmio_start, |
| 777 | dev_priv->mmio_size, MEMREMAP_WB); |
Jakob Bornecrantz | fb1d973 | 2009-12-10 00:19:58 +0000 | [diff] [blame] | 778 | |
| 779 | if (unlikely(dev_priv->mmio_virt == NULL)) { |
| 780 | ret = -ENOMEM; |
| 781 | DRM_ERROR("Failed mapping MMIO.\n"); |
Christian König | a64f784 | 2018-10-19 16:55:26 +0200 | [diff] [blame] | 782 | goto out_err0; |
Jakob Bornecrantz | fb1d973 | 2009-12-10 00:19:58 +0000 | [diff] [blame] | 783 | } |
| 784 | |
Jakob Bornecrantz | d7e1958 | 2010-05-28 11:21:59 +0200 | [diff] [blame] | 785 | /* Need mmio memory to check for fifo pitchlock cap. */ |
| 786 | if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) && |
| 787 | !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) && |
| 788 | !vmw_fifo_have_pitchlock(dev_priv)) { |
| 789 | ret = -ENOSYS; |
| 790 | DRM_ERROR("Hardware has no pitchlock\n"); |
| 791 | goto out_err4; |
| 792 | } |
| 793 | |
Christian König | 27eb1fa | 2018-10-19 13:49:05 +0200 | [diff] [blame] | 794 | dev_priv->tdev = ttm_object_device_init(&ttm_mem_glob, 12, |
| 795 | &vmw_prime_dmabuf_ops); |
Jakob Bornecrantz | fb1d973 | 2009-12-10 00:19:58 +0000 | [diff] [blame] | 796 | |
| 797 | if (unlikely(dev_priv->tdev == NULL)) { |
| 798 | DRM_ERROR("Unable to initialize TTM object management.\n"); |
| 799 | ret = -ENOMEM; |
| 800 | goto out_err4; |
| 801 | } |
| 802 | |
| 803 | dev->dev_private = dev_priv; |
| 804 | |
Jakob Bornecrantz | fb1d973 | 2009-12-10 00:19:58 +0000 | [diff] [blame] | 805 | ret = pci_request_regions(dev->pdev, "vmwgfx probe"); |
| 806 | dev_priv->stealth = (ret != 0); |
| 807 | if (dev_priv->stealth) { |
| 808 | /** |
| 809 | * Request at least the mmio PCI resource. |
| 810 | */ |
| 811 | |
| 812 | DRM_INFO("It appears like vesafb is loaded. " |
Thomas Hellstrom | f2d12b8 | 2010-02-15 14:45:22 +0000 | [diff] [blame] | 813 | "Ignore above error if any.\n"); |
Jakob Bornecrantz | fb1d973 | 2009-12-10 00:19:58 +0000 | [diff] [blame] | 814 | ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe"); |
| 815 | if (unlikely(ret != 0)) { |
| 816 | DRM_ERROR("Failed reserving the SVGA MMIO resource.\n"); |
| 817 | goto out_no_device; |
| 818 | } |
Jakob Bornecrantz | fb1d973 | 2009-12-10 00:19:58 +0000 | [diff] [blame] | 819 | } |
Thomas Hellstrom | ae2a104 | 2011-09-01 20:18:44 +0000 | [diff] [blame] | 820 | |
Thomas Hellstrom | 506ff75 | 2012-11-09 12:26:14 +0000 | [diff] [blame] | 821 | if (dev_priv->capabilities & SVGA_CAP_IRQMASK) { |
Thomas Hellstrom | e300173 | 2017-08-24 08:06:27 +0200 | [diff] [blame] | 822 | ret = vmw_irq_install(dev, dev->pdev->irq); |
Thomas Hellstrom | 506ff75 | 2012-11-09 12:26:14 +0000 | [diff] [blame] | 823 | if (ret != 0) { |
| 824 | DRM_ERROR("Failed installing irq: %d\n", ret); |
| 825 | goto out_no_irq; |
| 826 | } |
| 827 | } |
| 828 | |
Thomas Hellstrom | ae2a104 | 2011-09-01 20:18:44 +0000 | [diff] [blame] | 829 | dev_priv->fman = vmw_fence_manager_init(dev_priv); |
Wei Yongjun | 14bbf20 | 2013-08-26 15:15:37 +0800 | [diff] [blame] | 830 | if (unlikely(dev_priv->fman == NULL)) { |
| 831 | ret = -ENOMEM; |
Thomas Hellstrom | ae2a104 | 2011-09-01 20:18:44 +0000 | [diff] [blame] | 832 | goto out_no_fman; |
Wei Yongjun | 14bbf20 | 2013-08-26 15:15:37 +0800 | [diff] [blame] | 833 | } |
Jakob Bornecrantz | 56d1c78 | 2011-10-04 20:13:22 +0200 | [diff] [blame] | 834 | |
Gerd Hoffmann | 293f86b | 2019-09-05 09:05:08 +0200 | [diff] [blame] | 835 | drm_vma_offset_manager_init(&dev_priv->vma_manager, |
| 836 | DRM_FILE_PAGE_OFFSET_START, |
| 837 | DRM_FILE_PAGE_OFFSET_SIZE); |
Thomas Hellstrom | 153b3d5 | 2015-06-25 10:47:43 -0700 | [diff] [blame] | 838 | ret = ttm_bo_device_init(&dev_priv->bdev, |
Thomas Hellstrom | 153b3d5 | 2015-06-25 10:47:43 -0700 | [diff] [blame] | 839 | &vmw_bo_driver, |
| 840 | dev->anon_inode->i_mapping, |
Gerd Hoffmann | 293f86b | 2019-09-05 09:05:08 +0200 | [diff] [blame] | 841 | &dev_priv->vma_manager, |
Thomas Hellstrom | 153b3d5 | 2015-06-25 10:47:43 -0700 | [diff] [blame] | 842 | false); |
| 843 | if (unlikely(ret != 0)) { |
| 844 | DRM_ERROR("Failed initializing TTM buffer object driver.\n"); |
| 845 | goto out_no_bdev; |
| 846 | } |
Thomas Hellstrom | 3458390 | 2015-03-05 02:33:24 -0800 | [diff] [blame] | 847 | |
Thomas Hellstrom | 153b3d5 | 2015-06-25 10:47:43 -0700 | [diff] [blame] | 848 | /* |
| 849 | * Enable VRAM, but initially don't use it until SVGA is enabled and |
| 850 | * unhidden. |
| 851 | */ |
Thomas Hellstrom | 3458390 | 2015-03-05 02:33:24 -0800 | [diff] [blame] | 852 | ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM, |
| 853 | (dev_priv->vram_size >> PAGE_SHIFT)); |
| 854 | if (unlikely(ret != 0)) { |
| 855 | DRM_ERROR("Failed initializing memory manager for VRAM.\n"); |
| 856 | goto out_no_vram; |
| 857 | } |
Thomas Hellstrom | 153b3d5 | 2015-06-25 10:47:43 -0700 | [diff] [blame] | 858 | dev_priv->bdev.man[TTM_PL_VRAM].use_type = false; |
Thomas Hellstrom | 3458390 | 2015-03-05 02:33:24 -0800 | [diff] [blame] | 859 | |
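/*
 * Set up the optional TTM memory types: GMR (guest memory regions) and,
 * on guest-backed-object capable devices, MOB memory. Without MOBs,
 * 3D support is disabled.
 */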
| 860 | dev_priv->has_gmr = true; |
| 861 | if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) || |
| 862 | refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR, |
| 863 | VMW_PL_GMR) != 0) { |
| 864 | DRM_INFO("No GMR memory available. " |
| 865 | "Graphics memory resources are very limited.\n"); |
| 866 | dev_priv->has_gmr = false; |
| 867 | } |
| 868 | |
| 869 | if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) { |
| 870 | dev_priv->has_mob = true; |
| 871 | if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB, |
| 872 | VMW_PL_MOB) != 0) { |
| 873 | DRM_INFO("No MOB memory available. " |
| 874 | "3D will be disabled.\n"); |
| 875 | dev_priv->has_mob = false; |
| 876 | } |
| 877 | } |
| 878 | |
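/*
 * Probe for DX context support: write the capability index to
 * SVGA_REG_DEV_CAP and read the result back from the same register.
 */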
Thomas Hellstrom | d80efd5 | 2015-08-10 10:39:35 -0700 | [diff] [blame] | 879 | if (dev_priv->has_mob) { |
| 880 | spin_lock(&dev_priv->cap_lock); |
Deepak Rawat | dc75e73 | 2018-06-13 13:53:28 -0700 | [diff] [blame] | 881 | vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_DXCONTEXT); |
Thomas Hellstrom | d80efd5 | 2015-08-10 10:39:35 -0700 | [diff] [blame] | 882 | dev_priv->has_dx = !!vmw_read(dev_priv, SVGA_REG_DEV_CAP); |
| 883 | spin_unlock(&dev_priv->cap_lock); |
| 884 | } |
Jakob Bornecrantz | 56d1c78 | 2011-10-04 20:13:22 +0200 | [diff] [blame] | 885 | |
Thomas Hellstrom | fd56746 | 2018-12-12 11:52:08 +0100 | [diff] [blame] | 886 | vmw_validation_mem_init_ttm(dev_priv, VMWGFX_VALIDATION_MEM_GRAN); |
Thomas Hellstrom | 7a1c2f6 | 2010-10-01 10:21:49 +0200 | [diff] [blame] | 887 | ret = vmw_kms_init(dev_priv); |
| 888 | if (unlikely(ret != 0)) |
| 889 | goto out_no_kms; |
Thomas Hellstrom | f2d12b8 | 2010-02-15 14:45:22 +0000 | [diff] [blame] | 890 | vmw_overlay_init(dev_priv); |
Jakob Bornecrantz | 56d1c78 | 2011-10-04 20:13:22 +0200 | [diff] [blame] | 891 | |
Thomas Hellstrom | 153b3d5 | 2015-06-25 10:47:43 -0700 | [diff] [blame] | 892 | ret = vmw_request_device(dev_priv); |
| 893 | if (ret) |
| 894 | goto out_no_fifo; |
| 895 | |
Deepak Rawat | 30aeee6 | 2018-06-20 13:52:32 -0700 | [diff] [blame] | 896 | if (dev_priv->has_dx) { |
| 897 | /* |
| 898 | * SVGA_CAP2_DX2 (DefineGBSurface_v3) is needed for SM4_1 |
| 899 | * support.
| 900 | */ |
| 901 | if ((dev_priv->capabilities2 & SVGA_CAP2_DX2) != 0) { |
| 902 | vmw_write(dev_priv, SVGA_REG_DEV_CAP, |
| 903 | SVGA3D_DEVCAP_SM41); |
| 904 | dev_priv->has_sm4_1 = vmw_read(dev_priv, |
| 905 | SVGA_REG_DEV_CAP); |
| 906 | } |
| 907 | } |
| 908 | |
Thomas Hellstrom | d80efd5 | 2015-08-10 10:39:35 -0700 | [diff] [blame] | 909 | DRM_INFO("DX: %s\n", dev_priv->has_dx ? "yes." : "no."); |
Deepak Rawat | 30aeee6 | 2018-06-20 13:52:32 -0700 | [diff] [blame] | 910 | DRM_INFO("Atomic: %s\n", (dev->driver->driver_features & DRIVER_ATOMIC) |
| 911 | ? "yes." : "no."); |
| 912 | DRM_INFO("SM4_1: %s\n", dev_priv->has_sm4_1 ? "yes." : "no."); |
Thomas Hellstrom | d80efd5 | 2015-08-10 10:39:35 -0700 | [diff] [blame] | 913 | |
Sinclair Yeh | f921791 | 2016-04-27 19:11:18 -0700 | [diff] [blame] | 914 | snprintf(host_log, sizeof(host_log), "vmwgfx: %s-%s", |
| 915 | VMWGFX_REPO, VMWGFX_GIT_VERSION); |
| 916 | vmw_host_log(host_log); |
| 917 | |
| 918 | memset(host_log, 0, sizeof(host_log)); |
| 919 | snprintf(host_log, sizeof(host_log), "vmwgfx: Module Version: %d.%d.%d", |
| 920 | VMWGFX_DRIVER_MAJOR, VMWGFX_DRIVER_MINOR, |
| 921 | VMWGFX_DRIVER_PATCHLEVEL); |
| 922 | vmw_host_log(host_log); |
| 923 | |
Thomas Hellstrom | 30c78bb | 2010-10-01 10:21:48 +0200 | [diff] [blame] | 924 | if (dev_priv->enable_fb) { |
Thomas Hellstrom | 153b3d5 | 2015-06-25 10:47:43 -0700 | [diff] [blame] | 925 | vmw_fifo_resource_inc(dev_priv); |
| 926 | vmw_svga_enable(dev_priv); |
Thomas Hellstrom | 30c78bb | 2010-10-01 10:21:48 +0200 | [diff] [blame] | 927 | vmw_fb_init(dev_priv); |
Thomas Hellstrom | 7a1c2f6 | 2010-10-01 10:21:49 +0200 | [diff] [blame] | 928 | } |
| 929 | |
Thomas Hellstrom | d9f36a0 | 2010-01-13 22:28:43 +0100 | [diff] [blame] | 930 | dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier; |
| 931 | register_pm_notifier(&dev_priv->pm_nb); |
| 932 | |
Jakob Bornecrantz | fb1d973 | 2009-12-10 00:19:58 +0000 | [diff] [blame] | 933 | return 0; |
| 934 | |
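/* Error paths: unwind the initialization above in reverse order. */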
Thomas Hellstrom | 506ff75 | 2012-11-09 12:26:14 +0000 | [diff] [blame] | 935 | out_no_fifo: |
Jakob Bornecrantz | 56d1c78 | 2011-10-04 20:13:22 +0200 | [diff] [blame] | 936 | vmw_overlay_close(dev_priv); |
| 937 | vmw_kms_close(dev_priv); |
| 938 | out_no_kms: |
Thomas Hellstrom | 3458390 | 2015-03-05 02:33:24 -0800 | [diff] [blame] | 939 | if (dev_priv->has_mob) |
| 940 | (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB); |
| 941 | if (dev_priv->has_gmr) |
| 942 | (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR); |
| 943 | (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM); |
| 944 | out_no_vram: |
Thomas Hellstrom | 153b3d5 | 2015-06-25 10:47:43 -0700 | [diff] [blame] | 945 | (void)ttm_bo_device_release(&dev_priv->bdev); |
| 946 | out_no_bdev: |
Thomas Hellstrom | ae2a104 | 2011-09-01 20:18:44 +0000 | [diff] [blame] | 947 | vmw_fence_manager_takedown(dev_priv->fman); |
| 948 | out_no_fman: |
Thomas Hellstrom | 506ff75 | 2012-11-09 12:26:14 +0000 | [diff] [blame] | 949 | if (dev_priv->capabilities & SVGA_CAP_IRQMASK) |
Thomas Hellstrom | e300173 | 2017-08-24 08:06:27 +0200 | [diff] [blame] | 950 | vmw_irq_uninstall(dev_priv->dev); |
Thomas Hellstrom | 506ff75 | 2012-11-09 12:26:14 +0000 | [diff] [blame] | 951 | out_no_irq: |
Thomas Hellstrom | 30c78bb | 2010-10-01 10:21:48 +0200 | [diff] [blame] | 952 | if (dev_priv->stealth) |
| 953 | pci_release_region(dev->pdev, 2); |
| 954 | else |
| 955 | pci_release_regions(dev->pdev); |
Jakob Bornecrantz | fb1d973 | 2009-12-10 00:19:58 +0000 | [diff] [blame] | 956 | out_no_device: |
Jakob Bornecrantz | fb1d973 | 2009-12-10 00:19:58 +0000 | [diff] [blame] | 957 | ttm_object_device_release(&dev_priv->tdev); |
| 958 | out_err4: |
Thomas Hellstrom | b76ff5e | 2015-10-28 10:44:04 +0100 | [diff] [blame] | 959 | memunmap(dev_priv->mmio_virt); |
Jakob Bornecrantz | fb1d973 | 2009-12-10 00:19:58 +0000 | [diff] [blame] | 960 | out_err0: |
Thomas Hellstrom | c0951b7 | 2012-11-20 12:19:35 +0000 | [diff] [blame] | 961 | for (i = vmw_res_context; i < vmw_res_max; ++i) |
| 962 | idr_destroy(&dev_priv->res_idr[i]); |
| 963 | |
Thomas Hellstrom | d80efd5 | 2015-08-10 10:39:35 -0700 | [diff] [blame] | 964 | if (dev_priv->ctx.staged_bindings) |
| 965 | vmw_binding_state_free(dev_priv->ctx.staged_bindings); |
Jakob Bornecrantz | fb1d973 | 2009-12-10 00:19:58 +0000 | [diff] [blame] | 966 | kfree(dev_priv); |
| 967 | return ret; |
| 968 | } |
| 969 | |
Gabriel Krisman Bertazi | 11b3c20 | 2017-01-06 15:57:31 -0200 | [diff] [blame] | 970 | static void vmw_driver_unload(struct drm_device *dev) |
Jakob Bornecrantz | fb1d973 | 2009-12-10 00:19:58 +0000 | [diff] [blame] | 971 | { |
| 972 | struct vmw_private *dev_priv = vmw_priv(dev); |
Thomas Hellstrom | c0951b7 | 2012-11-20 12:19:35 +0000 | [diff] [blame] | 973 | enum vmw_res_type i; |
Jakob Bornecrantz | fb1d973 | 2009-12-10 00:19:58 +0000 | [diff] [blame] | 974 | |
Thomas Hellstrom | d9f36a0 | 2010-01-13 22:28:43 +0100 | [diff] [blame] | 975 | unregister_pm_notifier(&dev_priv->pm_nb); |
| 976 | |
Thomas Hellstrom | c0951b7 | 2012-11-20 12:19:35 +0000 | [diff] [blame] | 977 | if (dev_priv->ctx.res_ht_initialized) |
| 978 | drm_ht_remove(&dev_priv->ctx.res_ht); |
Markus Elfring | a3a1a66 | 2014-11-19 17:50:19 +0100 | [diff] [blame] | 979 | vfree(dev_priv->ctx.cmd_bounce); |
Thomas Hellstrom | 30c78bb | 2010-10-01 10:21:48 +0200 | [diff] [blame] | 980 | if (dev_priv->enable_fb) { |
Sinclair Yeh | 05c9501 | 2015-08-11 22:53:39 -0700 | [diff] [blame] | 981 | vmw_fb_off(dev_priv); |
Thomas Hellstrom | 30c78bb | 2010-10-01 10:21:48 +0200 | [diff] [blame] | 982 | vmw_fb_close(dev_priv); |
Thomas Hellstrom | 153b3d5 | 2015-06-25 10:47:43 -0700 | [diff] [blame] | 983 | vmw_fifo_resource_dec(dev_priv); |
| 984 | vmw_svga_disable(dev_priv); |
Thomas Hellstrom | 30c78bb | 2010-10-01 10:21:48 +0200 | [diff] [blame] | 985 | } |
Thomas Hellstrom | 153b3d5 | 2015-06-25 10:47:43 -0700 | [diff] [blame] | 986 | |
Thomas Hellstrom | f2d12b8 | 2010-02-15 14:45:22 +0000 | [diff] [blame] | 987 | vmw_kms_close(dev_priv); |
| 988 | vmw_overlay_close(dev_priv); |
Thomas Hellstrom | 3458390 | 2015-03-05 02:33:24 -0800 | [diff] [blame] | 989 | |
Thomas Hellstrom | 3458390 | 2015-03-05 02:33:24 -0800 | [diff] [blame] | 990 | if (dev_priv->has_gmr) |
| 991 | (void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR); |
| 992 | (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM); |
| 993 | |
Thomas Hellstrom | 153b3d5 | 2015-06-25 10:47:43 -0700 | [diff] [blame] | 994 | vmw_release_device_early(dev_priv); |
| 995 | if (dev_priv->has_mob) |
| 996 | (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB); |
| 997 | (void) ttm_bo_device_release(&dev_priv->bdev); |
Gerd Hoffmann | 293f86b | 2019-09-05 09:05:08 +0200 | [diff] [blame] | 998 | drm_vma_offset_manager_destroy(&dev_priv->vma_manager); |
Thomas Hellstrom | 153b3d5 | 2015-06-25 10:47:43 -0700 | [diff] [blame] | 999 | vmw_release_device_late(dev_priv); |
Thomas Hellstrom | ae2a104 | 2011-09-01 20:18:44 +0000 | [diff] [blame] | 1000 | vmw_fence_manager_takedown(dev_priv->fman); |
Thomas Hellstrom | 506ff75 | 2012-11-09 12:26:14 +0000 | [diff] [blame] | 1001 | if (dev_priv->capabilities & SVGA_CAP_IRQMASK) |
Thomas Hellstrom | e300173 | 2017-08-24 08:06:27 +0200 | [diff] [blame] | 1002 | vmw_irq_uninstall(dev_priv->dev); |
Thomas Hellstrom | f2d12b8 | 2010-02-15 14:45:22 +0000 | [diff] [blame] | 1003 | if (dev_priv->stealth) |
Jakob Bornecrantz | fb1d973 | 2009-12-10 00:19:58 +0000 | [diff] [blame] | 1004 | pci_release_region(dev->pdev, 2); |
Thomas Hellstrom | f2d12b8 | 2010-02-15 14:45:22 +0000 | [diff] [blame] | 1005 | else |
| 1006 | pci_release_regions(dev->pdev); |
| 1007 | |
Jakob Bornecrantz | fb1d973 | 2009-12-10 00:19:58 +0000 | [diff] [blame] | 1008 | ttm_object_device_release(&dev_priv->tdev); |
Thomas Hellstrom | b76ff5e | 2015-10-28 10:44:04 +0100 | [diff] [blame] | 1009 | memunmap(dev_priv->mmio_virt); |
Thomas Hellstrom | d80efd5 | 2015-08-10 10:39:35 -0700 | [diff] [blame] | 1010 | if (dev_priv->ctx.staged_bindings) |
| 1011 | vmw_binding_state_free(dev_priv->ctx.staged_bindings); |
Thomas Hellstrom | c0951b7 | 2012-11-20 12:19:35 +0000 | [diff] [blame] | 1012 | |
| 1013 | for (i = vmw_res_context; i < vmw_res_max; ++i) |
| 1014 | idr_destroy(&dev_priv->res_idr[i]); |
Jakob Bornecrantz | fb1d973 | 2009-12-10 00:19:58 +0000 | [diff] [blame] | 1015 | |
| 1016 | kfree(dev_priv); |
Jakob Bornecrantz | fb1d973 | 2009-12-10 00:19:58 +0000 | [diff] [blame] | 1017 | } |
| 1018 | |
| 1019 | static void vmw_postclose(struct drm_device *dev, |
| 1020 | struct drm_file *file_priv) |
| 1021 | { |
Thomas Hellstrom | 9c84aeb | 2019-05-28 08:08:55 +0200 | [diff] [blame] | 1022 | struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv); |
Thomas Hellstrom | c424985 | 2013-10-09 01:42:51 -0700 | [diff] [blame] | 1023 | |
| 1024 | ttm_object_file_release(&vmw_fp->tfile); |
Jakob Bornecrantz | fb1d973 | 2009-12-10 00:19:58 +0000 | [diff] [blame] | 1025 | kfree(vmw_fp); |
| 1026 | } |
| 1027 | |
| 1028 | static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv) |
| 1029 | { |
| 1030 | struct vmw_private *dev_priv = vmw_priv(dev); |
| 1031 | struct vmw_fpriv *vmw_fp; |
| 1032 | int ret = -ENOMEM; |
| 1033 | |
| 1034 | vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL); |
Ravikant B Sharma | 1a4adb0 | 2016-11-08 17:30:31 +0530 | [diff] [blame] | 1035 | if (unlikely(!vmw_fp)) |
Jakob Bornecrantz | fb1d973 | 2009-12-10 00:19:58 +0000 | [diff] [blame] | 1036 | return ret; |
| 1037 | |
| 1038 | vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10); |
| 1039 | if (unlikely(vmw_fp->tfile == NULL)) |
| 1040 | goto out_no_tfile; |
| 1041 | |
| 1042 | file_priv->driver_priv = vmw_fp; |
Jakob Bornecrantz | fb1d973 | 2009-12-10 00:19:58 +0000 | [diff] [blame] | 1043 | |
| 1044 | return 0; |
| 1045 | |
| 1046 | out_no_tfile: |
| 1047 | kfree(vmw_fp); |
| 1048 | return ret; |
| 1049 | } |
| 1050 | |
Thomas Hellstrom | 64190bd | 2014-02-27 12:56:08 +0100 | [diff] [blame] | 1051 | static long vmw_generic_ioctl(struct file *filp, unsigned int cmd, |
| 1052 | unsigned long arg, |
| 1053 | long (*ioctl_func)(struct file *, unsigned int, |
| 1054 | unsigned long)) |
Jakob Bornecrantz | fb1d973 | 2009-12-10 00:19:58 +0000 | [diff] [blame] | 1055 | { |
| 1056 | struct drm_file *file_priv = filp->private_data; |
| 1057 | struct drm_device *dev = file_priv->minor->dev; |
| 1058 | unsigned int nr = DRM_IOCTL_NR(cmd); |
Thomas Hellstrom | 64190bd | 2014-02-27 12:56:08 +0100 | [diff] [blame] | 1059 | unsigned int flags; |
Jakob Bornecrantz | fb1d973 | 2009-12-10 00:19:58 +0000 | [diff] [blame] | 1060 | |
| 1061 | /* |
Thomas Hellstrom | e1f7800 | 2009-12-08 12:57:51 +0100 | [diff] [blame] | 1062 | * Do extra checking on driver private ioctls. |
Jakob Bornecrantz | fb1d973 | 2009-12-10 00:19:58 +0000 | [diff] [blame] | 1063 | */ |
| 1064 | |
| 1065 | if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END) |
| 1066 | && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) { |
Rob Clark | baa7094 | 2013-08-02 13:27:49 -0400 | [diff] [blame] | 1067 | const struct drm_ioctl_desc *ioctl = |
Thomas Hellstrom | 64190bd | 2014-02-27 12:56:08 +0100 | [diff] [blame] | 1068 | &vmw_ioctls[nr - DRM_COMMAND_BASE]; |
Jakob Bornecrantz | fb1d973 | 2009-12-10 00:19:58 +0000 | [diff] [blame] | 1069 | |
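/*
 * EXECBUF is dispatched directly and skips the encoding check below;
 * UPDATE_LAYOUT is only allowed for the current master or CAP_SYS_ADMIN.
 */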
Thomas Hellstrom | d80efd5 | 2015-08-10 10:39:35 -0700 | [diff] [blame] | 1070 | if (nr == DRM_COMMAND_BASE + DRM_VMW_EXECBUF) { |
Emil Velikov | cbfbe47 | 2019-05-22 17:41:17 +0100 | [diff] [blame] | 1071 | return ioctl_func(filp, cmd, arg); |
Thomas Hellstrom | 31788ca | 2017-02-21 17:42:27 +0700 | [diff] [blame] | 1072 | } else if (nr == DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT) { |
| 1073 | if (!drm_is_current_master(file_priv) && |
| 1074 | !capable(CAP_SYS_ADMIN)) |
| 1075 | return -EACCES; |
Jakob Bornecrantz | fb1d973 | 2009-12-10 00:19:58 +0000 | [diff] [blame] | 1076 | } |
Thomas Hellstrom | d80efd5 | 2015-08-10 10:39:35 -0700 | [diff] [blame] | 1077 | |
| 1078 | if (unlikely(ioctl->cmd != cmd)) |
| 1079 | goto out_io_encoding; |
| 1080 | |
Thomas Hellstrom | 64190bd | 2014-02-27 12:56:08 +0100 | [diff] [blame] | 1081 | flags = ioctl->flags; |
| 1082 | } else if (!drm_ioctl_flags(nr, &flags)) |
| 1083 | return -EINVAL; |
| 1084 | |
Thomas Hellstrom | 9c84aeb | 2019-05-28 08:08:55 +0200 | [diff] [blame] | 1085 | return ioctl_func(filp, cmd, arg); |
Thomas Hellstrom | d80efd5 | 2015-08-10 10:39:35 -0700 | [diff] [blame] | 1086 | |
| 1087 | out_io_encoding: |
| 1088 | DRM_ERROR("Invalid command format, ioctl %d\n", |
| 1089 | nr - DRM_COMMAND_BASE); |
| 1090 | |
| 1091 | return -EINVAL; |
Jakob Bornecrantz | fb1d973 | 2009-12-10 00:19:58 +0000 | [diff] [blame] | 1092 | } |
| 1093 | |
Thomas Hellstrom | 64190bd | 2014-02-27 12:56:08 +0100 | [diff] [blame] | 1094 | static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd, |
| 1095 | unsigned long arg) |
| 1096 | { |
| 1097 | return vmw_generic_ioctl(filp, cmd, arg, &drm_ioctl); |
| 1098 | } |
| 1099 | |
| 1100 | #ifdef CONFIG_COMPAT |
| 1101 | static long vmw_compat_ioctl(struct file *filp, unsigned int cmd, |
| 1102 | unsigned long arg) |
| 1103 | { |
| 1104 | return vmw_generic_ioctl(filp, cmd, arg, &drm_compat_ioctl); |
| 1105 | } |
| 1106 | #endif |
| 1107 | |
Jakob Bornecrantz | fb1d973 | 2009-12-10 00:19:58 +0000 | [diff] [blame] | 1108 | static int vmw_master_set(struct drm_device *dev, |
| 1109 | struct drm_file *file_priv, |
| 1110 | bool from_open) |
| 1111 | { |
Thomas Hellstrom | 63cb444 | 2019-05-07 11:07:53 +0200 | [diff] [blame] | 1112 | /* |
| 1113 | * Inform a new master that the layout may have changed while |
| 1114 | * it was gone. |
| 1115 | */ |
| 1116 | if (!from_open) |
| 1117 | drm_sysfs_hotplug_event(dev); |
Jakob Bornecrantz | fb1d973 | 2009-12-10 00:19:58 +0000 | [diff] [blame] | 1118 | |
| 1119 | return 0; |
Jakob Bornecrantz | fb1d973 | 2009-12-10 00:19:58 +0000 | [diff] [blame] | 1120 | } |
| 1121 | |
| 1122 | static void vmw_master_drop(struct drm_device *dev, |
Daniel Vetter | d6ed682 | 2016-06-21 14:20:38 +0200 | [diff] [blame] | 1123 | struct drm_file *file_priv) |
Jakob Bornecrantz | fb1d973 | 2009-12-10 00:19:58 +0000 | [diff] [blame] | 1124 | { |
| 1125 | struct vmw_private *dev_priv = vmw_priv(dev); |
Jakob Bornecrantz | fb1d973 | 2009-12-10 00:19:58 +0000 | [diff] [blame] | 1126 | |
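/*
 * Clear legacy cursor hotspots and, unless the fb layer keeps the device
 * in use, hide the SVGA device again.
 */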
Thomas Hellstrom | 8fbf9d9 | 2015-11-26 19:45:16 +0100 | [diff] [blame] | 1127 | vmw_kms_legacy_hotspot_clear(dev_priv); |
Thomas Hellstrom | 153b3d5 | 2015-06-25 10:47:43 -0700 | [diff] [blame] | 1128 | if (!dev_priv->enable_fb) |
| 1129 | vmw_svga_disable(dev_priv); |
Jakob Bornecrantz | fb1d973 | 2009-12-10 00:19:58 +0000 | [diff] [blame] | 1130 | } |
| 1131 | |
Thomas Hellstrom | 153b3d5 | 2015-06-25 10:47:43 -0700 | [diff] [blame] | 1132 | /** |
| 1133 | * __vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM. |
| 1134 | * |
| 1135 | * @dev_priv: Pointer to device private struct. |
| 1136 | * Needs the reservation sem to be held in non-exclusive mode. |
| 1137 | */ |
Thomas Hellstrom | b9eb1a6 | 2015-04-02 02:39:45 -0700 | [diff] [blame] | 1138 | static void __vmw_svga_enable(struct vmw_private *dev_priv) |
Thomas Hellstrom | 153b3d5 | 2015-06-25 10:47:43 -0700 | [diff] [blame] | 1139 | { |
| 1140 | spin_lock(&dev_priv->svga_lock); |
| 1141 | if (!dev_priv->bdev.man[TTM_PL_VRAM].use_type) { |
| 1142 | vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE); |
| 1143 | dev_priv->bdev.man[TTM_PL_VRAM].use_type = true; |
| 1144 | } |
| 1145 | spin_unlock(&dev_priv->svga_lock); |
| 1146 | } |
| 1147 | |
| 1148 | /** |
| 1149 | * vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM. |
| 1150 | * |
| 1151 | * @dev_priv: Pointer to device private struct. |
| 1152 | */ |
| 1153 | void vmw_svga_enable(struct vmw_private *dev_priv) |
| 1154 | { |
Thomas Hellstrom | f08c86c | 2017-01-19 10:57:00 -0800 | [diff] [blame] | 1155 | (void) ttm_read_lock(&dev_priv->reservation_sem, false); |
Thomas Hellstrom | 153b3d5 | 2015-06-25 10:47:43 -0700 | [diff] [blame] | 1156 | __vmw_svga_enable(dev_priv); |
| 1157 | ttm_read_unlock(&dev_priv->reservation_sem); |
| 1158 | } |
| 1159 | |
| 1160 | /** |
| 1161 | * __vmw_svga_disable - Disable SVGA mode and use of VRAM. |
| 1162 | * |
| 1163 | * @dev_priv: Pointer to device private struct. |
| 1164 | * Needs the reservation sem to be held in exclusive mode. |
| 1165 | * Will not empty VRAM. VRAM must be emptied by caller. |
| 1166 | */ |
Thomas Hellstrom | b9eb1a6 | 2015-04-02 02:39:45 -0700 | [diff] [blame] | 1167 | static void __vmw_svga_disable(struct vmw_private *dev_priv) |
Thomas Hellstrom | 153b3d5 | 2015-06-25 10:47:43 -0700 | [diff] [blame] | 1168 | { |
| 1169 | spin_lock(&dev_priv->svga_lock); |
| 1170 | if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) { |
| 1171 | dev_priv->bdev.man[TTM_PL_VRAM].use_type = false; |
| 1172 | vmw_write(dev_priv, SVGA_REG_ENABLE, |
Sinclair Yeh | 8ce75f8 | 2015-07-08 21:20:39 -0700 | [diff] [blame] | 1173 | SVGA_REG_ENABLE_HIDE | |
| 1174 | SVGA_REG_ENABLE_ENABLE); |
Thomas Hellstrom | 153b3d5 | 2015-06-25 10:47:43 -0700 | [diff] [blame] | 1175 | } |
| 1176 | spin_unlock(&dev_priv->svga_lock); |
| 1177 | } |
| 1178 | |
| 1179 | /** |
| 1180 | * vmw_svga_disable - Disable SVGA mode and use of VRAM. Keep the fifo
| 1181 | * running. |
| 1182 | * |
| 1183 | * @dev_priv: Pointer to device private struct. |
| 1184 | * Will empty VRAM. |
| 1185 | */ |
| 1186 | void vmw_svga_disable(struct vmw_private *dev_priv) |
| 1187 | { |
Thomas Hellstrom | 140bcaa | 2018-03-08 10:07:37 +0100 | [diff] [blame] | 1188 | /* |
| 1189 | * Disabling SVGA turns off the device's modesetting capabilities, so
| 1190 | * notify KMS so that it doesn't cache atomic state that is no longer
| 1191 | * valid, for example crtcs that are turned on.
| 1192 | * Strictly speaking we'd want to do this under the SVGA lock (or an SVGA
| 1193 | * mutex), but vmw_kms_lost_device() takes the reservation sem and we'd
| 1194 | * end up with a lock order reversal. Thus, a master may perform a new
| 1195 | * modeset just after we call vmw_kms_lost_device() and race with
| 1196 | * vmw_svga_disable(), but that should at worst leave the atomic KMS state
| 1197 | * inconsistent with the device, causing modesetting problems.
| 1199 | */ |
| 1200 | vmw_kms_lost_device(dev_priv->dev); |
Thomas Hellstrom | 153b3d5 | 2015-06-25 10:47:43 -0700 | [diff] [blame] | 1201 | ttm_write_lock(&dev_priv->reservation_sem, false); |
| 1202 | spin_lock(&dev_priv->svga_lock); |
| 1203 | if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) { |
| 1204 | dev_priv->bdev.man[TTM_PL_VRAM].use_type = false; |
Thomas Hellstrom | 153b3d5 | 2015-06-25 10:47:43 -0700 | [diff] [blame] | 1205 | spin_unlock(&dev_priv->svga_lock); |
| 1206 | if (ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM)) |
| 1207 | DRM_ERROR("Failed evicting VRAM buffers.\n"); |
Sinclair Yeh | 8ce75f8 | 2015-07-08 21:20:39 -0700 | [diff] [blame] | 1208 | vmw_write(dev_priv, SVGA_REG_ENABLE, |
| 1209 | SVGA_REG_ENABLE_HIDE | |
| 1210 | SVGA_REG_ENABLE_ENABLE); |
Thomas Hellstrom | 153b3d5 | 2015-06-25 10:47:43 -0700 | [diff] [blame] | 1211 | } else |
| 1212 | spin_unlock(&dev_priv->svga_lock); |
| 1213 | ttm_write_unlock(&dev_priv->reservation_sem); |
| 1214 | } |
Jakob Bornecrantz | fb1d973 | 2009-12-10 00:19:58 +0000 | [diff] [blame] | 1215 | |
| 1216 | static void vmw_remove(struct pci_dev *pdev) |
| 1217 | { |
| 1218 | struct drm_device *dev = pci_get_drvdata(pdev); |
| 1219 | |
Thomas Zimmermann | 36891da | 2019-12-10 13:43:22 +0100 | [diff] [blame] | 1220 | drm_dev_unregister(dev); |
| 1221 | vmw_driver_unload(dev); |
| 1222 | drm_dev_put(dev); |
Thomas Hellstrom | fd3e4d6 | 2015-03-10 11:07:40 -0700 | [diff] [blame] | 1223 | pci_disable_device(pdev); |
Jakob Bornecrantz | fb1d973 | 2009-12-10 00:19:58 +0000 | [diff] [blame] | 1224 | } |
| 1225 | |
Thomas Hellstrom | d9f36a0 | 2010-01-13 22:28:43 +0100 | [diff] [blame] | 1226 | static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val, |
| 1227 | void *ptr) |
| 1228 | { |
| 1229 | struct vmw_private *dev_priv = |
| 1230 | container_of(nb, struct vmw_private, pm_nb); |
Thomas Hellstrom | d9f36a0 | 2010-01-13 22:28:43 +0100 | [diff] [blame] | 1231 | |
| 1232 | switch (val) { |
| 1233 | case PM_HIBERNATION_PREPARE: |
Thomas Hellstrom | 153b3d5 | 2015-06-25 10:47:43 -0700 | [diff] [blame] | 1234 | /* |
Thomas Hellstrom | c3b9b16 | 2018-03-22 10:26:37 +0100 | [diff] [blame] | 1235 | * Take the reservation sem in write mode, which will make sure |
| 1236 | * there are no other processes holding a buffer object |
| 1237 | * reservation, meaning we should be able to evict all buffer |
| 1238 | * objects if needed. |
| 1239 | * Once user-space processes have been frozen, we can release |
| 1240 | * the lock again. |
Thomas Hellstrom | d9f36a0 | 2010-01-13 22:28:43 +0100 | [diff] [blame] | 1241 | */ |
Thomas Hellstrom | c3b9b16 | 2018-03-22 10:26:37 +0100 | [diff] [blame] | 1242 | ttm_suspend_lock(&dev_priv->reservation_sem); |
| 1243 | dev_priv->suspend_locked = true; |
Thomas Hellstrom | d9f36a0 | 2010-01-13 22:28:43 +0100 | [diff] [blame] | 1244 | break; |
| 1245 | case PM_POST_HIBERNATION: |
Thomas Hellstrom | 094e0fa | 2010-10-05 12:43:00 +0200 | [diff] [blame] | 1246 | case PM_POST_RESTORE: |
Thomas Hellstrom | c3b9b16 | 2018-03-22 10:26:37 +0100 | [diff] [blame] | 1247 | if (READ_ONCE(dev_priv->suspend_locked)) { |
| 1248 | dev_priv->suspend_locked = false; |
| 1249 | ttm_suspend_unlock(&dev_priv->reservation_sem); |
| 1250 | } |
Thomas Hellstrom | d9f36a0 | 2010-01-13 22:28:43 +0100 | [diff] [blame] | 1251 | break; |
Thomas Hellstrom | d9f36a0 | 2010-01-13 22:28:43 +0100 | [diff] [blame] | 1252 | default: |
| 1253 | break; |
| 1254 | } |
| 1255 | return 0; |
| 1256 | } |
| 1257 | |
Thomas Hellstrom | 7fbd721 | 2010-10-05 12:43:01 +0200 | [diff] [blame] | 1258 | static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state) |
Thomas Hellstrom | d9f36a0 | 2010-01-13 22:28:43 +0100 | [diff] [blame] | 1259 | { |
Thomas Hellstrom | 094e0fa | 2010-10-05 12:43:00 +0200 | [diff] [blame] | 1260 | struct drm_device *dev = pci_get_drvdata(pdev); |
| 1261 | struct vmw_private *dev_priv = vmw_priv(dev); |
| 1262 | |
Thomas Hellstrom | 153b3d5 | 2015-06-25 10:47:43 -0700 | [diff] [blame] | 1263 | if (dev_priv->refuse_hibernation) |
Thomas Hellstrom | 094e0fa | 2010-10-05 12:43:00 +0200 | [diff] [blame] | 1264 | return -EBUSY; |
Thomas Hellstrom | 094e0fa | 2010-10-05 12:43:00 +0200 | [diff] [blame] | 1265 | |
Thomas Hellstrom | d9f36a0 | 2010-01-13 22:28:43 +0100 | [diff] [blame] | 1266 | pci_save_state(pdev); |
| 1267 | pci_disable_device(pdev); |
| 1268 | pci_set_power_state(pdev, PCI_D3hot); |
| 1269 | return 0; |
| 1270 | } |
| 1271 | |
Thomas Hellstrom | 7fbd721 | 2010-10-05 12:43:01 +0200 | [diff] [blame] | 1272 | static int vmw_pci_resume(struct pci_dev *pdev) |
Thomas Hellstrom | d9f36a0 | 2010-01-13 22:28:43 +0100 | [diff] [blame] | 1273 | { |
| 1274 | pci_set_power_state(pdev, PCI_D0); |
| 1275 | pci_restore_state(pdev); |
| 1276 | return pci_enable_device(pdev); |
| 1277 | } |
| 1278 | |
Thomas Hellstrom | 7fbd721 | 2010-10-05 12:43:01 +0200 | [diff] [blame] | 1279 | static int vmw_pm_suspend(struct device *kdev) |
| 1280 | { |
| 1281 | struct pci_dev *pdev = to_pci_dev(kdev); |
| 1282 | struct pm_message dummy; |
| 1283 | |
| 1284 | dummy.event = 0; |
| 1285 | |
| 1286 | return vmw_pci_suspend(pdev, dummy); |
| 1287 | } |
| 1288 | |
| 1289 | static int vmw_pm_resume(struct device *kdev) |
| 1290 | { |
| 1291 | struct pci_dev *pdev = to_pci_dev(kdev); |
| 1292 | |
| 1293 | return vmw_pci_resume(pdev); |
| 1294 | } |
| 1295 | |
Thomas Hellstrom | 153b3d5 | 2015-06-25 10:47:43 -0700 | [diff] [blame] | 1296 | static int vmw_pm_freeze(struct device *kdev) |
Thomas Hellstrom | 7fbd721 | 2010-10-05 12:43:01 +0200 | [diff] [blame] | 1297 | { |
| 1298 | struct pci_dev *pdev = to_pci_dev(kdev); |
| 1299 | struct drm_device *dev = pci_get_drvdata(pdev); |
| 1300 | struct vmw_private *dev_priv = vmw_priv(dev); |
Thomas Hellstrom | c3b9b16 | 2018-03-22 10:26:37 +0100 | [diff] [blame] | 1301 | int ret; |
Thomas Hellstrom | 7fbd721 | 2010-10-05 12:43:01 +0200 | [diff] [blame] | 1302 | |
Thomas Hellstrom | c3b9b16 | 2018-03-22 10:26:37 +0100 | [diff] [blame] | 1303 | /* |
| 1304 | * Unlock for vmw_kms_suspend. |
| 1305 | * No user-space processes should be running now. |
| 1306 | */ |
| 1307 | ttm_suspend_unlock(&dev_priv->reservation_sem); |
| 1308 | ret = vmw_kms_suspend(dev_priv->dev); |
| 1309 | if (ret) { |
| 1310 | ttm_suspend_lock(&dev_priv->reservation_sem); |
| 1311 | DRM_ERROR("Failed to freeze modesetting.\n"); |
| 1312 | return ret; |
| 1313 | } |
Thomas Hellstrom | 7fbd721 | 2010-10-05 12:43:01 +0200 | [diff] [blame] | 1314 | if (dev_priv->enable_fb) |
Thomas Hellstrom | c3b9b16 | 2018-03-22 10:26:37 +0100 | [diff] [blame] | 1315 | vmw_fb_off(dev_priv); |
Thomas Hellstrom | 7fbd721 | 2010-10-05 12:43:01 +0200 | [diff] [blame] | 1316 | |
Thomas Hellstrom | c3b9b16 | 2018-03-22 10:26:37 +0100 | [diff] [blame] | 1317 | ttm_suspend_lock(&dev_priv->reservation_sem); |
| 1318 | vmw_execbuf_release_pinned_bo(dev_priv); |
| 1319 | vmw_resource_evict_all(dev_priv); |
| 1320 | vmw_release_device_early(dev_priv); |
| 1321 | ttm_bo_swapout_all(&dev_priv->bdev); |
| 1322 | if (dev_priv->enable_fb) |
| 1323 | vmw_fifo_resource_dec(dev_priv); |
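/*
 * If FIFO (3D) resources are still in use we cannot hibernate; undo the
 * suspend steps above and return -EBUSY.
 */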
Thomas Hellstrom | 153b3d5 | 2015-06-25 10:47:43 -0700 | [diff] [blame] | 1324 | if (atomic_read(&dev_priv->num_fifo_resources) != 0) { |
| 1325 | DRM_ERROR("Can't hibernate while 3D resources are active.\n"); |
Thomas Hellstrom | 7fbd721 | 2010-10-05 12:43:01 +0200 | [diff] [blame] | 1326 | if (dev_priv->enable_fb) |
Thomas Hellstrom | 153b3d5 | 2015-06-25 10:47:43 -0700 | [diff] [blame] | 1327 | vmw_fifo_resource_inc(dev_priv); |
| 1328 | WARN_ON(vmw_request_device_late(dev_priv)); |
Thomas Hellstrom | c3b9b16 | 2018-03-22 10:26:37 +0100 | [diff] [blame] | 1329 | dev_priv->suspend_locked = false; |
| 1330 | ttm_suspend_unlock(&dev_priv->reservation_sem); |
| 1331 | if (dev_priv->suspend_state) |
| 1332 | vmw_kms_resume(dev); |
| 1333 | if (dev_priv->enable_fb) |
| 1334 | vmw_fb_on(dev_priv); |
Thomas Hellstrom | 7fbd721 | 2010-10-05 12:43:01 +0200 | [diff] [blame] | 1335 | return -EBUSY; |
| 1336 | } |
| 1337 | |
Thomas Hellstrom | c3b9b16 | 2018-03-22 10:26:37 +0100 | [diff] [blame] | 1338 | vmw_fence_fifo_down(dev_priv->fman); |
| 1339 | __vmw_svga_disable(dev_priv); |
Thomas Hellstrom | 153b3d5 | 2015-06-25 10:47:43 -0700 | [diff] [blame] | 1340 | |
| 1341 | vmw_release_device_late(dev_priv); |
Thomas Hellstrom | 7fbd721 | 2010-10-05 12:43:01 +0200 | [diff] [blame] | 1342 | return 0; |
| 1343 | } |
| 1344 | |
Thomas Hellstrom | 153b3d5 | 2015-06-25 10:47:43 -0700 | [diff] [blame] | 1345 | static int vmw_pm_restore(struct device *kdev) |
Thomas Hellstrom | 7fbd721 | 2010-10-05 12:43:01 +0200 | [diff] [blame] | 1346 | { |
| 1347 | struct pci_dev *pdev = to_pci_dev(kdev); |
| 1348 | struct drm_device *dev = pci_get_drvdata(pdev); |
| 1349 | struct vmw_private *dev_priv = vmw_priv(dev); |
Thomas Hellstrom | 153b3d5 | 2015-06-25 10:47:43 -0700 | [diff] [blame] | 1350 | int ret; |
Thomas Hellstrom | 7fbd721 | 2010-10-05 12:43:01 +0200 | [diff] [blame] | 1351 | |
Thomas Hellstrom | 95e8f6a | 2012-11-09 10:05:57 +0100 | [diff] [blame] | 1352 | vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2); |
| 1353 | (void) vmw_read(dev_priv, SVGA_REG_ID); |
Thomas Hellstrom | 95e8f6a | 2012-11-09 10:05:57 +0100 | [diff] [blame] | 1354 | |
Thomas Hellstrom | 7fbd721 | 2010-10-05 12:43:01 +0200 | [diff] [blame] | 1355 | if (dev_priv->enable_fb) |
Thomas Hellstrom | 153b3d5 | 2015-06-25 10:47:43 -0700 | [diff] [blame] | 1356 | vmw_fifo_resource_inc(dev_priv); |
| 1357 | |
| 1358 | ret = vmw_request_device(dev_priv); |
| 1359 | if (ret) |
| 1360 | return ret; |
| 1361 | |
| 1362 | if (dev_priv->enable_fb) |
| 1363 | __vmw_svga_enable(dev_priv); |
Thomas Hellstrom | 7fbd721 | 2010-10-05 12:43:01 +0200 | [diff] [blame] | 1364 | |
Thomas Hellstrom | c3b9b16 | 2018-03-22 10:26:37 +0100 | [diff] [blame] | 1365 | vmw_fence_fifo_up(dev_priv->fman); |
| 1366 | dev_priv->suspend_locked = false; |
| 1367 | ttm_suspend_unlock(&dev_priv->reservation_sem); |
| 1368 | if (dev_priv->suspend_state) |
| 1369 | vmw_kms_resume(dev_priv->dev); |
| 1370 | |
| 1371 | if (dev_priv->enable_fb) |
| 1372 | vmw_fb_on(dev_priv); |
| 1373 | |
Thomas Hellstrom | 153b3d5 | 2015-06-25 10:47:43 -0700 | [diff] [blame] | 1374 | return 0; |
Thomas Hellstrom | 7fbd721 | 2010-10-05 12:43:01 +0200 | [diff] [blame] | 1375 | } |
| 1376 | |
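/*
 * .freeze/.thaw/.restore handle hibernation; .suspend/.resume only save
 * and restore PCI state via the legacy helpers above.
 */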
| 1377 | static const struct dev_pm_ops vmw_pm_ops = { |
Thomas Hellstrom | 153b3d5 | 2015-06-25 10:47:43 -0700 | [diff] [blame] | 1378 | .freeze = vmw_pm_freeze, |
| 1379 | .thaw = vmw_pm_restore, |
| 1380 | .restore = vmw_pm_restore, |
Thomas Hellstrom | 7fbd721 | 2010-10-05 12:43:01 +0200 | [diff] [blame] | 1381 | .suspend = vmw_pm_suspend, |
| 1382 | .resume = vmw_pm_resume, |
| 1383 | }; |
| 1384 | |
Arjan van de Ven | e08e96d | 2011-10-31 07:28:57 -0700 | [diff] [blame] | 1385 | static const struct file_operations vmwgfx_driver_fops = { |
| 1386 | .owner = THIS_MODULE, |
| 1387 | .open = drm_open, |
| 1388 | .release = drm_release, |
| 1389 | .unlocked_ioctl = vmw_unlocked_ioctl, |
| 1390 | .mmap = vmw_mmap, |
| 1391 | .poll = vmw_fops_poll, |
| 1392 | .read = vmw_fops_read, |
Arjan van de Ven | e08e96d | 2011-10-31 07:28:57 -0700 | [diff] [blame] | 1393 | #if defined(CONFIG_COMPAT) |
Thomas Hellstrom | 64190bd | 2014-02-27 12:56:08 +0100 | [diff] [blame] | 1394 | .compat_ioctl = vmw_compat_ioctl, |
Arjan van de Ven | e08e96d | 2011-10-31 07:28:57 -0700 | [diff] [blame] | 1395 | #endif |
| 1396 | .llseek = noop_llseek, |
| 1397 | }; |
| 1398 | |
Jakob Bornecrantz | fb1d973 | 2009-12-10 00:19:58 +0000 | [diff] [blame] | 1399 | static struct drm_driver driver = { |
Daniel Vetter | 1ff4948 | 2019-01-29 11:42:48 +0100 | [diff] [blame] | 1400 | .driver_features = |
Daniel Vetter | 0424fda | 2019-06-17 17:39:24 +0200 | [diff] [blame] | 1401 | DRIVER_MODESET | DRIVER_RENDER | DRIVER_ATOMIC, |
Thomas Hellstrom | 7a1c2f6 | 2010-10-01 10:21:49 +0200 | [diff] [blame] | 1402 | .get_vblank_counter = vmw_get_vblank_counter, |
Jakob Bornecrantz | 1c482ab | 2011-10-17 11:59:45 +0200 | [diff] [blame] | 1403 | .enable_vblank = vmw_enable_vblank, |
| 1404 | .disable_vblank = vmw_disable_vblank, |
Jakob Bornecrantz | fb1d973 | 2009-12-10 00:19:58 +0000 | [diff] [blame] | 1405 | .ioctls = vmw_ioctls, |
Damien Lespiau | f95aeb1 | 2014-06-09 14:39:49 +0100 | [diff] [blame] | 1406 | .num_ioctls = ARRAY_SIZE(vmw_ioctls), |
Jakob Bornecrantz | fb1d973 | 2009-12-10 00:19:58 +0000 | [diff] [blame] | 1407 | .master_set = vmw_master_set, |
| 1408 | .master_drop = vmw_master_drop, |
| 1409 | .open = vmw_driver_open, |
| 1410 | .postclose = vmw_postclose, |
Dave Airlie | 5e1782d | 2012-08-28 01:53:54 +0000 | [diff] [blame] | 1411 | |
| 1412 | .dumb_create = vmw_dumb_create, |
| 1413 | .dumb_map_offset = vmw_dumb_map_offset, |
| 1414 | .dumb_destroy = vmw_dumb_destroy, |
| 1415 | |
Thomas Hellstrom | 69977ff | 2013-11-13 01:50:46 -0800 | [diff] [blame] | 1416 | .prime_fd_to_handle = vmw_prime_fd_to_handle, |
| 1417 | .prime_handle_to_fd = vmw_prime_handle_to_fd, |
| 1418 | |
Arjan van de Ven | e08e96d | 2011-10-31 07:28:57 -0700 | [diff] [blame] | 1419 | .fops = &vmwgfx_driver_fops, |
Jakob Bornecrantz | fb1d973 | 2009-12-10 00:19:58 +0000 | [diff] [blame] | 1420 | .name = VMWGFX_DRIVER_NAME, |
| 1421 | .desc = VMWGFX_DRIVER_DESC, |
| 1422 | .date = VMWGFX_DRIVER_DATE, |
| 1423 | .major = VMWGFX_DRIVER_MAJOR, |
| 1424 | .minor = VMWGFX_DRIVER_MINOR, |
| 1425 | .patchlevel = VMWGFX_DRIVER_PATCHLEVEL |
| 1426 | }; |
| 1427 | |
Dave Airlie | 8410ea3 | 2010-12-15 03:16:38 +1000 | [diff] [blame] | 1428 | static struct pci_driver vmw_pci_driver = { |
| 1429 | .name = VMWGFX_DRIVER_NAME, |
| 1430 | .id_table = vmw_pci_id_list, |
| 1431 | .probe = vmw_probe, |
| 1432 | .remove = vmw_remove, |
| 1433 | .driver = { |
| 1434 | .pm = &vmw_pm_ops |
| 1435 | } |
| 1436 | }; |
| 1437 | |
Jakob Bornecrantz | fb1d973 | 2009-12-10 00:19:58 +0000 | [diff] [blame] | 1438 | static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent) |
| 1439 | { |
Thomas Zimmermann | 36891da | 2019-12-10 13:43:22 +0100 | [diff] [blame] | 1440 | struct drm_device *dev; |
| 1441 | int ret; |
| 1442 | |
| 1443 | ret = pci_enable_device(pdev); |
| 1444 | if (ret) |
| 1445 | return ret; |
| 1446 | |
| 1447 | dev = drm_dev_alloc(&driver, &pdev->dev); |
| 1448 | if (IS_ERR(dev)) { |
| 1449 | ret = PTR_ERR(dev); |
| 1450 | goto err_pci_disable_device; |
| 1451 | } |
| 1452 | |
| 1453 | dev->pdev = pdev; |
| 1454 | pci_set_drvdata(pdev, dev); |
| 1455 | |
| 1456 | ret = vmw_driver_load(dev, ent->driver_data); |
| 1457 | if (ret) |
| 1458 | goto err_drm_dev_put; |
| 1459 | |
| 1460 | ret = drm_dev_register(dev, ent->driver_data); |
| 1461 | if (ret) |
| 1462 | goto err_vmw_driver_unload; |
| 1463 | |
| 1464 | return 0; |
| 1465 | |
| 1466 | err_vmw_driver_unload: |
| 1467 | vmw_driver_unload(dev); |
| 1468 | err_drm_dev_put: |
| 1469 | drm_dev_put(dev); |
| 1470 | err_pci_disable_device: |
| 1471 | pci_disable_device(pdev); |
| 1472 | return ret; |
Jakob Bornecrantz | fb1d973 | 2009-12-10 00:19:58 +0000 | [diff] [blame] | 1473 | } |
| 1474 | |
| 1475 | static int __init vmwgfx_init(void) |
| 1476 | { |
| 1477 | int ret; |
Rob Clark | 96c5d07 | 2014-10-15 15:00:47 -0400 | [diff] [blame] | 1478 | |
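/* Refuse to load when a VGA text console was forced (typically via nomodeset). */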
Rob Clark | 96c5d07 | 2014-10-15 15:00:47 -0400 | [diff] [blame] | 1479 | if (vgacon_text_force()) |
| 1480 | return -EINVAL; |
Rob Clark | 96c5d07 | 2014-10-15 15:00:47 -0400 | [diff] [blame] | 1481 | |
Daniel Vetter | 10631d7 | 2017-05-24 16:51:40 +0200 | [diff] [blame] | 1482 | ret = pci_register_driver(&vmw_pci_driver); |
Jakob Bornecrantz | fb1d973 | 2009-12-10 00:19:58 +0000 | [diff] [blame] | 1483 | if (ret) |
| 1484 | DRM_ERROR("Failed initializing DRM.\n"); |
| 1485 | return ret; |
| 1486 | } |
| 1487 | |
| 1488 | static void __exit vmwgfx_exit(void) |
| 1489 | { |
Daniel Vetter | 10631d7 | 2017-05-24 16:51:40 +0200 | [diff] [blame] | 1490 | pci_unregister_driver(&vmw_pci_driver); |
Jakob Bornecrantz | fb1d973 | 2009-12-10 00:19:58 +0000 | [diff] [blame] | 1491 | } |
| 1492 | |
| 1493 | module_init(vmwgfx_init); |
| 1494 | module_exit(vmwgfx_exit); |
| 1495 | |
| 1496 | MODULE_AUTHOR("VMware Inc. and others"); |
| 1497 | MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device"); |
| 1498 | MODULE_LICENSE("GPL and additional rights"); |
Thomas Hellstrom | 73558ea | 2010-10-05 12:43:07 +0200 | [diff] [blame] | 1499 | MODULE_VERSION(__stringify(VMWGFX_DRIVER_MAJOR) "." |
| 1500 | __stringify(VMWGFX_DRIVER_MINOR) "." |
| 1501 | __stringify(VMWGFX_DRIVER_PATCHLEVEL) "." |
| 1502 | "0"); |