/**************************************************************************
 *
 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include <linux/module.h>

#include <drm/drmP.h>
#include "vmwgfx_drv.h"
#include "vmwgfx_binding.h"
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_object.h>
#include <drm/ttm/ttm_module.h>
#include <linux/dma_remapping.h>
#define VMWGFX_DRIVER_NAME "vmwgfx"
#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
#define VMWGFX_CHIP_SVGAII 0
#define VMW_FB_RESERVATION 0

#define VMW_MIN_INITIAL_WIDTH 800
#define VMW_MIN_INITIAL_HEIGHT 600


/**
 * Fully encoded drm commands. Might move to vmw_drm.h
 */

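/*
 * Note (illustrative): each DRM_IOW/DRM_IOR/DRM_IOWR macro below encodes
 * the transfer direction, argument size and command number into a single
 * ioctl number on the DRM ioctl base ('d'). For example,
 * DRM_IOCTL_VMW_GET_PARAM should expand to roughly
 *
 *	_IOWR('d', DRM_COMMAND_BASE + DRM_VMW_GET_PARAM,
 *	      struct drm_vmw_getparam_arg)
 *
 * (sketch of the macro chain defined in the core DRM headers).
 */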
#define DRM_IOCTL_VMW_GET_PARAM \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM, \
		 struct drm_vmw_getparam_arg)
#define DRM_IOCTL_VMW_ALLOC_DMABUF \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF, \
		 union drm_vmw_alloc_dmabuf_arg)
#define DRM_IOCTL_VMW_UNREF_DMABUF \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_DMABUF, \
		struct drm_vmw_unref_dmabuf_arg)
#define DRM_IOCTL_VMW_CURSOR_BYPASS \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CURSOR_BYPASS, \
		struct drm_vmw_cursor_bypass_arg)

#define DRM_IOCTL_VMW_CONTROL_STREAM \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CONTROL_STREAM, \
		struct drm_vmw_control_stream_arg)
#define DRM_IOCTL_VMW_CLAIM_STREAM \
	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CLAIM_STREAM, \
		struct drm_vmw_stream_arg)
#define DRM_IOCTL_VMW_UNREF_STREAM \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_STREAM, \
		struct drm_vmw_stream_arg)

#define DRM_IOCTL_VMW_CREATE_CONTEXT \
	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CREATE_CONTEXT, \
		struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_UNREF_CONTEXT \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_CONTEXT, \
		struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_CREATE_SURFACE \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SURFACE, \
		 union drm_vmw_surface_create_arg)
#define DRM_IOCTL_VMW_UNREF_SURFACE \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SURFACE, \
		struct drm_vmw_surface_arg)
#define DRM_IOCTL_VMW_REF_SURFACE \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_REF_SURFACE, \
		 union drm_vmw_surface_reference_arg)
#define DRM_IOCTL_VMW_EXECBUF \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF, \
		struct drm_vmw_execbuf_arg)
#define DRM_IOCTL_VMW_GET_3D_CAP \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_GET_3D_CAP, \
		struct drm_vmw_get_3d_cap_arg)
#define DRM_IOCTL_VMW_FENCE_WAIT \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT, \
		 struct drm_vmw_fence_wait_arg)
#define DRM_IOCTL_VMW_FENCE_SIGNALED \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_SIGNALED, \
		 struct drm_vmw_fence_signaled_arg)
#define DRM_IOCTL_VMW_FENCE_UNREF \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_UNREF, \
		struct drm_vmw_fence_arg)
#define DRM_IOCTL_VMW_FENCE_EVENT \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_EVENT, \
		struct drm_vmw_fence_event_arg)
#define DRM_IOCTL_VMW_PRESENT \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT, \
		struct drm_vmw_present_arg)
#define DRM_IOCTL_VMW_PRESENT_READBACK \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT_READBACK, \
		struct drm_vmw_present_readback_arg)
#define DRM_IOCTL_VMW_UPDATE_LAYOUT \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT, \
		struct drm_vmw_update_layout_arg)
#define DRM_IOCTL_VMW_CREATE_SHADER \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SHADER, \
		 struct drm_vmw_shader_create_arg)
#define DRM_IOCTL_VMW_UNREF_SHADER \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SHADER, \
		struct drm_vmw_shader_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_CREATE \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE, \
		 union drm_vmw_gb_surface_create_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_REF \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF, \
		 union drm_vmw_gb_surface_reference_arg)
#define DRM_IOCTL_VMW_SYNCCPU \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_SYNCCPU, \
		struct drm_vmw_synccpu_arg)
#define DRM_IOCTL_VMW_CREATE_EXTENDED_CONTEXT \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_EXTENDED_CONTEXT, \
		 struct drm_vmw_context_arg)

/**
 * The core DRM version of this macro doesn't account for
 * DRM_COMMAND_BASE.
 */

#define VMW_IOCTL_DEF(ioctl, func, flags) \
	[DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_IOCTL_##ioctl, flags, func}
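
/*
 * For illustration only: VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
 * DRM_AUTH | DRM_RENDER_ALLOW) places a struct drm_ioctl_desc initializer
 * at index (DRM_IOCTL_NR(DRM_IOCTL_VMW_GET_PARAM) - DRM_COMMAND_BASE),
 * i.e. DRM_VMW_GET_PARAM, of the vmw_ioctls[] array below — roughly:
 *
 *	[DRM_VMW_GET_PARAM] = {DRM_IOCTL_VMW_GET_PARAM,
 *			       DRM_AUTH | DRM_RENDER_ALLOW,
 *			       vmw_getparam_ioctl}
 */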

/**
 * Ioctl definitions.
 */

static const struct drm_ioctl_desc vmw_ioctls[] = {
	VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
		      vmw_kms_cursor_bypass_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW),

	VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW),
	VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW),

	VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_EXECBUF, NULL, DRM_AUTH |
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_SIGNALED,
		      vmw_fence_obj_signaled_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_EVENT, vmw_fence_event_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),

	/* These allow direct access to the framebuffers; mark as master only. */
	VMW_IOCTL_DEF(VMW_PRESENT, vmw_present_ioctl,
		      DRM_MASTER | DRM_AUTH),
	VMW_IOCTL_DEF(VMW_PRESENT_READBACK,
		      vmw_present_readback_ioctl,
		      DRM_MASTER | DRM_AUTH),
	VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT,
		      vmw_kms_update_layout_ioctl,
		      DRM_MASTER),
	VMW_IOCTL_DEF(VMW_CREATE_SHADER,
		      vmw_shader_define_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_SHADER,
		      vmw_shader_destroy_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE,
		      vmw_gb_surface_define_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GB_SURFACE_REF,
		      vmw_gb_surface_reference_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_SYNCCPU,
		      vmw_user_dmabuf_synccpu_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_CREATE_EXTENDED_CONTEXT,
		      vmw_extended_context_define_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
};

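/*
 * PCI match table: 0x15ad is VMware's PCI vendor ID and 0x0405 the
 * SVGA II display adapter, matching VMWGFX_CHIP_SVGAII above.
 */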
static struct pci_device_id vmw_pci_id_list[] = {
	{0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII},
	{0, 0, 0}
};
MODULE_DEVICE_TABLE(pci, vmw_pci_id_list);

static int enable_fbdev = IS_ENABLED(CONFIG_DRM_VMWGFX_FBCON);
static int vmw_force_iommu;
static int vmw_restrict_iommu;
static int vmw_force_coherent;
static int vmw_restrict_dma_mask;

static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
static void vmw_master_init(struct vmw_master *);
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
			      void *ptr);

MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
module_param_named(enable_fbdev, enable_fbdev, int, 0600);
MODULE_PARM_DESC(force_dma_api, "Force using the DMA API for TTM pages");
module_param_named(force_dma_api, vmw_force_iommu, int, 0600);
MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages");
module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600);
MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages");
module_param_named(force_coherent, vmw_force_coherent, int, 0600);
MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU");
module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600);
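
/*
 * Usage example (illustrative): the parameters above can be set at module
 * load time, e.g.
 *
 *	modprobe vmwgfx force_coherent=1 restrict_dma_mask=1
 *
 * and, since they are registered with mode 0600, also toggled at runtime
 * through /sys/module/vmwgfx/parameters/.
 */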


static void vmw_print_capabilities(uint32_t capabilities)
{
	DRM_INFO("Capabilities:\n");
	if (capabilities & SVGA_CAP_RECT_COPY)
		DRM_INFO("  Rect copy.\n");
	if (capabilities & SVGA_CAP_CURSOR)
		DRM_INFO("  Cursor.\n");
	if (capabilities & SVGA_CAP_CURSOR_BYPASS)
		DRM_INFO("  Cursor bypass.\n");
	if (capabilities & SVGA_CAP_CURSOR_BYPASS_2)
		DRM_INFO("  Cursor bypass 2.\n");
	if (capabilities & SVGA_CAP_8BIT_EMULATION)
		DRM_INFO("  8bit emulation.\n");
	if (capabilities & SVGA_CAP_ALPHA_CURSOR)
		DRM_INFO("  Alpha cursor.\n");
	if (capabilities & SVGA_CAP_3D)
		DRM_INFO("  3D.\n");
	if (capabilities & SVGA_CAP_EXTENDED_FIFO)
		DRM_INFO("  Extended Fifo.\n");
	if (capabilities & SVGA_CAP_MULTIMON)
		DRM_INFO("  Multimon.\n");
	if (capabilities & SVGA_CAP_PITCHLOCK)
		DRM_INFO("  Pitchlock.\n");
	if (capabilities & SVGA_CAP_IRQMASK)
		DRM_INFO("  Irq mask.\n");
	if (capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)
		DRM_INFO("  Display Topology.\n");
	if (capabilities & SVGA_CAP_GMR)
		DRM_INFO("  GMR.\n");
	if (capabilities & SVGA_CAP_TRACES)
		DRM_INFO("  Traces.\n");
	if (capabilities & SVGA_CAP_GMR2)
		DRM_INFO("  GMR2.\n");
	if (capabilities & SVGA_CAP_SCREEN_OBJECT_2)
		DRM_INFO("  Screen Object 2.\n");
	if (capabilities & SVGA_CAP_COMMAND_BUFFERS)
		DRM_INFO("  Command Buffers.\n");
	if (capabilities & SVGA_CAP_CMD_BUFFERS_2)
		DRM_INFO("  Command Buffers 2.\n");
	if (capabilities & SVGA_CAP_GBOBJECTS)
		DRM_INFO("  Guest Backed Resources.\n");
	if (capabilities & SVGA_CAP_DX)
		DRM_INFO("  DX Features.\n");
}

/**
 * vmw_dummy_query_bo_create - create a bo to hold a dummy query result
 *
 * @dev_priv: A device private structure.
 *
 * This function creates a small buffer object that holds the query
 * result for dummy queries emitted as query barriers.
 * The function will then map the first page and initialize a pending
 * occlusion query result structure. Finally, it will unmap the buffer.
 * No interruptible waits are done within this function.
 *
 * Returns an error if bo creation or initialization fails.
 */
static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
{
	int ret;
	struct vmw_dma_buffer *vbo;
	struct ttm_bo_kmap_obj map;
	volatile SVGA3dQueryResult *result;
	bool dummy;

	/*
	 * Create the vbo as pinned, so that a tryreserve will
	 * immediately succeed. This is because we're the only
	 * user of the bo currently.
	 */
	vbo = kzalloc(sizeof(*vbo), GFP_KERNEL);
	if (!vbo)
		return -ENOMEM;

	ret = vmw_dmabuf_init(dev_priv, vbo, PAGE_SIZE,
			      &vmw_sys_ne_placement, false,
			      &vmw_dmabuf_bo_free);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_reserve(&vbo->base, false, true, false, NULL);
	BUG_ON(ret != 0);
	vmw_bo_pin_reserved(vbo, true);

	ret = ttm_bo_kmap(&vbo->base, 0, 1, &map);
	if (likely(ret == 0)) {
		result = ttm_kmap_obj_virtual(&map, &dummy);
		result->totalSize = sizeof(*result);
		result->state = SVGA3D_QUERYSTATE_PENDING;
		result->result32 = 0xff;
		ttm_bo_kunmap(&map);
	}
	vmw_bo_pin_reserved(vbo, false);
	ttm_bo_unreserve(&vbo->base);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Dummy query buffer map failed.\n");
		vmw_dmabuf_unreference(&vbo);
	} else
		dev_priv->dummy_query_bo = vbo;

	return ret;
}

/**
 * vmw_request_device_late - Perform late device setup
 *
 * @dev_priv: Pointer to device private.
 *
 * This function performs setup of otables and enables large command
 * buffer submission. These tasks are split out to a separate function
 * because it reverts vmw_release_device_early and is intended to be used
 * by an error path in the hibernation code.
 */
static int vmw_request_device_late(struct vmw_private *dev_priv)
{
	int ret;

	if (dev_priv->has_mob) {
		ret = vmw_otables_setup(dev_priv);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Unable to initialize "
				  "guest Memory OBjects.\n");
			return ret;
		}
	}

	if (dev_priv->cman) {
		ret = vmw_cmdbuf_set_pool_size(dev_priv->cman,
					       256*4096, 2*4096);
		if (ret) {
			struct vmw_cmdbuf_man *man = dev_priv->cman;

			dev_priv->cman = NULL;
			vmw_cmdbuf_man_destroy(man);
		}
	}

	return 0;
}

static int vmw_request_device(struct vmw_private *dev_priv)
{
	int ret;

	ret = vmw_fifo_init(dev_priv, &dev_priv->fifo);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Unable to initialize FIFO.\n");
		return ret;
	}
	vmw_fence_fifo_up(dev_priv->fman);
	dev_priv->cman = vmw_cmdbuf_man_create(dev_priv);
	if (IS_ERR(dev_priv->cman)) {
		dev_priv->cman = NULL;
		dev_priv->has_dx = false;
	}

	ret = vmw_request_device_late(dev_priv);
	if (ret)
		goto out_no_mob;

	ret = vmw_dummy_query_bo_create(dev_priv);
	if (unlikely(ret != 0))
		goto out_no_query_bo;

	return 0;

out_no_query_bo:
	if (dev_priv->cman)
		vmw_cmdbuf_remove_pool(dev_priv->cman);
	if (dev_priv->has_mob) {
		(void) ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
		vmw_otables_takedown(dev_priv);
	}
	if (dev_priv->cman)
		vmw_cmdbuf_man_destroy(dev_priv->cman);
out_no_mob:
	vmw_fence_fifo_down(dev_priv->fman);
	vmw_fifo_release(dev_priv, &dev_priv->fifo);
	return ret;
}

/**
 * vmw_release_device_early - Early part of fifo takedown.
 *
 * @dev_priv: Pointer to device private struct.
 *
 * This is the first part of command submission takedown, to be called before
 * buffer management is taken down.
 */
static void vmw_release_device_early(struct vmw_private *dev_priv)
{
	/*
	 * Previous destructions should've released
	 * the pinned bo.
	 */

	BUG_ON(dev_priv->pinned_bo != NULL);

	vmw_dmabuf_unreference(&dev_priv->dummy_query_bo);
	if (dev_priv->cman)
		vmw_cmdbuf_remove_pool(dev_priv->cman);

	if (dev_priv->has_mob) {
		ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
		vmw_otables_takedown(dev_priv);
	}
}

/**
 * vmw_release_device_late - Late part of fifo takedown.
 *
 * @dev_priv: Pointer to device private struct.
 *
 * This is the last part of the command submission takedown, to be called when
 * command submission is no longer needed. It may wait on pending fences.
 */
static void vmw_release_device_late(struct vmw_private *dev_priv)
{
	vmw_fence_fifo_down(dev_priv->fman);
	if (dev_priv->cman)
		vmw_cmdbuf_man_destroy(dev_priv->cman);

	vmw_fifo_release(dev_priv, &dev_priv->fifo);
}
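
/*
 * Note: vmw_request_device() and the two vmw_release_device_*() halves are
 * paired, and vmw_request_device_late() alone reverts
 * vmw_release_device_early(), which is what the hibernation error path
 * relies on (see the kernel-doc comments above).
 */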

/**
 * Sets the initial_[width|height] fields on the given vmw_private.
 *
 * It does so by reading the SVGA_REG_[WIDTH|HEIGHT] registers and
 * clamping the values up to the VMW_MIN_INITIAL_[WIDTH|HEIGHT]
 * minimums. If either value still exceeds the corresponding
 * fb_max_[width|height] field, the values appear to be invalid and
 * both are reset to VMW_MIN_INITIAL_[WIDTH|HEIGHT].
 */
static void vmw_get_initial_size(struct vmw_private *dev_priv)
{
	uint32_t width;
	uint32_t height;

	width = vmw_read(dev_priv, SVGA_REG_WIDTH);
	height = vmw_read(dev_priv, SVGA_REG_HEIGHT);

	width = max_t(uint32_t, width, VMW_MIN_INITIAL_WIDTH);
	height = max_t(uint32_t, height, VMW_MIN_INITIAL_HEIGHT);

	if (width > dev_priv->fb_max_width ||
	    height > dev_priv->fb_max_height) {

		/*
		 * This is a host error and shouldn't occur.
		 */

		width = VMW_MIN_INITIAL_WIDTH;
		height = VMW_MIN_INITIAL_HEIGHT;
	}

	dev_priv->initial_width = width;
	dev_priv->initial_height = height;
}
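
/*
 * Worked example (illustrative): a host reporting 1024x768 within the
 * fb_max_[width|height] limits yields an initial 1024x768 mode; a bogus
 * 0x0 register read is clamped up to the 800x600 minimum; and a value
 * beyond the framebuffer maximum makes both dimensions fall back to
 * 800x600.
 */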

/**
 * vmw_dma_select_mode - Determine how DMA mappings should be set up for this
 * system.
 *
 * @dev_priv: Pointer to a struct vmw_private
 *
 * This function tries to determine the IOMMU setup and what actions
 * need to be taken by the driver to make system pages visible to the
 * device.
 * If this function decides that DMA is not possible, it returns -EINVAL.
 * The driver may then try to disable features of the device that require
 * DMA.
 */
static int vmw_dma_select_mode(struct vmw_private *dev_priv)
{
	static const char *names[vmw_dma_map_max] = {
		[vmw_dma_phys] = "Using physical TTM page addresses.",
		[vmw_dma_alloc_coherent] = "Using coherent TTM pages.",
		[vmw_dma_map_populate] = "Keeping DMA mappings.",
		[vmw_dma_map_bind] = "Giving up DMA mappings early."};
#ifdef CONFIG_X86
	const struct dma_map_ops *dma_ops = get_dma_ops(dev_priv->dev->dev);

#ifdef CONFIG_INTEL_IOMMU
	if (intel_iommu_enabled) {
		dev_priv->map_mode = vmw_dma_map_populate;
		goto out_fixup;
	}
#endif

	if (!(vmw_force_iommu || vmw_force_coherent)) {
		dev_priv->map_mode = vmw_dma_phys;
		DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);
		return 0;
	}

	dev_priv->map_mode = vmw_dma_map_populate;

	if (dma_ops->sync_single_for_cpu)
		dev_priv->map_mode = vmw_dma_alloc_coherent;
#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl() == 0)
		dev_priv->map_mode = vmw_dma_map_populate;
#endif

#ifdef CONFIG_INTEL_IOMMU
out_fixup:
#endif
	if (dev_priv->map_mode == vmw_dma_map_populate &&
	    vmw_restrict_iommu)
		dev_priv->map_mode = vmw_dma_map_bind;

	if (vmw_force_coherent)
		dev_priv->map_mode = vmw_dma_alloc_coherent;

#if !defined(CONFIG_SWIOTLB) && !defined(CONFIG_INTEL_IOMMU)
	/*
	 * No coherent page pool
	 */
	if (dev_priv->map_mode == vmw_dma_alloc_coherent)
		return -EINVAL;
#endif

#else /* CONFIG_X86 */
	dev_priv->map_mode = vmw_dma_map_populate;
#endif /* CONFIG_X86 */

	DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);

	return 0;
}

/**
 * vmw_dma_masks - set required page- and dma masks
 *
 * @dev_priv: Pointer to a struct vmw_private
 *
 * With 32-bit we can only handle 32 bit PFNs. Optionally set that
 * restriction also for 64-bit systems.
 */
#ifdef CONFIG_INTEL_IOMMU
static int vmw_dma_masks(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	if (intel_iommu_enabled &&
	    (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) {
		DRM_INFO("Restricting DMA addresses to 44 bits.\n");
		return dma_set_mask(dev->dev, DMA_BIT_MASK(44));
	}
	return 0;
}
#else
static int vmw_dma_masks(struct vmw_private *dev_priv)
{
	return 0;
}
#endif

static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
{
	struct vmw_private *dev_priv;
	int ret;
	uint32_t svga_id;
	enum vmw_res_type i;
	bool refuse_dma = false;

	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
	if (unlikely(dev_priv == NULL)) {
		DRM_ERROR("Failed allocating a device private struct.\n");
		return -ENOMEM;
	}

	pci_set_master(dev->pdev);

	dev_priv->dev = dev;
	dev_priv->vmw_chipset = chipset;
	dev_priv->last_read_seqno = (uint32_t) -100;
	mutex_init(&dev_priv->cmdbuf_mutex);
	mutex_init(&dev_priv->release_mutex);
	mutex_init(&dev_priv->binding_mutex);
	rwlock_init(&dev_priv->resource_lock);
	ttm_lock_init(&dev_priv->reservation_sem);
	spin_lock_init(&dev_priv->hw_lock);
	spin_lock_init(&dev_priv->waiter_lock);
	spin_lock_init(&dev_priv->cap_lock);
	spin_lock_init(&dev_priv->svga_lock);

	for (i = vmw_res_context; i < vmw_res_max; ++i) {
		idr_init(&dev_priv->res_idr[i]);
		INIT_LIST_HEAD(&dev_priv->res_lru[i]);
	}

	mutex_init(&dev_priv->init_mutex);
	init_waitqueue_head(&dev_priv->fence_queue);
	init_waitqueue_head(&dev_priv->fifo_queue);
	dev_priv->fence_queue_waiters = 0;
	dev_priv->fifo_queue_waiters = 0;

	dev_priv->used_memory_size = 0;

	dev_priv->io_start = pci_resource_start(dev->pdev, 0);
	dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
	dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);

	dev_priv->enable_fb = enable_fbdev;

	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
	svga_id = vmw_read(dev_priv, SVGA_REG_ID);
	if (svga_id != SVGA_ID_2) {
		ret = -ENOSYS;
		DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id);
		goto out_err0;
	}

	dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);
	ret = vmw_dma_select_mode(dev_priv);
	if (unlikely(ret != 0)) {
		DRM_INFO("Restricting capabilities due to IOMMU setup.\n");
		refuse_dma = true;
	}

	dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
	dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
	dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
	dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);

	vmw_get_initial_size(dev_priv);

	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		dev_priv->max_gmr_ids =
			vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
		dev_priv->max_gmr_pages =
			vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES);
		dev_priv->memory_size =
			vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE);
		dev_priv->memory_size -= dev_priv->vram_size;
	} else {
		/*
		 * An arbitrary limit of 512MiB on surface
		 * memory. But all HWV8 hardware supports GMR2.
		 */
		dev_priv->memory_size = 512*1024*1024;
	}
	dev_priv->max_mob_pages = 0;
	dev_priv->max_mob_size = 0;
	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
		uint64_t mem_size =
			vmw_read(dev_priv,
				 SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB);

		dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
		dev_priv->prim_bb_mem =
			vmw_read(dev_priv,
				 SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM);
		dev_priv->max_mob_size =
			vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
		dev_priv->stdu_max_width =
			vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_WIDTH);
		dev_priv->stdu_max_height =
			vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_HEIGHT);

		vmw_write(dev_priv, SVGA_REG_DEV_CAP,
			  SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH);
		dev_priv->texture_max_width = vmw_read(dev_priv,
						       SVGA_REG_DEV_CAP);
		vmw_write(dev_priv, SVGA_REG_DEV_CAP,
			  SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT);
		dev_priv->texture_max_height = vmw_read(dev_priv,
							SVGA_REG_DEV_CAP);
	} else {
		dev_priv->texture_max_width = 8192;
		dev_priv->texture_max_height = 8192;
		dev_priv->prim_bb_mem = dev_priv->vram_size;
	}

	vmw_print_capabilities(dev_priv->capabilities);

	ret = vmw_dma_masks(dev_priv);
	if (unlikely(ret != 0))
		goto out_err0;

	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		DRM_INFO("Max GMR ids is %u\n",
			 (unsigned)dev_priv->max_gmr_ids);
		DRM_INFO("Max number of GMR pages is %u\n",
			 (unsigned)dev_priv->max_gmr_pages);
		DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n",
			 (unsigned)dev_priv->memory_size / 1024);
	}
	DRM_INFO("Maximum display memory size is %u kiB\n",
		 dev_priv->prim_bb_mem / 1024);
	DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
		 dev_priv->vram_start, dev_priv->vram_size / 1024);
	DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
		 dev_priv->mmio_start, dev_priv->mmio_size / 1024);

	ret = vmw_ttm_global_init(dev_priv);
	if (unlikely(ret != 0))
		goto out_err0;


	vmw_master_init(&dev_priv->fbdev_master);
	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
	dev_priv->active_master = &dev_priv->fbdev_master;

	dev_priv->mmio_virt = memremap(dev_priv->mmio_start,
				       dev_priv->mmio_size, MEMREMAP_WB);

	if (unlikely(dev_priv->mmio_virt == NULL)) {
		ret = -ENOMEM;
		DRM_ERROR("Failed mapping MMIO.\n");
		goto out_err3;
	}

	/* Need mmio memory to check for fifo pitchlock cap. */
	if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
	    !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) &&
	    !vmw_fifo_have_pitchlock(dev_priv)) {
		ret = -ENOSYS;
		DRM_ERROR("Hardware has no pitchlock\n");
		goto out_err4;
	}

	dev_priv->tdev = ttm_object_device_init
		(dev_priv->mem_global_ref.object, 12, &vmw_prime_dmabuf_ops);

	if (unlikely(dev_priv->tdev == NULL)) {
		DRM_ERROR("Unable to initialize TTM object management.\n");
		ret = -ENOMEM;
		goto out_err4;
	}

	dev->dev_private = dev_priv;

	ret = pci_request_regions(dev->pdev, "vmwgfx probe");
	dev_priv->stealth = (ret != 0);
	if (dev_priv->stealth) {
		/**
		 * Request at least the mmio PCI resource.
		 */

		DRM_INFO("It appears like vesafb is loaded. "
			 "Ignore above error if any.\n");
		ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe");
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed reserving the SVGA MMIO resource.\n");
			goto out_no_device;
		}
	}

	if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
		ret = drm_irq_install(dev, dev->pdev->irq);
		if (ret != 0) {
			DRM_ERROR("Failed installing irq: %d\n", ret);
			goto out_no_irq;
		}
	}

	dev_priv->fman = vmw_fence_manager_init(dev_priv);
	if (unlikely(dev_priv->fman == NULL)) {
		ret = -ENOMEM;
		goto out_no_fman;
	}

	ret = ttm_bo_device_init(&dev_priv->bdev,
				 dev_priv->bo_global_ref.ref.object,
				 &vmw_bo_driver,
				 dev->anon_inode->i_mapping,
				 VMWGFX_FILE_PAGE_OFFSET,
				 false);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing TTM buffer object driver.\n");
		goto out_no_bdev;
	}

	/*
	 * Enable VRAM, but initially don't use it until SVGA is enabled and
	 * unhidden.
	 */
	ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
			     (dev_priv->vram_size >> PAGE_SHIFT));
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing memory manager for VRAM.\n");
		goto out_no_vram;
	}
	dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;

	dev_priv->has_gmr = true;
	if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
	    refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
					 VMW_PL_GMR) != 0) {
		DRM_INFO("No GMR memory available. "
			 "Graphics memory resources are very limited.\n");
		dev_priv->has_gmr = false;
	}

	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
		dev_priv->has_mob = true;
		if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB,
				   VMW_PL_MOB) != 0) {
			DRM_INFO("No MOB memory available. "
				 "3D will be disabled.\n");
			dev_priv->has_mob = false;
		}
	}

	if (dev_priv->has_mob) {
		spin_lock(&dev_priv->cap_lock);
		vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_DX);
		dev_priv->has_dx = !!vmw_read(dev_priv, SVGA_REG_DEV_CAP);
		spin_unlock(&dev_priv->cap_lock);
	}


	ret = vmw_kms_init(dev_priv);
	if (unlikely(ret != 0))
		goto out_no_kms;
	vmw_overlay_init(dev_priv);

	ret = vmw_request_device(dev_priv);
	if (ret)
		goto out_no_fifo;

	DRM_INFO("DX: %s\n", dev_priv->has_dx ? "yes." : "no.");

	if (dev_priv->enable_fb) {
		vmw_fifo_resource_inc(dev_priv);
		vmw_svga_enable(dev_priv);
		vmw_fb_init(dev_priv);
	}

	dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
	register_pm_notifier(&dev_priv->pm_nb);

	return 0;

out_no_fifo:
	vmw_overlay_close(dev_priv);
	vmw_kms_close(dev_priv);
out_no_kms:
	if (dev_priv->has_mob)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
	if (dev_priv->has_gmr)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
out_no_vram:
	(void)ttm_bo_device_release(&dev_priv->bdev);
out_no_bdev:
	vmw_fence_manager_takedown(dev_priv->fman);
out_no_fman:
	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
		drm_irq_uninstall(dev_priv->dev);
out_no_irq:
	if (dev_priv->stealth)
		pci_release_region(dev->pdev, 2);
	else
		pci_release_regions(dev->pdev);
out_no_device:
	ttm_object_device_release(&dev_priv->tdev);
out_err4:
	memunmap(dev_priv->mmio_virt);
out_err3:
	vmw_ttm_global_release(dev_priv);
out_err0:
	for (i = vmw_res_context; i < vmw_res_max; ++i)
		idr_destroy(&dev_priv->res_idr[i]);

	if (dev_priv->ctx.staged_bindings)
		vmw_binding_state_free(dev_priv->ctx.staged_bindings);
	kfree(dev_priv);
	return ret;
}
| 922 | |
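/**
 * vmw_driver_unload - Tear down the device on driver unload.
 *
 * @dev: Pointer to the struct drm_device.
 *
 * Unwinds vmw_driver_load: takes down fbdev and KMS state, cleans up
 * the TTM memory managers and releases the device and remaining
 * resources.
 */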
static int vmw_driver_unload(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	enum vmw_res_type i;

	unregister_pm_notifier(&dev_priv->pm_nb);

	if (dev_priv->ctx.res_ht_initialized)
		drm_ht_remove(&dev_priv->ctx.res_ht);
	vfree(dev_priv->ctx.cmd_bounce);
	if (dev_priv->enable_fb) {
		vmw_fb_off(dev_priv);
		vmw_fb_close(dev_priv);
		vmw_fifo_resource_dec(dev_priv);
		vmw_svga_disable(dev_priv);
	}

	vmw_kms_close(dev_priv);
	vmw_overlay_close(dev_priv);

	if (dev_priv->has_gmr)
		(void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);

	vmw_release_device_early(dev_priv);
	if (dev_priv->has_mob)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
	(void) ttm_bo_device_release(&dev_priv->bdev);
	vmw_release_device_late(dev_priv);
	vmw_fence_manager_takedown(dev_priv->fman);
	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
		drm_irq_uninstall(dev_priv->dev);
	if (dev_priv->stealth)
		pci_release_region(dev->pdev, 2);
	else
		pci_release_regions(dev->pdev);

	ttm_object_device_release(&dev_priv->tdev);
	memunmap(dev_priv->mmio_virt);
	if (dev_priv->ctx.staged_bindings)
		vmw_binding_state_free(dev_priv->ctx.staged_bindings);
	vmw_ttm_global_release(dev_priv);

	for (i = vmw_res_context; i < vmw_res_max; ++i)
		idr_destroy(&dev_priv->res_idr[i]);

	kfree(dev_priv);

	return 0;
}

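/**
 * vmw_preclose - Driver preclose callback.
 *
 * @dev: Pointer to the struct drm_device.
 * @file_priv: Pointer to the struct drm_file about to be closed.
 *
 * Removes any fence events still pending for this file.
 */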
static void vmw_preclose(struct drm_device *dev,
			 struct drm_file *file_priv)
{
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_private *dev_priv = vmw_priv(dev);

	vmw_event_fence_fpriv_gone(dev_priv->fman, &vmw_fp->fence_events);
}

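/**
 * vmw_postclose - Driver postclose callback.
 *
 * @dev: Pointer to the struct drm_device.
 * @file_priv: Pointer to the struct drm_file being closed.
 *
 * Releases a locked master, if any, and frees the per-file data.
 */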
static void vmw_postclose(struct drm_device *dev,
			  struct drm_file *file_priv)
{
	struct vmw_fpriv *vmw_fp;

	vmw_fp = vmw_fpriv(file_priv);

	if (vmw_fp->locked_master) {
		struct vmw_master *vmaster =
			vmw_master(vmw_fp->locked_master);

		ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
		ttm_vt_unlock(&vmaster->lock);
		drm_master_put(&vmw_fp->locked_master);
	}

	ttm_object_file_release(&vmw_fp->tfile);
	kfree(vmw_fp);
}

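/**
 * vmw_driver_open - Driver open callback.
 *
 * @dev: Pointer to the struct drm_device.
 * @file_priv: Pointer to the new struct drm_file.
 *
 * Allocates the per-file private data and its TTM object file.
 */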
static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp;
	int ret = -ENOMEM;

	vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL);
	if (unlikely(vmw_fp == NULL))
		return ret;

	INIT_LIST_HEAD(&vmw_fp->fence_events);
	vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
	if (unlikely(vmw_fp->tfile == NULL))
		goto out_no_tfile;

	file_priv->driver_priv = vmw_fp;

	return 0;

out_no_tfile:
	kfree(vmw_fp);
	return ret;
}

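/**
 * vmw_master_check - Check whether an ioctl requires and holds master
 * privileges.
 *
 * @dev: Pointer to the struct drm_device.
 * @file_priv: Pointer to the calling struct drm_file.
 * @flags: The declared ioctl flags.
 *
 * Returns NULL if no master read lock is needed, a pointer to the
 * read-locked master on success, or an ERR_PTR value on failure.
 */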
static struct vmw_master *vmw_master_check(struct drm_device *dev,
					   struct drm_file *file_priv,
					   unsigned int flags)
{
	int ret;
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_master *vmaster;

	if (file_priv->minor->type != DRM_MINOR_LEGACY ||
	    !(flags & DRM_AUTH))
		return NULL;

	ret = mutex_lock_interruptible(&dev->master_mutex);
	if (unlikely(ret != 0))
		return ERR_PTR(-ERESTARTSYS);

	if (file_priv->is_master) {
		mutex_unlock(&dev->master_mutex);
		return NULL;
	}

	/*
	 * Check if we were previously master, but now dropped. In that
	 * case, allow at least render node functionality.
	 */
	if (vmw_fp->locked_master) {
		mutex_unlock(&dev->master_mutex);

		if (flags & DRM_RENDER_ALLOW)
			return NULL;

		DRM_ERROR("Dropped master trying to access ioctl that "
			  "requires authentication.\n");
		return ERR_PTR(-EACCES);
	}
	mutex_unlock(&dev->master_mutex);

	/*
	 * Take the TTM lock. Possibly sleep waiting for the authenticating
	 * master to become master again, or for a SIGTERM if the
	 * authenticating master exits.
	 */
	vmaster = vmw_master(file_priv->master);
	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		vmaster = ERR_PTR(ret);

	return vmaster;
}

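/**
 * vmw_generic_ioctl - Common ioctl entry point.
 *
 * @filp: Pointer to the struct file.
 * @cmd: The ioctl command number.
 * @arg: The ioctl argument.
 * @ioctl_func: The underlying drm ioctl dispatcher to call.
 *
 * Validates the ioctl encoding, performs master checking and
 * dispatches the call, handling the DRM_VMW_EXECBUF ioctl specially.
 */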
static long vmw_generic_ioctl(struct file *filp, unsigned int cmd,
			      unsigned long arg,
			      long (*ioctl_func)(struct file *, unsigned int,
						 unsigned long))
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_device *dev = file_priv->minor->dev;
	unsigned int nr = DRM_IOCTL_NR(cmd);
	struct vmw_master *vmaster;
	unsigned int flags;
	long ret;

	/*
	 * Do extra checking on driver private ioctls.
	 */

	if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
	    && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
		const struct drm_ioctl_desc *ioctl =
			&vmw_ioctls[nr - DRM_COMMAND_BASE];

		if (nr == DRM_COMMAND_BASE + DRM_VMW_EXECBUF) {
			ret = (long) drm_ioctl_permit(ioctl->flags, file_priv);
			if (unlikely(ret != 0))
				return ret;

			if (unlikely((cmd & (IOC_IN | IOC_OUT)) != IOC_IN))
				goto out_io_encoding;

			return (long) vmw_execbuf_ioctl(dev, arg, file_priv,
							_IOC_SIZE(cmd));
		}

		if (unlikely(ioctl->cmd != cmd))
			goto out_io_encoding;

		flags = ioctl->flags;
	} else if (!drm_ioctl_flags(nr, &flags))
		return -EINVAL;

	vmaster = vmw_master_check(dev, file_priv, flags);
	if (IS_ERR(vmaster)) {
		ret = PTR_ERR(vmaster);

		if (ret != -ERESTARTSYS)
			DRM_INFO("IOCTL ERROR Command %d, Error %ld.\n",
				 nr, ret);
		return ret;
	}

	ret = ioctl_func(filp, cmd, arg);
	if (vmaster)
		ttm_read_unlock(&vmaster->lock);

	return ret;

out_io_encoding:
	DRM_ERROR("Invalid command format, ioctl %d\n",
		  nr - DRM_COMMAND_BASE);

	return -EINVAL;
}

static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
			       unsigned long arg)
{
	return vmw_generic_ioctl(filp, cmd, arg, &drm_ioctl);
}

#ifdef CONFIG_COMPAT
static long vmw_compat_ioctl(struct file *filp, unsigned int cmd,
			     unsigned long arg)
{
	return vmw_generic_ioctl(filp, cmd, arg, &drm_compat_ioctl);
}
#endif

static void vmw_lastclose(struct drm_device *dev)
{
}

static void vmw_master_init(struct vmw_master *vmaster)
{
	ttm_lock_init(&vmaster->lock);
}

static int vmw_master_create(struct drm_device *dev,
			     struct drm_master *master)
{
	struct vmw_master *vmaster;

	vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL);
	if (unlikely(vmaster == NULL))
		return -ENOMEM;

	vmw_master_init(vmaster);
	ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
	master->driver_priv = vmaster;

	return 0;
}

static void vmw_master_destroy(struct drm_device *dev,
			       struct drm_master *master)
{
	struct vmw_master *vmaster = vmw_master(master);

	master->driver_priv = NULL;
	kfree(vmaster);
}

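/**
 * vmw_master_set - Make a master active.
 *
 * @dev: Pointer to the struct drm_device.
 * @file_priv: Pointer to the struct drm_file becoming master.
 * @from_open: Whether the call originates from a file open.
 *
 * Suspends the fbdev master, if active, and hands the device over to
 * the new master.
 */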
static int vmw_master_set(struct drm_device *dev,
			  struct drm_file *file_priv,
			  bool from_open)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_master *active = dev_priv->active_master;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret = 0;

	if (active) {
		BUG_ON(active != &dev_priv->fbdev_master);
		ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
		if (unlikely(ret != 0))
			return ret;

		ttm_lock_set_kill(&active->lock, true, SIGTERM);
		dev_priv->active_master = NULL;
	}

	ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
	if (!from_open) {
		ttm_vt_unlock(&vmaster->lock);
		BUG_ON(vmw_fp->locked_master != file_priv->master);
		drm_master_put(&vmw_fp->locked_master);
	}

	dev_priv->active_master = vmaster;

	return 0;
}

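/**
 * vmw_master_drop - Drop the current master.
 *
 * @dev: Pointer to the struct drm_device.
 * @file_priv: Pointer to the struct drm_file dropping master.
 * @from_release: Whether the call originates from a file release.
 *
 * Locks out the dropped master and hands the device back to the
 * fbdev master.
 */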
static void vmw_master_drop(struct drm_device *dev,
			    struct drm_file *file_priv,
			    bool from_release)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

	/*
	 * Make sure the master doesn't disappear while we have
	 * it locked.
	 */
	vmw_fp->locked_master = drm_master_get(file_priv->master);
	ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
	vmw_kms_legacy_hotspot_clear(dev_priv);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Unable to lock TTM at VT switch.\n");
		drm_master_put(&vmw_fp->locked_master);
	}

	ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);

	if (!dev_priv->enable_fb)
		vmw_svga_disable(dev_priv);

	dev_priv->active_master = &dev_priv->fbdev_master;
	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
	ttm_vt_unlock(&dev_priv->fbdev_master.lock);

	if (dev_priv->enable_fb)
		vmw_fb_on(dev_priv);
}

/**
 * __vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 * Needs the reservation sem to be held in non-exclusive mode.
 */
static void __vmw_svga_enable(struct vmw_private *dev_priv)
{
	spin_lock(&dev_priv->svga_lock);
	if (!dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
		vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE_ENABLE);
		dev_priv->bdev.man[TTM_PL_VRAM].use_type = true;
	}
	spin_unlock(&dev_priv->svga_lock);
}

/**
 * vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 */
void vmw_svga_enable(struct vmw_private *dev_priv)
{
	ttm_read_lock(&dev_priv->reservation_sem, false);
	__vmw_svga_enable(dev_priv);
	ttm_read_unlock(&dev_priv->reservation_sem);
}

/**
 * __vmw_svga_disable - Disable SVGA mode and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 * Needs the reservation sem to be held in exclusive mode.
 * Will not empty VRAM. VRAM must be emptied by caller.
 */
static void __vmw_svga_disable(struct vmw_private *dev_priv)
{
	spin_lock(&dev_priv->svga_lock);
	if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
		dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
		vmw_write(dev_priv, SVGA_REG_ENABLE,
			  SVGA_REG_ENABLE_HIDE |
			  SVGA_REG_ENABLE_ENABLE);
	}
	spin_unlock(&dev_priv->svga_lock);
}

/**
 * vmw_svga_disable - Disable SVGA mode and use of VRAM, keeping the
 * fifo running.
 *
 * @dev_priv: Pointer to device private struct.
 * Will empty VRAM.
 */
void vmw_svga_disable(struct vmw_private *dev_priv)
{
	ttm_write_lock(&dev_priv->reservation_sem, false);
	spin_lock(&dev_priv->svga_lock);
	if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
		dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
		spin_unlock(&dev_priv->svga_lock);
		if (ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM))
			DRM_ERROR("Failed evicting VRAM buffers.\n");
		vmw_write(dev_priv, SVGA_REG_ENABLE,
			  SVGA_REG_ENABLE_HIDE |
			  SVGA_REG_ENABLE_ENABLE);
	} else
		spin_unlock(&dev_priv->svga_lock);
	ttm_write_unlock(&dev_priv->reservation_sem);
}

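/**
 * vmw_remove - PCI remove callback.
 *
 * @pdev: Pointer to the struct pci_dev.
 *
 * Disables the PCI device and unregisters the drm device.
 */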
static void vmw_remove(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	pci_disable_device(pdev);
	drm_put_dev(dev);
}

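/**
 * vmwgfx_pm_notifier - PM notifier callback.
 *
 * @nb: Pointer to the struct notifier_block embedded in struct vmw_private.
 * @val: The PM event.
 * @ptr: Unused.
 *
 * Idles the device and swaps out buffer contents before hibernation,
 * and brings the device back up afterwards.
 */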
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
			      void *ptr)
{
	struct vmw_private *dev_priv =
		container_of(nb, struct vmw_private, pm_nb);

	switch (val) {
	case PM_HIBERNATION_PREPARE:
		if (dev_priv->enable_fb)
			vmw_fb_off(dev_priv);
		ttm_suspend_lock(&dev_priv->reservation_sem);

		/*
		 * This empties VRAM and unbinds all GMR bindings.
		 * Buffer contents are moved to swappable memory.
		 */
		vmw_execbuf_release_pinned_bo(dev_priv);
		vmw_resource_evict_all(dev_priv);
		vmw_release_device_early(dev_priv);
		ttm_bo_swapout_all(&dev_priv->bdev);
		vmw_fence_fifo_down(dev_priv->fman);
		break;
	case PM_POST_HIBERNATION:
	case PM_POST_RESTORE:
		vmw_fence_fifo_up(dev_priv->fman);
		ttm_suspend_unlock(&dev_priv->reservation_sem);
		if (dev_priv->enable_fb)
			vmw_fb_on(dev_priv);
		break;
	case PM_RESTORE_PREPARE:
		break;
	default:
		break;
	}
	return 0;
}

static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);

	if (dev_priv->refuse_hibernation)
		return -EBUSY;

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
	return 0;
}

static int vmw_pci_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	return pci_enable_device(pdev);
}

static int vmw_pm_suspend(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct pm_message dummy;

	dummy.event = 0;

	return vmw_pci_suspend(pdev, dummy);
}

static int vmw_pm_resume(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);

	return vmw_pci_resume(pdev);
}

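/**
 * vmw_pm_freeze - Freeze callback for hibernation.
 *
 * @kdev: Pointer to the struct device.
 *
 * Refuses to freeze while 3D resources are still active; otherwise
 * disables SVGA mode and releases the device.
 */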
static int vmw_pm_freeze(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);

	dev_priv->suspended = true;
	if (dev_priv->enable_fb)
		vmw_fifo_resource_dec(dev_priv);

	if (atomic_read(&dev_priv->num_fifo_resources) != 0) {
		DRM_ERROR("Can't hibernate while 3D resources are active.\n");
		if (dev_priv->enable_fb)
			vmw_fifo_resource_inc(dev_priv);
		WARN_ON(vmw_request_device_late(dev_priv));
		dev_priv->suspended = false;
		return -EBUSY;
	}

	if (dev_priv->enable_fb)
		__vmw_svga_disable(dev_priv);

	vmw_release_device_late(dev_priv);

	return 0;
}

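/**
 * vmw_pm_restore - Thaw / restore callback for hibernation.
 *
 * @kdev: Pointer to the struct device.
 *
 * Re-negotiates the SVGA device version and brings the device back up.
 */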
static int vmw_pm_restore(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);
	int ret;

	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
	(void) vmw_read(dev_priv, SVGA_REG_ID);

	if (dev_priv->enable_fb)
		vmw_fifo_resource_inc(dev_priv);

	ret = vmw_request_device(dev_priv);
	if (ret)
		return ret;

	if (dev_priv->enable_fb)
		__vmw_svga_enable(dev_priv);

	dev_priv->suspended = false;

	return 0;
}

static const struct dev_pm_ops vmw_pm_ops = {
	.freeze = vmw_pm_freeze,
	.thaw = vmw_pm_restore,
	.restore = vmw_pm_restore,
	.suspend = vmw_pm_suspend,
	.resume = vmw_pm_resume,
};

static const struct file_operations vmwgfx_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = vmw_unlocked_ioctl,
	.mmap = vmw_mmap,
	.poll = vmw_fops_poll,
	.read = vmw_fops_read,
#if defined(CONFIG_COMPAT)
	.compat_ioctl = vmw_compat_ioctl,
#endif
	.llseek = noop_llseek,
};

static struct drm_driver driver = {
	.driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
	DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER,
	.load = vmw_driver_load,
	.unload = vmw_driver_unload,
	.lastclose = vmw_lastclose,
	.irq_preinstall = vmw_irq_preinstall,
	.irq_postinstall = vmw_irq_postinstall,
	.irq_uninstall = vmw_irq_uninstall,
	.irq_handler = vmw_irq_handler,
	.get_vblank_counter = vmw_get_vblank_counter,
	.enable_vblank = vmw_enable_vblank,
	.disable_vblank = vmw_disable_vblank,
	.ioctls = vmw_ioctls,
	.num_ioctls = ARRAY_SIZE(vmw_ioctls),
	.master_create = vmw_master_create,
	.master_destroy = vmw_master_destroy,
	.master_set = vmw_master_set,
	.master_drop = vmw_master_drop,
	.open = vmw_driver_open,
	.preclose = vmw_preclose,
	.postclose = vmw_postclose,
	.set_busid = drm_pci_set_busid,

	.dumb_create = vmw_dumb_create,
	.dumb_map_offset = vmw_dumb_map_offset,
	.dumb_destroy = vmw_dumb_destroy,

	.prime_fd_to_handle = vmw_prime_fd_to_handle,
	.prime_handle_to_fd = vmw_prime_handle_to_fd,

	.fops = &vmwgfx_driver_fops,
	.name = VMWGFX_DRIVER_NAME,
	.desc = VMWGFX_DRIVER_DESC,
	.date = VMWGFX_DRIVER_DATE,
	.major = VMWGFX_DRIVER_MAJOR,
	.minor = VMWGFX_DRIVER_MINOR,
	.patchlevel = VMWGFX_DRIVER_PATCHLEVEL
};

static struct pci_driver vmw_pci_driver = {
	.name = VMWGFX_DRIVER_NAME,
	.id_table = vmw_pci_id_list,
	.probe = vmw_probe,
	.remove = vmw_remove,
	.driver = {
		.pm = &vmw_pm_ops
	}
};

static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	return drm_get_pci_dev(pdev, ent, &driver);
}

static int __init vmwgfx_init(void)
{
	int ret;

	ret = drm_pci_init(&driver, &vmw_pci_driver);
	if (ret)
		DRM_ERROR("Failed initializing DRM.\n");
	return ret;
}

static void __exit vmwgfx_exit(void)
{
	drm_pci_exit(&driver, &vmw_pci_driver);
}

module_init(vmwgfx_init);
module_exit(vmwgfx_exit);

MODULE_AUTHOR("VMware Inc. and others");
MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device");
MODULE_LICENSE("GPL and additional rights");
MODULE_VERSION(__stringify(VMWGFX_DRIVER_MAJOR) "."
	       __stringify(VMWGFX_DRIVER_MINOR) "."
	       __stringify(VMWGFX_DRIVER_PATCHLEVEL) "."
	       "0");