// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2011-2014 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <linux/sched/signal.h>

#include "vmwgfx_drv.h"

#define VMW_FENCE_WRAP (1 << 31)
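
/*
 * Seqnos wrap around u32 space, so they are compared with wrap-safe
 * arithmetic throughout this file: a fence counts as passed when
 * (device_seqno - fence_seqno) < VMW_FENCE_WRAP, which stays correct
 * across wrap-around as long as fewer than 2^31 seqnos are pending.
 */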

struct vmw_fence_manager {
        int num_fence_objects;
        struct vmw_private *dev_priv;
        spinlock_t lock;
        struct list_head fence_list;
        struct work_struct work;
        u32 user_fence_size;
        u32 fence_size;
        u32 event_fence_action_size;
        bool fifo_down;
        struct list_head cleanup_list;
        uint32_t pending_actions[VMW_ACTION_MAX];
        struct mutex goal_irq_mutex;
        bool goal_irq_on; /* Protected by @goal_irq_mutex */
        bool seqno_valid; /* Protected by @lock, and may not be set to true
                             without the @goal_irq_mutex held. */
        u64 ctx;
};

struct vmw_user_fence {
        struct ttm_base_object base;
        struct vmw_fence_obj fence;
};

/**
 * struct vmw_event_fence_action - fence action that delivers a drm event.
 *
 * @action: A struct vmw_fence_action to hook up to a fence.
 * @event: A pointer to the struct drm_pending_event that controls the
 * event delivery.
 * @fence: A referenced pointer to the fence to keep it alive while @action
 * hangs on it.
 * @dev: Pointer to a struct drm_device so we can access the event machinery.
 * @tv_sec: If non-NULL, the variable pointed to will be assigned the
 * current time tv_sec value when the fence signals.
 * @tv_usec: Must be set if @tv_sec is set, and the variable pointed to will
 * be assigned the current time tv_usec value when the fence signals.
 */
struct vmw_event_fence_action {
        struct vmw_fence_action action;

        struct drm_pending_event *event;
        struct vmw_fence_obj *fence;
        struct drm_device *dev;

        uint32_t *tv_sec;
        uint32_t *tv_usec;
};

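/*
 * A fence object carries no back pointer to its manager. Instead,
 * dma_fence_init() points fence->base.lock at fman->lock, so the
 * manager can be recovered with container_of() on that lock.
 */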
static struct vmw_fence_manager *
fman_from_fence(struct vmw_fence_obj *fence)
{
        return container_of(fence->base.lock, struct vmw_fence_manager, lock);
}

/*
 * Note on the fencing subsystem's use of irqs:
 * Typically the vmw_fences_update function is called
 *
 * a) When a new fence seqno has been submitted by the fifo code.
 * b) On-demand when we have waiters. Sleeping waiters will switch on the
 * ANY_FENCE irq and call vmw_fences_update function each time an ANY_FENCE
 * irq is received. When the last fence waiter is gone, that irq is masked
 * away.
 *
 * In situations where there are no waiters and we don't submit any new fences,
 * fence objects may not be signaled. This is perfectly OK, since there are
 * no consumers of the signaled data, but that is NOT ok when there are fence
 * actions attached to a fence. The fencing subsystem then makes use of the
 * FENCE_GOAL irq and sets the fence goal seqno to that of the next fence
 * which has an action attached, and each time vmw_fences_update is called,
 * the subsystem makes sure the fence goal seqno is updated.
 *
 * The fence goal seqno irq is on as long as there are unsignaled fence
 * objects with actions attached to them.
 */

static void vmw_fence_obj_destroy(struct dma_fence *f)
{
        struct vmw_fence_obj *fence =
                container_of(f, struct vmw_fence_obj, base);

        struct vmw_fence_manager *fman = fman_from_fence(fence);

        spin_lock(&fman->lock);
        list_del_init(&fence->head);
        --fman->num_fence_objects;
        spin_unlock(&fman->lock);
        fence->destroy(fence);
}

static const char *vmw_fence_get_driver_name(struct dma_fence *f)
{
        return "vmwgfx";
}

static const char *vmw_fence_get_timeline_name(struct dma_fence *f)
{
        return "svga";
}

static bool vmw_fence_enable_signaling(struct dma_fence *f)
{
        struct vmw_fence_obj *fence =
                container_of(f, struct vmw_fence_obj, base);

        struct vmw_fence_manager *fman = fman_from_fence(fence);
        struct vmw_private *dev_priv = fman->dev_priv;

        u32 *fifo_mem = dev_priv->mmio_virt;
        u32 seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
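        /*
         * Wrap-safe comparison: if the device seqno has already passed
         * this fence's seqno, return false so that the dma_fence core
         * treats the fence as signaled rather than waiting for an irq.
         */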
        if (seqno - fence->base.seqno < VMW_FENCE_WRAP)
                return false;

        vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);

        return true;
}

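/*
 * Context for the open-coded wait in vmw_fence_wait(): the dma_fence
 * callback simply wakes the task that queued it.
 */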
struct vmwgfx_wait_cb {
        struct dma_fence_cb base;
        struct task_struct *task;
};

static void
vmwgfx_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
        struct vmwgfx_wait_cb *wait =
                container_of(cb, struct vmwgfx_wait_cb, base);

        wake_up_process(wait->task);
}

static void __vmw_fences_update(struct vmw_fence_manager *fman);

static long vmw_fence_wait(struct dma_fence *f, bool intr, signed long timeout)
{
        struct vmw_fence_obj *fence =
                container_of(f, struct vmw_fence_obj, base);

        struct vmw_fence_manager *fman = fman_from_fence(fence);
        struct vmw_private *dev_priv = fman->dev_priv;
        struct vmwgfx_wait_cb cb;
        long ret = timeout;

        if (likely(vmw_fence_obj_signaled(fence)))
                return timeout;

        vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
        vmw_seqno_waiter_add(dev_priv);

        spin_lock(f->lock);

        if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags))
                goto out;

        if (intr && signal_pending(current)) {
                ret = -ERESTARTSYS;
                goto out;
        }

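        /*
         * Open-coded dma_fence_add_callback(): f->lock is already held
         * here, so hook the wake-up callback onto the cb_list directly.
         */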
        cb.base.func = vmwgfx_wait_cb;
        cb.task = current;
        list_add(&cb.base.node, &f->cb_list);

        for (;;) {
                __vmw_fences_update(fman);

                /*
                 * We can use the barrier free __set_current_state() since
                 * DMA_FENCE_FLAG_SIGNALED_BIT + wakeup is protected by the
                 * fence spinlock.
                 */
                if (intr)
                        __set_current_state(TASK_INTERRUPTIBLE);
                else
                        __set_current_state(TASK_UNINTERRUPTIBLE);

                if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags)) {
                        if (ret == 0 && timeout > 0)
                                ret = 1;
                        break;
                }

                if (intr && signal_pending(current)) {
                        ret = -ERESTARTSYS;
                        break;
                }

                if (ret == 0)
                        break;

                spin_unlock(f->lock);

                ret = schedule_timeout(ret);

                spin_lock(f->lock);
        }
        __set_current_state(TASK_RUNNING);
        if (!list_empty(&cb.base.node))
                list_del(&cb.base.node);

out:
        spin_unlock(f->lock);

        vmw_seqno_waiter_remove(dev_priv);

        return ret;
}

static const struct dma_fence_ops vmw_fence_ops = {
        .get_driver_name = vmw_fence_get_driver_name,
        .get_timeline_name = vmw_fence_get_timeline_name,
        .enable_signaling = vmw_fence_enable_signaling,
        .wait = vmw_fence_wait,
        .release = vmw_fence_obj_destroy,
};


/*
 * Execute signal actions on fences recently signaled.
 * This is done from a workqueue so we don't have to execute
 * signal actions from atomic context.
 */

static void vmw_fence_work_func(struct work_struct *work)
{
        struct vmw_fence_manager *fman =
                container_of(work, struct vmw_fence_manager, work);
        struct list_head list;
        struct vmw_fence_action *action, *next_action;
        bool seqno_valid;

        do {
                INIT_LIST_HEAD(&list);
                mutex_lock(&fman->goal_irq_mutex);

                spin_lock(&fman->lock);
                list_splice_init(&fman->cleanup_list, &list);
                seqno_valid = fman->seqno_valid;
                spin_unlock(&fman->lock);

                if (!seqno_valid && fman->goal_irq_on) {
                        fman->goal_irq_on = false;
                        vmw_goal_waiter_remove(fman->dev_priv);
                }
                mutex_unlock(&fman->goal_irq_mutex);

                if (list_empty(&list))
                        return;

                /*
                 * At this point, only we should be able to manipulate the
                 * list heads of the actions we have on the private list.
                 * Hence fman::lock is not held.
                 */

                list_for_each_entry_safe(action, next_action, &list, head) {
                        list_del_init(&action->head);
                        if (action->cleanup)
                                action->cleanup(action);
                }
        } while (1);
}

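/**
 * vmw_fence_manager_init - Create and initialize a fence manager.
 *
 * @dev_priv: Pointer to the struct vmw_private of the device.
 *
 * Returns the new fence manager on success, or NULL if allocation fails.
 */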
struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
{
        struct vmw_fence_manager *fman = kzalloc(sizeof(*fman), GFP_KERNEL);

        if (unlikely(!fman))
                return NULL;

        fman->dev_priv = dev_priv;
        spin_lock_init(&fman->lock);
        INIT_LIST_HEAD(&fman->fence_list);
        INIT_LIST_HEAD(&fman->cleanup_list);
        INIT_WORK(&fman->work, &vmw_fence_work_func);
        fman->fifo_down = true;
        fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence)) +
                TTM_OBJ_EXTRA_SIZE;
        fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj));
        fman->event_fence_action_size =
                ttm_round_pot(sizeof(struct vmw_event_fence_action));
        mutex_init(&fman->goal_irq_mutex);
        fman->ctx = dma_fence_context_alloc(1);

        return fman;
}

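/**
 * vmw_fence_manager_takedown - Tear down and free a fence manager.
 *
 * @fman: Pointer to the fence manager.
 *
 * All fence objects must already have been signaled and released: both
 * the fence list and the cleanup list are required to be empty here.
 */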
void vmw_fence_manager_takedown(struct vmw_fence_manager *fman)
{
        bool lists_empty;

        (void) cancel_work_sync(&fman->work);

        spin_lock(&fman->lock);
        lists_empty = list_empty(&fman->fence_list) &&
                      list_empty(&fman->cleanup_list);
        spin_unlock(&fman->lock);

        BUG_ON(!lists_empty);
        kfree(fman);
}

static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
                              struct vmw_fence_obj *fence, u32 seqno,
                              void (*destroy) (struct vmw_fence_obj *fence))
{
        int ret = 0;

        dma_fence_init(&fence->base, &vmw_fence_ops, &fman->lock,
                       fman->ctx, seqno);
        INIT_LIST_HEAD(&fence->seq_passed_actions);
        fence->destroy = destroy;

        spin_lock(&fman->lock);
        if (unlikely(fman->fifo_down)) {
                ret = -EBUSY;
                goto out_unlock;
        }
        list_add_tail(&fence->head, &fman->fence_list);
        ++fman->num_fence_objects;

out_unlock:
        spin_unlock(&fman->lock);
        return ret;
}

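/*
 * Run the seq_passed callback of each action on @list, dropping the
 * per-type pending-action count taken when the action was attached
 * in vmw_fence_obj_add_action().
 */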
static void vmw_fences_perform_actions(struct vmw_fence_manager *fman,
                                       struct list_head *list)
{
        struct vmw_fence_action *action, *next_action;

        list_for_each_entry_safe(action, next_action, list, head) {
                list_del_init(&action->head);
                fman->pending_actions[action->type]--;
                if (action->seq_passed != NULL)
                        action->seq_passed(action);

                /*
                 * Add the cleanup action to the cleanup list so that
                 * it will be performed by a worker task.
                 */

                list_add_tail(&action->head, &fman->cleanup_list);
        }
}

/**
 * vmw_fence_goal_new_locked - Figure out a new device fence goal
 * seqno if needed.
 *
 * @fman: Pointer to a fence manager.
 * @passed_seqno: The seqno the device currently signals as passed.
 *
 * This function should be called with the fence manager lock held.
 * It is typically called when we have a new passed_seqno, and
 * we might need to update the fence goal. It checks to see whether
 * the current fence goal has already passed, and, in that case,
 * scans through all unsignaled fences to get the next fence object with an
 * action attached, and sets the seqno of that fence as a new fence goal.
 *
 * Returns: true if the device goal seqno was updated, false otherwise.
 */
static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
                                      u32 passed_seqno)
{
        u32 goal_seqno;
        u32 *fifo_mem;
        struct vmw_fence_obj *fence;

        if (likely(!fman->seqno_valid))
                return false;

        fifo_mem = fman->dev_priv->mmio_virt;
        goal_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE_GOAL);
        if (likely(passed_seqno - goal_seqno >= VMW_FENCE_WRAP))
                return false;

        fman->seqno_valid = false;
        list_for_each_entry(fence, &fman->fence_list, head) {
                if (!list_empty(&fence->seq_passed_actions)) {
                        fman->seqno_valid = true;
                        vmw_mmio_write(fence->base.seqno,
                                       fifo_mem + SVGA_FIFO_FENCE_GOAL);
                        break;
                }
        }

        return true;
}

/**
 * vmw_fence_goal_check_locked - Replace the device fence goal seqno if
 * needed.
 *
 * @fence: Pointer to a struct vmw_fence_obj the seqno of which should be
 * considered as a device fence goal.
 *
 * This function should be called with the fence manager lock held.
 * It is typically called when an action has been attached to a fence to
 * check whether the seqno of that fence should be used for a fence
 * goal interrupt. This is typically needed if the current fence goal is
 * invalid, or has a higher seqno than that of the current fence object.
 *
 * Returns: true if the device goal seqno was updated, false otherwise.
 */
static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence)
{
        struct vmw_fence_manager *fman = fman_from_fence(fence);
        u32 goal_seqno;
        u32 *fifo_mem;

        if (dma_fence_is_signaled_locked(&fence->base))
                return false;

        fifo_mem = fman->dev_priv->mmio_virt;
        goal_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE_GOAL);
        if (likely(fman->seqno_valid &&
                   goal_seqno - fence->base.seqno < VMW_FENCE_WRAP))
                return false;

        vmw_mmio_write(fence->base.seqno, fifo_mem + SVGA_FIFO_FENCE_GOAL);
        fman->seqno_valid = true;

        return true;
}

static void __vmw_fences_update(struct vmw_fence_manager *fman)
{
        struct vmw_fence_obj *fence, *next_fence;
        struct list_head action_list;
        bool needs_rerun;
        uint32_t seqno, new_seqno;
        u32 *fifo_mem = fman->dev_priv->mmio_virt;

        seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
rerun:
        list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
                if (seqno - fence->base.seqno < VMW_FENCE_WRAP) {
                        list_del_init(&fence->head);
                        dma_fence_signal_locked(&fence->base);
                        INIT_LIST_HEAD(&action_list);
                        list_splice_init(&fence->seq_passed_actions,
                                         &action_list);
                        vmw_fences_perform_actions(fman, &action_list);
                } else
                        break;
        }

        /*
         * Rerun if the fence goal seqno was updated, and the
         * hardware might have raced with that update, so that
         * we missed a fence_goal irq.
         */

        needs_rerun = vmw_fence_goal_new_locked(fman, seqno);
        if (unlikely(needs_rerun)) {
                new_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
                if (new_seqno != seqno) {
                        seqno = new_seqno;
                        goto rerun;
                }
        }

        if (!list_empty(&fman->cleanup_list))
                (void) schedule_work(&fman->work);
}

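/**
 * vmw_fences_update - Signal all fences whose seqno the device has passed.
 *
 * @fman: Pointer to the fence manager.
 */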
void vmw_fences_update(struct vmw_fence_manager *fman)
{
        spin_lock(&fman->lock);
        __vmw_fences_update(fman);
        spin_unlock(&fman->lock);
}

bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence)
{
        struct vmw_fence_manager *fman = fman_from_fence(fence);

        if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
                return 1;

        vmw_fences_update(fman);

        return dma_fence_is_signaled(&fence->base);
}

int vmw_fence_obj_wait(struct vmw_fence_obj *fence, bool lazy,
                       bool interruptible, unsigned long timeout)
{
        long ret = dma_fence_wait_timeout(&fence->base, interruptible, timeout);

        if (likely(ret > 0))
                return 0;
        else if (ret == 0)
                return -EBUSY;
        else
                return ret;
}

void vmw_fence_obj_flush(struct vmw_fence_obj *fence)
{
        struct vmw_private *dev_priv = fman_from_fence(fence)->dev_priv;

        vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
}

static void vmw_fence_destroy(struct vmw_fence_obj *fence)
{
        dma_fence_free(&fence->base);
}

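/**
 * vmw_fence_create - Create a kernel-space fence object.
 *
 * @fman: Pointer to the fence manager.
 * @seqno: The seqno the fence signals on.
 * @p_fence: Assigned the new fence object on success.
 *
 * Returns 0 on success, negative error code otherwise.
 */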
int vmw_fence_create(struct vmw_fence_manager *fman,
                     uint32_t seqno,
                     struct vmw_fence_obj **p_fence)
{
        struct vmw_fence_obj *fence;
        int ret;

        fence = kzalloc(sizeof(*fence), GFP_KERNEL);
        if (unlikely(!fence))
                return -ENOMEM;

        ret = vmw_fence_obj_init(fman, fence, seqno,
                                 vmw_fence_destroy);
        if (unlikely(ret != 0))
                goto out_err_init;

        *p_fence = fence;
        return 0;

out_err_init:
        kfree(fence);
        return ret;
}

static void vmw_user_fence_destroy(struct vmw_fence_obj *fence)
{
        struct vmw_user_fence *ufence =
                container_of(fence, struct vmw_user_fence, fence);
        struct vmw_fence_manager *fman = fman_from_fence(fence);

        ttm_base_object_kfree(ufence, base);
        /*
         * Free kernel space accounting.
         */
        ttm_mem_global_free(vmw_mem_glob(fman->dev_priv),
                            fman->user_fence_size);
}

static void vmw_user_fence_base_release(struct ttm_base_object **p_base)
{
        struct ttm_base_object *base = *p_base;
        struct vmw_user_fence *ufence =
                container_of(base, struct vmw_user_fence, base);
        struct vmw_fence_obj *fence = &ufence->fence;

        *p_base = NULL;
        vmw_fence_obj_unreference(&fence);
}

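/**
 * vmw_user_fence_create - Create a fence object with a user-space handle.
 *
 * @file_priv: The drm file of the caller.
 * @fman: Pointer to the fence manager.
 * @seqno: The seqno the fence signals on.
 * @p_fence: Assigned the new fence object on success.
 * @p_handle: Assigned a handle identifying the fence to user-space.
 *
 * Returns 0 on success, negative error code otherwise.
 */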
int vmw_user_fence_create(struct drm_file *file_priv,
                          struct vmw_fence_manager *fman,
                          uint32_t seqno,
                          struct vmw_fence_obj **p_fence,
                          uint32_t *p_handle)
{
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct vmw_user_fence *ufence;
        struct vmw_fence_obj *tmp;
        struct ttm_mem_global *mem_glob = vmw_mem_glob(fman->dev_priv);
        struct ttm_operation_ctx ctx = {
                .interruptible = false,
                .no_wait_gpu = false
        };
        int ret;

        /*
         * Kernel memory space accounting, since this object may
         * be created by a user-space request.
         */

        ret = ttm_mem_global_alloc(mem_glob, fman->user_fence_size,
                                   &ctx);
        if (unlikely(ret != 0))
                return ret;

        ufence = kzalloc(sizeof(*ufence), GFP_KERNEL);
        if (unlikely(!ufence)) {
                ret = -ENOMEM;
                goto out_no_object;
        }

        ret = vmw_fence_obj_init(fman, &ufence->fence, seqno,
                                 vmw_user_fence_destroy);
        if (unlikely(ret != 0)) {
                kfree(ufence);
                goto out_no_object;
        }

        /*
         * The base object holds a reference which is freed in
         * vmw_user_fence_base_release.
         */
        tmp = vmw_fence_obj_reference(&ufence->fence);
        ret = ttm_base_object_init(tfile, &ufence->base, false,
                                   VMW_RES_FENCE,
                                   &vmw_user_fence_base_release, NULL);

        if (unlikely(ret != 0)) {
                /*
                 * Free the base object's reference
                 */
                vmw_fence_obj_unreference(&tmp);
                goto out_err;
        }

        *p_fence = &ufence->fence;
        *p_handle = ufence->base.handle;

        return 0;
out_err:
        tmp = &ufence->fence;
        vmw_fence_obj_unreference(&tmp);
out_no_object:
        ttm_mem_global_free(mem_glob, fman->user_fence_size);
        return ret;
}

/**
 * vmw_wait_dma_fence - Wait for a dma fence
 *
 * @fman: pointer to a fence manager
 * @fence: DMA fence to wait on
 *
 * This function handles the case when the fence is actually a fence
 * array. If that's the case, it'll wait on each of the child fences.
 */
int vmw_wait_dma_fence(struct vmw_fence_manager *fman,
                       struct dma_fence *fence)
{
        struct dma_fence_array *fence_array;
        int ret = 0;
        int i;

        if (dma_fence_is_signaled(fence))
                return 0;

        if (!dma_fence_is_array(fence))
                return dma_fence_wait(fence, true);

        /* From i915: Note that if the fence-array was created in
         * signal-on-any mode, we should *not* decompose it into its individual
         * fences. However, we don't currently store which mode the fence-array
         * is operating in. Fortunately, the only user of signal-on-any is
         * private to amdgpu and we should not see any incoming fence-array
         * from sync-file being in signal-on-any mode.
         */

        fence_array = to_dma_fence_array(fence);
        for (i = 0; i < fence_array->num_fences; i++) {
                struct dma_fence *child = fence_array->fences[i];

                ret = dma_fence_wait(child, true);

                if (ret < 0)
                        return ret;
        }

        return 0;
}

/**
 * vmw_fence_fifo_down - Signal all unsignaled fence objects.
 *
 * @fman: Pointer to the fence manager.
 */

void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
{
        struct list_head action_list;
        int ret;

        /*
         * The list may be altered while we traverse it, so always
         * restart when we've released the fman->lock.
         */

        spin_lock(&fman->lock);
        fman->fifo_down = true;
        while (!list_empty(&fman->fence_list)) {
                struct vmw_fence_obj *fence =
                        list_entry(fman->fence_list.prev, struct vmw_fence_obj,
                                   head);
                dma_fence_get(&fence->base);
                spin_unlock(&fman->lock);

                ret = vmw_fence_obj_wait(fence, false, false,
                                         VMW_FENCE_WAIT_TIMEOUT);

                if (unlikely(ret != 0)) {
                        list_del_init(&fence->head);
                        dma_fence_signal(&fence->base);
                        INIT_LIST_HEAD(&action_list);
                        list_splice_init(&fence->seq_passed_actions,
                                         &action_list);
                        vmw_fences_perform_actions(fman, &action_list);
                }

                BUG_ON(!list_empty(&fence->head));
                dma_fence_put(&fence->base);
                spin_lock(&fman->lock);
        }
        spin_unlock(&fman->lock);
}

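/**
 * vmw_fence_fifo_up - Allow fence object creation again after a fifo down.
 *
 * @fman: Pointer to the fence manager.
 */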
void vmw_fence_fifo_up(struct vmw_fence_manager *fman)
{
        spin_lock(&fman->lock);
        fman->fifo_down = false;
        spin_unlock(&fman->lock);
}

/**
 * vmw_fence_obj_lookup - Look up a user-space fence object
 *
 * @tfile: A struct ttm_object_file identifying the caller.
 * @handle: A handle identifying the fence object.
 * @return: A struct vmw_user_fence base ttm object on success or
 * an error pointer on failure.
 *
 * The fence object is looked up and type-checked. The caller needs
 * to have opened the fence object first, but since that happens on
 * creation and fence objects aren't shareable, that's not an
 * issue currently.
 */
static struct ttm_base_object *
vmw_fence_obj_lookup(struct ttm_object_file *tfile, u32 handle)
{
        struct ttm_base_object *base = ttm_base_object_lookup(tfile, handle);

        if (!base) {
                pr_err("Invalid fence object handle 0x%08lx.\n",
                       (unsigned long)handle);
                return ERR_PTR(-EINVAL);
        }

        if (base->refcount_release != vmw_user_fence_base_release) {
                pr_err("Invalid fence object handle 0x%08lx.\n",
                       (unsigned long)handle);
                ttm_base_object_unref(&base);
                return ERR_PTR(-EINVAL);
        }

        return base;
}

int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *file_priv)
{
        struct drm_vmw_fence_wait_arg *arg =
                (struct drm_vmw_fence_wait_arg *)data;
        unsigned long timeout;
        struct ttm_base_object *base;
        struct vmw_fence_obj *fence;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        int ret;
        uint64_t wait_timeout = ((uint64_t)arg->timeout_us * HZ);

        /*
         * 64-bit division not present on 32-bit systems, so do an
         * approximation. (Divide by 1000000: 1/2^20 + 1/2^24 - 1/2^26
         * is roughly 1/10^6, accurate to within about 0.2%.)
         */

        wait_timeout = (wait_timeout >> 20) + (wait_timeout >> 24) -
                       (wait_timeout >> 26);

        if (!arg->cookie_valid) {
                arg->cookie_valid = 1;
                arg->kernel_cookie = jiffies + wait_timeout;
        }

        base = vmw_fence_obj_lookup(tfile, arg->handle);
        if (IS_ERR(base))
                return PTR_ERR(base);

        fence = &(container_of(base, struct vmw_user_fence, base)->fence);

        timeout = jiffies;
        if (time_after_eq(timeout, (unsigned long)arg->kernel_cookie)) {
                ret = ((vmw_fence_obj_signaled(fence)) ?
                       0 : -EBUSY);
                goto out;
        }

        timeout = (unsigned long)arg->kernel_cookie - timeout;

        ret = vmw_fence_obj_wait(fence, arg->lazy, true, timeout);

out:
        ttm_base_object_unref(&base);

        /*
         * Optionally unref the fence object.
         */

        if (ret == 0 && (arg->wait_options & DRM_VMW_WAIT_OPTION_UNREF))
                return ttm_ref_object_base_unref(tfile, arg->handle,
                                                 TTM_REF_USAGE);
        return ret;
}

int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
                                 struct drm_file *file_priv)
{
        struct drm_vmw_fence_signaled_arg *arg =
                (struct drm_vmw_fence_signaled_arg *) data;
        struct ttm_base_object *base;
        struct vmw_fence_obj *fence;
        struct vmw_fence_manager *fman;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct vmw_private *dev_priv = vmw_priv(dev);

        base = vmw_fence_obj_lookup(tfile, arg->handle);
        if (IS_ERR(base))
                return PTR_ERR(base);

        fence = &(container_of(base, struct vmw_user_fence, base)->fence);
        fman = fman_from_fence(fence);

        arg->signaled = vmw_fence_obj_signaled(fence);

        arg->signaled_flags = arg->flags;
        spin_lock(&fman->lock);
        arg->passed_seqno = dev_priv->last_read_seqno;
        spin_unlock(&fman->lock);

        ttm_base_object_unref(&base);

        return 0;
}

int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *file_priv)
{
        struct drm_vmw_fence_arg *arg =
                (struct drm_vmw_fence_arg *) data;

        return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
                                         arg->handle,
                                         TTM_REF_USAGE);
}

/**
 * vmw_event_fence_action_seq_passed
 *
 * @action: The struct vmw_fence_action embedded in a struct
 * vmw_event_fence_action.
 *
 * This function is called when the seqno of the fence where @action is
 * attached has passed. It queues the event on the submitter's event list.
 * This function is always called from atomic context.
 */
static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
{
        struct vmw_event_fence_action *eaction =
                container_of(action, struct vmw_event_fence_action, action);
        struct drm_device *dev = eaction->dev;
        struct drm_pending_event *event = eaction->event;

        if (unlikely(event == NULL))
                return;

        spin_lock_irq(&dev->event_lock);

        if (likely(eaction->tv_sec != NULL)) {
                struct timespec64 ts;

                ktime_get_ts64(&ts);
                /* monotonic time, so no y2038 overflow */
                *eaction->tv_sec = ts.tv_sec;
                *eaction->tv_usec = ts.tv_nsec / NSEC_PER_USEC;
        }

        drm_send_event_locked(dev, eaction->event);
        eaction->event = NULL;
        spin_unlock_irq(&dev->event_lock);
}

/**
 * vmw_event_fence_action_cleanup
 *
 * @action: The struct vmw_fence_action embedded in a struct
 * vmw_event_fence_action.
 *
 * This function is the struct vmw_fence_action destructor. It's typically
 * called from a workqueue.
 */
static void vmw_event_fence_action_cleanup(struct vmw_fence_action *action)
{
        struct vmw_event_fence_action *eaction =
                container_of(action, struct vmw_event_fence_action, action);

        vmw_fence_obj_unreference(&eaction->fence);
        kfree(eaction);
}


/**
 * vmw_fence_obj_add_action - Add an action to a fence object.
 *
 * @fence: The fence object.
 * @action: The action to add.
 *
 * Note that the action callbacks may be executed before this function
 * returns.
 */
static void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
                                     struct vmw_fence_action *action)
{
        struct vmw_fence_manager *fman = fman_from_fence(fence);
        bool run_update = false;

        mutex_lock(&fman->goal_irq_mutex);
        spin_lock(&fman->lock);

        fman->pending_actions[action->type]++;
        if (dma_fence_is_signaled_locked(&fence->base)) {
                struct list_head action_list;

                INIT_LIST_HEAD(&action_list);
                list_add_tail(&action->head, &action_list);
                vmw_fences_perform_actions(fman, &action_list);
        } else {
                list_add_tail(&action->head, &fence->seq_passed_actions);

                /*
                 * This function may set fman::seqno_valid, so it must
                 * be run with the goal_irq_mutex held.
                 */
                run_update = vmw_fence_goal_check_locked(fence);
        }

        spin_unlock(&fman->lock);

        if (run_update) {
                if (!fman->goal_irq_on) {
                        fman->goal_irq_on = true;
                        vmw_goal_waiter_add(fman->dev_priv);
                }
                vmw_fences_update(fman);
        }
        mutex_unlock(&fman->goal_irq_mutex);
}

/**
 * vmw_event_fence_action_queue - Post an event for sending when a fence
 * object seqno has passed.
 *
 * @file_priv: The file connection on which the event should be posted.
 * @fence: The fence object on which to post the event.
 * @event: Event to be posted. This event should've been allocated
 * using k[mz]alloc, and should've been completely initialized.
 * @tv_sec: If non-NULL, assigned the current time tv_sec value when the
 * fence signals.
 * @tv_usec: Must be set if @tv_sec is set; assigned the current time
 * tv_usec value when the fence signals.
 * @interruptible: Interruptible waits if possible.
 *
 * As a side effect, the object pointed to by @event may have been
 * freed when this function returns. If this function returns with
 * an error code, the caller needs to free that object.
 */

Jakob Bornecrantz | 8b7de6a | 2012-02-09 16:56:41 +0100 | [diff] [blame] | 1015 | int vmw_event_fence_action_queue(struct drm_file *file_priv, |
| 1016 | struct vmw_fence_obj *fence, |
| 1017 | struct drm_pending_event *event, |
| 1018 | uint32_t *tv_sec, |
| 1019 | uint32_t *tv_usec, |
| 1020 | bool interruptible) |
Thomas Hellstrom | 57c5ee7 | 2011-10-10 12:23:26 +0200 | [diff] [blame] | 1021 | { |
Dan Carpenter | 0c5d370 | 2011-10-18 09:09:45 +0300 | [diff] [blame] | 1022 | struct vmw_event_fence_action *eaction; |
Maarten Lankhorst | 2298e80 | 2014-03-26 14:07:44 +0100 | [diff] [blame] | 1023 | struct vmw_fence_manager *fman = fman_from_fence(fence); |
Thomas Hellstrom | 57c5ee7 | 2011-10-10 12:23:26 +0200 | [diff] [blame] | 1024 | |
| 1025 | eaction = kzalloc(sizeof(*eaction), GFP_KERNEL); |
Ravikant B Sharma | 1a4adb0 | 2016-11-08 17:30:31 +0530 | [diff] [blame] | 1026 | if (unlikely(!eaction)) |
Thomas Hellstrom | 57c5ee7 | 2011-10-10 12:23:26 +0200 | [diff] [blame] | 1027 | return -ENOMEM; |
Thomas Hellstrom | 57c5ee7 | 2011-10-10 12:23:26 +0200 | [diff] [blame] | 1028 | |
Jakob Bornecrantz | 8b7de6a | 2012-02-09 16:56:41 +0100 | [diff] [blame] | 1029 | eaction->event = event; |
Thomas Hellstrom | 57c5ee7 | 2011-10-10 12:23:26 +0200 | [diff] [blame] | 1030 | |
| 1031 | eaction->action.seq_passed = vmw_event_fence_action_seq_passed; |
| 1032 | eaction->action.cleanup = vmw_event_fence_action_cleanup; |
| 1033 | eaction->action.type = VMW_ACTION_EVENT; |
| 1034 | |
| 1035 | eaction->fence = vmw_fence_obj_reference(fence); |
| 1036 | eaction->dev = fman->dev_priv->dev; |
Thomas Hellstrom | 57c5ee7 | 2011-10-10 12:23:26 +0200 | [diff] [blame] | 1037 | eaction->tv_sec = tv_sec; |
| 1038 | eaction->tv_usec = tv_usec; |
| 1039 | |
Thomas Hellstrom | 57c5ee7 | 2011-10-10 12:23:26 +0200 | [diff] [blame] | 1040 | vmw_fence_obj_add_action(fence, &eaction->action); |
| 1041 | |
| 1042 | return 0; |
| 1043 | } |
| 1044 | |
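/**
 * struct vmw_event_fence_pending - A pending DRM event carrying a
 * struct drm_vmw_event_fence payload.
 *
 * @base: DRM pending event bookkeeping.
 * @event: The event payload sent to user-space when the fence action
 * fires.
 */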
Jakob Bornecrantz | 8b7de6a | 2012-02-09 16:56:41 +0100 | [diff] [blame] | 1045 | struct vmw_event_fence_pending { |
| 1046 | struct drm_pending_event base; |
| 1047 | struct drm_vmw_event_fence event; |
| 1048 | }; |
| 1049 | |
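/**
 * vmw_event_fence_action_create - Allocate a fence-signaled event and
 * queue it on a fence object.
 *
 * @file_priv: The file connection on which the event should be posted.
 * @fence: The fence object on which to post the event.
 * @flags: If DRM_VMW_FE_FLAG_REQ_TIME is set, the event is timestamped
 * when it is sent.
 * @user_data: Opaque value copied back to user-space with the event.
 * @interruptible: Interruptible waits if possible.
 *
 * Reserves event space on @file_priv and queues a
 * DRM_VMW_EVENT_FENCE_SIGNALED event to be delivered when @fence
 * signals.
 */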
Rashika Kheria | 94844cf | 2014-01-06 22:21:21 +0530 | [diff] [blame] | 1050 | static int vmw_event_fence_action_create(struct drm_file *file_priv, |
Jakob Bornecrantz | 8b7de6a | 2012-02-09 16:56:41 +0100 | [diff] [blame] | 1051 | struct vmw_fence_obj *fence, |
| 1052 | uint32_t flags, |
| 1053 | uint64_t user_data, |
| 1054 | bool interruptible) |
| 1055 | { |
| 1056 | struct vmw_event_fence_pending *event; |
Maarten Lankhorst | 2298e80 | 2014-03-26 14:07:44 +0100 | [diff] [blame] | 1057 | struct vmw_fence_manager *fman = fman_from_fence(fence); |
| 1058 | struct drm_device *dev = fman->dev_priv->dev; |
Jakob Bornecrantz | 8b7de6a | 2012-02-09 16:56:41 +0100 | [diff] [blame] | 1059 | int ret; |
| 1060 | |
Dan Carpenter | 68c4fce | 2012-09-23 19:33:55 +0300 | [diff] [blame] | 1061 | event = kzalloc(sizeof(*event), GFP_KERNEL); |
Ravikant B Sharma | 1a4adb0 | 2016-11-08 17:30:31 +0530 | [diff] [blame] | 1062 | if (unlikely(!event)) { |
Jakob Bornecrantz | 8b7de6a | 2012-02-09 16:56:41 +0100 | [diff] [blame] | 1063 | DRM_ERROR("Failed to allocate an event.\n"); |
| 1064 | ret = -ENOMEM; |
Daniel Vetter | 6d3729a | 2016-01-11 22:40:58 +0100 | [diff] [blame] | 1065 | goto out_no_space; |
Jakob Bornecrantz | 8b7de6a | 2012-02-09 16:56:41 +0100 | [diff] [blame] | 1066 | } |
| 1067 | |
| 1068 | event->event.base.type = DRM_VMW_EVENT_FENCE_SIGNALED; |
| 1069 | event->event.base.length = sizeof(*event); |
| 1070 | event->event.user_data = user_data; |
| 1071 | |
Daniel Vetter | 6d3729a | 2016-01-11 22:40:58 +0100 | [diff] [blame] | 1072 | ret = drm_event_reserve_init(dev, file_priv, &event->base, &event->event.base); |
Jakob Bornecrantz | 8b7de6a | 2012-02-09 16:56:41 +0100 | [diff] [blame] | 1073 | |
Daniel Vetter | 6d3729a | 2016-01-11 22:40:58 +0100 | [diff] [blame] | 1074 | if (unlikely(ret != 0)) { |
| 1075 | DRM_ERROR("Failed to allocate event space for this file.\n"); |
| 1076 | kfree(event); |
| 1077 | goto out_no_space; |
| 1078 | } |
Jakob Bornecrantz | 8b7de6a | 2012-02-09 16:56:41 +0100 | [diff] [blame] | 1079 | |
| 1080 | if (flags & DRM_VMW_FE_FLAG_REQ_TIME) |
| 1081 | ret = vmw_event_fence_action_queue(file_priv, fence, |
| 1082 | &event->base, |
| 1083 | &event->event.tv_sec, |
| 1084 | &event->event.tv_usec, |
| 1085 | interruptible); |
| 1086 | else |
| 1087 | ret = vmw_event_fence_action_queue(file_priv, fence, |
| 1088 | &event->base, |
| 1089 | NULL, |
| 1090 | NULL, |
| 1091 | interruptible); |
| 1092 | if (ret != 0) |
| 1093 | goto out_no_queue; |
| 1094 | |
Thomas Hellstrom | 89669e7 | 2014-12-02 03:36:57 -0800 | [diff] [blame] | 1095 | return 0; |
| 1096 | |
Jakob Bornecrantz | 8b7de6a | 2012-02-09 16:56:41 +0100 | [diff] [blame] | 1097 | out_no_queue: |
Daniel Vetter | 6d3729a | 2016-01-11 22:40:58 +0100 | [diff] [blame] | 1098 | drm_event_cancel_free(dev, &event->base); |
Jakob Bornecrantz | 8b7de6a | 2012-02-09 16:56:41 +0100 | [diff] [blame] | 1099 | out_no_space: |
| 1100 | return ret; |
| 1101 | } |
| 1102 | |
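/**
 * vmw_fence_event_ioctl - Implements the DRM_VMW_FENCE_EVENT ioctl.
 *
 * @dev: The DRM device.
 * @data: Pointer to a struct drm_vmw_fence_event_arg.
 * @file_priv: The calling file connection.
 *
 * Looks up the fence object identified by the argument's handle, or, if
 * the handle is zero, creates a new fence from the currently submitted
 * command stream, then attaches a signaled event to it. If a fence_rep
 * pointer is supplied, a new user-space reference to the fence is
 * returned as well.
 *
 * A minimal user-space sketch, assuming libdrm's drmCommandWrite() and
 * an open vmwgfx DRM fd; my_cookie is a caller-chosen placeholder:
 *
 *	struct drm_vmw_fence_event_arg arg = {
 *		.fence_rep = 0,		/* no fence handle wanted back */
 *		.user_data = my_cookie,	/* returned in the event */
 *		.handle = 0,		/* create a new fence */
 *		.flags = DRM_VMW_FE_FLAG_REQ_TIME,
 *	};
 *
 *	drmCommandWrite(fd, DRM_VMW_FENCE_EVENT, &arg, sizeof(arg));
 *	/* Then read() DRM events from fd and match on user_data. */
 */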
Thomas Hellstrom | 57c5ee7 | 2011-10-10 12:23:26 +0200 | [diff] [blame] | 1103 | int vmw_fence_event_ioctl(struct drm_device *dev, void *data, |
| 1104 | struct drm_file *file_priv) |
| 1105 | { |
| 1106 | struct vmw_private *dev_priv = vmw_priv(dev); |
| 1107 | struct drm_vmw_fence_event_arg *arg = |
| 1108 | (struct drm_vmw_fence_event_arg *) data; |
| 1109 | struct vmw_fence_obj *fence = NULL; |
| 1110 | struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv); |
Thomas Hellstrom | f7652af | 2017-03-27 11:09:08 +0200 | [diff] [blame] | 1111 | struct ttm_object_file *tfile = vmw_fp->tfile; |
Thomas Hellstrom | 57c5ee7 | 2011-10-10 12:23:26 +0200 | [diff] [blame] | 1112 | struct drm_vmw_fence_rep __user *user_fence_rep = |
| 1113 | (struct drm_vmw_fence_rep __user *)(unsigned long) |
| 1114 | arg->fence_rep; |
| 1115 | uint32_t handle; |
Thomas Hellstrom | 57c5ee7 | 2011-10-10 12:23:26 +0200 | [diff] [blame] | 1116 | int ret; |
| 1117 | |
	/*
	 * Look up an existing fence object, and if user-space wants a
	 * new reference, add one.
	 */
| 1123 | if (arg->handle) { |
| 1124 | struct ttm_base_object *base = |
Thomas Hellstrom | f7652af | 2017-03-27 11:09:08 +0200 | [diff] [blame] | 1125 | vmw_fence_obj_lookup(tfile, arg->handle); |
Thomas Hellstrom | 57c5ee7 | 2011-10-10 12:23:26 +0200 | [diff] [blame] | 1126 | |
Thomas Hellstrom | f7652af | 2017-03-27 11:09:08 +0200 | [diff] [blame] | 1127 | if (IS_ERR(base)) |
| 1128 | return PTR_ERR(base); |
| 1129 | |
Thomas Hellstrom | 57c5ee7 | 2011-10-10 12:23:26 +0200 | [diff] [blame] | 1130 | fence = &(container_of(base, struct vmw_user_fence, |
| 1131 | base)->fence); |
| 1132 | (void) vmw_fence_obj_reference(fence); |
| 1133 | |
| 1134 | if (user_fence_rep != NULL) { |
			ret = ttm_ref_object_add(tfile, base,
						 TTM_REF_USAGE, NULL, false);
			if (unlikely(ret != 0)) {
				DRM_ERROR("Failed to reference a fence object.\n");
				goto out_no_ref_obj;
			}
Thomas Hellstrom | c7eae62 | 2018-09-26 15:50:13 +0200 | [diff] [blame] | 1142 | handle = base->handle; |
Thomas Hellstrom | 57c5ee7 | 2011-10-10 12:23:26 +0200 | [diff] [blame] | 1143 | } |
| 1144 | ttm_base_object_unref(&base); |
| 1145 | } |
| 1146 | |
| 1147 | /* |
| 1148 | * Create a new fence object. |
| 1149 | */ |
| 1150 | if (!fence) { |
| 1151 | ret = vmw_execbuf_fence_commands(file_priv, dev_priv, |
| 1152 | &fence, |
| 1153 | (user_fence_rep) ? |
| 1154 | &handle : NULL); |
| 1155 | if (unlikely(ret != 0)) { |
| 1156 | DRM_ERROR("Fence event failed to create fence.\n"); |
| 1157 | return ret; |
| 1158 | } |
| 1159 | } |
| 1160 | |
| 1161 | BUG_ON(fence == NULL); |
| 1162 | |
Thomas Hellstrom | 89669e7 | 2014-12-02 03:36:57 -0800 | [diff] [blame] | 1163 | ret = vmw_event_fence_action_create(file_priv, fence, |
| 1164 | arg->flags, |
| 1165 | arg->user_data, |
| 1166 | true); |
Thomas Hellstrom | 57c5ee7 | 2011-10-10 12:23:26 +0200 | [diff] [blame] | 1167 | if (unlikely(ret != 0)) { |
| 1168 | if (ret != -ERESTARTSYS) |
| 1169 | DRM_ERROR("Failed to attach event to fence.\n"); |
Jakob Bornecrantz | 8b7de6a | 2012-02-09 16:56:41 +0100 | [diff] [blame] | 1170 | goto out_no_create; |
Thomas Hellstrom | 57c5ee7 | 2011-10-10 12:23:26 +0200 | [diff] [blame] | 1171 | } |
| 1172 | |
| 1173 | vmw_execbuf_copy_fence_user(dev_priv, vmw_fp, 0, user_fence_rep, fence, |
Sinclair Yeh | c906965d | 2017-07-05 01:49:32 -0700 | [diff] [blame] | 1174 | handle, -1, NULL); |
Thomas Hellstrom | 57c5ee7 | 2011-10-10 12:23:26 +0200 | [diff] [blame] | 1175 | vmw_fence_obj_unreference(&fence); |
| 1176 | return 0; |
Jakob Bornecrantz | 8b7de6a | 2012-02-09 16:56:41 +0100 | [diff] [blame] | 1177 | out_no_create: |
Thomas Hellstrom | 57c5ee7 | 2011-10-10 12:23:26 +0200 | [diff] [blame] | 1178 | if (user_fence_rep != NULL) |
Thomas Hellstrom | f7652af | 2017-03-27 11:09:08 +0200 | [diff] [blame] | 1179 | ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE); |
Thomas Hellstrom | 57c5ee7 | 2011-10-10 12:23:26 +0200 | [diff] [blame] | 1180 | out_no_ref_obj: |
| 1181 | vmw_fence_obj_unreference(&fence); |
| 1182 | return ret; |
| 1183 | } |