/**************************************************************************
 *
 * Copyright © 2011-2014 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/drmP.h>
#include "vmwgfx_drv.h"

#define VMW_FENCE_WRAP (1 << 31)
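
/*
 * Fence seqnos are 32-bit values that wrap around. "Seqno b has passed
 * seqno a" is therefore tested throughout this file with an unsigned
 * subtraction against VMW_FENCE_WRAP. A minimal illustrative helper
 * (an editorial sketch, not part of the driver; the comparisons below
 * stay open-coded):
 */
static inline bool vmw_seqno_passed_sketch(u32 current_seqno, u32 fence_seqno)
{
	/* True iff fence_seqno trails by less than VMW_FENCE_WRAP. */
	return (current_seqno - fence_seqno) < VMW_FENCE_WRAP;
}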

struct vmw_fence_manager {
	int num_fence_objects;
	struct vmw_private *dev_priv;
	spinlock_t lock;
	struct list_head fence_list;
	struct work_struct work;
	u32 user_fence_size;
	u32 fence_size;
	u32 event_fence_action_size;
	bool fifo_down;
	struct list_head cleanup_list;
	uint32_t pending_actions[VMW_ACTION_MAX];
	struct mutex goal_irq_mutex;
	bool goal_irq_on; /* Protected by @goal_irq_mutex */
	bool seqno_valid; /* Protected by @lock, and may not be set to true
			     without the @goal_irq_mutex held. */
	u64 ctx;
};

struct vmw_user_fence {
	struct ttm_base_object base;
	struct vmw_fence_obj fence;
};

/**
 * struct vmw_event_fence_action - fence action that delivers a drm event.
 *
 * @action: A struct vmw_fence_action to hook up to a fence.
 * @event: A struct drm_pending_event that controls the event delivery.
 * @fence: A referenced pointer to the fence to keep it alive while @action
 * hangs on it.
 * @dev: Pointer to a struct drm_device so we can access the event machinery.
 * @tv_sec: If non-NULL, the variable pointed to will be assigned the
 * current time tv_sec value when the fence signals.
 * @tv_usec: Must be set if @tv_sec is set, and the variable pointed to will
 * be assigned the current time tv_usec value when the fence signals.
 */
struct vmw_event_fence_action {
	struct vmw_fence_action action;

	struct drm_pending_event *event;
	struct vmw_fence_obj *fence;
	struct drm_device *dev;

	uint32_t *tv_sec;
	uint32_t *tv_usec;
};

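/*
 * A fence's manager is recovered through the embedded dma_fence: since
 * vmw_fence_obj_init() below passes &fman->lock to dma_fence_init(), the
 * fence's lock pointer can be turned back into the enclosing manager.
 */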
static struct vmw_fence_manager *
fman_from_fence(struct vmw_fence_obj *fence)
{
	return container_of(fence->base.lock, struct vmw_fence_manager, lock);
}

/*
 * Note on fencing subsystem usage of irqs:
 * Typically the vmw_fences_update function is called
 *
 * a) When a new fence seqno has been submitted by the fifo code.
 * b) On-demand when we have waiters. Sleeping waiters will switch on the
 * ANY_FENCE irq and call the vmw_fences_update function each time an
 * ANY_FENCE irq is received. When the last fence waiter is gone, that irq
 * is masked away.
 *
 * In situations where there are no waiters and we don't submit any new fences,
 * fence objects may not be signaled. This is perfectly OK, since there are
 * no consumers of the signaled data, but that is NOT OK when there are fence
 * actions attached to a fence. The fencing subsystem then makes use of the
 * FENCE_GOAL irq and sets the fence goal seqno to that of the next fence
 * which has an action attached, and each time vmw_fences_update is called,
 * the subsystem makes sure the fence goal seqno is updated.
 *
 * The fence goal seqno irq is on as long as there are unsignaled fence
 * objects with actions attached to them.
 */
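
/*
 * Condensed sketch of the on-demand waiter flow described above (editorial
 * illustration only; the real, callback-driven implementation is
 * vmw_fence_wait() below):
 *
 *	vmw_seqno_waiter_add(dev_priv);     // first waiter unmasks ANY_FENCE
 *	while (!signaled)
 *		wait for irq, then vmw_fences_update(fman);
 *	vmw_seqno_waiter_remove(dev_priv);  // last waiter masks it again
 */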

static void vmw_fence_obj_destroy(struct dma_fence *f)
{
	struct vmw_fence_obj *fence =
		container_of(f, struct vmw_fence_obj, base);

	struct vmw_fence_manager *fman = fman_from_fence(fence);

	spin_lock(&fman->lock);
	list_del_init(&fence->head);
	--fman->num_fence_objects;
	spin_unlock(&fman->lock);
	fence->destroy(fence);
}

static const char *vmw_fence_get_driver_name(struct dma_fence *f)
{
	return "vmwgfx";
}

static const char *vmw_fence_get_timeline_name(struct dma_fence *f)
{
	return "svga";
}

static bool vmw_fence_enable_signaling(struct dma_fence *f)
{
	struct vmw_fence_obj *fence =
		container_of(f, struct vmw_fence_obj, base);

	struct vmw_fence_manager *fman = fman_from_fence(fence);
	struct vmw_private *dev_priv = fman->dev_priv;

	u32 *fifo_mem = dev_priv->mmio_virt;
	u32 seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);

	if (seqno - fence->base.seqno < VMW_FENCE_WRAP)
		return false;

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);

	return true;
}

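/*
 * Bookkeeping for a task sleeping in vmw_fence_wait(): a dma_fence
 * callback that simply wakes the parked task when the fence signals.
 */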
struct vmwgfx_wait_cb {
	struct dma_fence_cb base;
	struct task_struct *task;
};

static void
vmwgfx_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct vmwgfx_wait_cb *wait =
		container_of(cb, struct vmwgfx_wait_cb, base);

	wake_up_process(wait->task);
}

static void __vmw_fences_update(struct vmw_fence_manager *fman);

static long vmw_fence_wait(struct dma_fence *f, bool intr, signed long timeout)
{
	struct vmw_fence_obj *fence =
		container_of(f, struct vmw_fence_obj, base);

	struct vmw_fence_manager *fman = fman_from_fence(fence);
	struct vmw_private *dev_priv = fman->dev_priv;
	struct vmwgfx_wait_cb cb;
	long ret = timeout;
	unsigned long irq_flags;

	if (likely(vmw_fence_obj_signaled(fence)))
		return timeout;

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
	vmw_seqno_waiter_add(dev_priv);

	spin_lock_irqsave(f->lock, irq_flags);

	if (intr && signal_pending(current)) {
		ret = -ERESTARTSYS;
		goto out;
	}

	cb.base.func = vmwgfx_wait_cb;
	cb.task = current;
	list_add(&cb.base.node, &f->cb_list);

	while (ret > 0) {
		__vmw_fences_update(fman);
		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags))
			break;

		if (intr)
			__set_current_state(TASK_INTERRUPTIBLE);
		else
			__set_current_state(TASK_UNINTERRUPTIBLE);
		spin_unlock_irqrestore(f->lock, irq_flags);

		ret = schedule_timeout(ret);

		spin_lock_irqsave(f->lock, irq_flags);
		if (ret > 0 && intr && signal_pending(current))
			ret = -ERESTARTSYS;
	}

	if (!list_empty(&cb.base.node))
		list_del(&cb.base.node);
	__set_current_state(TASK_RUNNING);

out:
	spin_unlock_irqrestore(f->lock, irq_flags);

	vmw_seqno_waiter_remove(dev_priv);

	return ret;
}

static struct dma_fence_ops vmw_fence_ops = {
	.get_driver_name = vmw_fence_get_driver_name,
	.get_timeline_name = vmw_fence_get_timeline_name,
	.enable_signaling = vmw_fence_enable_signaling,
	.wait = vmw_fence_wait,
	.release = vmw_fence_obj_destroy,
};


/*
 * Execute signal actions on fences recently signaled.
 * This is done from a workqueue so we don't have to execute
 * signal actions from atomic context.
 */

static void vmw_fence_work_func(struct work_struct *work)
{
	struct vmw_fence_manager *fman =
		container_of(work, struct vmw_fence_manager, work);
	struct list_head list;
	struct vmw_fence_action *action, *next_action;
	bool seqno_valid;

	do {
		INIT_LIST_HEAD(&list);
		mutex_lock(&fman->goal_irq_mutex);

		spin_lock(&fman->lock);
		list_splice_init(&fman->cleanup_list, &list);
		seqno_valid = fman->seqno_valid;
		spin_unlock(&fman->lock);

		if (!seqno_valid && fman->goal_irq_on) {
			fman->goal_irq_on = false;
			vmw_goal_waiter_remove(fman->dev_priv);
		}
		mutex_unlock(&fman->goal_irq_mutex);

		if (list_empty(&list))
			return;

		/*
		 * At this point, only we should be able to manipulate the
		 * list heads of the actions we have on the private list,
		 * hence fman::lock is not held.
		 */

		list_for_each_entry_safe(action, next_action, &list, head) {
			list_del_init(&action->head);
			if (action->cleanup)
				action->cleanup(action);
		}
	} while (1);
}

struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
{
	struct vmw_fence_manager *fman = kzalloc(sizeof(*fman), GFP_KERNEL);

	if (unlikely(!fman))
		return NULL;

	fman->dev_priv = dev_priv;
	spin_lock_init(&fman->lock);
	INIT_LIST_HEAD(&fman->fence_list);
	INIT_LIST_HEAD(&fman->cleanup_list);
	INIT_WORK(&fman->work, &vmw_fence_work_func);
	fman->fifo_down = true;
	fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence));
	fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj));
	fman->event_fence_action_size =
		ttm_round_pot(sizeof(struct vmw_event_fence_action));
	mutex_init(&fman->goal_irq_mutex);
	fman->ctx = dma_fence_context_alloc(1);

	return fman;
}

void vmw_fence_manager_takedown(struct vmw_fence_manager *fman)
{
	bool lists_empty;

	(void) cancel_work_sync(&fman->work);

	spin_lock(&fman->lock);
	lists_empty = list_empty(&fman->fence_list) &&
		list_empty(&fman->cleanup_list);
	spin_unlock(&fman->lock);

	BUG_ON(!lists_empty);
	kfree(fman);
}

static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
			      struct vmw_fence_obj *fence, u32 seqno,
			      void (*destroy) (struct vmw_fence_obj *fence))
{
	int ret = 0;

	dma_fence_init(&fence->base, &vmw_fence_ops, &fman->lock,
		       fman->ctx, seqno);
	INIT_LIST_HEAD(&fence->seq_passed_actions);
	fence->destroy = destroy;

	spin_lock(&fman->lock);
	if (unlikely(fman->fifo_down)) {
		ret = -EBUSY;
		goto out_unlock;
	}
	list_add_tail(&fence->head, &fman->fence_list);
	++fman->num_fence_objects;

out_unlock:
	spin_unlock(&fman->lock);
	return ret;
}

static void vmw_fences_perform_actions(struct vmw_fence_manager *fman,
				       struct list_head *list)
{
	struct vmw_fence_action *action, *next_action;

	list_for_each_entry_safe(action, next_action, list, head) {
		list_del_init(&action->head);
		fman->pending_actions[action->type]--;
		if (action->seq_passed != NULL)
			action->seq_passed(action);

		/*
		 * Add the cleanup action to the cleanup list so that
		 * it will be performed by a worker task.
		 */

		list_add_tail(&action->head, &fman->cleanup_list);
	}
}

/**
 * vmw_fence_goal_new_locked - Figure out a new device fence goal
 * seqno if needed.
 *
 * @fman: Pointer to a fence manager.
 * @passed_seqno: The seqno the device currently signals as passed.
 *
 * This function should be called with the fence manager lock held.
 * It is typically called when we have a new passed_seqno, and
 * we might need to update the fence goal. It checks to see whether
 * the current fence goal has already passed, and, in that case,
 * scans through all unsignaled fences to get the next fence object with an
 * action attached, and sets the seqno of that fence as a new fence goal.
 *
 * Returns true if the device goal seqno was updated. False otherwise.
 */
static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
				      u32 passed_seqno)
{
	u32 goal_seqno;
	u32 *fifo_mem;
	struct vmw_fence_obj *fence;

	if (likely(!fman->seqno_valid))
		return false;

	fifo_mem = fman->dev_priv->mmio_virt;
	goal_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE_GOAL);
	if (likely(passed_seqno - goal_seqno >= VMW_FENCE_WRAP))
		return false;

	fman->seqno_valid = false;
	list_for_each_entry(fence, &fman->fence_list, head) {
		if (!list_empty(&fence->seq_passed_actions)) {
			fman->seqno_valid = true;
			vmw_mmio_write(fence->base.seqno,
				       fifo_mem + SVGA_FIFO_FENCE_GOAL);
			break;
		}
	}

	return true;
}


/**
 * vmw_fence_goal_check_locked - Replace the device fence goal seqno if
 * needed.
 *
 * @fence: Pointer to a struct vmw_fence_obj the seqno of which should be
 * considered as a device fence goal.
 *
 * This function should be called with the fence manager lock held.
 * It is typically called when an action has been attached to a fence to
 * check whether the seqno of that fence should be used for a fence
 * goal interrupt. This is typically needed if the current fence goal is
 * invalid, or has a higher seqno than that of the current fence object.
 *
 * Returns true if the device goal seqno was updated. False otherwise.
 */
static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence)
{
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	u32 goal_seqno;
	u32 *fifo_mem;

	if (dma_fence_is_signaled_locked(&fence->base))
		return false;

	fifo_mem = fman->dev_priv->mmio_virt;
	goal_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE_GOAL);
	if (likely(fman->seqno_valid &&
		   goal_seqno - fence->base.seqno < VMW_FENCE_WRAP))
		return false;

	vmw_mmio_write(fence->base.seqno, fifo_mem + SVGA_FIFO_FENCE_GOAL);
	fman->seqno_valid = true;

	return true;
}

static void __vmw_fences_update(struct vmw_fence_manager *fman)
{
	struct vmw_fence_obj *fence, *next_fence;
	struct list_head action_list;
	bool needs_rerun;
	uint32_t seqno, new_seqno;
	u32 *fifo_mem = fman->dev_priv->mmio_virt;

	seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
rerun:
	list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
		if (seqno - fence->base.seqno < VMW_FENCE_WRAP) {
			list_del_init(&fence->head);
			dma_fence_signal_locked(&fence->base);
			INIT_LIST_HEAD(&action_list);
			list_splice_init(&fence->seq_passed_actions,
					 &action_list);
			vmw_fences_perform_actions(fman, &action_list);
		} else
			break;
	}

	/*
	 * Rerun if the fence goal seqno was updated, and the
	 * hardware might have raced with that update, so that
	 * we missed a fence_goal irq.
	 */

	needs_rerun = vmw_fence_goal_new_locked(fman, seqno);
	if (unlikely(needs_rerun)) {
		new_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
		if (new_seqno != seqno) {
			seqno = new_seqno;
			goto rerun;
		}
	}

	if (!list_empty(&fman->cleanup_list))
		(void) schedule_work(&fman->work);
}

void vmw_fences_update(struct vmw_fence_manager *fman)
{
	spin_lock(&fman->lock);
	__vmw_fences_update(fman);
	spin_unlock(&fman->lock);
}

bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence)
{
	struct vmw_fence_manager *fman = fman_from_fence(fence);

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
		return true;

	vmw_fences_update(fman);

	return dma_fence_is_signaled(&fence->base);
}

int vmw_fence_obj_wait(struct vmw_fence_obj *fence, bool lazy,
		       bool interruptible, unsigned long timeout)
{
	long ret = dma_fence_wait_timeout(&fence->base, interruptible, timeout);

	if (likely(ret > 0))
		return 0;
	else if (ret == 0)
		return -EBUSY;
	else
		return ret;
}

void vmw_fence_obj_flush(struct vmw_fence_obj *fence)
{
	struct vmw_private *dev_priv = fman_from_fence(fence)->dev_priv;

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
}

static void vmw_fence_destroy(struct vmw_fence_obj *fence)
{
	dma_fence_free(&fence->base);
}

int vmw_fence_create(struct vmw_fence_manager *fman,
		     uint32_t seqno,
		     struct vmw_fence_obj **p_fence)
{
	struct vmw_fence_obj *fence;
	int ret;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (unlikely(!fence))
		return -ENOMEM;

	ret = vmw_fence_obj_init(fman, fence, seqno,
				 vmw_fence_destroy);
	if (unlikely(ret != 0))
		goto out_err_init;

	*p_fence = fence;
	return 0;

out_err_init:
	kfree(fence);
	return ret;
}
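
/*
 * Minimal kernel-internal usage sketch for the functions above (editorial
 * illustration, not driver code; error handling is elided and the seqno is
 * assumed to have just been emitted to the FIFO):
 *
 *	struct vmw_fence_obj *fence;
 *
 *	if (vmw_fence_create(fman, seqno, &fence) == 0) {
 *		(void) vmw_fence_obj_wait(fence, false, true,
 *					  VMW_FENCE_WAIT_TIMEOUT);
 *		vmw_fence_obj_unreference(&fence);
 *	}
 */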


static void vmw_user_fence_destroy(struct vmw_fence_obj *fence)
{
	struct vmw_user_fence *ufence =
		container_of(fence, struct vmw_user_fence, fence);
	struct vmw_fence_manager *fman = fman_from_fence(fence);

	ttm_base_object_kfree(ufence, base);
	/*
	 * Free kernel space accounting.
	 */
	ttm_mem_global_free(vmw_mem_glob(fman->dev_priv),
			    fman->user_fence_size);
}

static void vmw_user_fence_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_fence *ufence =
		container_of(base, struct vmw_user_fence, base);
	struct vmw_fence_obj *fence = &ufence->fence;

	*p_base = NULL;
	vmw_fence_obj_unreference(&fence);
}

int vmw_user_fence_create(struct drm_file *file_priv,
			  struct vmw_fence_manager *fman,
			  uint32_t seqno,
			  struct vmw_fence_obj **p_fence,
			  uint32_t *p_handle)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_user_fence *ufence;
	struct vmw_fence_obj *tmp;
	struct ttm_mem_global *mem_glob = vmw_mem_glob(fman->dev_priv);
	int ret;

	/*
	 * Kernel memory space accounting, since this object may
	 * be created by a user-space request.
	 */

	ret = ttm_mem_global_alloc(mem_glob, fman->user_fence_size,
				   false, false);
	if (unlikely(ret != 0))
		return ret;

	ufence = kzalloc(sizeof(*ufence), GFP_KERNEL);
	if (unlikely(!ufence)) {
		ret = -ENOMEM;
		goto out_no_object;
	}

	ret = vmw_fence_obj_init(fman, &ufence->fence, seqno,
				 vmw_user_fence_destroy);
	if (unlikely(ret != 0)) {
		kfree(ufence);
		goto out_no_object;
	}

	/*
	 * The base object holds a reference which is freed in
	 * vmw_user_fence_base_release.
	 */
	tmp = vmw_fence_obj_reference(&ufence->fence);
	ret = ttm_base_object_init(tfile, &ufence->base, false,
				   VMW_RES_FENCE,
				   &vmw_user_fence_base_release, NULL);

	if (unlikely(ret != 0)) {
		/*
		 * Free the base object's reference
		 */
		vmw_fence_obj_unreference(&tmp);
		goto out_err;
	}

	*p_fence = &ufence->fence;
	*p_handle = ufence->base.hash.key;

	return 0;
out_err:
	tmp = &ufence->fence;
	vmw_fence_obj_unreference(&tmp);
out_no_object:
	ttm_mem_global_free(mem_glob, fman->user_fence_size);
	return ret;
}


/**
 * vmw_wait_dma_fence - Wait for a dma fence
 *
 * @fman: pointer to a fence manager
 * @fence: DMA fence to wait on
 *
 * This function handles the case where the fence is actually a fence
 * array. If that is the case, it will wait on each of the child fences.
 */
int vmw_wait_dma_fence(struct vmw_fence_manager *fman,
		       struct dma_fence *fence)
{
	struct dma_fence_array *fence_array;
	int ret = 0;
	int i;

	if (dma_fence_is_signaled(fence))
		return 0;

	if (!dma_fence_is_array(fence))
		return dma_fence_wait(fence, true);

	/* From i915: Note that if the fence-array was created in
	 * signal-on-any mode, we should *not* decompose it into its individual
	 * fences. However, we don't currently store which mode the fence-array
	 * is operating in. Fortunately, the only user of signal-on-any is
	 * private to amdgpu and we should not see any incoming fence-array
	 * from sync-file being in signal-on-any mode.
	 */

	fence_array = to_dma_fence_array(fence);
	for (i = 0; i < fence_array->num_fences; i++) {
		struct dma_fence *child = fence_array->fences[i];

		ret = dma_fence_wait(child, true);

		if (ret < 0)
			return ret;
	}

	return 0;
}
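
/*
 * Editorial note: a dma_fence_array is a container fence that signals
 * when its children do; the loop above decomposes it and waits on each
 * child in turn, which is only valid for signal-on-all arrays, as the
 * comment borrowed from i915 explains.
 */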


/**
 * vmw_fence_fifo_down - signal all unsignaled fence objects.
 */

void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
{
	struct list_head action_list;
	int ret;

	/*
	 * The list may be altered while we traverse it, so always
	 * restart when we've released the fman->lock.
	 */

	spin_lock(&fman->lock);
	fman->fifo_down = true;
	while (!list_empty(&fman->fence_list)) {
		struct vmw_fence_obj *fence =
			list_entry(fman->fence_list.prev, struct vmw_fence_obj,
				   head);
		dma_fence_get(&fence->base);
		spin_unlock(&fman->lock);

		ret = vmw_fence_obj_wait(fence, false, false,
					 VMW_FENCE_WAIT_TIMEOUT);

		if (unlikely(ret != 0)) {
			list_del_init(&fence->head);
			dma_fence_signal(&fence->base);
			INIT_LIST_HEAD(&action_list);
			list_splice_init(&fence->seq_passed_actions,
					 &action_list);
			vmw_fences_perform_actions(fman, &action_list);
		}

		BUG_ON(!list_empty(&fence->head));
		dma_fence_put(&fence->base);
		spin_lock(&fman->lock);
	}
	spin_unlock(&fman->lock);
}

void vmw_fence_fifo_up(struct vmw_fence_manager *fman)
{
	spin_lock(&fman->lock);
	fman->fifo_down = false;
	spin_unlock(&fman->lock);
}


/**
 * vmw_fence_obj_lookup - Look up a user-space fence object
 *
 * @tfile: A struct ttm_object_file identifying the caller.
 * @handle: A handle identifying the fence object.
 * @return: A struct vmw_user_fence base ttm object on success or
 * an error pointer on failure.
 *
 * The fence object is looked up and type-checked. The caller needs
 * to have opened the fence object first, but since that happens on
 * creation and fence objects aren't shareable, that's not an
 * issue currently.
 */
static struct ttm_base_object *
vmw_fence_obj_lookup(struct ttm_object_file *tfile, u32 handle)
{
	struct ttm_base_object *base = ttm_base_object_lookup(tfile, handle);

	if (!base) {
		pr_err("Invalid fence object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return ERR_PTR(-EINVAL);
	}

	if (base->refcount_release != vmw_user_fence_base_release) {
		pr_err("Invalid fence object handle 0x%08lx.\n",
		       (unsigned long)handle);
		ttm_base_object_unref(&base);
		return ERR_PTR(-EINVAL);
	}

	return base;
}

int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct drm_vmw_fence_wait_arg *arg =
		(struct drm_vmw_fence_wait_arg *)data;
	unsigned long timeout;
	struct ttm_base_object *base;
	struct vmw_fence_obj *fence;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;
	uint64_t wait_timeout = ((uint64_t)arg->timeout_us * HZ);

	/*
	 * 64-bit division is not present on 32-bit systems, so do an
	 * approximation. (Divide by 1000000).
	 */

	wait_timeout = (wait_timeout >> 20) + (wait_timeout >> 24) -
		(wait_timeout >> 26);
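	/*
	 * Accuracy check for the shift approximation above (editorial
	 * note): 1/2^20 + 1/2^24 - 1/2^26 = 67/2^26 ≈ 0.998378e-6, i.e.
	 * within about 0.17% of a true division by 10^6.
	 */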

	if (!arg->cookie_valid) {
		arg->cookie_valid = 1;
		arg->kernel_cookie = jiffies + wait_timeout;
	}

	base = vmw_fence_obj_lookup(tfile, arg->handle);
	if (IS_ERR(base))
		return PTR_ERR(base);

	fence = &(container_of(base, struct vmw_user_fence, base)->fence);

	timeout = jiffies;
	if (time_after_eq(timeout, (unsigned long)arg->kernel_cookie)) {
		ret = ((vmw_fence_obj_signaled(fence)) ?
		       0 : -EBUSY);
		goto out;
	}

	timeout = (unsigned long)arg->kernel_cookie - timeout;

	ret = vmw_fence_obj_wait(fence, arg->lazy, true, timeout);

out:
	ttm_base_object_unref(&base);

	/*
	 * Optionally unref the fence object.
	 */

	if (ret == 0 && (arg->wait_options & DRM_VMW_WAIT_OPTION_UNREF))
		return ttm_ref_object_base_unref(tfile, arg->handle,
						 TTM_REF_USAGE);
	return ret;
}

int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
{
	struct drm_vmw_fence_signaled_arg *arg =
		(struct drm_vmw_fence_signaled_arg *) data;
	struct ttm_base_object *base;
	struct vmw_fence_obj *fence;
	struct vmw_fence_manager *fman;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_private *dev_priv = vmw_priv(dev);

	base = vmw_fence_obj_lookup(tfile, arg->handle);
	if (IS_ERR(base))
		return PTR_ERR(base);

	fence = &(container_of(base, struct vmw_user_fence, base)->fence);
	fman = fman_from_fence(fence);

	arg->signaled = vmw_fence_obj_signaled(fence);

	arg->signaled_flags = arg->flags;
	spin_lock(&fman->lock);
	arg->passed_seqno = dev_priv->last_read_seqno;
	spin_unlock(&fman->lock);

	ttm_base_object_unref(&base);

	return 0;
}


int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_fence_arg *arg =
		(struct drm_vmw_fence_arg *) data;

	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 arg->handle,
					 TTM_REF_USAGE);
}

/**
 * vmw_event_fence_action_seq_passed
 *
 * @action: The struct vmw_fence_action embedded in a struct
 * vmw_event_fence_action.
 *
 * This function is called when the seqno of the fence where @action is
 * attached has passed. It queues the event on the submitter's event list.
 * This function is always called from atomic context.
 */
static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
{
	struct vmw_event_fence_action *eaction =
		container_of(action, struct vmw_event_fence_action, action);
	struct drm_device *dev = eaction->dev;
	struct drm_pending_event *event = eaction->event;

	if (unlikely(event == NULL))
		return;

	spin_lock_irq(&dev->event_lock);

	if (likely(eaction->tv_sec != NULL)) {
		struct timeval tv;

		do_gettimeofday(&tv);
		*eaction->tv_sec = tv.tv_sec;
		*eaction->tv_usec = tv.tv_usec;
	}

	drm_send_event_locked(dev, eaction->event);
	eaction->event = NULL;
	spin_unlock_irq(&dev->event_lock);
}

/**
 * vmw_event_fence_action_cleanup
 *
 * @action: The struct vmw_fence_action embedded in a struct
 * vmw_event_fence_action.
 *
 * This function is the struct vmw_fence_action destructor. It's typically
 * called from a workqueue.
 */
static void vmw_event_fence_action_cleanup(struct vmw_fence_action *action)
{
	struct vmw_event_fence_action *eaction =
		container_of(action, struct vmw_event_fence_action, action);

	vmw_fence_obj_unreference(&eaction->fence);
	kfree(eaction);
}


/**
 * vmw_fence_obj_add_action - Add an action to a fence object.
 *
 * @fence: The fence object.
 * @action: The action to add.
 *
 * Note that the action callbacks may be executed before this function
 * returns.
 */
static void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
				     struct vmw_fence_action *action)
{
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	bool run_update = false;

	mutex_lock(&fman->goal_irq_mutex);
	spin_lock(&fman->lock);

	fman->pending_actions[action->type]++;
	if (dma_fence_is_signaled_locked(&fence->base)) {
		struct list_head action_list;

		INIT_LIST_HEAD(&action_list);
		list_add_tail(&action->head, &action_list);
		vmw_fences_perform_actions(fman, &action_list);
	} else {
		list_add_tail(&action->head, &fence->seq_passed_actions);

		/*
		 * This function may set fman::seqno_valid, so it must
		 * be run with the goal_irq_mutex held.
		 */
		run_update = vmw_fence_goal_check_locked(fence);
	}

	spin_unlock(&fman->lock);

	if (run_update) {
		if (!fman->goal_irq_on) {
			fman->goal_irq_on = true;
			vmw_goal_waiter_add(fman->dev_priv);
		}
		vmw_fences_update(fman);
	}
	mutex_unlock(&fman->goal_irq_mutex);
}

/**
 * vmw_event_fence_action_queue - Post an event for sending when a fence
 * object seqno has passed.
 *
 * @file_priv: The file connection on which the event should be posted.
 * @fence: The fence object on which to post the event.
 * @event: Event to be posted. This event should've been alloced
 * using k[mz]alloc, and should've been completely initialized.
 * @tv_sec: If non-NULL, assigned the current time tv_sec value when the
 * fence signals.
 * @tv_usec: Must be set if @tv_sec is set; assigned the current time
 * tv_usec value when the fence signals.
 * @interruptible: Interruptible waits if possible.
 *
 * As a side effect, the object pointed to by @event may have been
 * freed when this function returns. If this function returns with
 * an error code, the caller needs to free that object.
 */

int vmw_event_fence_action_queue(struct drm_file *file_priv,
				 struct vmw_fence_obj *fence,
				 struct drm_pending_event *event,
				 uint32_t *tv_sec,
				 uint32_t *tv_usec,
				 bool interruptible)
{
	struct vmw_event_fence_action *eaction;
	struct vmw_fence_manager *fman = fman_from_fence(fence);

	eaction = kzalloc(sizeof(*eaction), GFP_KERNEL);
	if (unlikely(!eaction))
		return -ENOMEM;

	eaction->event = event;

	eaction->action.seq_passed = vmw_event_fence_action_seq_passed;
	eaction->action.cleanup = vmw_event_fence_action_cleanup;
	eaction->action.type = VMW_ACTION_EVENT;

	eaction->fence = vmw_fence_obj_reference(fence);
	eaction->dev = fman->dev_priv->dev;
	eaction->tv_sec = tv_sec;
	eaction->tv_usec = tv_usec;

	vmw_fence_obj_add_action(fence, &eaction->action);

	return 0;
}

struct vmw_event_fence_pending {
	struct drm_pending_event base;
	struct drm_vmw_event_fence event;
};

static int vmw_event_fence_action_create(struct drm_file *file_priv,
					 struct vmw_fence_obj *fence,
					 uint32_t flags,
					 uint64_t user_data,
					 bool interruptible)
{
	struct vmw_event_fence_pending *event;
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	struct drm_device *dev = fman->dev_priv->dev;
	int ret;

	event = kzalloc(sizeof(*event), GFP_KERNEL);
	if (unlikely(!event)) {
		DRM_ERROR("Failed to allocate an event.\n");
		ret = -ENOMEM;
		goto out_no_space;
	}

	event->event.base.type = DRM_VMW_EVENT_FENCE_SIGNALED;
	event->event.base.length = sizeof(*event);
	event->event.user_data = user_data;

	ret = drm_event_reserve_init(dev, file_priv, &event->base,
				     &event->event.base);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate event space for this file.\n");
		kfree(event);
		goto out_no_space;
	}

	if (flags & DRM_VMW_FE_FLAG_REQ_TIME)
		ret = vmw_event_fence_action_queue(file_priv, fence,
						   &event->base,
						   &event->event.tv_sec,
						   &event->event.tv_usec,
						   interruptible);
	else
		ret = vmw_event_fence_action_queue(file_priv, fence,
						   &event->base,
						   NULL,
						   NULL,
						   interruptible);
	if (ret != 0)
		goto out_no_queue;

	return 0;

out_no_queue:
	drm_event_cancel_free(dev, &event->base);
out_no_space:
	return ret;
}

int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_fence_event_arg *arg =
		(struct drm_vmw_fence_event_arg *) data;
	struct vmw_fence_obj *fence = NULL;
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct ttm_object_file *tfile = vmw_fp->tfile;
	struct drm_vmw_fence_rep __user *user_fence_rep =
		(struct drm_vmw_fence_rep __user *)(unsigned long)
		arg->fence_rep;
	uint32_t handle;
	int ret;

	/*
	 * Look up an existing fence object,
	 * and if user-space wants a new reference,
	 * add one.
	 */
	if (arg->handle) {
		struct ttm_base_object *base =
			vmw_fence_obj_lookup(tfile, arg->handle);

		if (IS_ERR(base))
			return PTR_ERR(base);

		fence = &(container_of(base, struct vmw_user_fence,
				       base)->fence);
		(void) vmw_fence_obj_reference(fence);

		if (user_fence_rep != NULL) {
			ret = ttm_ref_object_add(vmw_fp->tfile, base,
						 TTM_REF_USAGE, NULL, false);
			if (unlikely(ret != 0)) {
				DRM_ERROR("Failed to reference a fence "
					  "object.\n");
				goto out_no_ref_obj;
			}
			handle = base->hash.key;
		}
		ttm_base_object_unref(&base);
	}

	/*
	 * Create a new fence object.
	 */
	if (!fence) {
		ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
						 &fence,
						 (user_fence_rep) ?
						 &handle : NULL);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Fence event failed to create fence.\n");
			return ret;
		}
	}

	BUG_ON(fence == NULL);

	ret = vmw_event_fence_action_create(file_priv, fence,
					    arg->flags,
					    arg->user_data,
					    true);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Failed to attach event to fence.\n");
		goto out_no_create;
	}

	vmw_execbuf_copy_fence_user(dev_priv, vmw_fp, 0, user_fence_rep, fence,
				    handle, -1, NULL);
	vmw_fence_obj_unreference(&fence);
	return 0;
out_no_create:
	if (user_fence_rep != NULL)
		ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);
out_no_ref_obj:
	vmw_fence_obj_unreference(&fence);
	return ret;
}