// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2019 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include "habanalabs.h"

#include <linux/slab.h>

/**
 * struct hl_eqe_work - This structure is used to schedule work of EQ
 *                      entry and cpucp_reset event
 *
 * @eq_work: workqueue object to run when EQ entry is received
 * @hdev: pointer to device structure
 * @eq_entry: copy of the EQ entry
 */
struct hl_eqe_work {
	struct work_struct	eq_work;
	struct hl_device	*hdev;
	struct hl_eq_entry	eq_entry;
};

/**
 * hl_cq_inc_ptr - increment ci or pi of cq
 *
 * @ptr: the current ci or pi value of the completion queue
 *
 * Increment ptr by 1. If it reaches the number of completion queue
 * entries, set it to 0
 */
inline u32 hl_cq_inc_ptr(u32 ptr)
{
	ptr++;
	if (unlikely(ptr == HL_CQ_LENGTH))
		ptr = 0;
	return ptr;
}

/**
 * hl_eq_inc_ptr - increment ci of eq
 *
 * @ptr: the current ci value of the event queue
 *
 * Increment ptr by 1. If it reaches the number of event queue
 * entries, set it to 0
 */
static inline u32 hl_eq_inc_ptr(u32 ptr)
{
	ptr++;
	if (unlikely(ptr == HL_EQ_LENGTH))
		ptr = 0;
	return ptr;
}

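/**
 * irq_handle_eqe - handle event queue entry
 *
 * @work: pointer to the EQ work object
 *
 * Runs from the event queue workqueue. Dispatches the copied EQ entry to
 * the ASIC-specific handler and frees the work object that was allocated
 * by the interrupt handler.
 */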
static void irq_handle_eqe(struct work_struct *work)
{
	struct hl_eqe_work *eqe_work = container_of(work, struct hl_eqe_work,
							eq_work);
	struct hl_device *hdev = eqe_work->hdev;

	hdev->asic_funcs->handle_eqe(hdev, &eqe_work->eq_entry);

	kfree(eqe_work);
}

/**
 * hl_irq_handler_cq - irq handler for completion queue
 *
 * @irq: irq number
 * @arg: pointer to completion queue structure
 *
 */
irqreturn_t hl_irq_handler_cq(int irq, void *arg)
{
	struct hl_cq *cq = arg;
	struct hl_device *hdev = cq->hdev;
	struct hl_hw_queue *queue;
	struct hl_cs_job *job;
	bool shadow_index_valid;
	u16 shadow_index;
	struct hl_cq_entry *cq_entry, *cq_base;

	if (hdev->disabled) {
		dev_dbg(hdev->dev,
			"Device disabled but received IRQ %d for CQ %d\n",
			irq, cq->hw_queue_id);
		return IRQ_HANDLED;
	}

	cq_base = cq->kernel_address;

	while (1) {
		bool entry_ready = ((le32_to_cpu(cq_base[cq->ci].data) &
					CQ_ENTRY_READY_MASK)
						>> CQ_ENTRY_READY_SHIFT);

		if (!entry_ready)
			break;

		cq_entry = (struct hl_cq_entry *) &cq_base[cq->ci];

		/* Make sure we read CQ entry contents after we've
		 * checked the ownership bit.
		 */
		dma_rmb();

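		/*
		 * The shadow index, when valid, points to the CS job in this
		 * H/W queue's shadow queue whose completion work should now
		 * be scheduled.
		 */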
		shadow_index_valid = ((le32_to_cpu(cq_entry->data) &
					CQ_ENTRY_SHADOW_INDEX_VALID_MASK)
					>> CQ_ENTRY_SHADOW_INDEX_VALID_SHIFT);

		shadow_index = (u16) ((le32_to_cpu(cq_entry->data) &
					CQ_ENTRY_SHADOW_INDEX_MASK)
					>> CQ_ENTRY_SHADOW_INDEX_SHIFT);

		queue = &hdev->kernel_queues[cq->hw_queue_id];

		if ((shadow_index_valid) && (!hdev->disabled)) {
			job = queue->shadow_queue[hl_pi_2_offset(shadow_index)];
			queue_work(hdev->cq_wq[cq->cq_idx], &job->finish_work);
		}

		atomic_inc(&queue->ci);

		/* Clear CQ entry ready bit */
		cq_entry->data = cpu_to_le32(le32_to_cpu(cq_entry->data) &
						~CQ_ENTRY_READY_MASK);

		cq->ci = hl_cq_inc_ptr(cq->ci);

		/* Increment free slots */
		atomic_inc(&cq->free_slots_cnt);
	}

	return IRQ_HANDLED;
}

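/**
 * handle_user_cq - handle user completion queue
 *
 * @hdev: pointer to device structure
 * @user_cq: user interrupt structure to handle
 *
 * Walk the interrupt's wait list under its lock, stamp every pending fence
 * with the current time and complete it so any waiters are woken up.
 */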
static void handle_user_cq(struct hl_device *hdev,
			struct hl_user_interrupt *user_cq)
{
	struct hl_user_pending_interrupt *pend;
	ktime_t now = ktime_get();

	spin_lock(&user_cq->wait_list_lock);
	list_for_each_entry(pend, &user_cq->wait_list_head, wait_list_node) {
		pend->fence.timestamp = now;
		complete_all(&pend->fence.completion);
	}
	spin_unlock(&user_cq->wait_list_lock);
}

/**
 * hl_irq_handler_user_cq - irq handler for user completion queues
 *
 * @irq: irq number
 * @arg: pointer to user interrupt structure
 *
 */
irqreturn_t hl_irq_handler_user_cq(int irq, void *arg)
{
	struct hl_user_interrupt *user_cq = arg;
	struct hl_device *hdev = user_cq->hdev;

	dev_dbg(hdev->dev,
		"got user completion interrupt id %u",
		user_cq->interrupt_id);

	/* Handle user cq interrupts registered on all interrupts */
	handle_user_cq(hdev, &hdev->common_user_interrupt);

	/* Handle user cq interrupts registered on this specific interrupt */
	handle_user_cq(hdev, user_cq);

	return IRQ_HANDLED;
}

/**
 * hl_irq_handler_default - default irq handler
 *
 * @irq: irq number
 * @arg: pointer to user interrupt structure
 *
 */
irqreturn_t hl_irq_handler_default(int irq, void *arg)
{
	struct hl_user_interrupt *user_interrupt = arg;
	struct hl_device *hdev = user_interrupt->hdev;
	u32 interrupt_id = user_interrupt->interrupt_id;

	dev_err(hdev->dev,
		"got invalid user interrupt %u",
		interrupt_id);

	return IRQ_HANDLED;
}

/**
 * hl_irq_handler_eq - irq handler for event queue
 *
 * @irq: irq number
 * @arg: pointer to event queue structure
 *
 */
irqreturn_t hl_irq_handler_eq(int irq, void *arg)
{
	struct hl_eq *eq = arg;
	struct hl_device *hdev = eq->hdev;
	struct hl_eq_entry *eq_entry;
	struct hl_eq_entry *eq_base;
	struct hl_eqe_work *handle_eqe_work;
	bool entry_ready;
	u32 cur_eqe;
	u16 cur_eqe_index;

	eq_base = eq->kernel_address;

	while (1) {
		cur_eqe = le32_to_cpu(eq_base[eq->ci].hdr.ctl);
		entry_ready = !!FIELD_GET(EQ_CTL_READY_MASK, cur_eqe);

		if (!entry_ready)
			break;

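		/*
		 * Each EQE carries an incrementing index. If a ready entry's
		 * index doesn't follow the previous one, an entry was likely
		 * lost, so stop processing here.
		 */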
		cur_eqe_index = FIELD_GET(EQ_CTL_INDEX_MASK, cur_eqe);
		if ((hdev->event_queue.check_eqe_index) &&
				(((eq->prev_eqe_index + 1) & EQ_CTL_INDEX_MASK)
							!= cur_eqe_index)) {
			dev_dbg(hdev->dev,
				"EQE 0x%x in queue is ready but index does not match %d!=%d",
				cur_eqe,
				((eq->prev_eqe_index + 1) & EQ_CTL_INDEX_MASK),
				cur_eqe_index);
			break;
		}

		eq->prev_eqe_index++;

		eq_entry = &eq_base[eq->ci];

		/*
		 * Make sure we read EQ entry contents after we've
		 * checked the ownership bit.
		 */
		dma_rmb();

		if (hdev->disabled) {
			dev_warn(hdev->dev,
				"Device disabled but received IRQ %d for EQ\n",
				irq);
			goto skip_irq;
		}

		handle_eqe_work = kmalloc(sizeof(*handle_eqe_work), GFP_ATOMIC);
		if (handle_eqe_work) {
			INIT_WORK(&handle_eqe_work->eq_work, irq_handle_eqe);
			handle_eqe_work->hdev = hdev;

			memcpy(&handle_eqe_work->eq_entry, eq_entry,
					sizeof(*eq_entry));

			queue_work(hdev->eq_wq, &handle_eqe_work->eq_work);
		}
skip_irq:
		/* Clear EQ entry ready bit */
		eq_entry->hdr.ctl =
			cpu_to_le32(le32_to_cpu(eq_entry->hdr.ctl) &
							~EQ_CTL_READY_MASK);

		eq->ci = hl_eq_inc_ptr(eq->ci);

		hdev->asic_funcs->update_eq_ci(hdev, eq->ci);
	}

	return IRQ_HANDLED;
}

/**
 * hl_cq_init - main initialization function for a cq object
 *
 * @hdev: pointer to device structure
 * @q: pointer to cq structure
 * @hw_queue_id: The H/W queue ID this completion queue belongs to
 *
 * Allocate dma-able memory for the completion queue and initialize fields
 * Returns 0 on success
 */
int hl_cq_init(struct hl_device *hdev, struct hl_cq *q, u32 hw_queue_id)
{
	void *p;

	p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev, HL_CQ_SIZE_IN_BYTES,
				&q->bus_address, GFP_KERNEL | __GFP_ZERO);
	if (!p)
		return -ENOMEM;

	q->hdev = hdev;
	q->kernel_address = p;
	q->hw_queue_id = hw_queue_id;
	q->ci = 0;
	q->pi = 0;

	atomic_set(&q->free_slots_cnt, HL_CQ_LENGTH);

	return 0;
}

/**
 * hl_cq_fini - destroy completion queue
 *
 * @hdev: pointer to device structure
 * @q: pointer to cq structure
 *
 * Free the completion queue memory
 */
void hl_cq_fini(struct hl_device *hdev, struct hl_cq *q)
{
	hdev->asic_funcs->asic_dma_free_coherent(hdev, HL_CQ_SIZE_IN_BYTES,
						 q->kernel_address,
						 q->bus_address);
}

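/**
 * hl_cq_reset - reset completion queue
 *
 * @hdev: pointer to device structure
 * @q: pointer to cq structure
 *
 * Reset the CI/PI pointers, restore the free slots counter and zero the
 * queue memory so no stale entries are processed after the device restarts.
 */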
void hl_cq_reset(struct hl_device *hdev, struct hl_cq *q)
{
	q->ci = 0;
	q->pi = 0;

	atomic_set(&q->free_slots_cnt, HL_CQ_LENGTH);

	/*
	 * It's not enough to just reset the PI/CI because the H/W may have
	 * written valid completion entries before it was halted and therefore
	 * we need to clean the actual queues so we won't process old entries
	 * when the device is operational again
	 */

	memset(q->kernel_address, 0, HL_CQ_SIZE_IN_BYTES);
}

/**
 * hl_eq_init - main initialization function for an event queue object
 *
 * @hdev: pointer to device structure
 * @q: pointer to eq structure
 *
 * Allocate dma-able memory for the event queue and initialize fields
 * Returns 0 on success
 */
int hl_eq_init(struct hl_device *hdev, struct hl_eq *q)
{
	void *p;

	p = hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev,
							HL_EQ_SIZE_IN_BYTES,
							&q->bus_address);
	if (!p)
		return -ENOMEM;

	q->hdev = hdev;
	q->kernel_address = p;
	q->ci = 0;
	q->prev_eqe_index = 0;

	return 0;
}

/**
 * hl_eq_fini - destroy event queue
 *
 * @hdev: pointer to device structure
 * @q: pointer to eq structure
 *
 * Free the event queue memory
 */
void hl_eq_fini(struct hl_device *hdev, struct hl_eq *q)
{
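	/* Make sure all pending EQE work has finished before freeing the queue */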
	flush_workqueue(hdev->eq_wq);

	hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev,
						       HL_EQ_SIZE_IN_BYTES,
						       q->kernel_address);
}

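/**
 * hl_eq_reset - reset event queue
 *
 * @hdev: pointer to device structure
 * @q: pointer to eq structure
 *
 * Reset the CI pointer and the expected EQE index, and zero the queue
 * memory so no stale entries are processed after the device restarts.
 */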
void hl_eq_reset(struct hl_device *hdev, struct hl_eq *q)
{
	q->ci = 0;
	q->prev_eqe_index = 0;

	/*
	 * It's not enough to just reset the PI/CI because the H/W may have
	 * written valid completion entries before it was halted and therefore
	 * we need to clean the actual queues so we won't process old entries
	 * when the device is operational again
	 */

	memset(q->kernel_address, 0, HL_EQ_SIZE_IN_BYTES);
}