1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright 2018-2020 Broadcom.
4 */
5
6#include <linux/delay.h>
7#include <linux/fs.h>
8#include <linux/hash.h>
9#include <linux/interrupt.h>
10#include <linux/list.h>
11#include <linux/module.h>
12#include <linux/poll.h>
13#include <linux/sizes.h>
14#include <linux/spinlock.h>
15#include <linux/timer.h>
16
17#include "bcm_vk.h"
18#include "bcm_vk_msg.h"
19#include "bcm_vk_sg.h"
20
21/* functions to manipulate the transport id in msg block */
22#define BCM_VK_MSG_Q_SHIFT 4
23#define BCM_VK_MSG_Q_MASK 0xF
24#define BCM_VK_MSG_ID_MASK 0xFFF
25
26#define BCM_VK_DMA_DRAIN_MAX_MS 2000
27
28/* this number times q_size is the max number of msgs processed per loop */
29#define BCM_VK_MSG_PROC_MAX_LOOP 2
30
31/* module parameter */
32static bool hb_mon = true;
33module_param(hb_mon, bool, 0444);
34MODULE_PARM_DESC(hb_mon, "Monitoring heartbeat continuously.\n");
35static int batch_log = 1;
36module_param(batch_log, int, 0444);
37MODULE_PARM_DESC(batch_log, "Max num of logs per batch operation.\n");
38
39static bool hb_mon_is_on(void)
40{
41 return hb_mon;
42}
43
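/*
 * helpers to access the queue number kept in the low bits of the
 * transport id; out-of-range values fall back to the default queue
 */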
44static u32 get_q_num(const struct vk_msg_blk *msg)
45{
46 u32 q_num = msg->trans_id & BCM_VK_MSG_Q_MASK;
47
48 if (q_num >= VK_MSGQ_PER_CHAN_MAX)
49 q_num = VK_MSGQ_NUM_DEFAULT;
50 return q_num;
51}
52
53static void set_q_num(struct vk_msg_blk *msg, u32 q_num)
54{
55 u32 trans_q;
56
57 if (q_num >= VK_MSGQ_PER_CHAN_MAX)
58 trans_q = VK_MSGQ_NUM_DEFAULT;
59 else
60 trans_q = q_num;
61
62 msg->trans_id = (msg->trans_id & ~BCM_VK_MSG_Q_MASK) | trans_q;
63}
64
65static u32 get_msg_id(const struct vk_msg_blk *msg)
66{
67 return ((msg->trans_id >> BCM_VK_MSG_Q_SHIFT) & BCM_VK_MSG_ID_MASK);
68}
69
70static void set_msg_id(struct vk_msg_blk *msg, u32 val)
71{
72 msg->trans_id = (val << BCM_VK_MSG_Q_SHIFT) | get_q_num(msg);
73}
74
75static u32 msgq_inc(const struct bcm_vk_sync_qinfo *qinfo, u32 idx, u32 inc)
76{
77 return ((idx + inc) & qinfo->q_mask);
78}
79
80static
81struct vk_msg_blk __iomem *msgq_blk_addr(const struct bcm_vk_sync_qinfo *qinfo,
82 u32 idx)
83{
84 return qinfo->q_start + (VK_MSGQ_BLK_SIZE * idx);
85}
86
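/*
 * number of message blocks currently held in the queue, derived from
 * the rd/wr indexes in BAR memory
 */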
87static u32 msgq_occupied(const struct bcm_vk_msgq __iomem *msgq,
88 const struct bcm_vk_sync_qinfo *qinfo)
89{
90 u32 wr_idx, rd_idx;
91
92 wr_idx = readl_relaxed(&msgq->wr_idx);
93 rd_idx = readl_relaxed(&msgq->rd_idx);
94
95 return ((wr_idx - rd_idx) & qinfo->q_mask);
96}
97
98static
99u32 msgq_avail_space(const struct bcm_vk_msgq __iomem *msgq,
100 const struct bcm_vk_sync_qinfo *qinfo)
101{
102 return (qinfo->q_size - msgq_occupied(msgq, qinfo) - 1);
103}
104
105/* number of retries when enqueue message fails before returning EAGAIN */
106#define BCM_VK_H2VK_ENQ_RETRY 10
107#define BCM_VK_H2VK_ENQ_RETRY_DELAY_MS 50
108
109bool bcm_vk_drv_access_ok(struct bcm_vk *vk)
110{
111 return (!!atomic_read(&vk->msgq_inited));
112}
113
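/*
 * record a host-detected alert bit and, if not already pending, kick
 * the workqueue to process the notification
 */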
114void bcm_vk_set_host_alert(struct bcm_vk *vk, u32 bit_mask)
115{
116 struct bcm_vk_alert *alert = &vk->host_alert;
117 unsigned long flags;
118
119 /* use irqsave version as this may be called inside a timer interrupt */
120 spin_lock_irqsave(&vk->host_alert_lock, flags);
121 alert->notfs |= bit_mask;
122 spin_unlock_irqrestore(&vk->host_alert_lock, flags);
123
124 if (test_and_set_bit(BCM_VK_WQ_NOTF_PEND, vk->wq_offload) == 0)
125 queue_work(vk->wq_thread, &vk->wq_work);
126}
127
128/*
129 * Heartbeat related defines
130 * The heartbeat from the host is a last resort. If a stuck condition happens
131 * on the card, the firmware is supposed to detect it. Therefore, the heartbeat
132 * value used by the driver is more relaxed, and needs to be bigger
133 * than the watchdog timeout on the card. The watchdog timeout on the card
134 * is 20s, with a jitter of 2s => 22s. We use a value of 27s here.
135 */
136#define BCM_VK_HB_TIMER_S 3
137#define BCM_VK_HB_TIMER_VALUE (BCM_VK_HB_TIMER_S * HZ)
138#define BCM_VK_HB_LOST_MAX (27 / BCM_VK_HB_TIMER_S)
139
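/*
 * heartbeat timer callback: sample the card's uptime counter and count
 * how many consecutive reads come back unchanged; once the limit is
 * exceeded, block driver access and raise a host alert
 */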
140static void bcm_vk_hb_poll(struct timer_list *t)
141{
142 u32 uptime_s;
143 struct bcm_vk_hb_ctrl *hb = container_of(t, struct bcm_vk_hb_ctrl,
144 timer);
145 struct bcm_vk *vk = container_of(hb, struct bcm_vk, hb_ctrl);
146
147 if (bcm_vk_drv_access_ok(vk) && hb_mon_is_on()) {
148 /* read uptime from register and compare */
149 uptime_s = vkread32(vk, BAR_0, BAR_OS_UPTIME);
150
151 if (uptime_s == hb->last_uptime)
152 hb->lost_cnt++;
153 else /* reset to avoid accumulation */
154 hb->lost_cnt = 0;
155
156 dev_dbg(&vk->pdev->dev, "Last uptime %d current %d, lost %d\n",
157 hb->last_uptime, uptime_s, hb->lost_cnt);
158
159 /*
160 * if the interface goes down without any activity, a value
161 * of 0xFFFFFFFF will be continuously read, and the failure
162 * will eventually be detected.
163 */
164 hb->last_uptime = uptime_s;
165 } else {
166 /* reset heart beat lost cnt */
167 hb->lost_cnt = 0;
168 }
169
170 /* next, check if heartbeat exceeds limit */
171 if (hb->lost_cnt > BCM_VK_HB_LOST_MAX) {
172 dev_err(&vk->pdev->dev, "Heartbeat Misses %d times, %d s!\n",
173 BCM_VK_HB_LOST_MAX,
174 BCM_VK_HB_LOST_MAX * BCM_VK_HB_TIMER_S);
175
176 bcm_vk_blk_drv_access(vk);
177 bcm_vk_set_host_alert(vk, ERR_LOG_HOST_HB_FAIL);
178 }
179 /* re-arm timer */
180 mod_timer(&hb->timer, jiffies + BCM_VK_HB_TIMER_VALUE);
181}
182
183void bcm_vk_hb_init(struct bcm_vk *vk)
184{
185 struct bcm_vk_hb_ctrl *hb = &vk->hb_ctrl;
186
187 timer_setup(&hb->timer, bcm_vk_hb_poll, 0);
188 mod_timer(&hb->timer, jiffies + BCM_VK_HB_TIMER_VALUE);
189}
190
191void bcm_vk_hb_deinit(struct bcm_vk *vk)
192{
193 struct bcm_vk_hb_ctrl *hb = &vk->hb_ctrl;
194
195 del_timer(&hb->timer);
196}
197
198static void bcm_vk_msgid_bitmap_clear(struct bcm_vk *vk,
199 unsigned int start,
200 unsigned int nbits)
201{
202 spin_lock(&vk->msg_id_lock);
203 bitmap_clear(vk->bmap, start, nbits);
204 spin_unlock(&vk->msg_id_lock);
205}
206
207/*
208 * allocate a ctx per file struct
209 */
210static struct bcm_vk_ctx *bcm_vk_get_ctx(struct bcm_vk *vk, const pid_t pid)
211{
212 u32 i;
213 struct bcm_vk_ctx *ctx = NULL;
214 u32 hash_idx = hash_32(pid, VK_PID_HT_SHIFT_BIT);
215
216 spin_lock(&vk->ctx_lock);
217
218 /* check if we are in reset; if so, don't allow new contexts */
219 if (vk->reset_pid) {
220 dev_err(&vk->pdev->dev,
221 "No context allowed during reset by pid %d\n",
222 vk->reset_pid);
223
224 goto in_reset_exit;
225 }
226
227 for (i = 0; i < ARRAY_SIZE(vk->ctx); i++) {
228 if (!vk->ctx[i].in_use) {
229 vk->ctx[i].in_use = true;
230 ctx = &vk->ctx[i];
231 break;
232 }
233 }
234
235 if (!ctx) {
236 dev_err(&vk->pdev->dev, "All contexts in use\n");
237
238 goto all_in_use_exit;
239 }
240
241 /* set the pid and insert it to hash table */
242 ctx->pid = pid;
243 ctx->hash_idx = hash_idx;
244 list_add_tail(&ctx->node, &vk->pid_ht[hash_idx].head);
245
246 /* increase kref */
247 kref_get(&vk->kref);
248
249 /* clear counter */
250 atomic_set(&ctx->pend_cnt, 0);
251 atomic_set(&ctx->dma_cnt, 0);
252 init_waitqueue_head(&ctx->rd_wq);
253
254all_in_use_exit:
255in_reset_exit:
256 spin_unlock(&vk->ctx_lock);
257
258 return ctx;
259}
260
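/*
 * allocate the next free message id from the bitmap, skipping
 * VK_SIMPLEX_MSG_ID (0); returns VK_MSG_ID_OVERFLOW if none is free
 */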
261static u16 bcm_vk_get_msg_id(struct bcm_vk *vk)
262{
263 u16 rc = VK_MSG_ID_OVERFLOW;
264 u16 test_bit_count = 0;
265
266 spin_lock(&vk->msg_id_lock);
267 while (test_bit_count < (VK_MSG_ID_BITMAP_SIZE - 1)) {
268 /*
269 * The first time through this loop, msg_id will be 0
270 * and the first id tested will be 1. We skip
271 * VK_SIMPLEX_MSG_ID (0), which is reserved for one-way
272 * host2vk communication.
273 */
274 vk->msg_id++;
275 if (vk->msg_id == VK_MSG_ID_BITMAP_SIZE)
276 vk->msg_id = 1;
277
278 if (test_bit(vk->msg_id, vk->bmap)) {
279 test_bit_count++;
280 continue;
281 }
282 rc = vk->msg_id;
283 bitmap_set(vk->bmap, vk->msg_id, 1);
284 break;
285 }
286 spin_unlock(&vk->msg_id_lock);
287
288 return rc;
289}
290
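/*
 * release a context and return the number of contexts the same pid
 * still has open
 */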
291static int bcm_vk_free_ctx(struct bcm_vk *vk, struct bcm_vk_ctx *ctx)
292{
293 u32 idx;
294 u32 hash_idx;
295 pid_t pid;
296 struct bcm_vk_ctx *entry;
297 int count = 0;
298
299 if (!ctx) {
300 dev_err(&vk->pdev->dev, "NULL context detected\n");
301 return -EINVAL;
302 }
303 idx = ctx->idx;
304 pid = ctx->pid;
305
306 spin_lock(&vk->ctx_lock);
307
308 if (!vk->ctx[idx].in_use) {
309 dev_err(&vk->pdev->dev, "context[%d] not in use!\n", idx);
310 } else {
311 vk->ctx[idx].in_use = false;
312 vk->ctx[idx].miscdev = NULL;
313
314 /* Remove it from hash list and see if it is the last one. */
315 list_del(&ctx->node);
316 hash_idx = ctx->hash_idx;
317 list_for_each_entry(entry, &vk->pid_ht[hash_idx].head, node) {
318 if (entry->pid == pid)
319 count++;
320 }
321 }
322
323 spin_unlock(&vk->ctx_lock);
324
325 return count;
326}
327
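/* free a work entry together with its DMA mappings and response buffer */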
328static void bcm_vk_free_wkent(struct device *dev, struct bcm_vk_wkent *entry)
329{
330 int proc_cnt;
331
332 bcm_vk_sg_free(dev, entry->dma, VK_DMA_MAX_ADDRS, &proc_cnt);
333 if (proc_cnt)
334 atomic_dec(&entry->ctx->dma_cnt);
335
336 kfree(entry->to_h_msg);
337 kfree(entry);
338}
339
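/*
 * unlink and free all pending work entries on a channel, either for a
 * specific context or for every context when ctx is NULL
 */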
340static void bcm_vk_drain_all_pend(struct device *dev,
341 struct bcm_vk_msg_chan *chan,
342 struct bcm_vk_ctx *ctx)
343{
344 u32 num;
345 struct bcm_vk_wkent *entry, *tmp;
346 struct bcm_vk *vk;
347 struct list_head del_q;
348
349 if (ctx)
350 vk = container_of(ctx->miscdev, struct bcm_vk, miscdev);
351
352 INIT_LIST_HEAD(&del_q);
353 spin_lock(&chan->pendq_lock);
354 for (num = 0; num < chan->q_nr; num++) {
355 list_for_each_entry_safe(entry, tmp, &chan->pendq[num], node) {
356 if ((!ctx) || (entry->ctx->idx == ctx->idx)) {
357 list_del(&entry->node);
358 list_add_tail(&entry->node, &del_q);
359 }
360 }
361 }
362 spin_unlock(&chan->pendq_lock);
363
364 /* batch clean up */
365 num = 0;
366 list_for_each_entry_safe(entry, tmp, &del_q, node) {
367 list_del(&entry->node);
368 num++;
369 if (ctx) {
370 struct vk_msg_blk *msg;
371 int bit_set;
372 bool responded;
373 u32 msg_id;
374
375 /* for a specific ctx, log any entries that were stuck */
376 msg = entry->to_v_msg;
377 msg_id = get_msg_id(msg);
378 bit_set = test_bit(msg_id, vk->bmap);
379 responded = entry->to_h_msg ? true : false;
380 if (num <= batch_log)
381 dev_info(dev,
382 "Drained: fid %u size %u msg 0x%x(seq-%x) ctx 0x%x[fd-%d] args:[0x%x 0x%x] resp %s, bmap %d\n",
383 msg->function_id, msg->size,
384 msg_id, entry->seq_num,
385 msg->context_id, entry->ctx->idx,
386 msg->cmd, msg->arg,
387 responded ? "T" : "F", bit_set);
388 if (responded)
389 atomic_dec(&ctx->pend_cnt);
390 else if (bit_set)
391 bcm_vk_msgid_bitmap_clear(vk, msg_id, 1);
392 }
393 bcm_vk_free_wkent(dev, entry);
394 }
395 if (num && ctx)
396 dev_info(dev, "Total drained items %d [fd-%d]\n",
397 num, ctx->idx);
398}
399
400void bcm_vk_drain_msg_on_reset(struct bcm_vk *vk)
401{
402 bcm_vk_drain_all_pend(&vk->pdev->dev, &vk->to_v_msg_chan, NULL);
403 bcm_vk_drain_all_pend(&vk->pdev->dev, &vk->to_h_msg_chan, NULL);
404}
405
406/*
407 * Function to sync up the messages queue info that is provided by BAR1
408 */
409int bcm_vk_sync_msgq(struct bcm_vk *vk, bool force_sync)
410{
411 struct bcm_vk_msgq __iomem *msgq;
412 struct device *dev = &vk->pdev->dev;
413 u32 msgq_off;
414 u32 num_q;
415 struct bcm_vk_msg_chan *chan_list[] = {&vk->to_v_msg_chan,
416 &vk->to_h_msg_chan};
417 struct bcm_vk_msg_chan *chan;
418 int i, j;
419 int ret = 0;
420
421 /*
422 * If the driver is loaded at startup before the vk OS is up,
423 * the msgq-info may not be available until a later time. In
424 * this case, we skip and the sync function is supposed to be
425 * called again.
426 */
427 if (!bcm_vk_msgq_marker_valid(vk)) {
428 dev_info(dev, "BAR1 msgq marker not initialized.\n");
429 return -EAGAIN;
430 }
431
432 msgq_off = vkread32(vk, BAR_1, VK_BAR1_MSGQ_CTRL_OFF);
433
434 /* each side is always half the total */
435 num_q = vkread32(vk, BAR_1, VK_BAR1_MSGQ_NR) / 2;
436 if (!num_q || (num_q > VK_MSGQ_PER_CHAN_MAX)) {
437 dev_err(dev,
438 "Advertised msgq %d error - max %d allowed\n",
439 num_q, VK_MSGQ_PER_CHAN_MAX);
440 return -EINVAL;
441 }
442
443 vk->to_v_msg_chan.q_nr = num_q;
444 vk->to_h_msg_chan.q_nr = num_q;
445
446 /* first msgq location */
447 msgq = vk->bar[BAR_1] + msgq_off;
448
449 /*
450 * if this function is called when it is already inited,
451 * something is wrong
452 */
453 if (bcm_vk_drv_access_ok(vk) && !force_sync) {
454 dev_err(dev, "Msgq info already in sync\n");
455 return -EPERM;
456 }
457
458 for (i = 0; i < ARRAY_SIZE(chan_list); i++) {
459 chan = chan_list[i];
460 memset(chan->sync_qinfo, 0, sizeof(chan->sync_qinfo));
461
462 for (j = 0; j < num_q; j++) {
463 struct bcm_vk_sync_qinfo *qinfo;
464 u32 msgq_start;
465 u32 msgq_size;
466 u32 msgq_nxt;
467 u32 msgq_db_offset, q_db_offset;
468
469 chan->msgq[j] = msgq;
470 msgq_start = readl_relaxed(&msgq->start);
471 msgq_size = readl_relaxed(&msgq->size);
472 msgq_nxt = readl_relaxed(&msgq->nxt);
473 msgq_db_offset = readl_relaxed(&msgq->db_offset);
474 q_db_offset = (msgq_db_offset & ((1 << DB_SHIFT) - 1));
475 if (q_db_offset == (~msgq_db_offset >> DB_SHIFT))
476 msgq_db_offset = q_db_offset;
477 else
478 /* fall back to default */
479 msgq_db_offset = VK_BAR0_Q_DB_BASE(j);
480
481 dev_info(dev,
482 "MsgQ[%d] type %d num %d, @ 0x%x, db_offset 0x%x rd_idx %d wr_idx %d, size %d, nxt 0x%x\n",
483 j,
484 readw_relaxed(&msgq->type),
485 readw_relaxed(&msgq->num),
486 msgq_start,
487 msgq_db_offset,
488 readl_relaxed(&msgq->rd_idx),
489 readl_relaxed(&msgq->wr_idx),
490 msgq_size,
491 msgq_nxt);
492
493 qinfo = &chan->sync_qinfo[j];
494 /* formulate and record static info */
495 qinfo->q_start = vk->bar[BAR_1] + msgq_start;
496 qinfo->q_size = msgq_size;
497 /* set low threshold as 50% or 1/2 */
498 qinfo->q_low = qinfo->q_size >> 1;
499 qinfo->q_mask = qinfo->q_size - 1;
500 qinfo->q_db_offset = msgq_db_offset;
501
502 msgq++;
503 }
504 }
505 atomic_set(&vk->msgq_inited, 1);
506
507 return ret;
508}
509
510static int bcm_vk_msg_chan_init(struct bcm_vk_msg_chan *chan)
511{
512 u32 i;
513
514 mutex_init(&chan->msgq_mutex);
515 spin_lock_init(&chan->pendq_lock);
516 for (i = 0; i < VK_MSGQ_MAX_NR; i++)
517 INIT_LIST_HEAD(&chan->pendq[i]);
518
519 return 0;
520}
521
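/*
 * add a work entry to the channel's pending list; entries that already
 * carry a response also bump the context's pending count and wake up
 * any reader
 */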
522static void bcm_vk_append_pendq(struct bcm_vk_msg_chan *chan, u16 q_num,
523 struct bcm_vk_wkent *entry)
524{
525 struct bcm_vk_ctx *ctx;
526
527 spin_lock(&chan->pendq_lock);
528 list_add_tail(&entry->node, &chan->pendq[q_num]);
529 if (entry->to_h_msg) {
530 ctx = entry->ctx;
531 atomic_inc(&ctx->pend_cnt);
532 wake_up_interruptible(&ctx->rd_wq);
533 }
534 spin_unlock(&chan->pendq_lock);
535}
536
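/*
 * append in-band SG lists to the tail of the message if the target
 * queue has enough room; returns the number of extra message blocks
 * used, or 0 if skipped
 */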
537static u32 bcm_vk_append_ib_sgl(struct bcm_vk *vk,
538 struct bcm_vk_wkent *entry,
539 struct _vk_data *data,
540 unsigned int num_planes)
541{
542 unsigned int i;
543 unsigned int item_cnt = 0;
544 struct device *dev = &vk->pdev->dev;
545 struct bcm_vk_msg_chan *chan = &vk->to_v_msg_chan;
546 struct vk_msg_blk *msg = &entry->to_v_msg[0];
547 struct bcm_vk_msgq __iomem *msgq;
548 struct bcm_vk_sync_qinfo *qinfo;
549 u32 ib_sgl_size = 0;
550 u8 *buf = (u8 *)&entry->to_v_msg[entry->to_v_blks];
551 u32 avail;
552 u32 q_num;
553
554 /* check if high watermark is hit, and if so, skip */
555 q_num = get_q_num(msg);
556 msgq = chan->msgq[q_num];
557 qinfo = &chan->sync_qinfo[q_num];
558 avail = msgq_avail_space(msgq, qinfo);
559 if (avail < qinfo->q_low) {
560 dev_dbg(dev, "Skip inserting inband SGL, [0x%x/0x%x]\n",
561 avail, qinfo->q_size);
562 return 0;
563 }
564
565 for (i = 0; i < num_planes; i++) {
566 if (data[i].address &&
567 (ib_sgl_size + data[i].size) <= vk->ib_sgl_size) {
568 item_cnt++;
569 memcpy(buf, entry->dma[i].sglist, data[i].size);
570 ib_sgl_size += data[i].size;
571 buf += data[i].size;
572 }
573 }
574
575 dev_dbg(dev, "Num %u sgl items appended, size 0x%x, room 0x%x\n",
576 item_cnt, ib_sgl_size, vk->ib_sgl_size);
577
578 /* round up size */
579 ib_sgl_size = (ib_sgl_size + VK_MSGQ_BLK_SIZE - 1)
580 >> VK_MSGQ_BLK_SZ_SHIFT;
581
582 return ib_sgl_size;
583}
584
585void bcm_to_v_q_doorbell(struct bcm_vk *vk, u32 q_num, u32 db_val)
586{
587 struct bcm_vk_msg_chan *chan = &vk->to_v_msg_chan;
588 struct bcm_vk_sync_qinfo *qinfo = &chan->sync_qinfo[q_num];
589
590 vkwrite32(vk, db_val, BAR_0, qinfo->q_db_offset);
591}
592
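/*
 * copy a work entry's message blocks into the to_v queue in BAR space,
 * retrying briefly while the queue is full, then advance wr_idx and
 * ring the doorbell
 */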
593static int bcm_to_v_msg_enqueue(struct bcm_vk *vk, struct bcm_vk_wkent *entry)
594{
595 static u32 seq_num;
596 struct bcm_vk_msg_chan *chan = &vk->to_v_msg_chan;
597 struct device *dev = &vk->pdev->dev;
598 struct vk_msg_blk *src = &entry->to_v_msg[0];
599
600 struct vk_msg_blk __iomem *dst;
601 struct bcm_vk_msgq __iomem *msgq;
602 struct bcm_vk_sync_qinfo *qinfo;
603 u32 q_num = get_q_num(src);
604 u32 wr_idx; /* local copy */
605 u32 i;
606 u32 avail;
607 u32 retry;
608
609 if (entry->to_v_blks != src->size + 1) {
610 dev_err(dev, "number of blks %d not matching %d MsgId[0x%x]: func %d ctx 0x%x\n",
611 entry->to_v_blks,
612 src->size + 1,
613 get_msg_id(src),
614 src->function_id,
615 src->context_id);
616 return -EMSGSIZE;
617 }
618
619 msgq = chan->msgq[q_num];
620 qinfo = &chan->sync_qinfo[q_num];
621
622 mutex_lock(&chan->msgq_mutex);
623
624 avail = msgq_avail_space(msgq, qinfo);
625
626 /* if not enough space, return EAGAIN and let the app handle it */
627 retry = 0;
628 while ((avail < entry->to_v_blks) &&
629 (retry++ < BCM_VK_H2VK_ENQ_RETRY)) {
630 mutex_unlock(&chan->msgq_mutex);
631
632 msleep(BCM_VK_H2VK_ENQ_RETRY_DELAY_MS);
633 mutex_lock(&chan->msgq_mutex);
634 avail = msgq_avail_space(msgq, qinfo);
635 }
636 if (retry > BCM_VK_H2VK_ENQ_RETRY) {
637 mutex_unlock(&chan->msgq_mutex);
638 return -EAGAIN;
639 }
640
641 /* at this point, mutex is taken and there is enough space */
642 entry->seq_num = seq_num++; /* update debug seq number */
643 wr_idx = readl_relaxed(&msgq->wr_idx);
644
645 if (wr_idx >= qinfo->q_size) {
646 dev_crit(dev, "Invalid wr_idx 0x%x => max 0x%x!",
647 wr_idx, qinfo->q_size);
648 bcm_vk_blk_drv_access(vk);
649 bcm_vk_set_host_alert(vk, ERR_LOG_HOST_PCIE_DWN);
650 goto idx_err;
651 }
652
653 dst = msgq_blk_addr(qinfo, wr_idx);
654 for (i = 0; i < entry->to_v_blks; i++) {
655 memcpy_toio(dst, src, sizeof(*dst));
656
657 src++;
658 wr_idx = msgq_inc(qinfo, wr_idx, 1);
659 dst = msgq_blk_addr(qinfo, wr_idx);
660 }
661
662 /* flush the write pointer */
663 writel(wr_idx, &msgq->wr_idx);
664
665 /* log new info for debugging */
666 dev_dbg(dev,
667 "MsgQ[%d] [Rd Wr] = [%d %d] blks inserted %d - Q = [u-%d a-%d]/%d\n",
668 readl_relaxed(&msgq->num),
669 readl_relaxed(&msgq->rd_idx),
670 wr_idx,
671 entry->to_v_blks,
672 msgq_occupied(msgq, qinfo),
673 msgq_avail_space(msgq, qinfo),
674 readl_relaxed(&msgq->size));
675 /*
676 * ring the doorbell for this queue. 1 is added to the wr_idx
677 * so that the VK side never sees a value of 0, which would be
678 * indistinguishable from the initial value.
679 */
680 bcm_to_v_q_doorbell(vk, q_num, wr_idx + 1);
681idx_err:
682 mutex_unlock(&chan->msgq_mutex);
683 return 0;
684}
685
686int bcm_vk_send_shutdown_msg(struct bcm_vk *vk, u32 shut_type,
687 const pid_t pid, const u32 q_num)
688{
689 int rc = 0;
690 struct bcm_vk_wkent *entry;
691 struct device *dev = &vk->pdev->dev;
692
693 /*
694 * check if the marker is still good. Sometimes, the PCIe interface may
695 * have gone down, and if we push things down based on broken
696 * values, the kernel may panic.
697 */
698 if (!bcm_vk_msgq_marker_valid(vk)) {
699 dev_info(dev, "PCIe comm chan - invalid marker (0x%x)!\n",
700 vkread32(vk, BAR_1, VK_BAR1_MSGQ_DEF_RDY));
701 return -EINVAL;
702 }
703
704 entry = kzalloc(sizeof(*entry) +
705 sizeof(struct vk_msg_blk), GFP_KERNEL);
706 if (!entry)
707 return -ENOMEM;
708
709 /* fill up necessary data */
710 entry->to_v_msg[0].function_id = VK_FID_SHUTDOWN;
711 set_q_num(&entry->to_v_msg[0], q_num);
712 set_msg_id(&entry->to_v_msg[0], VK_SIMPLEX_MSG_ID);
713 entry->to_v_blks = 1; /* always 1 block */
714
715 entry->to_v_msg[0].cmd = shut_type;
716 entry->to_v_msg[0].arg = pid;
717
718 rc = bcm_to_v_msg_enqueue(vk, entry);
719 if (rc)
720 dev_err(dev,
721 "Sending shutdown message to q %d for pid %d fails.\n",
722 get_q_num(&entry->to_v_msg[0]), pid);
723
724 kfree(entry);
725
726 return rc;
727}
728
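/*
 * handle the last session of a pid going away: send a shutdown message
 * for that pid unless it is the reset process itself
 */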
729static int bcm_vk_handle_last_sess(struct bcm_vk *vk, const pid_t pid,
730 const u32 q_num)
731{
732 int rc = 0;
733 struct device *dev = &vk->pdev->dev;
734
735 /*
736 * if the message queue is not initialized, don't send anything down;
737 * and if this is the reset session, just clear reset_pid.
738 */
739 if (!bcm_vk_drv_access_ok(vk)) {
740 if (vk->reset_pid == pid)
741 vk->reset_pid = 0;
742 return -EPERM;
743 }
744
745 dev_dbg(dev, "No more sessions, shut down pid %d\n", pid);
746
747 /* only need to do it if it is not the reset process */
748 if (vk->reset_pid != pid)
749 rc = bcm_vk_send_shutdown_msg(vk, VK_SHUTDOWN_PID, pid, q_num);
750 else
751 /* put reset_pid to 0 if it is exiting last session */
752 vk->reset_pid = 0;
753
754 return rc;
755}
756
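/*
 * find the pending to_v entry on queue q_num that matches msg_id,
 * unlink it and release its message id; returns NULL if not found
 */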
757static struct bcm_vk_wkent *bcm_vk_dequeue_pending(struct bcm_vk *vk,
758 struct bcm_vk_msg_chan *chan,
759 u16 q_num,
760 u16 msg_id)
761{
762 bool found = false;
763 struct bcm_vk_wkent *entry;
764
765 spin_lock(&chan->pendq_lock);
766 list_for_each_entry(entry, &chan->pendq[q_num], node) {
767 if (get_msg_id(&entry->to_v_msg[0]) == msg_id) {
768 list_del(&entry->node);
769 found = true;
770 bcm_vk_msgid_bitmap_clear(vk, msg_id, 1);
771 break;
772 }
773 }
774 spin_unlock(&chan->pendq_lock);
775 return ((found) ? entry : NULL);
776}
777
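/*
 * drain the to_h message queues: copy each response out of BAR memory,
 * match it with its pending to_v entry and move that entry to the to_h
 * pending queue; returns the number of messages processed or -ENOMEM
 */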
778s32 bcm_to_h_msg_dequeue(struct bcm_vk *vk)
779{
780 struct device *dev = &vk->pdev->dev;
781 struct bcm_vk_msg_chan *chan = &vk->to_h_msg_chan;
782 struct vk_msg_blk *data;
783 struct vk_msg_blk __iomem *src;
784 struct vk_msg_blk *dst;
785 struct bcm_vk_msgq __iomem *msgq;
786 struct bcm_vk_sync_qinfo *qinfo;
787 struct bcm_vk_wkent *entry;
788 u32 rd_idx, wr_idx;
789 u32 q_num, msg_id, j;
790 u32 num_blks;
791 s32 total = 0;
792 int cnt = 0;
793 int msg_processed = 0;
794 int max_msg_to_process;
795 bool exit_loop;
796
797 /*
798 * drain all the messages from the queues; for each one, find its
799 * pending entry in the to_v queue based on msg_id & q_num, and move
800 * that entry to the to_h pending queue, where it waits for the
801 * user space program to extract it
802 */
803 mutex_lock(&chan->msgq_mutex);
804
805 for (q_num = 0; q_num < chan->q_nr; q_num++) {
806 msgq = chan->msgq[q_num];
807 qinfo = &chan->sync_qinfo[q_num];
808 max_msg_to_process = BCM_VK_MSG_PROC_MAX_LOOP * qinfo->q_size;
809
810 rd_idx = readl_relaxed(&msgq->rd_idx);
811 wr_idx = readl_relaxed(&msgq->wr_idx);
812 msg_processed = 0;
813 exit_loop = false;
814 while ((rd_idx != wr_idx) && !exit_loop) {
815 u8 src_size;
816
817 /*
818 * Make a local copy and get a pointer to the src blk.
819 * The rd_idx is masked before getting the pointer to
820 * avoid out-of-bounds access in case the interface goes
821 * down. It would end up pointing to the last block in
822 * the buffer, but the subsequent src->size check is
823 * able to catch this.
824 */
825 src = msgq_blk_addr(qinfo, rd_idx & qinfo->q_mask);
826 src_size = readb(&src->size);
827
828 if ((rd_idx >= qinfo->q_size) ||
829 (src_size > (qinfo->q_size - 1))) {
830 dev_crit(dev,
831 "Invalid rd_idx 0x%x or size 0x%x => max 0x%x!",
832 rd_idx, src_size, qinfo->q_size);
833 bcm_vk_blk_drv_access(vk);
834 bcm_vk_set_host_alert(vk,
835 ERR_LOG_HOST_PCIE_DWN);
836 goto idx_err;
837 }
838
839 num_blks = src_size + 1;
840 data = kzalloc(num_blks * VK_MSGQ_BLK_SIZE, GFP_KERNEL);
841 if (data) {
842 /* copy messages and linearize it */
843 dst = data;
844 for (j = 0; j < num_blks; j++) {
845 memcpy_fromio(dst, src, sizeof(*dst));
846
847 dst++;
848 rd_idx = msgq_inc(qinfo, rd_idx, 1);
849 src = msgq_blk_addr(qinfo, rd_idx);
850 }
851 total++;
852 } else {
853 /*
854 * if we could not allocate memory in kernel,
855 * that is fatal.
856 */
857 dev_crit(dev, "Kernel mem allocation failure.\n");
858 total = -ENOMEM;
859 goto idx_err;
860 }
861
862 /* flush rd pointer after a message is dequeued */
863 writel(rd_idx, &msgq->rd_idx);
864
865 /* log new info for debugging */
866 dev_dbg(dev,
867 "MsgQ[%d] [Rd Wr] = [%d %d] blks extracted %d - Q = [u-%d a-%d]/%d\n",
868 readl_relaxed(&msgq->num),
869 rd_idx,
870 wr_idx,
871 num_blks,
872 msgq_occupied(msgq, qinfo),
873 msgq_avail_space(msgq, qinfo),
874 readl_relaxed(&msgq->size));
875
876 /*
877 * No need to search if it is an autonomous one-way
878 * message from driver, as these messages do not bear
879 * a to_v pending item. Currently, only the shutdown
880 * message falls into this category.
881 */
882 if (data->function_id == VK_FID_SHUTDOWN) {
883 kfree(data);
884 continue;
885 }
886
887 msg_id = get_msg_id(data);
888 /* lookup original message in to_v direction */
889 entry = bcm_vk_dequeue_pending(vk,
890 &vk->to_v_msg_chan,
891 q_num,
892 msg_id);
893
894 /*
895 * if a message arrives that has no prior send (no pending
896 * entry), this is the place to add handling for it
897 */
898 if (entry) {
899 entry->to_h_blks = num_blks;
900 entry->to_h_msg = data;
901 bcm_vk_append_pendq(&vk->to_h_msg_chan,
902 q_num, entry);
903
904 } else {
905 if (cnt++ < batch_log)
906 dev_info(dev,
907 "Could not find MsgId[0x%x] for resp func %d bmap %d\n",
908 msg_id, data->function_id,
909 test_bit(msg_id, vk->bmap));
910 kfree(data);
911 }
912 /* Fetch wr_idx to handle more back-to-back events */
913 wr_idx = readl(&msgq->wr_idx);
914
915 /*
916 * cap the max so that even when handling more back-to-back events,
917 * we won't hold the CPU too long or loop forever in case the rd/wr
918 * indexes are corrupted.
919 */
920 if (++msg_processed >= max_msg_to_process) {
921 dev_warn(dev, "Q[%d] Per loop processing exceeds %d\n",
922 q_num, max_msg_to_process);
923 exit_loop = true;
924 }
925 }
926 }
927idx_err:
928 mutex_unlock(&chan->msgq_mutex);
929 dev_dbg(dev, "total %d drained from queues\n", total);
930
931 return total;
932}
933
934/*
935 * init routine for all required data structures
936 */
937static int bcm_vk_data_init(struct bcm_vk *vk)
938{
939 int i;
940
941 spin_lock_init(&vk->ctx_lock);
942 for (i = 0; i < ARRAY_SIZE(vk->ctx); i++) {
943 vk->ctx[i].in_use = false;
944 vk->ctx[i].idx = i; /* self identity */
945 vk->ctx[i].miscdev = NULL;
946 }
947 spin_lock_init(&vk->msg_id_lock);
948 spin_lock_init(&vk->host_alert_lock);
949 vk->msg_id = 0;
950
951 /* initialize hash table */
952 for (i = 0; i < VK_PID_HT_SZ; i++)
953 INIT_LIST_HEAD(&vk->pid_ht[i].head);
954
955 return 0;
956}
957
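/*
 * message queue interrupt from the card; the actual dequeuing is
 * deferred to the driver workqueue
 */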
958irqreturn_t bcm_vk_msgq_irqhandler(int irq, void *dev_id)
959{
960 struct bcm_vk *vk = dev_id;
961
962 if (!bcm_vk_drv_access_ok(vk)) {
963 dev_err(&vk->pdev->dev,
964 "Interrupt %d received when msgq not inited\n", irq);
965 goto skip_schedule_work;
966 }
967
968 queue_work(vk->wq_thread, &vk->wq_work);
969
970skip_schedule_work:
971 return IRQ_HANDLED;
972}
973
974int bcm_vk_open(struct inode *inode, struct file *p_file)
975{
976 struct bcm_vk_ctx *ctx;
977 struct miscdevice *miscdev = (struct miscdevice *)p_file->private_data;
978 struct bcm_vk *vk = container_of(miscdev, struct bcm_vk, miscdev);
979 struct device *dev = &vk->pdev->dev;
980 int rc = 0;
981
982 /* get a context and set it up for file */
983 ctx = bcm_vk_get_ctx(vk, task_tgid_nr(current));
984 if (!ctx) {
985 dev_err(dev, "Error allocating context\n");
986 rc = -ENOMEM;
987 } else {
988 /*
989 * set up context and replace private data with context for
990 * other methods to use. The reason for the context is that
991 * multiple sessions are allowed to open the device node, and for
992 * each file open, when the upper layer queries the response,
993 * only those tied to that specific open should be
994 * returned. The context->idx will be used for such binding
995 */
996 ctx->miscdev = miscdev;
997 p_file->private_data = ctx;
998 dev_dbg(dev, "ctx_returned with idx %d, pid %d\n",
999 ctx->idx, ctx->pid);
1000 }
1001 return rc;
1002}
1003
1004ssize_t bcm_vk_read(struct file *p_file,
1005 char __user *buf,
1006 size_t count,
1007 loff_t *f_pos)
1008{
1009 ssize_t rc = -ENOMSG;
1010 struct bcm_vk_ctx *ctx = p_file->private_data;
1011 struct bcm_vk *vk = container_of(ctx->miscdev, struct bcm_vk,
1012 miscdev);
1013 struct device *dev = &vk->pdev->dev;
1014 struct bcm_vk_msg_chan *chan = &vk->to_h_msg_chan;
1015 struct bcm_vk_wkent *entry = NULL;
1016 u32 q_num;
1017 u32 rsp_length;
1018 bool found = false;
1019
1020 if (!bcm_vk_drv_access_ok(vk))
1021 return -EPERM;
1022
1023 dev_dbg(dev, "Buf count %zu\n", count);
1024 found = false;
1025
1026 /*
1027 * search through the pendq on the to_h chan, and return only those
1028 * that belong to the same context. Search is always from the high to
1029 * the low priority queues
1030 */
1031 spin_lock(&chan->pendq_lock);
1032 for (q_num = 0; q_num < chan->q_nr; q_num++) {
1033 list_for_each_entry(entry, &chan->pendq[q_num], node) {
1034 if (entry->ctx->idx == ctx->idx) {
1035 if (count >=
1036 (entry->to_h_blks * VK_MSGQ_BLK_SIZE)) {
1037 list_del(&entry->node);
1038 atomic_dec(&ctx->pend_cnt);
1039 found = true;
1040 } else {
1041 /* buffer not big enough */
1042 rc = -EMSGSIZE;
1043 }
1044 goto read_loop_exit;
1045 }
1046 }
1047 }
1048read_loop_exit:
1049 spin_unlock(&chan->pendq_lock);
1050
1051 if (found) {
1052 /* retrieve the passed down msg_id */
1053 set_msg_id(&entry->to_h_msg[0], entry->usr_msg_id);
1054 rsp_length = entry->to_h_blks * VK_MSGQ_BLK_SIZE;
1055 if (copy_to_user(buf, entry->to_h_msg, rsp_length) == 0)
1056 rc = rsp_length;
1057
1058 bcm_vk_free_wkent(dev, entry);
1059 } else if (rc == -EMSGSIZE) {
1060 struct vk_msg_blk tmp_msg = entry->to_h_msg[0];
1061
1062 /*
1063 * in this case, return just the first block, so
1064 * that app knows what size it is looking for.
1065 */
1066 set_msg_id(&tmp_msg, entry->usr_msg_id);
1067 tmp_msg.size = entry->to_h_blks - 1;
1068 if (copy_to_user(buf, &tmp_msg, VK_MSGQ_BLK_SIZE) != 0) {
1069 dev_err(dev, "Error return 1st block in -EMSGSIZE\n");
1070 rc = -EFAULT;
1071 }
1072 }
1073 return rc;
1074}
1075
1076ssize_t bcm_vk_write(struct file *p_file,
1077 const char __user *buf,
1078 size_t count,
1079 loff_t *f_pos)
1080{
1081 ssize_t rc;
1082 struct bcm_vk_ctx *ctx = p_file->private_data;
1083 struct bcm_vk *vk = container_of(ctx->miscdev, struct bcm_vk,
1084 miscdev);
1085 struct bcm_vk_msgq __iomem *msgq;
1086 struct device *dev = &vk->pdev->dev;
1087 struct bcm_vk_wkent *entry;
1088 u32 sgl_extra_blks;
1089 u32 q_num;
1090 u32 msg_size;
1091 u32 msgq_size;
1092
1093 if (!bcm_vk_drv_access_ok(vk))
1094 return -EPERM;
1095
1096 dev_dbg(dev, "Msg count %zu\n", count);
1097
1098 /* first, sanity check that count is a multiple of the basic blk size */
1099 if (count & (VK_MSGQ_BLK_SIZE - 1)) {
1100 dev_err(dev, "Failure with size %zu not multiple of %zu\n",
1101 count, VK_MSGQ_BLK_SIZE);
1102 rc = -EINVAL;
1103 goto write_err;
1104 }
1105
1106 /* allocate the work entry + buffer for size count and inband sgl */
1107 entry = kzalloc(sizeof(*entry) + count + vk->ib_sgl_size,
1108 GFP_KERNEL);
1109 if (!entry) {
1110 rc = -ENOMEM;
1111 goto write_err;
1112 }
1113
1114 /* now copy msg from user space, and then formulate the work entry */
1115 if (copy_from_user(&entry->to_v_msg[0], buf, count)) {
1116 rc = -EFAULT;
1117 goto write_free_ent;
1118 }
1119
1120 entry->to_v_blks = count >> VK_MSGQ_BLK_SZ_SHIFT;
1121 entry->ctx = ctx;
1122
1123 /* check that the number of blks does not exceed the queue space */
1124 q_num = get_q_num(&entry->to_v_msg[0]);
1125 msgq = vk->to_v_msg_chan.msgq[q_num];
1126 msgq_size = readl_relaxed(&msgq->size);
1127 if (entry->to_v_blks + (vk->ib_sgl_size >> VK_MSGQ_BLK_SZ_SHIFT)
1128 > (msgq_size - 1)) {
1129 dev_err(dev, "Blk size %d exceed max queue size allowed %d\n",
1130 entry->to_v_blks, msgq_size - 1);
1131 rc = -EINVAL;
1132 goto write_free_ent;
1133 }
1134
1135 /* Use internal message id */
1136 entry->usr_msg_id = get_msg_id(&entry->to_v_msg[0]);
1137 rc = bcm_vk_get_msg_id(vk);
1138 if (rc == VK_MSG_ID_OVERFLOW) {
1139 dev_err(dev, "msg_id overflow\n");
1140 rc = -EOVERFLOW;
1141 goto write_free_ent;
1142 }
1143 set_msg_id(&entry->to_v_msg[0], rc);
1144 ctx->q_num = q_num;
1145
1146 dev_dbg(dev,
1147 "[Q-%d]Message ctx id %d, usr_msg_id 0x%x sent msg_id 0x%x\n",
1148 ctx->q_num, ctx->idx, entry->usr_msg_id,
1149 get_msg_id(&entry->to_v_msg[0]));
1150
1151 if (entry->to_v_msg[0].function_id == VK_FID_TRANS_BUF) {
1152 /* Convert any pointers to sg list */
1153 unsigned int num_planes;
1154 int dir;
1155 struct _vk_data *data;
1156
1157 /*
1158 * check if we are in reset, if so, no buffer transfer is
1159 * allowed and return error.
1160 */
1161 if (vk->reset_pid) {
1162 dev_dbg(dev, "No Transfer allowed during reset, pid %d.\n",
1163 ctx->pid);
1164 rc = -EACCES;
1165 goto write_free_msgid;
1166 }
1167
1168 num_planes = entry->to_v_msg[0].cmd & VK_CMD_PLANES_MASK;
1169 if ((entry->to_v_msg[0].cmd & VK_CMD_MASK) == VK_CMD_DOWNLOAD)
1170 dir = DMA_FROM_DEVICE;
1171 else
1172 dir = DMA_TO_DEVICE;
1173
1174 /* Calculate vk_data location */
1175 /* Go to end of the message */
1176 msg_size = entry->to_v_msg[0].size;
1177 if (msg_size > entry->to_v_blks) {
1178 rc = -EMSGSIZE;
1179 goto write_free_msgid;
1180 }
1181
1182 data = (struct _vk_data *)&entry->to_v_msg[msg_size + 1];
1183
1184 /* Now back up to the start of the pointers */
1185 data -= num_planes;
1186
1187 /* Convert user addresses to DMA SG List */
1188 rc = bcm_vk_sg_alloc(dev, entry->dma, dir, data, num_planes);
1189 if (rc)
1190 goto write_free_msgid;
1191
1192 atomic_inc(&ctx->dma_cnt);
1193 /* try to embed inband sgl */
1194 sgl_extra_blks = bcm_vk_append_ib_sgl(vk, entry, data,
1195 num_planes);
1196 entry->to_v_blks += sgl_extra_blks;
1197 entry->to_v_msg[0].size += sgl_extra_blks;
1198 } else if (entry->to_v_msg[0].function_id == VK_FID_INIT &&
1199 entry->to_v_msg[0].context_id == VK_NEW_CTX) {
1200 /*
1201 * Init happens in 2 stages; only the first stage contains the
1202 * pid that needs translating.
1203 */
1204 pid_t org_pid, pid;
1205
1206 /*
1207 * translate the pid into the unique host space as user
1208 * may run sessions inside containers or process
1209 * namespaces.
1210 */
1211#define VK_MSG_PID_MASK 0xffffff00
1212#define VK_MSG_PID_SH 8
1213 org_pid = (entry->to_v_msg[0].arg & VK_MSG_PID_MASK)
1214 >> VK_MSG_PID_SH;
1215
1216 pid = task_tgid_nr(current);
1217 entry->to_v_msg[0].arg =
1218 (entry->to_v_msg[0].arg & ~VK_MSG_PID_MASK) |
1219 (pid << VK_MSG_PID_SH);
1220 if (org_pid != pid)
1221 dev_dbg(dev, "In PID 0x%x(%d), converted PID 0x%x(%d)\n",
1222 org_pid, org_pid, pid, pid);
1223 }
1224
1225 /*
1226 * store work entry to pending queue until a response is received.
1227 * This needs to be done before enqueuing the message
1228 */
1229 bcm_vk_append_pendq(&vk->to_v_msg_chan, q_num, entry);
1230
1231 rc = bcm_to_v_msg_enqueue(vk, entry);
1232 if (rc) {
1233 dev_err(dev, "Fail to enqueue msg to to_v queue\n");
1234
1235 /* remove message from pending list */
1236 entry = bcm_vk_dequeue_pending
1237 (vk,
1238 &vk->to_v_msg_chan,
1239 q_num,
1240 get_msg_id(&entry->to_v_msg[0]));
1241 goto write_free_ent;
1242 }
1243
1244 return count;
1245
1246write_free_msgid:
1247 bcm_vk_msgid_bitmap_clear(vk, get_msg_id(&entry->to_v_msg[0]), 1);
1248write_free_ent:
1249 kfree(entry);
1250write_err:
1251 return rc;
1252}
1253
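/* poll support: the device is readable when this context has pending responses */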
1254__poll_t bcm_vk_poll(struct file *p_file, struct poll_table_struct *wait)
1255{
1256 __poll_t ret = 0;
1257 int cnt;
1258 struct bcm_vk_ctx *ctx = p_file->private_data;
1259 struct bcm_vk *vk = container_of(ctx->miscdev, struct bcm_vk, miscdev);
1260 struct device *dev = &vk->pdev->dev;
1261
1262 poll_wait(p_file, &ctx->rd_wq, wait);
1263
1264 cnt = atomic_read(&ctx->pend_cnt);
1265 if (cnt) {
1266 ret = (__force __poll_t)(POLLIN | POLLRDNORM);
1267 if (cnt < 0) {
1268 dev_err(dev, "Error cnt %d, setting back to 0", cnt);
1269 atomic_set(&ctx->pend_cnt, 0);
1270 }
1271 }
1272
1273 return ret;
1274}
1275
1276int bcm_vk_release(struct inode *inode, struct file *p_file)
1277{
1278 int ret;
1279 struct bcm_vk_ctx *ctx = p_file->private_data;
1280 struct bcm_vk *vk = container_of(ctx->miscdev, struct bcm_vk, miscdev);
1281 struct device *dev = &vk->pdev->dev;
1282 pid_t pid = ctx->pid;
1283 int dma_cnt;
1284 unsigned long timeout, start_time;
1285
1286 /*
1287 * if there are outstanding DMA transactions, we need to delay long enough
1288 * to ensure that the card side has stopped touching the host buffer
1289 * and its SGL list. A race condition could happen if the host app is killed
1290 * abruptly, e.g. kill -9, while some DMA transfer orders are still in flight.
1291 * Nothing can be done except delay, as the host side is running in a
1292 * completely async fashion.
1293 */
1294 start_time = jiffies;
1295 timeout = start_time + msecs_to_jiffies(BCM_VK_DMA_DRAIN_MAX_MS);
1296 do {
1297 if (time_after(jiffies, timeout)) {
1298 dev_warn(dev, "%d dma still pending for [fd-%d] pid %d\n",
1299 dma_cnt, ctx->idx, pid);
1300 break;
1301 }
1302 dma_cnt = atomic_read(&ctx->dma_cnt);
1303 cpu_relax();
1304 cond_resched();
1305 } while (dma_cnt);
1306 dev_dbg(dev, "Draining for [fd-%d] pid %d - delay %d ms\n",
1307 ctx->idx, pid, jiffies_to_msecs(jiffies - start_time));
1308
1309 bcm_vk_drain_all_pend(&vk->pdev->dev, &vk->to_v_msg_chan, ctx);
1310 bcm_vk_drain_all_pend(&vk->pdev->dev, &vk->to_h_msg_chan, ctx);
1311
1312 ret = bcm_vk_free_ctx(vk, ctx);
1313 if (ret == 0)
1314 ret = bcm_vk_handle_last_sess(vk, pid, ctx->q_num);
1315 else
1316 ret = 0;
1317
1318 kref_put(&vk->kref, bcm_vk_release_data);
1319
1320 return ret;
1321}
1322
1323int bcm_vk_msg_init(struct bcm_vk *vk)
1324{
1325 struct device *dev = &vk->pdev->dev;
1326 int ret;
1327
1328 if (bcm_vk_data_init(vk)) {
1329 dev_err(dev, "Error initializing internal data structures\n");
1330 return -EINVAL;
1331 }
1332
1333 if (bcm_vk_msg_chan_init(&vk->to_v_msg_chan) ||
1334 bcm_vk_msg_chan_init(&vk->to_h_msg_chan)) {
1335 dev_err(dev, "Error initializing communication channel\n");
1336 return -EIO;
1337 }
1338
1339 /* read msgq info if ready */
1340 ret = bcm_vk_sync_msgq(vk, false);
1341 if (ret && (ret != -EAGAIN)) {
1342 dev_err(dev, "Error reading comm msg Q info\n");
1343 return -EIO;
1344 }
1345
1346 return 0;
1347}
1348
1349void bcm_vk_msg_remove(struct bcm_vk *vk)
1350{
1351 bcm_vk_blk_drv_access(vk);
1352
1353 /* drain all pending items */
1354 bcm_vk_drain_all_pend(&vk->pdev->dev, &vk->to_v_msg_chan, NULL);
1355 bcm_vk_drain_all_pend(&vk->pdev->dev, &vk->to_h_msg_chan, NULL);
1356}
1357