/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
 * Copyright(c) 2016 - 2018 Intel Corporation.
 */

#ifndef DEF_RDMAVT_INCCQ_H
#define DEF_RDMAVT_INCCQ_H

#include <linux/kthread.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_verbs.h>

/*
 * Define an ib_cq_notify value that is not valid so we know when CQ
 * notifications are armed.
 */
#define RVT_CQ_NONE      (IB_CQ_NEXT_COMP + 1)
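
/*
 * Illustrative sketch, not part of the rdmavt API: a hypothetical helper
 * showing how the RVT_CQ_NONE sentinel can be used to tell whether
 * completion notifications are currently armed.  When armed, the notify
 * state holds a valid ib_cq_notify_flags value such as IB_CQ_SOLICITED
 * or IB_CQ_NEXT_COMP; RVT_CQ_NONE marks the unarmed state.
 */
static inline bool rvt_example_cq_armed(u8 notify)
{
        return notify != RVT_CQ_NONE;
}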

/*
 * Define a read macro that applies an smp_load_acquire() memory barrier
 * when reading an index of a circular buffer that is mmap'ed to user
 * space.
 */
#define RDMA_READ_UAPI_ATOMIC(member) smp_load_acquire(&(member).val)

/*
 * Define a write macro that uses an smp_store_release() memory barrier
 * when writing an index of a circular buffer that is mmap'ed to user
 * space.
 */
#define RDMA_WRITE_UAPI_ATOMIC(member, x) smp_store_release(&(member).val, x)
#include <rdma/rvt-abi.h>
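
/*
 * Illustrative sketch, not part of the rdmavt API: a hypothetical helper
 * showing the intended pairing of the two macros above on the producer
 * side of the mmap'ed struct rvt_cq_wc ring.  "sz" is assumed to be the
 * number of uqueue[] slots; the function and parameter names are for
 * illustration only.
 */
static inline bool rvt_example_uapi_cq_add(struct rvt_cq_wc *u_wc, u32 sz,
                                           const struct ib_uverbs_wc *wc)
{
        u32 head = RDMA_READ_UAPI_ATOMIC(u_wc->head);
        /* Acquire: observe the consumer's latest release of the tail index. */
        u32 tail = RDMA_READ_UAPI_ATOMIC(u_wc->tail);
        u32 next = (head + 1 >= sz) ? 0 : head + 1;

        if (next == tail)
                return false;   /* ring full: one slot always stays unused */

        u_wc->uqueue[head] = *wc;
        /* Release: publish the entry before exposing the new head index. */
        RDMA_WRITE_UAPI_ATOMIC(u_wc->head, next);
        return true;
}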

/*
 * This structure is used to contain the head pointer, tail pointer,
 * and completion queue entries as a single memory allocation for
 * completion queues used from the kernel.  The analogous user-space
 * layout, struct rvt_cq_wc in rdma/rvt-abi.h, is the one that is
 * mmap'ed into user space.
 */
struct rvt_k_cq_wc {
        u32 head;               /* index of next entry to fill */
        u32 tail;               /* index of next ib_poll_cq() entry */
        struct ib_wc kqueue[];
};
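
/*
 * Illustrative sketch, not part of the rdmavt API: a hypothetical helper
 * showing how the head/tail indices of the kernel ring are interpreted.
 * "sz" is assumed to be the number of kqueue[] slots; the ring is empty
 * when head == tail and can therefore hold at most sz - 1 completions.
 */
static inline u32 rvt_example_k_cq_count(const struct rvt_k_cq_wc *k_wc, u32 sz)
{
        u32 head = k_wc->head;
        u32 tail = k_wc->tail;

        return (head >= tail) ? head - tail : sz - (tail - head);
}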

/*
 * The completion queue structure.
 */
struct rvt_cq {
        struct ib_cq ibcq;
        struct work_struct comptask;    /* runs the completion handler */
        spinlock_t lock; /* protect changes in this struct */
        u8 notify;              /* armed notification type, RVT_CQ_NONE if unarmed */
        u8 triggered;           /* incremented each time comptask is queued */
        u8 cq_full;             /* set once the CQ has overflowed */
        int comp_vector_cpu;    /* CPU on which comptask is run */
        struct rvt_dev_info *rdi;       /* owning rvt device */
        struct rvt_cq_wc *queue;        /* user CQs: ring mmap'ed to user space */
        struct rvt_mmap_info *ip;       /* user CQs: mmap info for queue */
        struct rvt_k_cq_wc *kqueue;     /* kernel CQs: in-kernel ring */
};

static inline struct rvt_cq *ibcq_to_rvtcq(struct ib_cq *ibcq)
{
        return container_of(ibcq, struct rvt_cq, ibcq);
}

bool rvt_cq_enter(struct rvt_cq *cq, struct ib_wc *entry, bool solicited);
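
/*
 * Illustrative sketch, not part of the rdmavt API: a hypothetical helper
 * showing how a driver built on rdmavt might report a successful send
 * completion.  rvt_cq_enter() returns false when the completion could
 * not be queued (for example because the CQ overflowed).
 */
static inline bool rvt_example_post_send_wc(struct ib_cq *ibcq, u64 wr_id)
{
        struct ib_wc wc = {
                .wr_id = wr_id,
                .status = IB_WC_SUCCESS,
                .opcode = IB_WC_SEND,
        };

        return rvt_cq_enter(ibcq_to_rvtcq(ibcq), &wc, false);
}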

#endif /* DEF_RDMAVT_INCCQ_H */