/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
 * Copyright(c) 2016 Intel Corporation.
 */

#ifndef DEF_RDMAVT_INCMR_H
#define DEF_RDMAVT_INCMR_H

/*
 * For Memory Regions. This stuff should probably be moved into rdmavt/mr.h once
 * drivers no longer need access to the MR directly.
 */
#include <linux/percpu-refcount.h>

/*
 * A segment is a linear region of low physical memory.
 * Used by the verbs layer.
 */
struct rvt_seg {
	void *vaddr;
	size_t length;
};

/* The number of rvt_segs that fit in a page. */
#define RVT_SEGSZ (PAGE_SIZE / sizeof(struct rvt_seg))

struct rvt_segarray {
	struct rvt_seg segs[RVT_SEGSZ];
};

struct rvt_mregion {
	struct ib_pd *pd;	/* shares refcnt of ibmr.pd */
	u64 user_base;		/* User's address for this region */
	u64 iova;		/* IB start address of this region */
	size_t length;
	u32 lkey;
	u32 offset;		/* offset (bytes) to start of region */
	int access_flags;
	u32 max_segs;		/* number of rvt_segs in all the arrays */
	u32 mapsz;		/* size of the map array */
	atomic_t lkey_invalid;	/* true if current lkey is invalid */
	u8 page_shift;		/* 0 - non-uniform/non-power-of-2 sizes */
	u8 lkey_published;	/* in global table */
	struct percpu_ref refcount;
	struct completion comp;	/* complete when refcount goes to zero */
	struct rvt_segarray *map[];	/* the segments */
};

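/* Upper bound, in bits, on the size of the lkey table (at most 2^23 entries) */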
#define RVT_MAX_LKEY_TABLE_BITS 23

struct rvt_lkey_table {
	/* read mostly fields */
	u32 max;		/* size of the table */
	u32 shift;		/* lkey/rkey shift */
	struct rvt_mregion __rcu **table;
	/* writeable fields */
	/* protect changes in this struct */
	spinlock_t lock ____cacheline_aligned_in_smp;
	u32 next;		/* next unused index (speeds search) */
	u32 gen;		/* generation count */
};

/*
 * These keep track of the copy progress within a memory region.
 * Used by the verbs layer.
 */
struct rvt_sge {
	struct rvt_mregion *mr;
	void *vaddr;		/* kernel virtual address of segment */
	u32 sge_length;		/* length of the SGE */
	u32 length;		/* remaining length of the segment */
	u16 m;			/* current index: mr->map[m] */
	u16 n;			/* current index: mr->map[m]->segs[n] */
};

struct rvt_sge_state {
	struct rvt_sge *sg_list;	/* next SGE to be used if any */
	struct rvt_sge sge;	/* progress state for the current SGE */
	u32 total_len;
	u8 num_sge;
};

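/**
 * rvt_put_mr - release a reference on a memory region
 * @mr: the memory region
 */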
static inline void rvt_put_mr(struct rvt_mregion *mr)
{
	percpu_ref_put(&mr->refcount);
}

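/**
 * rvt_get_mr - take a reference on a memory region
 * @mr: the memory region
 */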
static inline void rvt_get_mr(struct rvt_mregion *mr)
{
	percpu_ref_get(&mr->refcount);
}

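/**
 * rvt_put_ss - release the MR references held by an SGE state
 * @ss: the SGE state
 *
 * Drops the reference on each remaining SGE's memory region and leaves
 * ss->num_sge at zero.
 */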
static inline void rvt_put_ss(struct rvt_sge_state *ss)
{
	while (ss->num_sge) {
		rvt_put_mr(ss->sge.mr);
		if (--ss->num_sge)
			ss->sge = *ss->sg_list++;
	}
}

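/**
 * rvt_get_sge_length - length usable from the current SGE segment
 * @sge: the SGE
 * @length: the number of bytes the caller wants to transfer
 *
 * Return: @length clipped to both the remaining segment length and the
 * remaining SGE length.
 */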
static inline u32 rvt_get_sge_length(struct rvt_sge *sge, u32 length)
{
	u32 len = sge->length;

	if (len > length)
		len = length;
	if (len > sge->sge_length)
		len = sge->sge_length;

	return len;
}

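/**
 * rvt_update_sge - advance an SGE state by @length bytes
 * @ss: the SGE state
 * @length: the number of bytes just consumed
 * @release: drop the MR reference when an SGE is fully consumed
 *
 * Moves the current SGE forward, switching to the next SGE, or to the
 * next segment of the current MR, as needed.  A caller typically
 * alternates rvt_get_sge_length() and rvt_update_sge(); a minimal
 * illustrative copy loop (dst, length and release are caller-provided,
 * not part of this header) would look roughly like:
 *
 *	while (length) {
 *		u32 len = rvt_get_sge_length(&ss->sge, length);
 *
 *		memcpy(dst, ss->sge.vaddr, len);
 *		rvt_update_sge(ss, len, release);
 *		dst += len;
 *		length -= len;
 *	}
 *
 * rvt_skip_sge() below follows the same pattern without the copy.
 */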
static inline void rvt_update_sge(struct rvt_sge_state *ss, u32 length,
				  bool release)
{
	struct rvt_sge *sge = &ss->sge;

	sge->vaddr += length;
	sge->length -= length;
	sge->sge_length -= length;
	if (sge->sge_length == 0) {
		if (release)
			rvt_put_mr(sge->mr);
		if (--ss->num_sge)
			*sge = *ss->sg_list++;
	} else if (sge->length == 0 && sge->mr->lkey) {
		if (++sge->n >= RVT_SEGSZ) {
			if (++sge->m >= sge->mr->mapsz)
				return;
			sge->n = 0;
		}
		sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
		sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
	}
}

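/**
 * rvt_skip_sge - skip over @length bytes of an SGE state without copying
 * @ss: the SGE state
 * @length: the number of bytes to skip
 * @release: passed through to rvt_update_sge()
 */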
static inline void rvt_skip_sge(struct rvt_sge_state *ss, u32 length,
				bool release)
{
	struct rvt_sge *sge = &ss->sge;

	while (length) {
		u32 len = rvt_get_sge_length(sge, length);

		WARN_ON_ONCE(len == 0);
		rvt_update_sge(ss, len, release);
		length -= len;
	}
}

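/* Test whether an SGE state or a memory region references the given lkey */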
bool rvt_ss_has_lkey(struct rvt_sge_state *ss, u32 lkey);
bool rvt_mr_has_lkey(struct rvt_mregion *mr, u32 lkey);

#endif /* DEF_RDMAVT_INCMR_H */