/*
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef IB_SRP_H
#define IB_SRP_H

#include <linux/types.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>

#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_sa.h>
#include <rdma/ib_cm.h>
#include <rdma/ib_fmr_pool.h>
#include <rdma/rdma_cm.h>

enum {
	SRP_PATH_REC_TIMEOUT_MS	= 1000,
	SRP_ABORT_TIMEOUT_MS	= 5000,

	SRP_PORT_REDIRECT	= 1,
	SRP_DLID_REDIRECT	= 2,
	SRP_STALE_CONN		= 3,

	SRP_DEF_SG_TABLESIZE	= 12,

	SRP_DEFAULT_QUEUE_SIZE	= 1 << 6,
	SRP_RSP_SQ_SIZE		= 1,
	SRP_TSK_MGMT_SQ_SIZE	= 1,
	SRP_DEFAULT_CMD_SQ_SIZE = SRP_DEFAULT_QUEUE_SIZE - SRP_RSP_SQ_SIZE -
				  SRP_TSK_MGMT_SQ_SIZE,

	SRP_TAG_NO_REQ		= ~0U,
	SRP_TAG_TSK_MGMT	= 1U << 31,

	SRP_MAX_PAGES_PER_MR	= 512,

	SRP_MAX_ADD_CDB_LEN	= 16,

	SRP_MAX_IMM_SGE		= 2,
	SRP_MAX_SGE		= SRP_MAX_IMM_SGE + 1,
	/*
	 * Choose the immediate data offset such that a 32 byte CDB still fits.
	 */
	SRP_IMM_DATA_OFFSET	= sizeof(struct srp_cmd) +
				  SRP_MAX_ADD_CDB_LEN +
				  sizeof(struct srp_imm_buf),
};
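
/*
 * Illustrative note, not part of the original header: a 32 byte CDB occupies
 * the 16 byte cdb[] field of struct srp_cmd plus up to SRP_MAX_ADD_CDB_LEN
 * additional CDB bytes, so immediate data placed at SRP_IMM_DATA_OFFSET can
 * never overlap the additional CDB.  A hypothetical compile-time check of
 * that property (assuming <scsi/srp.h> is available) could look like:
 *
 *	static_assert(SRP_IMM_DATA_OFFSET >=
 *		      sizeof(struct srp_cmd) + SRP_MAX_ADD_CDB_LEN);
 */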

enum srp_target_state {
	SRP_TARGET_SCANNING,
	SRP_TARGET_LIVE,
	SRP_TARGET_REMOVED,
};

enum srp_iu_type {
	SRP_IU_CMD,
	SRP_IU_TSK_MGMT,
	SRP_IU_RSP,
};

/*
 * @mr_page_mask: HCA memory registration page mask.
 * @mr_page_size: HCA memory registration page size.
 * @mr_max_size: Maximum size in bytes of a single FMR / FR registration
 *   request.
 */
struct srp_device {
	struct list_head	dev_list;
	struct ib_device	*dev;
	struct ib_pd		*pd;
	u32			global_rkey;
	u64			mr_page_mask;
	int			mr_page_size;
	int			mr_max_size;
	int			max_pages_per_mr;
	bool			has_fmr;
	bool			has_fr;
	bool			use_fmr;
	bool			use_fast_reg;
};
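
/*
 * Illustrative sketch, not part of the original header: assuming mr_page_mask
 * is ~(mr_page_size - 1), as the field names suggest, a hypothetical helper
 * returning the offset of a DMA address within an HCA registration page
 * could look like:
 *
 *	static inline u64 srp_mr_page_offset(struct srp_device *dev, u64 addr)
 *	{
 *		return addr & ~dev->mr_page_mask;
 *	}
 */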

struct srp_host {
	struct srp_device	*srp_dev;
	u8			port;
	struct device		dev;
	struct list_head	target_list;
	spinlock_t		target_lock;
	struct completion	released;
	struct list_head	list;
	struct mutex		add_target_mutex;
};

struct srp_request {
	struct scsi_cmnd	*scmnd;
	struct srp_iu		*cmd;
	union {
		struct ib_pool_fmr **fmr_list;
		struct srp_fr_desc **fr_list;
	};
	u64			*map_page;
	struct srp_direct_buf	*indirect_desc;
	dma_addr_t		indirect_dma_addr;
	short			nmdesc;
	struct ib_cqe		reg_cqe;
};

/**
 * struct srp_rdma_ch
 * @comp_vector: Completion vector used by this RDMA channel.
 * @max_it_iu_len: Maximum initiator-to-target information unit length.
 * @max_ti_iu_len: Maximum target-to-initiator information unit length.
 */
struct srp_rdma_ch {
	/* These are RW in the hot path, and commonly used together */
	struct list_head	free_tx;
	spinlock_t		lock;
	s32			req_lim;

	/* These are read-only in the hot path */
	struct srp_target_port	*target ____cacheline_aligned_in_smp;
	struct ib_cq		*send_cq;
	struct ib_cq		*recv_cq;
	struct ib_qp		*qp;
	union {
		struct ib_fmr_pool	*fmr_pool;
		struct srp_fr_pool	*fr_pool;
	};
	uint32_t		max_it_iu_len;
	uint32_t		max_ti_iu_len;
	u8			max_imm_sge;
	bool			use_imm_data;

	/* Everything above this point is used in the hot path of
	 * command processing. Try to keep them packed into cachelines.
	 */

	struct completion	done;
	int			status;

	union {
		struct ib_cm {
			struct sa_path_rec	path;
			struct ib_sa_query	*path_query;
			int			path_query_id;
			struct ib_cm_id		*cm_id;
		} ib_cm;
		struct rdma_cm {
			struct rdma_cm_id	*cm_id;
		} rdma_cm;
	};

	struct srp_iu		**tx_ring;
	struct srp_iu		**rx_ring;
	struct srp_request	*req_ring;
	int			comp_vector;

	u64			tsk_mgmt_tag;
	struct completion	tsk_mgmt_done;
	u8			tsk_mgmt_status;
	bool			connected;
};
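
/*
 * Illustrative sketch, not taken from ib_srp.c: the @tsk_mgmt_done completion
 * and @tsk_mgmt_status byte are naturally used as a pair, with the issuer of
 * a task-management request waiting for the completion and then reading the
 * status that the response path presumably filled in.  Roughly:
 *
 *	if (wait_for_completion_timeout(&ch->tsk_mgmt_done,
 *			msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)) > 0)
 *		status = ch->tsk_mgmt_status;
 */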

/**
 * struct srp_target_port
 * @comp_vector: Completion vector used by the first RDMA channel created for
 *   this target port.
 */
struct srp_target_port {
	/* read and written in the hot path */
	spinlock_t		lock;

	/* read only in the hot path */
	u32			global_rkey;
	struct srp_rdma_ch	*ch;
	struct net		*net;
	u32			ch_count;
	u32			lkey;
	enum srp_target_state	state;
	unsigned int		cmd_sg_cnt;
	unsigned int		indirect_size;
	bool			allow_ext_sg;

	/* other member variables */
	union ib_gid		sgid;
	__be64			id_ext;
	__be64			ioc_guid;
	__be64			initiator_ext;
	u16			io_class;
	struct srp_host		*srp_host;
	struct Scsi_Host	*scsi_host;
	struct srp_rport	*rport;
	char			target_name[32];
	unsigned int		scsi_id;
	unsigned int		sg_tablesize;
	unsigned int		target_can_queue;
	int			mr_pool_size;
	int			mr_per_cmd;
	int			queue_size;
	int			req_ring_size;
	int			comp_vector;
	int			tl_retry_count;

	bool			using_rdma_cm;

	union {
		struct {
			__be64			service_id;
			union ib_gid		orig_dgid;
			__be16			pkey;
		} ib_cm;
		struct {
			union {
				struct sockaddr_in	ip4;
				struct sockaddr_in6	ip6;
				struct sockaddr		sa;
				struct sockaddr_storage	ss;
			} src;
			union {
				struct sockaddr_in	ip4;
				struct sockaddr_in6	ip6;
				struct sockaddr		sa;
				struct sockaddr_storage	ss;
			} dst;
			bool			src_specified;
		} rdma_cm;
	};

	u32			rq_tmo_jiffies;

	int			zero_req_lim;

	struct work_struct	tl_err_work;
	struct work_struct	remove_work;

	struct list_head	list;
	bool			qp_in_error;
};
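
/*
 * Illustrative note, not part of the original header: @using_rdma_cm selects
 * which member of the connection-manager unions (here and in struct
 * srp_rdma_ch) is valid, e.g.:
 *
 *	if (target->using_rdma_cm)
 *		... use ch->rdma_cm.cm_id and target->rdma_cm.dst ...
 *	else
 *		... use ch->ib_cm.cm_id, target->ib_cm.pkey and
 *		    target->ib_cm.service_id ...
 */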

struct srp_iu {
	struct list_head	list;
	u64			dma;
	void			*buf;
	size_t			size;
	enum dma_data_direction	direction;
	u32			num_sge;
	struct ib_sge		sge[SRP_MAX_SGE];
	struct ib_cqe		cqe;
};

/**
 * struct srp_fr_desc - fast registration work request arguments
 * @entry: Entry in srp_fr_pool.free_list.
 * @mr: Memory region.
 */
struct srp_fr_desc {
	struct list_head	entry;
	struct ib_mr		*mr;
};

/**
 * struct srp_fr_pool - pool of fast registration descriptors
 *
 * An entry is available for allocation if and only if it occurs in @free_list.
 *
 * @size:      Number of descriptors in this pool.
 * @max_page_list_len: Maximum fast registration work request page list length.
 * @lock:      Protects free_list.
 * @free_list: List of free descriptors.
 * @desc:      Fast registration descriptor pool.
 */
struct srp_fr_pool {
	int			size;
	int			max_page_list_len;
	spinlock_t		lock;
	struct list_head	free_list;
	struct srp_fr_desc	desc[0];
};
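
/*
 * Hypothetical helper, shown only to make the rule documented above concrete
 * (a descriptor is free iff it is on @free_list, and @lock protects that
 * list); it is a sketch, not necessarily the code in ib_srp.c.
 *
 *	static struct srp_fr_desc *srp_fr_pool_get(struct srp_fr_pool *pool)
 *	{
 *		struct srp_fr_desc *d = NULL;
 *		unsigned long flags;
 *
 *		spin_lock_irqsave(&pool->lock, flags);
 *		if (!list_empty(&pool->free_list)) {
 *			d = list_first_entry(&pool->free_list,
 *					     struct srp_fr_desc, entry);
 *			list_del(&d->entry);
 *		}
 *		spin_unlock_irqrestore(&pool->lock, flags);
 *		return d;
 *	}
 */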

/**
 * struct srp_map_state - per-request DMA memory mapping state
 * @desc: Pointer to the element of the SRP buffer descriptor array
 *	that is being filled in.
 * @pages: Array with DMA addresses of pages being considered for
 *	memory registration.
 * @base_dma_addr: DMA address of the first page that has not yet been mapped.
 * @dma_len: Number of bytes that will be registered with the next
 *	FMR or FR memory registration call.
 * @total_len: Total number of bytes in the sg-list being mapped.
 * @npages: Number of page addresses in the pages[] array.
 * @nmdesc: Number of FMR or FR memory descriptors used for mapping.
 * @ndesc: Number of SRP buffer descriptors that have been filled in.
 */
struct srp_map_state {
	union {
		struct {
			struct ib_pool_fmr **next;
			struct ib_pool_fmr **end;
		} fmr;
		struct {
			struct srp_fr_desc **next;
			struct srp_fr_desc **end;
		} fr;
		struct {
			void		**next;
			void		**end;
		} gen;
	};
	struct srp_direct_buf	*desc;
	union {
		u64			*pages;
		struct scatterlist	*sg;
	};
	dma_addr_t		base_dma_addr;
	u32			dma_len;
	u32			total_len;
	unsigned int		npages;
	unsigned int		nmdesc;
	unsigned int		ndesc;
};
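
/*
 * Illustrative sketch, not copied from ib_srp.c: filling in one SRP direct
 * buffer descriptor from this mapping state might look roughly like the
 * following (struct srp_direct_buf comes from <scsi/srp.h>).
 *
 *	static void srp_map_desc(struct srp_map_state *state, dma_addr_t addr,
 *				 unsigned int len, u32 rkey)
 *	{
 *		struct srp_direct_buf *desc = state->desc;
 *
 *		desc->va  = cpu_to_be64(addr);
 *		desc->key = cpu_to_be32(rkey);
 *		desc->len = cpu_to_be32(len);
 *
 *		state->total_len += len;
 *		state->desc++;
 *		state->ndesc++;
 *	}
 */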

#endif /* IB_SRP_H */