// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause

/* Authors: Bernard Metzler <bmt@zurich.ibm.com> */
/* Copyright (c) 2008-2019, IBM Corporation */

#include <linux/init.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <net/net_namespace.h>
#include <linux/rtnetlink.h>
#include <linux/if_arp.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>

#include <net/addrconf.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/rdma_netlink.h>
#include <linux/kthread.h>

#include "siw.h"
#include "siw_verbs.h"

MODULE_AUTHOR("Bernard Metzler");
MODULE_DESCRIPTION("Software iWARP Driver");
MODULE_LICENSE("Dual BSD/GPL");

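/*
 * Compile-time tunables: the following are const, not module
 * parameters; changing any of them requires rebuilding the module.
 */
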
/* transmit from user buffer, if possible */
const bool zcopy_tx = true;

/* Restrict usage of GSO if the hardware peer iWARP is unable to
 * process large packets. try_gso = true lets siw try to use local
 * GSO if the peer agrees. Not using GSO severely limits siw's
 * maximum tx bandwidth.
 */
const bool try_gso;

/* Attach siw also to loopback devices */
const bool loopback_enabled = true;

/* We try to negotiate CRC on, if true */
const bool mpa_crc_required;

/* MPA CRC on/off enforced */
const bool mpa_crc_strict;

/* Control TCP_NODELAY socket option */
const bool siw_tcp_nagle;

/* Select MPA version to be used during connection setup */
u_char mpa_version = MPA_REVISION_2;

/* Select MPA P2P mode (additional handshake during connection
 * setup), if true.
 */
const bool peer_to_peer;

struct task_struct *siw_tx_thread[NR_CPUS];
struct crypto_shash *siw_crypto_shash;

static int siw_device_register(struct siw_device *sdev, const char *name)
{
	struct ib_device *base_dev = &sdev->base_dev;
	static int dev_id = 1;
	int rv;

	sdev->vendor_part_id = dev_id++;

	rv = ib_register_device(base_dev, name, NULL);
	if (rv) {
		pr_warn("siw: device registration error %d\n", rv);
		return rv;
	}

	siw_dbg(base_dev, "HWaddr=%pM\n", sdev->netdev->dev_addr);

	return 0;
}

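/*
 * Runs as the ->dealloc_driver hook of siw_device_ops when the RDMA
 * core tears down the device: release the QP and memory ID tables.
 */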
static void siw_device_cleanup(struct ib_device *base_dev)
{
	struct siw_device *sdev = to_siw_dev(base_dev);

	xa_destroy(&sdev->qp_xa);
	xa_destroy(&sdev->mem_xa);
}

static int siw_create_tx_threads(void)
{
	int cpu, assigned = 0;

	for_each_online_cpu(cpu) {
		/* Skip HT cores */
		if (cpu % cpumask_weight(topology_sibling_cpumask(cpu)))
			continue;

		siw_tx_thread[cpu] =
			kthread_run_on_cpu(siw_run_sq,
					   (unsigned long *)(long)cpu,
					   cpu, "siw_tx/%u");
		if (IS_ERR(siw_tx_thread[cpu])) {
			siw_tx_thread[cpu] = NULL;
			continue;
		}

		assigned++;
	}
	return assigned;
}

static int siw_dev_qualified(struct net_device *netdev)
{
	/*
	 * Additional hardware support can be added here
	 * (e.g. ARPHRD_FDDI, ARPHRD_ATM, ...) - see
	 * <linux/if_arp.h> for type identifiers.
	 */
	if (netdev->type == ARPHRD_ETHER || netdev->type == ARPHRD_IEEE802 ||
	    (netdev->type == ARPHRD_LOOPBACK && loopback_enabled))
		return 1;

	return 0;
}

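/*
 * Per-CPU count of QPs currently bound to the CPU's TX thread;
 * siw_get_tx_cpu() selects the CPU with the smallest count.
 */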
static DEFINE_PER_CPU(atomic_t, siw_use_cnt);

static struct {
	struct cpumask **tx_valid_cpus;
	int num_nodes;
} siw_cpu_info;

static int siw_init_cpulist(void)
{
	int i, num_nodes = nr_node_ids;

	memset(siw_tx_thread, 0, sizeof(siw_tx_thread));

	siw_cpu_info.num_nodes = num_nodes;

	siw_cpu_info.tx_valid_cpus =
		kcalloc(num_nodes, sizeof(struct cpumask *), GFP_KERNEL);
	if (!siw_cpu_info.tx_valid_cpus) {
		siw_cpu_info.num_nodes = 0;
		return -ENOMEM;
	}
	for (i = 0; i < siw_cpu_info.num_nodes; i++) {
		siw_cpu_info.tx_valid_cpus[i] =
			kzalloc(sizeof(struct cpumask), GFP_KERNEL);
		if (!siw_cpu_info.tx_valid_cpus[i])
			goto out_err;

		cpumask_clear(siw_cpu_info.tx_valid_cpus[i]);
	}
	for_each_possible_cpu(i)
		cpumask_set_cpu(i, siw_cpu_info.tx_valid_cpus[cpu_to_node(i)]);

	return 0;

out_err:
	siw_cpu_info.num_nodes = 0;
	while (--i >= 0)
		kfree(siw_cpu_info.tx_valid_cpus[i]);
	kfree(siw_cpu_info.tx_valid_cpus);
	siw_cpu_info.tx_valid_cpus = NULL;

	return -ENOMEM;
}

static void siw_destroy_cpulist(void)
{
	int i = 0;

	while (i < siw_cpu_info.num_nodes)
		kfree(siw_cpu_info.tx_valid_cpus[i++]);

	kfree(siw_cpu_info.tx_valid_cpus);
}

/*
 * Choose the CPU with the least number of active QPs from the NUMA
 * node of the TX interface.
 */
int siw_get_tx_cpu(struct siw_device *sdev)
{
	const struct cpumask *tx_cpumask;
	int i, num_cpus, cpu, min_use, node = sdev->numa_node, tx_cpu = -1;

	if (node < 0)
		tx_cpumask = cpu_online_mask;
	else
		tx_cpumask = siw_cpu_info.tx_valid_cpus[node];

	num_cpus = cpumask_weight(tx_cpumask);
	if (!num_cpus) {
		/* no CPU on this NUMA node */
		tx_cpumask = cpu_online_mask;
		num_cpus = cpumask_weight(tx_cpumask);
	}
	if (!num_cpus)
		goto out;

	cpu = cpumask_first(tx_cpumask);

	for (i = 0, min_use = SIW_MAX_QP; i < num_cpus;
	     i++, cpu = cpumask_next(cpu, tx_cpumask)) {
		int usage;

		/* Skip any cores which have no TX thread */
		if (!siw_tx_thread[cpu])
			continue;

		usage = atomic_read(&per_cpu(siw_use_cnt, cpu));
		if (usage <= min_use) {
			tx_cpu = cpu;
			min_use = usage;
		}
	}
	siw_dbg(&sdev->base_dev,
		"tx cpu %d, node %d, %d qp's\n", tx_cpu, node, min_use);

out:
	if (tx_cpu >= 0)
		atomic_inc(&per_cpu(siw_use_cnt, tx_cpu));
	else
		pr_warn("siw: no tx cpu found\n");

	return tx_cpu;
}

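/* Drop the per-CPU QP reference taken by a successful siw_get_tx_cpu() */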
void siw_put_tx_cpu(int cpu)
{
	atomic_dec(&per_cpu(siw_use_cnt, cpu));
}

static struct ib_qp *siw_get_base_qp(struct ib_device *base_dev, int id)
{
	struct siw_qp *qp = siw_qp_id2obj(to_siw_dev(base_dev), id);

	if (qp) {
		/*
		 * siw_qp_id2obj() increments object reference count
		 */
		siw_qp_put(qp);
		return &qp->base_qp;
	}
	return NULL;
}

static const struct ib_device_ops siw_device_ops = {
	.owner = THIS_MODULE,
	.uverbs_abi_ver = SIW_ABI_VERSION,
	.driver_id = RDMA_DRIVER_SIW,

	.alloc_mr = siw_alloc_mr,
	.alloc_pd = siw_alloc_pd,
	.alloc_ucontext = siw_alloc_ucontext,
	.create_cq = siw_create_cq,
	.create_qp = siw_create_qp,
	.create_srq = siw_create_srq,
	.dealloc_driver = siw_device_cleanup,
	.dealloc_pd = siw_dealloc_pd,
	.dealloc_ucontext = siw_dealloc_ucontext,
	.dereg_mr = siw_dereg_mr,
	.destroy_cq = siw_destroy_cq,
	.destroy_qp = siw_destroy_qp,
	.destroy_srq = siw_destroy_srq,
	.get_dma_mr = siw_get_dma_mr,
	.get_port_immutable = siw_get_port_immutable,
	.iw_accept = siw_accept,
	.iw_add_ref = siw_qp_get_ref,
	.iw_connect = siw_connect,
	.iw_create_listen = siw_create_listen,
	.iw_destroy_listen = siw_destroy_listen,
	.iw_get_qp = siw_get_base_qp,
	.iw_reject = siw_reject,
	.iw_rem_ref = siw_qp_put_ref,
	.map_mr_sg = siw_map_mr_sg,
	.mmap = siw_mmap,
	.mmap_free = siw_mmap_free,
	.modify_qp = siw_verbs_modify_qp,
	.modify_srq = siw_modify_srq,
	.poll_cq = siw_poll_cq,
	.post_recv = siw_post_receive,
	.post_send = siw_post_send,
	.post_srq_recv = siw_post_srq_recv,
	.query_device = siw_query_device,
	.query_gid = siw_query_gid,
	.query_port = siw_query_port,
	.query_qp = siw_query_qp,
	.query_srq = siw_query_srq,
	.req_notify_cq = siw_req_notify_cq,
	.reg_user_mr = siw_reg_user_mr,

	INIT_RDMA_OBJ_SIZE(ib_cq, siw_cq, base_cq),
	INIT_RDMA_OBJ_SIZE(ib_pd, siw_pd, base_pd),
	INIT_RDMA_OBJ_SIZE(ib_qp, siw_qp, base_qp),
	INIT_RDMA_OBJ_SIZE(ib_srq, siw_srq, base_srq),
	INIT_RDMA_OBJ_SIZE(ib_ucontext, siw_ucontext, base_ucontext),
};

static struct siw_device *siw_device_create(struct net_device *netdev)
{
	struct siw_device *sdev = NULL;
	struct ib_device *base_dev;
	int rv;

	sdev = ib_alloc_device(siw_device, base_dev);
	if (!sdev)
		return NULL;

	base_dev = &sdev->base_dev;

	sdev->netdev = netdev;

	if (netdev->type != ARPHRD_LOOPBACK) {
		addrconf_addr_eui48((unsigned char *)&base_dev->node_guid,
				    netdev->dev_addr);
	} else {
		/*
		 * The loopback device does not have a HW address,
		 * but the connection management lib expects gid != 0
		 */
		size_t len = min_t(size_t, strlen(base_dev->name), 6);
		char addr[6] = { };

		memcpy(addr, base_dev->name, len);
		addrconf_addr_eui48((unsigned char *)&base_dev->node_guid,
				    addr);
	}

	base_dev->uverbs_cmd_mask |= BIT_ULL(IB_USER_VERBS_CMD_POST_SEND);

	base_dev->node_type = RDMA_NODE_RNIC;
	memcpy(base_dev->node_desc, SIW_NODE_DESC_COMMON,
	       sizeof(SIW_NODE_DESC_COMMON));

	/*
	 * Current model (one-to-one device association):
	 * One Softiwarp device per net_device or, equivalently,
	 * per physical port.
	 */
	base_dev->phys_port_cnt = 1;
	base_dev->num_comp_vectors = num_possible_cpus();

	xa_init_flags(&sdev->qp_xa, XA_FLAGS_ALLOC1);
	xa_init_flags(&sdev->mem_xa, XA_FLAGS_ALLOC1);

	ib_set_device_ops(base_dev, &siw_device_ops);
	rv = ib_device_set_netdev(base_dev, netdev, 1);
	if (rv)
		goto error;

	memcpy(base_dev->iw_ifname, netdev->name,
	       sizeof(base_dev->iw_ifname));

	/* Disable TCP port mapping */
	base_dev->iw_driver_flags = IW_F_NO_PORT_MAP;

	sdev->attrs.max_qp = SIW_MAX_QP;
	sdev->attrs.max_qp_wr = SIW_MAX_QP_WR;
	sdev->attrs.max_ord = SIW_MAX_ORD_QP;
	sdev->attrs.max_ird = SIW_MAX_IRD_QP;
	sdev->attrs.max_sge = SIW_MAX_SGE;
	sdev->attrs.max_sge_rd = SIW_MAX_SGE_RD;
	sdev->attrs.max_cq = SIW_MAX_CQ;
	sdev->attrs.max_cqe = SIW_MAX_CQE;
	sdev->attrs.max_mr = SIW_MAX_MR;
	sdev->attrs.max_pd = SIW_MAX_PD;
	sdev->attrs.max_mw = SIW_MAX_MW;
	sdev->attrs.max_srq = SIW_MAX_SRQ;
	sdev->attrs.max_srq_wr = SIW_MAX_SRQ_WR;
	sdev->attrs.max_srq_sge = SIW_MAX_SGE;

	INIT_LIST_HEAD(&sdev->cep_list);
	INIT_LIST_HEAD(&sdev->qp_list);

	atomic_set(&sdev->num_ctx, 0);
	atomic_set(&sdev->num_srq, 0);
	atomic_set(&sdev->num_qp, 0);
	atomic_set(&sdev->num_cq, 0);
	atomic_set(&sdev->num_mr, 0);
	atomic_set(&sdev->num_pd, 0);

	sdev->numa_node = dev_to_node(&netdev->dev);
	spin_lock_init(&sdev->lock);

	return sdev;
error:
	ib_dealloc_device(base_dev);

	return NULL;
}

/*
 * Network link becomes unavailable. Mark all
 * affected QPs accordingly.
 */
static void siw_netdev_down(struct work_struct *work)
{
	struct siw_device *sdev =
		container_of(work, struct siw_device, netdev_down);

	struct siw_qp_attrs qp_attrs;
	struct list_head *pos, *tmp;

	memset(&qp_attrs, 0, sizeof(qp_attrs));
	qp_attrs.state = SIW_QP_STATE_ERROR;

	list_for_each_safe(pos, tmp, &sdev->qp_list) {
		struct siw_qp *qp = list_entry(pos, struct siw_qp, devq);

		down_write(&qp->state_lock);
		WARN_ON(siw_qp_modify(qp, &qp_attrs, SIW_QP_ATTR_STATE));
		up_write(&qp->state_lock);
	}
	ib_device_put(&sdev->base_dev);
}

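/*
 * Hold a device reference while the netdev_down work is pending;
 * siw_netdev_down() drops it after moving all QPs to ERROR state.
 */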
static void siw_device_goes_down(struct siw_device *sdev)
{
	if (ib_device_try_get(&sdev->base_dev)) {
		INIT_WORK(&sdev->netdev_down, siw_netdev_down);
		schedule_work(&sdev->netdev_down);
	}
}

static int siw_netdev_event(struct notifier_block *nb, unsigned long event,
			    void *arg)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(arg);
	struct ib_device *base_dev;
	struct siw_device *sdev;

	dev_dbg(&netdev->dev, "siw: event %lu\n", event);

	if (dev_net(netdev) != &init_net)
		return NOTIFY_OK;

	base_dev = ib_device_get_by_netdev(netdev, RDMA_DRIVER_SIW);
	if (!base_dev)
		return NOTIFY_OK;

	sdev = to_siw_dev(base_dev);

	switch (event) {
	case NETDEV_UP:
		sdev->state = IB_PORT_ACTIVE;
		siw_port_event(sdev, 1, IB_EVENT_PORT_ACTIVE);
		break;

	case NETDEV_GOING_DOWN:
		siw_device_goes_down(sdev);
		break;

	case NETDEV_DOWN:
		sdev->state = IB_PORT_DOWN;
		siw_port_event(sdev, 1, IB_EVENT_PORT_ERR);
		break;

	case NETDEV_REGISTER:
		/*
		 * Device registration is now handled only by
		 * rdma netlink commands, so we should never
		 * get here with a valid siw device.
		 */
		siw_dbg(base_dev, "unexpected NETDEV_REGISTER event\n");
		break;

	case NETDEV_UNREGISTER:
		ib_unregister_device_queued(&sdev->base_dev);
		break;

	case NETDEV_CHANGEADDR:
		siw_port_event(sdev, 1, IB_EVENT_LID_CHANGE);
		break;
	/*
	 * Todo: The netdev events below are currently not handled.
	 */
	case NETDEV_CHANGEMTU:
	case NETDEV_CHANGE:
		break;

	default:
		break;
	}
	ib_device_put(&sdev->base_dev);

	return NOTIFY_OK;
}

static struct notifier_block siw_netdev_nb = {
	.notifier_call = siw_netdev_event,
};

static int siw_newlink(const char *basedev_name, struct net_device *netdev)
{
	struct ib_device *base_dev;
	struct siw_device *sdev = NULL;
	int rv = -ENOMEM;

	if (!siw_dev_qualified(netdev))
		return -EINVAL;

	base_dev = ib_device_get_by_netdev(netdev, RDMA_DRIVER_SIW);
	if (base_dev) {
		ib_device_put(base_dev);
		return -EEXIST;
	}
	sdev = siw_device_create(netdev);
	if (sdev) {
		dev_dbg(&netdev->dev, "siw: new device\n");

		if (netif_running(netdev) && netif_carrier_ok(netdev))
			sdev->state = IB_PORT_ACTIVE;
		else
			sdev->state = IB_PORT_DOWN;

		rv = siw_device_register(sdev, basedev_name);
		if (rv)
			ib_dealloc_device(&sdev->base_dev);
	}
	return rv;
}

static struct rdma_link_ops siw_link_ops = {
	.type = "siw",
	.newlink = siw_newlink,
};
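
/*
 * With these link ops registered, a siw device is created from user
 * space through the rdma netlink interface, e.g. with iproute2
 * (device and interface names below are examples):
 *
 *   rdma link add siw0 type siw netdev eth0
 */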

/*
 * siw_init_module - Initialize Softiwarp module and register with netdev
 * subsystem.
 */
static __init int siw_init_module(void)
{
	int rv;
	int nr_cpu;

	if (SENDPAGE_THRESH < SIW_MAX_INLINE) {
		pr_info("siw: sendpage threshold too small: %u\n",
			(int)SENDPAGE_THRESH);
		rv = -EINVAL;
		goto out_error;
	}
	rv = siw_init_cpulist();
	if (rv)
		goto out_error;

	rv = siw_cm_init();
	if (rv)
		goto out_error;

	if (!siw_create_tx_threads()) {
		pr_info("siw: Could not start any TX thread\n");
		rv = -ENOMEM;
		goto out_error;
	}
	/*
	 * Locate the CRC32c algorithm. If unsuccessful, fail
	 * loading siw only if CRC is required.
	 */
	siw_crypto_shash = crypto_alloc_shash("crc32c", 0, 0);
	if (IS_ERR(siw_crypto_shash)) {
		pr_info("siw: Loading CRC32c failed: %ld\n",
			PTR_ERR(siw_crypto_shash));
		siw_crypto_shash = NULL;
		if (mpa_crc_required) {
			rv = -EOPNOTSUPP;
			goto out_error;
		}
	}
	rv = register_netdevice_notifier(&siw_netdev_nb);
	if (rv)
		goto out_error;

	rdma_link_register(&siw_link_ops);

	pr_info("SoftiWARP attached\n");
	return 0;

out_error:
	for (nr_cpu = 0; nr_cpu < nr_cpu_ids; nr_cpu++) {
		if (siw_tx_thread[nr_cpu]) {
			siw_stop_tx_thread(nr_cpu);
			siw_tx_thread[nr_cpu] = NULL;
		}
	}
	if (siw_crypto_shash)
		crypto_free_shash(siw_crypto_shash);

	pr_info("SoftiWARP attach failed. Error: %d\n", rv);

	siw_cm_exit();
	siw_destroy_cpulist();

	return rv;
}

static void __exit siw_exit_module(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		if (siw_tx_thread[cpu]) {
			siw_stop_tx_thread(cpu);
			siw_tx_thread[cpu] = NULL;
		}
	}
	unregister_netdevice_notifier(&siw_netdev_nb);
	rdma_link_unregister(&siw_link_ops);
	ib_unregister_driver(RDMA_DRIVER_SIW);

	siw_cm_exit();

	siw_destroy_cpulist();

	if (siw_crypto_shash)
		crypto_free_shash(siw_crypto_shash);

	pr_info("SoftiWARP detached\n");
}

module_init(siw_init_module);
module_exit(siw_exit_module);

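/* Allow auto-loading of the module on "rdma link add <name> type siw" */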
MODULE_ALIAS_RDMA_LINK("siw");