/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * Definitions for IB environment
 *
 * Copyright IBM Corp. 2016
 *
 * Author(s):  Ursula Braun <ubraun@linux.vnet.ibm.com>
 */

#ifndef _SMC_IB_H
#define _SMC_IB_H

#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/mutex.h>
#include <linux/wait.h>
#include <rdma/ib_verbs.h>
#include <net/smc.h>

#define SMC_MAX_PORTS		2	/* Max # of ports */
#define SMC_GID_SIZE		sizeof(union ib_gid)

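/* max. # of scatter/gather elements in one send work request */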
#define SMC_IB_MAX_SEND_SGE	2

struct smc_ib_devices {			/* list of smc ib devices definition */
	struct list_head	list;
	struct mutex		mutex;	/* protects list of smc ib devices */
};

extern struct smc_ib_devices	smc_ib_devices; /* list of smc ib devices */
extern struct smc_lgr_list	smc_lgr_list;	/* list of linkgroups */

struct smc_ib_device {				/* ib-device infos for smc */
	struct list_head	list;
	struct ib_device	*ibdev;
	struct ib_port_attr	pattr[SMC_MAX_PORTS];	/* ib dev. port attrs */
	struct ib_event_handler	event_handler;	/* global ib_event handler */
	struct ib_cq		*roce_cq_send;	/* send completion queue */
	struct ib_cq		*roce_cq_recv;	/* recv completion queue */
	struct tasklet_struct	send_tasklet;	/* called by send cq handler */
	struct tasklet_struct	recv_tasklet;	/* called by recv cq handler */
	char			mac[SMC_MAX_PORTS][ETH_ALEN];
						/* mac address per port */
	u8			pnetid[SMC_MAX_PORTS][SMC_MAX_PNETID_LEN];
						/* pnetid per port */
	bool			pnetid_by_user[SMC_MAX_PORTS];
						/* pnetid defined by user? */
	u8			initialized : 1; /* ib dev CQ, evthdl done */
	struct work_struct	port_event_work;
	unsigned long		port_event_mask;
	DECLARE_BITMAP(ports_going_away, SMC_MAX_PORTS);
	atomic_t		lnk_cnt;	/* number of links on ibdev */
	wait_queue_head_t	lnks_deleted;	/* wait for removal of all links */
	struct mutex		mutex;		/* protect dev setup+cleanup */
	atomic_t		lnk_cnt_by_port[SMC_MAX_PORTS];
						/* number of links per port */
	int			ndev_ifidx[SMC_MAX_PORTS]; /* ndev if indexes */
};

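/* return the IPv4 address embedded in a RoCE GID: an IPv4-mapped (or
 * IPv4-compatible) IPv6 address carries it in the last four bytes;
 * INADDR_NONE is returned for any other GID format
 */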
static inline __be32 smc_ib_gid_to_ipv4(u8 gid[SMC_GID_SIZE])
{
	struct in6_addr *addr6 = (struct in6_addr *)gid;

	if (ipv6_addr_v4mapped(addr6) ||
	    !(addr6->s6_addr32[0] | addr6->s6_addr32[1] | addr6->s6_addr32[2]))
		return addr6->s6_addr32[3];
	return cpu_to_be32(INADDR_NONE);
}

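/* return the net namespace the RDMA core device belongs to,
 * or NULL if no ib_device is attached
 */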
static inline struct net *smc_ib_net(struct smc_ib_device *smcibdev)
{
	if (smcibdev && smcibdev->ibdev)
		return read_pnet(&smcibdev->ibdev->coredev.rdma_net);
	return NULL;
}

struct smc_init_info_smcrv2;
struct smc_buf_desc;
struct smc_link;

void smc_ib_ndev_change(struct net_device *ndev, unsigned long event);
int smc_ib_register_client(void) __init;
void smc_ib_unregister_client(void);
bool smc_ib_port_active(struct smc_ib_device *smcibdev, u8 ibport);
int smc_ib_buf_map_sg(struct smc_link *lnk,
		      struct smc_buf_desc *buf_slot,
		      enum dma_data_direction data_direction);
void smc_ib_buf_unmap_sg(struct smc_link *lnk,
			 struct smc_buf_desc *buf_slot,
			 enum dma_data_direction data_direction);
void smc_ib_dealloc_protection_domain(struct smc_link *lnk);
int smc_ib_create_protection_domain(struct smc_link *lnk);
void smc_ib_destroy_queue_pair(struct smc_link *lnk);
int smc_ib_create_queue_pair(struct smc_link *lnk);
int smc_ib_ready_link(struct smc_link *lnk);
int smc_ib_modify_qp_rts(struct smc_link *lnk);
int smc_ib_modify_qp_reset(struct smc_link *lnk);
int smc_ib_modify_qp_error(struct smc_link *lnk);
long smc_ib_setup_per_ibdev(struct smc_ib_device *smcibdev);
int smc_ib_get_memory_region(struct ib_pd *pd, int access_flags,
			     struct smc_buf_desc *buf_slot, u8 link_idx);
void smc_ib_put_memory_region(struct ib_mr *mr);
void smc_ib_sync_sg_for_cpu(struct smc_link *lnk,
			    struct smc_buf_desc *buf_slot,
			    enum dma_data_direction data_direction);
void smc_ib_sync_sg_for_device(struct smc_link *lnk,
			       struct smc_buf_desc *buf_slot,
			       enum dma_data_direction data_direction);
int smc_ib_determine_gid(struct smc_ib_device *smcibdev, u8 ibport,
			 unsigned short vlan_id, u8 gid[], u8 *sgid_index,
			 struct smc_init_info_smcrv2 *smcrv2);
int smc_ib_find_route(__be32 saddr, __be32 daddr,
		      u8 nexthop_mac[], u8 *uses_gateway);
bool smc_ib_is_valid_local_systemid(void);
int smcr_nl_get_device(struct sk_buff *skb, struct netlink_callback *cb);
#endif