// SPDX-License-Identifier: GPL-2.0-only

#include <linux/ethtool_netlink.h>
#include <net/udp_tunnel.h>
#include <net/vxlan.h>

#include "bitset.h"
#include "common.h"
#include "netlink.h"
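/* TUNNEL_INFO_GET requests accept only the common request header. */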
const struct nla_policy ethnl_tunnel_info_get_policy[] = {
	[ETHTOOL_A_TUNNEL_INFO_HEADER]		=
		NLA_POLICY_NESTED(ethnl_header_policy),
};
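/* The ETHTOOL_UDP_TUNNEL_TYPE_* uAPI values are defined as the bit
 * positions of the corresponding kernel-internal UDP_TUNNEL_TYPE_*
 * flags; the asserts below keep the two in sync.
 */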
static_assert(ETHTOOL_UDP_TUNNEL_TYPE_VXLAN == ilog2(UDP_TUNNEL_TYPE_VXLAN));
static_assert(ETHTOOL_UDP_TUNNEL_TYPE_GENEVE == ilog2(UDP_TUNNEL_TYPE_GENEVE));
static_assert(ETHTOOL_UDP_TUNNEL_TYPE_VXLAN_GPE ==
	      ilog2(UDP_TUNNEL_TYPE_VXLAN_GPE));
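/* Space needed for one _UDP_TABLE nest, excluding the port entries:
 * the nest header, the _UDP_TABLE_SIZE attribute and the
 * _UDP_TABLE_TYPES bitset.
 */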
static ssize_t ethnl_udp_table_reply_size(unsigned int types, bool compact)
{
	ssize_t size;

	size = ethnl_bitset32_size(&types, NULL, __ETHTOOL_UDP_TUNNEL_TYPE_CNT,
				   udp_tunnel_type_names, compact);
	if (size < 0)
		return size;

	return size +
		nla_total_size(0) + /* _UDP_TABLE */
		nla_total_size(sizeof(u32)); /* _UDP_TABLE_SIZE */
}
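/* Estimate the reply size for one device: a _UDP_TABLE nest per port
 * table the device reports, plus a synthetic single-entry table for
 * devices which only recognize the IANA-assigned VXLAN port.
 */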
static ssize_t
ethnl_tunnel_info_reply_size(const struct ethnl_req_info *req_base,
			     struct netlink_ext_ack *extack)
{
	bool compact = req_base->flags & ETHTOOL_FLAG_COMPACT_BITSETS;
	const struct udp_tunnel_nic_info *info;
	unsigned int i;
	ssize_t ret;
	size_t size;

	info = req_base->dev->udp_tunnel_nic_info;
	if (!info) {
		NL_SET_ERR_MSG(extack,
			       "device does not report tunnel offload info");
		return -EOPNOTSUPP;
	}

	size = nla_total_size(0); /* _INFO_UDP_PORTS */

	for (i = 0; i < UDP_TUNNEL_NIC_MAX_TABLES; i++) {
		if (!info->tables[i].n_entries)
			break;

		ret = ethnl_udp_table_reply_size(info->tables[i].tunnel_types,
						 compact);
		if (ret < 0)
			return ret;
		size += ret;

		size += udp_tunnel_nic_dump_size(req_base->dev, i);
	}

	if (info->flags & UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN) {
		ret = ethnl_udp_table_reply_size(0, compact);
		if (ret < 0)
			return ret;
		size += ret;

		size += nla_total_size(0) +		 /* _TABLE_ENTRY */
			nla_total_size(sizeof(__be16)) + /* _ENTRY_PORT */
			nla_total_size(sizeof(u32));	 /* _ENTRY_TYPE */
	}

	return size;
}
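/* Fill the _INFO_UDP_PORTS nest.  Each device port table becomes a
 * _UDP_TABLE nest carrying its size, the bitset of tunnel types it can
 * hold and the currently programmed entries.  Devices flagged
 * UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN get an extra fixed table with
 * an empty types bitset and a single VXLAN entry for port 4789.
 */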
static int
ethnl_tunnel_info_fill_reply(const struct ethnl_req_info *req_base,
			     struct sk_buff *skb)
{
	bool compact = req_base->flags & ETHTOOL_FLAG_COMPACT_BITSETS;
	const struct udp_tunnel_nic_info *info;
	struct nlattr *ports, *table, *entry;
	unsigned int i;

	info = req_base->dev->udp_tunnel_nic_info;
	if (!info)
		return -EOPNOTSUPP;

	ports = nla_nest_start(skb, ETHTOOL_A_TUNNEL_INFO_UDP_PORTS);
	if (!ports)
		return -EMSGSIZE;

	for (i = 0; i < UDP_TUNNEL_NIC_MAX_TABLES; i++) {
		if (!info->tables[i].n_entries)
			break;

		table = nla_nest_start(skb, ETHTOOL_A_TUNNEL_UDP_TABLE);
		if (!table)
			goto err_cancel_ports;

		if (nla_put_u32(skb, ETHTOOL_A_TUNNEL_UDP_TABLE_SIZE,
				info->tables[i].n_entries))
			goto err_cancel_table;

		if (ethnl_put_bitset32(skb, ETHTOOL_A_TUNNEL_UDP_TABLE_TYPES,
				       &info->tables[i].tunnel_types, NULL,
				       __ETHTOOL_UDP_TUNNEL_TYPE_CNT,
				       udp_tunnel_type_names, compact))
			goto err_cancel_table;

		if (udp_tunnel_nic_dump_write(req_base->dev, i, skb))
			goto err_cancel_table;

		nla_nest_end(skb, table);
	}

	if (info->flags & UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN) {
		u32 zero = 0;

		table = nla_nest_start(skb, ETHTOOL_A_TUNNEL_UDP_TABLE);
		if (!table)
			goto err_cancel_ports;

		if (nla_put_u32(skb, ETHTOOL_A_TUNNEL_UDP_TABLE_SIZE, 1))
			goto err_cancel_table;

		if (ethnl_put_bitset32(skb, ETHTOOL_A_TUNNEL_UDP_TABLE_TYPES,
				       &zero, NULL,
				       __ETHTOOL_UDP_TUNNEL_TYPE_CNT,
				       udp_tunnel_type_names, compact))
			goto err_cancel_table;

		entry = nla_nest_start(skb, ETHTOOL_A_TUNNEL_UDP_TABLE_ENTRY);
		if (!entry)
			goto err_cancel_table;

		if (nla_put_be16(skb, ETHTOOL_A_TUNNEL_UDP_ENTRY_PORT,
				 htons(IANA_VXLAN_UDP_PORT)) ||
		    nla_put_u32(skb, ETHTOOL_A_TUNNEL_UDP_ENTRY_TYPE,
				ilog2(UDP_TUNNEL_TYPE_VXLAN)))
			goto err_cancel_entry;

		nla_nest_end(skb, entry);
		nla_nest_end(skb, table);
	}

	nla_nest_end(skb, ports);

	return 0;

err_cancel_entry:
	nla_nest_cancel(skb, entry);
err_cancel_table:
	nla_nest_cancel(skb, table);
err_cancel_ports:
	nla_nest_cancel(skb, ports);
	return -EMSGSIZE;
}
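/* Handle a TUNNEL_INFO_GET request targeting a single device. */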
int ethnl_tunnel_info_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct ethnl_req_info req_info = {};
	struct nlattr **tb = info->attrs;
	struct sk_buff *rskb;
	void *reply_payload;
	int reply_len;
	int ret;

	ret = ethnl_parse_header_dev_get(&req_info,
					 tb[ETHTOOL_A_TUNNEL_INFO_HEADER],
					 genl_info_net(info), info->extack,
					 true);
	if (ret < 0)
		return ret;

	rtnl_lock();
	ret = ethnl_tunnel_info_reply_size(&req_info, info->extack);
	if (ret < 0)
		goto err_unlock_rtnl;
	reply_len = ret + ethnl_reply_header_size();

	rskb = ethnl_reply_init(reply_len, req_info.dev,
				ETHTOOL_MSG_TUNNEL_INFO_GET_REPLY,
				ETHTOOL_A_TUNNEL_INFO_HEADER,
				info, &reply_payload);
	if (!rskb) {
		ret = -ENOMEM;
		goto err_unlock_rtnl;
	}

	ret = ethnl_tunnel_info_fill_reply(&req_info, rskb);
	if (ret)
		goto err_free_msg;
	rtnl_unlock();
	ethnl_parse_header_dev_put(&req_info);
	genlmsg_end(rskb, reply_payload);

	return genlmsg_reply(rskb, info);

err_free_msg:
	nlmsg_free(rskb);
err_unlock_rtnl:
	rtnl_unlock();
	ethnl_parse_header_dev_put(&req_info);
	return ret;
}
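/* Dump iteration state, persisted in cb->ctx between dumpit calls. */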
struct ethnl_tunnel_info_dump_ctx {
	struct ethnl_req_info	req_info;
	int			pos_hash;
	int			pos_idx;
};
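/* Dumps iterate all devices, so only validate the request header here
 * and drop the device reference ethnl_parse_header_dev_get() may have
 * taken.
 */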
int ethnl_tunnel_info_start(struct netlink_callback *cb)
{
	const struct genl_dumpit_info *info = genl_dumpit_info(cb);
	struct ethnl_tunnel_info_dump_ctx *ctx = (void *)cb->ctx;
	struct nlattr **tb = info->attrs;
	int ret;

	BUILD_BUG_ON(sizeof(*ctx) > sizeof(cb->ctx));

	memset(ctx, 0, sizeof(*ctx));

	ret = ethnl_parse_header_dev_get(&ctx->req_info,
					 tb[ETHTOOL_A_TUNNEL_INFO_HEADER],
					 sock_net(cb->skb->sk), cb->extack,
					 false);
	if (ctx->req_info.dev) {
		ethnl_parse_header_dev_put(&ctx->req_info);
		ctx->req_info.dev = NULL;
	}

	return ret;
}
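/* Walk the per-netns device index hash and emit one reply per device;
 * devices which do not report tunnel offload info (-EOPNOTSUPP) are
 * skipped.  The current hash bucket and index are saved in the dump
 * context so an interrupted dump can resume where it left off.
 */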
int ethnl_tunnel_info_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct ethnl_tunnel_info_dump_ctx *ctx = (void *)cb->ctx;
	struct net *net = sock_net(skb->sk);
	int s_idx = ctx->pos_idx;
	int h, idx = 0;
	int ret = 0;
	void *ehdr;

	rtnl_lock();
	cb->seq = net->dev_base_seq;
	for (h = ctx->pos_hash; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
		struct hlist_head *head;
		struct net_device *dev;

		head = &net->dev_index_head[h];
		idx = 0;
		hlist_for_each_entry(dev, head, index_hlist) {
			if (idx < s_idx)
				goto cont;

			ehdr = ethnl_dump_put(skb, cb,
					      ETHTOOL_MSG_TUNNEL_INFO_GET_REPLY);
			if (!ehdr) {
				ret = -EMSGSIZE;
				goto out;
			}

			ret = ethnl_fill_reply_header(skb, dev, ETHTOOL_A_TUNNEL_INFO_HEADER);
			if (ret < 0) {
				genlmsg_cancel(skb, ehdr);
				goto out;
			}

			ctx->req_info.dev = dev;
			ret = ethnl_tunnel_info_fill_reply(&ctx->req_info, skb);
			ctx->req_info.dev = NULL;
			if (ret < 0) {
				genlmsg_cancel(skb, ehdr);
				if (ret == -EOPNOTSUPP)
					goto cont;
				goto out;
			}
			genlmsg_end(skb, ehdr);
cont:
			idx++;
		}
	}
out:
	rtnl_unlock();

	ctx->pos_hash = h;
	ctx->pos_idx = idx;
	nl_dump_check_consistent(cb, nlmsg_hdr(skb));

	if (ret == -EMSGSIZE && skb->len)
		return skb->len;
	return ret;
}
298}