// SPDX-License-Identifier: GPL-2.0-only

#include <net/xdp_sock_drv.h>

#include "netlink.h"
#include "common.h"

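/* CHANNELS_GET */

/* Per-request state: the reply data carries the driver-reported
 * struct ethtool_channels next to the common reply base.
 */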
struct channels_req_info {
	struct ethnl_req_info		base;
};

struct channels_reply_data {
	struct ethnl_reply_data		base;
	struct ethtool_channels		channels;
};

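/* Recover the channels-specific reply data from the generic reply base
 * embedded as its first member.
 */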
#define CHANNELS_REPDATA(__reply_base) \
	container_of(__reply_base, struct channels_reply_data, base)

const struct nla_policy ethnl_channels_get_policy[] = {
	[ETHTOOL_A_CHANNELS_HEADER]		=
		NLA_POLICY_NESTED(ethnl_header_policy),
};

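/* GET handler: query the driver's current channel configuration under
 * ethnl_ops_begin()/ethnl_ops_complete() and stash it in the reply data.
 */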
static int channels_prepare_data(const struct ethnl_req_info *req_base,
				 struct ethnl_reply_data *reply_base,
				 struct genl_info *info)
{
	struct channels_reply_data *data = CHANNELS_REPDATA(reply_base);
	struct net_device *dev = reply_base->dev;
	int ret;

	if (!dev->ethtool_ops->get_channels)
		return -EOPNOTSUPP;
	ret = ethnl_ops_begin(dev);
	if (ret < 0)
		return ret;
	dev->ethtool_ops->get_channels(dev, &data->channels);
	ethnl_ops_complete(dev);

	return 0;
}

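/* Worst-case reply size: eight u32 attributes (maximum and current count
 * for the RX, TX, other and combined channel types).
 */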
static int channels_reply_size(const struct ethnl_req_info *req_base,
			       const struct ethnl_reply_data *reply_base)
{
	return nla_total_size(sizeof(u32)) +	/* _CHANNELS_RX_MAX */
	       nla_total_size(sizeof(u32)) +	/* _CHANNELS_TX_MAX */
	       nla_total_size(sizeof(u32)) +	/* _CHANNELS_OTHER_MAX */
	       nla_total_size(sizeof(u32)) +	/* _CHANNELS_COMBINED_MAX */
	       nla_total_size(sizeof(u32)) +	/* _CHANNELS_RX_COUNT */
	       nla_total_size(sizeof(u32)) +	/* _CHANNELS_TX_COUNT */
	       nla_total_size(sizeof(u32)) +	/* _CHANNELS_OTHER_COUNT */
	       nla_total_size(sizeof(u32));	/* _CHANNELS_COMBINED_COUNT */
}

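/* Emit the max/count attribute pair for each channel type the device
 * actually supports (nonzero maximum); any nla_put_u32() failure means
 * the preallocated message ran out of room.
 */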
static int channels_fill_reply(struct sk_buff *skb,
			       const struct ethnl_req_info *req_base,
			       const struct ethnl_reply_data *reply_base)
{
	const struct channels_reply_data *data = CHANNELS_REPDATA(reply_base);
	const struct ethtool_channels *channels = &data->channels;

	if ((channels->max_rx &&
	     (nla_put_u32(skb, ETHTOOL_A_CHANNELS_RX_MAX,
			  channels->max_rx) ||
	      nla_put_u32(skb, ETHTOOL_A_CHANNELS_RX_COUNT,
			  channels->rx_count))) ||
	    (channels->max_tx &&
	     (nla_put_u32(skb, ETHTOOL_A_CHANNELS_TX_MAX,
			  channels->max_tx) ||
	      nla_put_u32(skb, ETHTOOL_A_CHANNELS_TX_COUNT,
			  channels->tx_count))) ||
	    (channels->max_other &&
	     (nla_put_u32(skb, ETHTOOL_A_CHANNELS_OTHER_MAX,
			  channels->max_other) ||
	      nla_put_u32(skb, ETHTOOL_A_CHANNELS_OTHER_COUNT,
			  channels->other_count))) ||
	    (channels->max_combined &&
	     (nla_put_u32(skb, ETHTOOL_A_CHANNELS_COMBINED_MAX,
			  channels->max_combined) ||
	      nla_put_u32(skb, ETHTOOL_A_CHANNELS_COMBINED_COUNT,
			  channels->combined_count))))
		return -EMSGSIZE;

	return 0;
}

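/* Glue for the generic ethtool netlink request handling: sizes of the
 * per-request structures and the callbacks implementing CHANNELS_GET.
 */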
const struct ethnl_request_ops ethnl_channels_request_ops = {
	.request_cmd		= ETHTOOL_MSG_CHANNELS_GET,
	.reply_cmd		= ETHTOOL_MSG_CHANNELS_GET_REPLY,
	.hdr_attr		= ETHTOOL_A_CHANNELS_HEADER,
	.req_info_size		= sizeof(struct channels_req_info),
	.reply_data_size	= sizeof(struct channels_reply_data),

	.prepare_data		= channels_prepare_data,
	.reply_size		= channels_reply_size,
	.fill_reply		= channels_fill_reply,
};

/* CHANNELS_SET */

const struct nla_policy ethnl_channels_set_policy[] = {
	[ETHTOOL_A_CHANNELS_HEADER]		=
		NLA_POLICY_NESTED(ethnl_header_policy),
	[ETHTOOL_A_CHANNELS_RX_COUNT]		= { .type = NLA_U32 },
	[ETHTOOL_A_CHANNELS_TX_COUNT]		= { .type = NLA_U32 },
	[ETHTOOL_A_CHANNELS_OTHER_COUNT]	= { .type = NLA_U32 },
	[ETHTOOL_A_CHANNELS_COMBINED_COUNT]	= { .type = NLA_U32 },
};

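/* SET handler: parse the request header, apply the requested counts on top
 * of the current configuration, validate them (per-type maxima, at least one
 * RX and one TX channel, RSS indirection table, zero-copy AF_XDP sockets)
 * and hand the result to the driver's set_channels(); send a notification
 * on success.
 */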
int ethnl_set_channels(struct sk_buff *skb, struct genl_info *info)
{
	unsigned int from_channel, old_total, i;
	bool mod = false, mod_combined = false;
	struct ethtool_channels channels = {};
	struct ethnl_req_info req_info = {};
	struct nlattr **tb = info->attrs;
	u32 err_attr, max_rx_in_use = 0;
	const struct ethtool_ops *ops;
	struct net_device *dev;
	int ret;

	ret = ethnl_parse_header_dev_get(&req_info,
					 tb[ETHTOOL_A_CHANNELS_HEADER],
					 genl_info_net(info), info->extack,
					 true);
	if (ret < 0)
		return ret;
	dev = req_info.dev;
	ops = dev->ethtool_ops;
	ret = -EOPNOTSUPP;
	if (!ops->get_channels || !ops->set_channels)
		goto out_dev;

	rtnl_lock();
	ret = ethnl_ops_begin(dev);
	if (ret < 0)
		goto out_rtnl;
	ops->get_channels(dev, &channels);
	old_total = channels.combined_count +
		    max(channels.rx_count, channels.tx_count);

	ethnl_update_u32(&channels.rx_count, tb[ETHTOOL_A_CHANNELS_RX_COUNT],
			 &mod);
	ethnl_update_u32(&channels.tx_count, tb[ETHTOOL_A_CHANNELS_TX_COUNT],
			 &mod);
	ethnl_update_u32(&channels.other_count,
			 tb[ETHTOOL_A_CHANNELS_OTHER_COUNT], &mod);
	ethnl_update_u32(&channels.combined_count,
			 tb[ETHTOOL_A_CHANNELS_COMBINED_COUNT], &mod_combined);
	mod |= mod_combined;
	ret = 0;
	if (!mod)
		goto out_ops;

	/* ensure new channel counts are within limits */
	if (channels.rx_count > channels.max_rx)
		err_attr = ETHTOOL_A_CHANNELS_RX_COUNT;
	else if (channels.tx_count > channels.max_tx)
		err_attr = ETHTOOL_A_CHANNELS_TX_COUNT;
	else if (channels.other_count > channels.max_other)
		err_attr = ETHTOOL_A_CHANNELS_OTHER_COUNT;
	else if (channels.combined_count > channels.max_combined)
		err_attr = ETHTOOL_A_CHANNELS_COMBINED_COUNT;
	else
		err_attr = 0;
	if (err_attr) {
		ret = -EINVAL;
		NL_SET_ERR_MSG_ATTR(info->extack, tb[err_attr],
				    "requested channel count exceeds maximum");
		goto out_ops;
	}

	/* ensure there is at least one RX and one TX channel */
	if (!channels.combined_count && !channels.rx_count)
		err_attr = ETHTOOL_A_CHANNELS_RX_COUNT;
	else if (!channels.combined_count && !channels.tx_count)
		err_attr = ETHTOOL_A_CHANNELS_TX_COUNT;
	else
		err_attr = 0;
	if (err_attr) {
		if (mod_combined)
			err_attr = ETHTOOL_A_CHANNELS_COMBINED_COUNT;
		ret = -EINVAL;
		NL_SET_ERR_MSG_ATTR(info->extack, tb[err_attr],
				    "requested channel counts would result in no RX or TX channel being configured");
		goto out_ops;
	}

	/* ensure the new Rx count fits within the configured Rx flow
	 * indirection table settings
	 */
	if (netif_is_rxfh_configured(dev) &&
	    !ethtool_get_max_rxfh_channel(dev, &max_rx_in_use) &&
	    (channels.combined_count + channels.rx_count) <= max_rx_in_use) {
		ret = -EINVAL;
		GENL_SET_ERR_MSG(info, "requested channel counts are too low for existing indirection table settings");
		goto out_ops;
	}

	/* if any channels are being disabled, make sure no zero-copy AF_XDP
	 * socket is bound to a queue that would go away
	 */
	from_channel = channels.combined_count +
		       min(channels.rx_count, channels.tx_count);
	for (i = from_channel; i < old_total; i++)
		if (xsk_get_pool_from_qid(dev, i)) {
			ret = -EINVAL;
			GENL_SET_ERR_MSG(info, "requested channel counts are too low for existing zerocopy AF_XDP sockets");
			goto out_ops;
		}

	ret = dev->ethtool_ops->set_channels(dev, &channels);
	if (ret < 0)
		goto out_ops;
	ethtool_notify(dev, ETHTOOL_MSG_CHANNELS_NTF, NULL);

out_ops:
	ethnl_ops_complete(dev);
out_rtnl:
	rtnl_unlock();
out_dev:
	dev_put(dev);
	return ret;
}