Michal Kubecek | e4a1717 | 2020-03-12 21:08:23 +0100 | [diff] [blame] | 1 | // SPDX-License-Identifier: GPL-2.0-only |
| 2 | |
| 3 | #include "netlink.h" |
| 4 | #include "common.h" |
| 5 | |
/* Request info for RINGS_GET; carries no request-specific fields beyond
 * the common ethnl base.
 */
struct rings_req_info {
	struct ethnl_req_info		base;
};
| 9 | |
/* Reply data for RINGS_GET: common ethnl base plus the ring parameter
 * snapshot filled in by the driver's ->get_ringparam() callback.
 */
struct rings_reply_data {
	struct ethnl_reply_data		base;
	struct ethtool_ringparam	ringparam;
};
| 14 | |
/* Map an embedded ethnl_reply_data pointer back to its enclosing
 * rings_reply_data.
 */
#define RINGS_REPDATA(__reply_base) \
	container_of(__reply_base, struct rings_reply_data, base)
| 17 | |
/* RINGS_GET requests accept only the common request header attribute. */
const struct nla_policy ethnl_rings_get_policy[] = {
	[ETHTOOL_A_RINGS_HEADER]		=
		NLA_POLICY_NESTED(ethnl_header_policy),
};
| 22 | |
| 23 | static int rings_prepare_data(const struct ethnl_req_info *req_base, |
| 24 | struct ethnl_reply_data *reply_base, |
| 25 | struct genl_info *info) |
| 26 | { |
| 27 | struct rings_reply_data *data = RINGS_REPDATA(reply_base); |
| 28 | struct net_device *dev = reply_base->dev; |
| 29 | int ret; |
| 30 | |
| 31 | if (!dev->ethtool_ops->get_ringparam) |
| 32 | return -EOPNOTSUPP; |
| 33 | ret = ethnl_ops_begin(dev); |
| 34 | if (ret < 0) |
| 35 | return ret; |
| 36 | dev->ethtool_ops->get_ringparam(dev, &data->ringparam); |
| 37 | ethnl_ops_complete(dev); |
| 38 | |
| 39 | return 0; |
| 40 | } |
| 41 | |
| 42 | static int rings_reply_size(const struct ethnl_req_info *req_base, |
| 43 | const struct ethnl_reply_data *reply_base) |
| 44 | { |
| 45 | return nla_total_size(sizeof(u32)) + /* _RINGS_RX_MAX */ |
| 46 | nla_total_size(sizeof(u32)) + /* _RINGS_RX_MINI_MAX */ |
| 47 | nla_total_size(sizeof(u32)) + /* _RINGS_RX_JUMBO_MAX */ |
| 48 | nla_total_size(sizeof(u32)) + /* _RINGS_TX_MAX */ |
| 49 | nla_total_size(sizeof(u32)) + /* _RINGS_RX */ |
| 50 | nla_total_size(sizeof(u32)) + /* _RINGS_RX_MINI */ |
| 51 | nla_total_size(sizeof(u32)) + /* _RINGS_RX_JUMBO */ |
| 52 | nla_total_size(sizeof(u32)); /* _RINGS_TX */ |
| 53 | } |
| 54 | |
| 55 | static int rings_fill_reply(struct sk_buff *skb, |
| 56 | const struct ethnl_req_info *req_base, |
| 57 | const struct ethnl_reply_data *reply_base) |
| 58 | { |
| 59 | const struct rings_reply_data *data = RINGS_REPDATA(reply_base); |
| 60 | const struct ethtool_ringparam *ringparam = &data->ringparam; |
| 61 | |
| 62 | if ((ringparam->rx_max_pending && |
| 63 | (nla_put_u32(skb, ETHTOOL_A_RINGS_RX_MAX, |
| 64 | ringparam->rx_max_pending) || |
| 65 | nla_put_u32(skb, ETHTOOL_A_RINGS_RX, |
| 66 | ringparam->rx_pending))) || |
| 67 | (ringparam->rx_mini_max_pending && |
| 68 | (nla_put_u32(skb, ETHTOOL_A_RINGS_RX_MINI_MAX, |
| 69 | ringparam->rx_mini_max_pending) || |
| 70 | nla_put_u32(skb, ETHTOOL_A_RINGS_RX_MINI, |
| 71 | ringparam->rx_mini_pending))) || |
| 72 | (ringparam->rx_jumbo_max_pending && |
| 73 | (nla_put_u32(skb, ETHTOOL_A_RINGS_RX_JUMBO_MAX, |
| 74 | ringparam->rx_jumbo_max_pending) || |
| 75 | nla_put_u32(skb, ETHTOOL_A_RINGS_RX_JUMBO, |
| 76 | ringparam->rx_jumbo_pending))) || |
| 77 | (ringparam->tx_max_pending && |
| 78 | (nla_put_u32(skb, ETHTOOL_A_RINGS_TX_MAX, |
| 79 | ringparam->tx_max_pending) || |
| 80 | nla_put_u32(skb, ETHTOOL_A_RINGS_TX, |
| 81 | ringparam->tx_pending)))) |
| 82 | return -EMSGSIZE; |
| 83 | |
| 84 | return 0; |
| 85 | } |
| 86 | |
/* Wire up the RINGS_GET request type into the generic ethnl request
 * machinery: message IDs, per-request/per-reply allocation sizes, and
 * the three callbacks above.
 */
const struct ethnl_request_ops ethnl_rings_request_ops = {
	.request_cmd		= ETHTOOL_MSG_RINGS_GET,
	.reply_cmd		= ETHTOOL_MSG_RINGS_GET_REPLY,
	.hdr_attr		= ETHTOOL_A_RINGS_HEADER,
	.req_info_size		= sizeof(struct rings_req_info),
	.reply_data_size	= sizeof(struct rings_reply_data),

	.prepare_data		= rings_prepare_data,
	.reply_size		= rings_reply_size,
	.fill_reply		= rings_fill_reply,
};
Michal Kubecek | 2fc2929 | 2020-03-12 21:08:28 +0100 | [diff] [blame] | 98 | |
| 99 | /* RINGS_SET */ |
| 100 | |
/* RINGS_SET requests accept the common header plus the four settable
 * ring sizes; the *_MAX attributes are read-only and thus not listed.
 */
const struct nla_policy ethnl_rings_set_policy[] = {
	[ETHTOOL_A_RINGS_HEADER]		=
		NLA_POLICY_NESTED(ethnl_header_policy),
	[ETHTOOL_A_RINGS_RX]			= { .type = NLA_U32 },
	[ETHTOOL_A_RINGS_RX_MINI]		= { .type = NLA_U32 },
	[ETHTOOL_A_RINGS_RX_JUMBO]		= { .type = NLA_U32 },
	[ETHTOOL_A_RINGS_TX]			= { .type = NLA_U32 },
};
| 109 | |
/* RINGS_SET handler: read current ring parameters from the driver,
 * overwrite only the fields present in the request, validate them
 * against the driver-reported maxima and apply the result.  Sends a
 * RINGS_NTF notification when the change is accepted.
 * Returns 0 on success or a negative error code.
 */
int ethnl_set_rings(struct sk_buff *skb, struct genl_info *info)
{
	struct ethtool_ringparam ringparam = {};
	struct ethnl_req_info req_info = {};
	struct nlattr **tb = info->attrs;
	const struct nlattr *err_attr;
	const struct ethtool_ops *ops;
	struct net_device *dev;
	bool mod = false;
	int ret;

	/* resolve target device from the request header; on success this
	 * holds a reference on the device, released at out_dev
	 */
	ret = ethnl_parse_header_dev_get(&req_info,
					 tb[ETHTOOL_A_RINGS_HEADER],
					 genl_info_net(info), info->extack,
					 true);
	if (ret < 0)
		return ret;
	dev = req_info.dev;
	ops = dev->ethtool_ops;
	ret = -EOPNOTSUPP;
	/* both callbacks are required: current values are read first so
	 * that attributes absent from the request keep their old values
	 */
	if (!ops->get_ringparam || !ops->set_ringparam)
		goto out_dev;

	rtnl_lock();
	ret = ethnl_ops_begin(dev);
	if (ret < 0)
		goto out_rtnl;
	ops->get_ringparam(dev, &ringparam);

	/* overwrite only fields present in the request; mod tracks whether
	 * any value actually changed
	 */
	ethnl_update_u32(&ringparam.rx_pending, tb[ETHTOOL_A_RINGS_RX], &mod);
	ethnl_update_u32(&ringparam.rx_mini_pending,
			 tb[ETHTOOL_A_RINGS_RX_MINI], &mod);
	ethnl_update_u32(&ringparam.rx_jumbo_pending,
			 tb[ETHTOOL_A_RINGS_RX_JUMBO], &mod);
	ethnl_update_u32(&ringparam.tx_pending, tb[ETHTOOL_A_RINGS_TX], &mod);
	ret = 0;
	/* nothing changed, skip the driver call and notification */
	if (!mod)
		goto out_ops;

	/* ensure new ring parameters are within limits */
	if (ringparam.rx_pending > ringparam.rx_max_pending)
		err_attr = tb[ETHTOOL_A_RINGS_RX];
	else if (ringparam.rx_mini_pending > ringparam.rx_mini_max_pending)
		err_attr = tb[ETHTOOL_A_RINGS_RX_MINI];
	else if (ringparam.rx_jumbo_pending > ringparam.rx_jumbo_max_pending)
		err_attr = tb[ETHTOOL_A_RINGS_RX_JUMBO];
	else if (ringparam.tx_pending > ringparam.tx_max_pending)
		err_attr = tb[ETHTOOL_A_RINGS_TX];
	else
		err_attr = NULL;
	if (err_attr) {
		ret = -EINVAL;
		NL_SET_ERR_MSG_ATTR(info->extack, err_attr,
				    "requested ring size exceeds maximum");
		goto out_ops;
	}

	ret = dev->ethtool_ops->set_ringparam(dev, &ringparam);
	if (ret < 0)
		goto out_ops;
	/* change accepted by the driver, notify listeners */
	ethtool_notify(dev, ETHTOOL_MSG_RINGS_NTF, NULL);

	/* cleanup order matters: ops_complete under rtnl, then unlock,
	 * then drop the device reference taken by header parsing
	 */
out_ops:
	ethnl_ops_complete(dev);
out_rtnl:
	rtnl_unlock();
out_dev:
	dev_put(dev);
	return ret;
}