Michal Kubecek | e4a1717 | 2020-03-12 21:08:23 +0100 | [diff] [blame] | 1 | // SPDX-License-Identifier: GPL-2.0-only |
| 2 | |
| 3 | #include "netlink.h" |
| 4 | #include "common.h" |
| 5 | |
/* Request info for RINGS_GET; no request-specific fields beyond the
 * common ethnl header.
 */
struct rings_req_info {
	struct ethnl_req_info		base;
};
| 9 | |
/* Reply data for RINGS_GET: the common ethnl reply header plus the
 * ring sizes fetched from the driver via ->get_ringparam().
 */
struct rings_reply_data {
	struct ethnl_reply_data		base;
	struct ethtool_ringparam	ringparam;
};
| 14 | |
/* Convert an embedded &struct ethnl_reply_data pointer back to the
 * enclosing &struct rings_reply_data.
 */
#define RINGS_REPDATA(__reply_base) \
	container_of(__reply_base, struct rings_reply_data, base)
| 17 | |
/* Netlink attribute policy for RINGS_GET requests: only the header nest
 * is accepted; all ring size attributes are reply-only and therefore
 * rejected in a request.
 */
static const struct nla_policy
rings_get_policy[ETHTOOL_A_RINGS_MAX + 1] = {
	[ETHTOOL_A_RINGS_UNSPEC]		= { .type = NLA_REJECT },
	[ETHTOOL_A_RINGS_HEADER]		= { .type = NLA_NESTED },
	[ETHTOOL_A_RINGS_RX_MAX]		= { .type = NLA_REJECT },
	[ETHTOOL_A_RINGS_RX_MINI_MAX]		= { .type = NLA_REJECT },
	[ETHTOOL_A_RINGS_RX_JUMBO_MAX]		= { .type = NLA_REJECT },
	[ETHTOOL_A_RINGS_TX_MAX]		= { .type = NLA_REJECT },
	[ETHTOOL_A_RINGS_RX]			= { .type = NLA_REJECT },
	[ETHTOOL_A_RINGS_RX_MINI]		= { .type = NLA_REJECT },
	[ETHTOOL_A_RINGS_RX_JUMBO]		= { .type = NLA_REJECT },
	[ETHTOOL_A_RINGS_TX]			= { .type = NLA_REJECT },
};
| 31 | |
| 32 | static int rings_prepare_data(const struct ethnl_req_info *req_base, |
| 33 | struct ethnl_reply_data *reply_base, |
| 34 | struct genl_info *info) |
| 35 | { |
| 36 | struct rings_reply_data *data = RINGS_REPDATA(reply_base); |
| 37 | struct net_device *dev = reply_base->dev; |
| 38 | int ret; |
| 39 | |
| 40 | if (!dev->ethtool_ops->get_ringparam) |
| 41 | return -EOPNOTSUPP; |
| 42 | ret = ethnl_ops_begin(dev); |
| 43 | if (ret < 0) |
| 44 | return ret; |
| 45 | dev->ethtool_ops->get_ringparam(dev, &data->ringparam); |
| 46 | ethnl_ops_complete(dev); |
| 47 | |
| 48 | return 0; |
| 49 | } |
| 50 | |
| 51 | static int rings_reply_size(const struct ethnl_req_info *req_base, |
| 52 | const struct ethnl_reply_data *reply_base) |
| 53 | { |
| 54 | return nla_total_size(sizeof(u32)) + /* _RINGS_RX_MAX */ |
| 55 | nla_total_size(sizeof(u32)) + /* _RINGS_RX_MINI_MAX */ |
| 56 | nla_total_size(sizeof(u32)) + /* _RINGS_RX_JUMBO_MAX */ |
| 57 | nla_total_size(sizeof(u32)) + /* _RINGS_TX_MAX */ |
| 58 | nla_total_size(sizeof(u32)) + /* _RINGS_RX */ |
| 59 | nla_total_size(sizeof(u32)) + /* _RINGS_RX_MINI */ |
| 60 | nla_total_size(sizeof(u32)) + /* _RINGS_RX_JUMBO */ |
| 61 | nla_total_size(sizeof(u32)); /* _RINGS_TX */ |
| 62 | } |
| 63 | |
| 64 | static int rings_fill_reply(struct sk_buff *skb, |
| 65 | const struct ethnl_req_info *req_base, |
| 66 | const struct ethnl_reply_data *reply_base) |
| 67 | { |
| 68 | const struct rings_reply_data *data = RINGS_REPDATA(reply_base); |
| 69 | const struct ethtool_ringparam *ringparam = &data->ringparam; |
| 70 | |
| 71 | if ((ringparam->rx_max_pending && |
| 72 | (nla_put_u32(skb, ETHTOOL_A_RINGS_RX_MAX, |
| 73 | ringparam->rx_max_pending) || |
| 74 | nla_put_u32(skb, ETHTOOL_A_RINGS_RX, |
| 75 | ringparam->rx_pending))) || |
| 76 | (ringparam->rx_mini_max_pending && |
| 77 | (nla_put_u32(skb, ETHTOOL_A_RINGS_RX_MINI_MAX, |
| 78 | ringparam->rx_mini_max_pending) || |
| 79 | nla_put_u32(skb, ETHTOOL_A_RINGS_RX_MINI, |
| 80 | ringparam->rx_mini_pending))) || |
| 81 | (ringparam->rx_jumbo_max_pending && |
| 82 | (nla_put_u32(skb, ETHTOOL_A_RINGS_RX_JUMBO_MAX, |
| 83 | ringparam->rx_jumbo_max_pending) || |
| 84 | nla_put_u32(skb, ETHTOOL_A_RINGS_RX_JUMBO, |
| 85 | ringparam->rx_jumbo_pending))) || |
| 86 | (ringparam->tx_max_pending && |
| 87 | (nla_put_u32(skb, ETHTOOL_A_RINGS_TX_MAX, |
| 88 | ringparam->tx_max_pending) || |
| 89 | nla_put_u32(skb, ETHTOOL_A_RINGS_TX, |
| 90 | ringparam->tx_pending)))) |
| 91 | return -EMSGSIZE; |
| 92 | |
| 93 | return 0; |
| 94 | } |
| 95 | |
/* Request ops wiring the RINGS_GET message type into the generic ethnl
 * request machinery.
 */
const struct ethnl_request_ops ethnl_rings_request_ops = {
	.request_cmd		= ETHTOOL_MSG_RINGS_GET,
	.reply_cmd		= ETHTOOL_MSG_RINGS_GET_REPLY,
	.hdr_attr		= ETHTOOL_A_RINGS_HEADER,
	.max_attr		= ETHTOOL_A_RINGS_MAX,
	.req_info_size		= sizeof(struct rings_req_info),
	.reply_data_size	= sizeof(struct rings_reply_data),
	.request_policy		= rings_get_policy,

	.prepare_data		= rings_prepare_data,
	.reply_size		= rings_reply_size,
	.fill_reply		= rings_fill_reply,
};
Michal Kubecek | 2fc2929 | 2020-03-12 21:08:28 +0100 | [diff] [blame] | 109 | |
| 110 | /* RINGS_SET */ |
| 111 | |
/* Netlink attribute policy for RINGS_SET requests: the header nest and
 * the four settable ring sizes are accepted; the *_MAX attributes are
 * read-only limits and therefore rejected.
 */
static const struct nla_policy
rings_set_policy[ETHTOOL_A_RINGS_MAX + 1] = {
	[ETHTOOL_A_RINGS_UNSPEC]		= { .type = NLA_REJECT },
	[ETHTOOL_A_RINGS_HEADER]		= { .type = NLA_NESTED },
	[ETHTOOL_A_RINGS_RX_MAX]		= { .type = NLA_REJECT },
	[ETHTOOL_A_RINGS_RX_MINI_MAX]		= { .type = NLA_REJECT },
	[ETHTOOL_A_RINGS_RX_JUMBO_MAX]		= { .type = NLA_REJECT },
	[ETHTOOL_A_RINGS_TX_MAX]		= { .type = NLA_REJECT },
	[ETHTOOL_A_RINGS_RX]			= { .type = NLA_U32 },
	[ETHTOOL_A_RINGS_RX_MINI]		= { .type = NLA_U32 },
	[ETHTOOL_A_RINGS_RX_JUMBO]		= { .type = NLA_U32 },
	[ETHTOOL_A_RINGS_TX]			= { .type = NLA_U32 },
};
| 125 | |
| 126 | int ethnl_set_rings(struct sk_buff *skb, struct genl_info *info) |
| 127 | { |
| 128 | struct nlattr *tb[ETHTOOL_A_RINGS_MAX + 1]; |
| 129 | struct ethtool_ringparam ringparam = {}; |
| 130 | struct ethnl_req_info req_info = {}; |
| 131 | const struct nlattr *err_attr; |
| 132 | const struct ethtool_ops *ops; |
| 133 | struct net_device *dev; |
| 134 | bool mod = false; |
| 135 | int ret; |
| 136 | |
| 137 | ret = nlmsg_parse(info->nlhdr, GENL_HDRLEN, tb, |
| 138 | ETHTOOL_A_RINGS_MAX, rings_set_policy, |
| 139 | info->extack); |
| 140 | if (ret < 0) |
| 141 | return ret; |
| 142 | ret = ethnl_parse_header_dev_get(&req_info, |
| 143 | tb[ETHTOOL_A_RINGS_HEADER], |
| 144 | genl_info_net(info), info->extack, |
| 145 | true); |
| 146 | if (ret < 0) |
| 147 | return ret; |
| 148 | dev = req_info.dev; |
| 149 | ops = dev->ethtool_ops; |
| 150 | ret = -EOPNOTSUPP; |
| 151 | if (!ops->get_ringparam || !ops->set_ringparam) |
| 152 | goto out_dev; |
| 153 | |
| 154 | rtnl_lock(); |
| 155 | ret = ethnl_ops_begin(dev); |
| 156 | if (ret < 0) |
| 157 | goto out_rtnl; |
| 158 | ops->get_ringparam(dev, &ringparam); |
| 159 | |
| 160 | ethnl_update_u32(&ringparam.rx_pending, tb[ETHTOOL_A_RINGS_RX], &mod); |
| 161 | ethnl_update_u32(&ringparam.rx_mini_pending, |
| 162 | tb[ETHTOOL_A_RINGS_RX_MINI], &mod); |
| 163 | ethnl_update_u32(&ringparam.rx_jumbo_pending, |
| 164 | tb[ETHTOOL_A_RINGS_RX_JUMBO], &mod); |
| 165 | ethnl_update_u32(&ringparam.tx_pending, tb[ETHTOOL_A_RINGS_TX], &mod); |
| 166 | ret = 0; |
| 167 | if (!mod) |
| 168 | goto out_ops; |
| 169 | |
| 170 | /* ensure new ring parameters are within limits */ |
| 171 | if (ringparam.rx_pending > ringparam.rx_max_pending) |
| 172 | err_attr = tb[ETHTOOL_A_RINGS_RX]; |
| 173 | else if (ringparam.rx_mini_pending > ringparam.rx_mini_max_pending) |
| 174 | err_attr = tb[ETHTOOL_A_RINGS_RX_MINI]; |
| 175 | else if (ringparam.rx_jumbo_pending > ringparam.rx_jumbo_max_pending) |
| 176 | err_attr = tb[ETHTOOL_A_RINGS_RX_JUMBO]; |
| 177 | else if (ringparam.tx_pending > ringparam.tx_max_pending) |
| 178 | err_attr = tb[ETHTOOL_A_RINGS_TX]; |
| 179 | else |
| 180 | err_attr = NULL; |
| 181 | if (err_attr) { |
| 182 | ret = -EINVAL; |
| 183 | NL_SET_ERR_MSG_ATTR(info->extack, err_attr, |
Colin Ian King | 5ec82c4 | 2020-03-13 11:25:34 +0000 | [diff] [blame] | 184 | "requested ring size exceeds maximum"); |
Michal Kubecek | 2fc2929 | 2020-03-12 21:08:28 +0100 | [diff] [blame] | 185 | goto out_ops; |
| 186 | } |
| 187 | |
| 188 | ret = dev->ethtool_ops->set_ringparam(dev, &ringparam); |
Michal Kubecek | bc9d1c9 | 2020-03-12 21:08:33 +0100 | [diff] [blame] | 189 | if (ret < 0) |
| 190 | goto out_ops; |
| 191 | ethtool_notify(dev, ETHTOOL_MSG_RINGS_NTF, NULL); |
Michal Kubecek | 2fc2929 | 2020-03-12 21:08:28 +0100 | [diff] [blame] | 192 | |
| 193 | out_ops: |
| 194 | ethnl_ops_complete(dev); |
| 195 | out_rtnl: |
| 196 | rtnl_unlock(); |
| 197 | out_dev: |
| 198 | dev_put(dev); |
| 199 | return ret; |
| 200 | } |