// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net-sysfs.c - network device class and attributes
 *
 * Copyright (c) 2003 Stephen Hemminger <shemminger@osdl.org>
 */

#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/nsproxy.h>
#include <net/sock.h>
#include <net/net_namespace.h>
#include <linux/rtnetlink.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <linux/jiffies.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/cpu.h>

#include "net-sysfs.h"

#ifdef CONFIG_SYSFS
static const char fmt_hex[] = "%#x\n";
static const char fmt_dec[] = "%d\n";
static const char fmt_ulong[] = "%lu\n";
static const char fmt_u64[] = "%llu\n";

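/* A device counts as "alive" while its reg_state is at most
 * NETREG_REGISTERED, i.e. it has not yet started unregistering;
 * most attributes below return -EINVAL once teardown begins.
 */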
static inline int dev_isalive(const struct net_device *dev)
{
	return dev->reg_state <= NETREG_REGISTERED;
}

/* use same locking rules as GIF* ioctl's */
static ssize_t netdev_show(const struct device *dev,
			   struct device_attribute *attr, char *buf,
			   ssize_t (*format)(const struct net_device *, char *))
{
	struct net_device *ndev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	read_lock(&dev_base_lock);
	if (dev_isalive(ndev))
		ret = (*format)(ndev, buf);
	read_unlock(&dev_base_lock);

	return ret;
}

/* generate a show function for simple field */
#define NETDEVICE_SHOW(field, format_string)				\
static ssize_t format_##field(const struct net_device *dev, char *buf)	\
{									\
	return sprintf(buf, format_string, dev->field);			\
}									\
static ssize_t field##_show(struct device *dev,				\
			    struct device_attribute *attr, char *buf)	\
{									\
	return netdev_show(dev, attr, buf, format_##field);		\
}									\

#define NETDEVICE_SHOW_RO(field, format_string)				\
NETDEVICE_SHOW(field, format_string);					\
static DEVICE_ATTR_RO(field)

#define NETDEVICE_SHOW_RW(field, format_string)				\
NETDEVICE_SHOW(field, format_string);					\
static DEVICE_ATTR_RW(field)
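
/* Illustrative expansion: NETDEVICE_SHOW_RO(ifindex, fmt_dec) generates
 * format_ifindex() and ifindex_show() plus a read-only dev_attr_ifindex,
 * which shows up as /sys/class/net/<iface>/ifindex.
 */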

/* use same locking and permission rules as SIF* ioctl's */
static ssize_t netdev_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t len,
			    int (*set)(struct net_device *, unsigned long))
{
	struct net_device *netdev = to_net_dev(dev);
	struct net *net = dev_net(netdev);
	unsigned long new;
	int ret;

	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	ret = kstrtoul(buf, 0, &new);
	if (ret)
		goto err;

	if (!rtnl_trylock())
		return restart_syscall();

	if (dev_isalive(netdev)) {
		ret = (*set)(netdev, new);
		if (ret == 0)
			ret = len;
	}
	rtnl_unlock();
 err:
	return ret;
}
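
/* Every writable attribute below funnels through netdev_store(), so a
 * write such as (illustrative, "eth0" being a placeholder name)
 *
 *	echo 1500 > /sys/class/net/eth0/mtu
 *
 * checks CAP_NET_ADMIN in the device's network namespace, parses the
 * value and runs the per-attribute setter under RTNL.
 */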

NETDEVICE_SHOW_RO(dev_id, fmt_hex);
NETDEVICE_SHOW_RO(dev_port, fmt_dec);
NETDEVICE_SHOW_RO(addr_assign_type, fmt_dec);
NETDEVICE_SHOW_RO(addr_len, fmt_dec);
NETDEVICE_SHOW_RO(ifindex, fmt_dec);
NETDEVICE_SHOW_RO(type, fmt_dec);
NETDEVICE_SHOW_RO(link_mode, fmt_dec);

static ssize_t iflink_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct net_device *ndev = to_net_dev(dev);

	return sprintf(buf, fmt_dec, dev_get_iflink(ndev));
}
static DEVICE_ATTR_RO(iflink);

static ssize_t format_name_assign_type(const struct net_device *dev, char *buf)
{
	return sprintf(buf, fmt_dec, dev->name_assign_type);
}

static ssize_t name_assign_type_show(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct net_device *ndev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	if (ndev->name_assign_type != NET_NAME_UNKNOWN)
		ret = netdev_show(dev, attr, buf, format_name_assign_type);

	return ret;
}
static DEVICE_ATTR_RO(name_assign_type);

/* use same locking rules as GIFHWADDR ioctl's */
static ssize_t address_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct net_device *ndev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	read_lock(&dev_base_lock);
	if (dev_isalive(ndev))
		ret = sysfs_format_mac(buf, ndev->dev_addr, ndev->addr_len);
	read_unlock(&dev_base_lock);
	return ret;
}
static DEVICE_ATTR_RO(address);

static ssize_t broadcast_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct net_device *ndev = to_net_dev(dev);

	if (dev_isalive(ndev))
		return sysfs_format_mac(buf, ndev->broadcast, ndev->addr_len);
	return -EINVAL;
}
static DEVICE_ATTR_RO(broadcast);

static int change_carrier(struct net_device *dev, unsigned long new_carrier)
{
	if (!netif_running(dev))
		return -EINVAL;
	return dev_change_carrier(dev, (bool)new_carrier);
}
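
/* Writes to "carrier" only succeed while the device is up, and only for
 * drivers implementing ndo_change_carrier (dev_change_carrier() returns
 * -EOPNOTSUPP otherwise), e.g. "echo 0 > /sys/class/net/eth0/carrier"
 * with "eth0" as a placeholder name.
 */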

static ssize_t carrier_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_carrier);
}

static ssize_t carrier_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	if (netif_running(netdev))
		return sprintf(buf, fmt_dec, !!netif_carrier_ok(netdev));

	return -EINVAL;
}
static DEVICE_ATTR_RW(carrier);

static ssize_t speed_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	int ret = -EINVAL;

	if (!rtnl_trylock())
		return restart_syscall();

	if (netif_running(netdev)) {
		struct ethtool_link_ksettings cmd;

		if (!__ethtool_get_link_ksettings(netdev, &cmd))
			ret = sprintf(buf, fmt_dec, cmd.base.speed);
	}
	rtnl_unlock();
	return ret;
}
static DEVICE_ATTR_RO(speed);

static ssize_t duplex_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	int ret = -EINVAL;

	if (!rtnl_trylock())
		return restart_syscall();

	if (netif_running(netdev)) {
		struct ethtool_link_ksettings cmd;

		if (!__ethtool_get_link_ksettings(netdev, &cmd)) {
			const char *duplex;

			switch (cmd.base.duplex) {
			case DUPLEX_HALF:
				duplex = "half";
				break;
			case DUPLEX_FULL:
				duplex = "full";
				break;
			default:
				duplex = "unknown";
				break;
			}
			ret = sprintf(buf, "%s\n", duplex);
		}
	}
	rtnl_unlock();
	return ret;
}
static DEVICE_ATTR_RO(duplex);

static ssize_t testing_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	if (netif_running(netdev))
		return sprintf(buf, fmt_dec, !!netif_testing(netdev));

	return -EINVAL;
}
static DEVICE_ATTR_RO(testing);

static ssize_t dormant_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	if (netif_running(netdev))
		return sprintf(buf, fmt_dec, !!netif_dormant(netdev));

	return -EINVAL;
}
static DEVICE_ATTR_RO(dormant);

static const char *const operstates[] = {
	"unknown",
	"notpresent", /* currently unused */
	"down",
	"lowerlayerdown",
	"testing",
	"dormant",
	"up"
};

static ssize_t operstate_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	const struct net_device *netdev = to_net_dev(dev);
	unsigned char operstate;

	read_lock(&dev_base_lock);
	operstate = netdev->operstate;
	if (!netif_running(netdev))
		operstate = IF_OPER_DOWN;
	read_unlock(&dev_base_lock);

	if (operstate >= ARRAY_SIZE(operstates))
		return -EINVAL; /* should not happen */

	return sprintf(buf, "%s\n", operstates[operstate]);
}
static DEVICE_ATTR_RO(operstate);
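
/* The operstate strings above mirror the RFC 2863 operational states, so
 * reading /sys/class/net/<iface>/operstate on a running interface with
 * carrier returns "up".
 */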

static ssize_t carrier_changes_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	return sprintf(buf, fmt_dec,
		       atomic_read(&netdev->carrier_up_count) +
		       atomic_read(&netdev->carrier_down_count));
}
static DEVICE_ATTR_RO(carrier_changes);

static ssize_t carrier_up_count_show(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	return sprintf(buf, fmt_dec, atomic_read(&netdev->carrier_up_count));
}
static DEVICE_ATTR_RO(carrier_up_count);

static ssize_t carrier_down_count_show(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	return sprintf(buf, fmt_dec, atomic_read(&netdev->carrier_down_count));
}
static DEVICE_ATTR_RO(carrier_down_count);

/* read-write attributes */

static int change_mtu(struct net_device *dev, unsigned long new_mtu)
{
	return dev_set_mtu(dev, (int)new_mtu);
}

static ssize_t mtu_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_mtu);
}
NETDEVICE_SHOW_RW(mtu, fmt_dec);

static int change_flags(struct net_device *dev, unsigned long new_flags)
{
	return dev_change_flags(dev, (unsigned int)new_flags, NULL);
}

static ssize_t flags_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_flags);
}
NETDEVICE_SHOW_RW(flags, fmt_hex);

static ssize_t tx_queue_len_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t len)
{
	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	return netdev_store(dev, attr, buf, len, dev_change_tx_queue_len);
}
NETDEVICE_SHOW_RW(tx_queue_len, fmt_dec);

static int change_gro_flush_timeout(struct net_device *dev, unsigned long val)
{
	WRITE_ONCE(dev->gro_flush_timeout, val);
	return 0;
}

static ssize_t gro_flush_timeout_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t len)
{
	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	return netdev_store(dev, attr, buf, len, change_gro_flush_timeout);
}
NETDEVICE_SHOW_RW(gro_flush_timeout, fmt_ulong);

static int change_napi_defer_hard_irqs(struct net_device *dev, unsigned long val)
{
	WRITE_ONCE(dev->napi_defer_hard_irqs, val);
	return 0;
}

static ssize_t napi_defer_hard_irqs_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t len)
{
	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	return netdev_store(dev, attr, buf, len, change_napi_defer_hard_irqs);
}
NETDEVICE_SHOW_RW(napi_defer_hard_irqs, fmt_dec);

static ssize_t ifalias_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t len)
{
	struct net_device *netdev = to_net_dev(dev);
	struct net *net = dev_net(netdev);
	size_t count = len;
	ssize_t ret = 0;

	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	/* ignore trailing newline */
	if (len > 0 && buf[len - 1] == '\n')
		--count;

	if (!rtnl_trylock())
		return restart_syscall();

	if (dev_isalive(netdev)) {
		ret = dev_set_alias(netdev, buf, count);
		if (ret < 0)
			goto err;
		ret = len;
		netdev_state_change(netdev);
	}
err:
	rtnl_unlock();

	return ret;
}
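
/* ifalias is a free-form description of at most IFALIASZ - 1 characters,
 * e.g. (placeholder name):
 *
 *	echo "uplink to core switch" > /sys/class/net/eth0/ifalias
 */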

static ssize_t ifalias_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	const struct net_device *netdev = to_net_dev(dev);
	char tmp[IFALIASZ];
	ssize_t ret = 0;

	ret = dev_get_alias(netdev, tmp, sizeof(tmp));
	if (ret > 0)
		ret = sprintf(buf, "%s\n", tmp);
	return ret;
}
static DEVICE_ATTR_RW(ifalias);

static int change_group(struct net_device *dev, unsigned long new_group)
{
	dev_set_group(dev, (int)new_group);
	return 0;
}

static ssize_t group_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_group);
}
NETDEVICE_SHOW(group, fmt_dec);
static DEVICE_ATTR(netdev_group, 0644, group_show, group_store);

static int change_proto_down(struct net_device *dev, unsigned long proto_down)
{
	return dev_change_proto_down(dev, (bool)proto_down);
}

static ssize_t proto_down_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_proto_down);
}
NETDEVICE_SHOW_RW(proto_down, fmt_dec);

static ssize_t phys_port_id_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	if (!rtnl_trylock())
		return restart_syscall();

	if (dev_isalive(netdev)) {
		struct netdev_phys_item_id ppid;

		ret = dev_get_phys_port_id(netdev, &ppid);
		if (!ret)
			ret = sprintf(buf, "%*phN\n", ppid.id_len, ppid.id);
	}
	rtnl_unlock();

	return ret;
}
static DEVICE_ATTR_RO(phys_port_id);

static ssize_t phys_port_name_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	if (!rtnl_trylock())
		return restart_syscall();

	if (dev_isalive(netdev)) {
		char name[IFNAMSIZ];

		ret = dev_get_phys_port_name(netdev, name, sizeof(name));
		if (!ret)
			ret = sprintf(buf, "%s\n", name);
	}
	rtnl_unlock();

	return ret;
}
static DEVICE_ATTR_RO(phys_port_name);

static ssize_t phys_switch_id_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	if (!rtnl_trylock())
		return restart_syscall();

	if (dev_isalive(netdev)) {
		struct netdev_phys_item_id ppid = { };

		ret = dev_get_port_parent_id(netdev, &ppid, false);
		if (!ret)
			ret = sprintf(buf, "%*phN\n", ppid.id_len, ppid.id);
	}
	rtnl_unlock();

	return ret;
}
static DEVICE_ATTR_RO(phys_switch_id);

static struct attribute *net_class_attrs[] __ro_after_init = {
	&dev_attr_netdev_group.attr,
	&dev_attr_type.attr,
	&dev_attr_dev_id.attr,
	&dev_attr_dev_port.attr,
	&dev_attr_iflink.attr,
	&dev_attr_ifindex.attr,
	&dev_attr_name_assign_type.attr,
	&dev_attr_addr_assign_type.attr,
	&dev_attr_addr_len.attr,
	&dev_attr_link_mode.attr,
	&dev_attr_address.attr,
	&dev_attr_broadcast.attr,
	&dev_attr_speed.attr,
	&dev_attr_duplex.attr,
	&dev_attr_dormant.attr,
	&dev_attr_testing.attr,
	&dev_attr_operstate.attr,
	&dev_attr_carrier_changes.attr,
	&dev_attr_ifalias.attr,
	&dev_attr_carrier.attr,
	&dev_attr_mtu.attr,
	&dev_attr_flags.attr,
	&dev_attr_tx_queue_len.attr,
	&dev_attr_gro_flush_timeout.attr,
	&dev_attr_napi_defer_hard_irqs.attr,
	&dev_attr_phys_port_id.attr,
	&dev_attr_phys_port_name.attr,
	&dev_attr_phys_switch_id.attr,
	&dev_attr_proto_down.attr,
	&dev_attr_carrier_up_count.attr,
	&dev_attr_carrier_down_count.attr,
	NULL,
};
ATTRIBUTE_GROUPS(net_class);

/* Show a given attribute in the statistics group */
static ssize_t netstat_show(const struct device *d,
			    struct device_attribute *attr, char *buf,
			    unsigned long offset)
{
	struct net_device *dev = to_net_dev(d);
	ssize_t ret = -EINVAL;

	WARN_ON(offset > sizeof(struct rtnl_link_stats64) ||
		offset % sizeof(u64) != 0);

	read_lock(&dev_base_lock);
	if (dev_isalive(dev)) {
		struct rtnl_link_stats64 temp;
		const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);

		ret = sprintf(buf, fmt_u64, *(u64 *)(((u8 *)stats) + offset));
	}
	read_unlock(&dev_base_lock);
	return ret;
}

/* generate a read-only statistics attribute */
#define NETSTAT_ENTRY(name)						\
static ssize_t name##_show(struct device *d,				\
			   struct device_attribute *attr, char *buf)	\
{									\
	return netstat_show(d, attr, buf,				\
			    offsetof(struct rtnl_link_stats64, name));	\
}									\
static DEVICE_ATTR_RO(name)
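
/* Each NETSTAT_ENTRY(name) below becomes a read-only file
 * /sys/class/net/<iface>/statistics/<name> backed by the matching
 * rtnl_link_stats64 field, e.g. statistics/rx_packets.
 */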

NETSTAT_ENTRY(rx_packets);
NETSTAT_ENTRY(tx_packets);
NETSTAT_ENTRY(rx_bytes);
NETSTAT_ENTRY(tx_bytes);
NETSTAT_ENTRY(rx_errors);
NETSTAT_ENTRY(tx_errors);
NETSTAT_ENTRY(rx_dropped);
NETSTAT_ENTRY(tx_dropped);
NETSTAT_ENTRY(multicast);
NETSTAT_ENTRY(collisions);
NETSTAT_ENTRY(rx_length_errors);
NETSTAT_ENTRY(rx_over_errors);
NETSTAT_ENTRY(rx_crc_errors);
NETSTAT_ENTRY(rx_frame_errors);
NETSTAT_ENTRY(rx_fifo_errors);
NETSTAT_ENTRY(rx_missed_errors);
NETSTAT_ENTRY(tx_aborted_errors);
NETSTAT_ENTRY(tx_carrier_errors);
NETSTAT_ENTRY(tx_fifo_errors);
NETSTAT_ENTRY(tx_heartbeat_errors);
NETSTAT_ENTRY(tx_window_errors);
NETSTAT_ENTRY(rx_compressed);
NETSTAT_ENTRY(tx_compressed);
NETSTAT_ENTRY(rx_nohandler);

static struct attribute *netstat_attrs[] __ro_after_init = {
	&dev_attr_rx_packets.attr,
	&dev_attr_tx_packets.attr,
	&dev_attr_rx_bytes.attr,
	&dev_attr_tx_bytes.attr,
	&dev_attr_rx_errors.attr,
	&dev_attr_tx_errors.attr,
	&dev_attr_rx_dropped.attr,
	&dev_attr_tx_dropped.attr,
	&dev_attr_multicast.attr,
	&dev_attr_collisions.attr,
	&dev_attr_rx_length_errors.attr,
	&dev_attr_rx_over_errors.attr,
	&dev_attr_rx_crc_errors.attr,
	&dev_attr_rx_frame_errors.attr,
	&dev_attr_rx_fifo_errors.attr,
	&dev_attr_rx_missed_errors.attr,
	&dev_attr_tx_aborted_errors.attr,
	&dev_attr_tx_carrier_errors.attr,
	&dev_attr_tx_fifo_errors.attr,
	&dev_attr_tx_heartbeat_errors.attr,
	&dev_attr_tx_window_errors.attr,
	&dev_attr_rx_compressed.attr,
	&dev_attr_tx_compressed.attr,
	&dev_attr_rx_nohandler.attr,
	NULL
};

static const struct attribute_group netstat_group = {
	.name = "statistics",
	.attrs = netstat_attrs,
};

#if IS_ENABLED(CONFIG_WIRELESS_EXT) || IS_ENABLED(CONFIG_CFG80211)
static struct attribute *wireless_attrs[] = {
	NULL
};

static const struct attribute_group wireless_group = {
	.name = "wireless",
	.attrs = wireless_attrs,
};
#endif

#else /* CONFIG_SYSFS */
#define net_class_groups	NULL
#endif /* CONFIG_SYSFS */

#ifdef CONFIG_SYSFS
#define to_rx_queue_attr(_attr) \
	container_of(_attr, struct rx_queue_attribute, attr)

#define to_rx_queue(obj) container_of(obj, struct netdev_rx_queue, kobj)

static ssize_t rx_queue_attr_show(struct kobject *kobj, struct attribute *attr,
				  char *buf)
{
	const struct rx_queue_attribute *attribute = to_rx_queue_attr(attr);
	struct netdev_rx_queue *queue = to_rx_queue(kobj);

	if (!attribute->show)
		return -EIO;

	return attribute->show(queue, buf);
}

static ssize_t rx_queue_attr_store(struct kobject *kobj, struct attribute *attr,
				   const char *buf, size_t count)
{
	const struct rx_queue_attribute *attribute = to_rx_queue_attr(attr);
	struct netdev_rx_queue *queue = to_rx_queue(kobj);

	if (!attribute->store)
		return -EIO;

	return attribute->store(queue, buf, count);
}

static const struct sysfs_ops rx_queue_sysfs_ops = {
	.show = rx_queue_attr_show,
	.store = rx_queue_attr_store,
};

#ifdef CONFIG_RPS
static ssize_t show_rps_map(struct netdev_rx_queue *queue, char *buf)
{
	struct rps_map *map;
	cpumask_var_t mask;
	int i, len;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	rcu_read_lock();
	map = rcu_dereference(queue->rps_map);
	if (map)
		for (i = 0; i < map->len; i++)
			cpumask_set_cpu(map->cpus[i], mask);

	len = snprintf(buf, PAGE_SIZE, "%*pb\n", cpumask_pr_args(mask));
	rcu_read_unlock();
	free_cpumask_var(mask);

	return len < PAGE_SIZE ? len : -EINVAL;
}

static ssize_t store_rps_map(struct netdev_rx_queue *queue,
			     const char *buf, size_t len)
{
	struct rps_map *old_map, *map;
	cpumask_var_t mask;
	int err, cpu, i;
	static DEFINE_MUTEX(rps_map_mutex);

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
	if (err) {
		free_cpumask_var(mask);
		return err;
	}

	map = kzalloc(max_t(unsigned int,
			    RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES),
		      GFP_KERNEL);
	if (!map) {
		free_cpumask_var(mask);
		return -ENOMEM;
	}

	i = 0;
	for_each_cpu_and(cpu, mask, cpu_online_mask)
		map->cpus[i++] = cpu;

	if (i) {
		map->len = i;
	} else {
		kfree(map);
		map = NULL;
	}

	mutex_lock(&rps_map_mutex);
	old_map = rcu_dereference_protected(queue->rps_map,
					    mutex_is_locked(&rps_map_mutex));
	rcu_assign_pointer(queue->rps_map, map);

	if (map)
		static_branch_inc(&rps_needed);
	if (old_map)
		static_branch_dec(&rps_needed);

	mutex_unlock(&rps_map_mutex);

	if (old_map)
		kfree_rcu(old_map, rcu);

	free_cpumask_var(mask);
	return len;
}
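
/* rps_cpus takes a hex CPU bitmap per RX queue; e.g. (placeholder names)
 *
 *	echo f > /sys/class/net/eth0/queues/rx-0/rps_cpus
 *
 * steers receive processing for queue 0 onto CPUs 0-3, while writing 0
 * disables RPS for the queue and drops the rps_needed static branch.
 */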
796
Tom Herbertfec5e652010-04-16 16:01:27 -0700797static ssize_t show_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
Tom Herbertfec5e652010-04-16 16:01:27 -0700798 char *buf)
799{
800 struct rps_dev_flow_table *flow_table;
Eric Dumazet60b778c2011-12-24 06:56:49 +0000801 unsigned long val = 0;
Tom Herbertfec5e652010-04-16 16:01:27 -0700802
803 rcu_read_lock();
804 flow_table = rcu_dereference(queue->rps_flow_table);
805 if (flow_table)
Eric Dumazet60b778c2011-12-24 06:56:49 +0000806 val = (unsigned long)flow_table->mask + 1;
Tom Herbertfec5e652010-04-16 16:01:27 -0700807 rcu_read_unlock();
808
Eric Dumazet60b778c2011-12-24 06:56:49 +0000809 return sprintf(buf, "%lu\n", val);
Tom Herbertfec5e652010-04-16 16:01:27 -0700810}
811
Tom Herbertfec5e652010-04-16 16:01:27 -0700812static void rps_dev_flow_table_release(struct rcu_head *rcu)
813{
814 struct rps_dev_flow_table *table = container_of(rcu,
815 struct rps_dev_flow_table, rcu);
Al Viro243198d2013-05-05 16:05:55 +0000816 vfree(table);
Tom Herbertfec5e652010-04-16 16:01:27 -0700817}
818
Eric Dumazetf5acb902010-04-19 14:40:57 -0700819static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
stephen hemminger718ad682017-08-18 13:46:24 -0700820 const char *buf, size_t len)
Tom Herbertfec5e652010-04-16 16:01:27 -0700821{
Eric Dumazet60b778c2011-12-24 06:56:49 +0000822 unsigned long mask, count;
Tom Herbertfec5e652010-04-16 16:01:27 -0700823 struct rps_dev_flow_table *table, *old_table;
824 static DEFINE_SPINLOCK(rps_dev_flow_lock);
Eric Dumazet60b778c2011-12-24 06:56:49 +0000825 int rc;
Tom Herbertfec5e652010-04-16 16:01:27 -0700826
827 if (!capable(CAP_NET_ADMIN))
828 return -EPERM;
829
Eric Dumazet60b778c2011-12-24 06:56:49 +0000830 rc = kstrtoul(buf, 0, &count);
831 if (rc < 0)
832 return rc;
Tom Herbertfec5e652010-04-16 16:01:27 -0700833
834 if (count) {
Eric Dumazet60b778c2011-12-24 06:56:49 +0000835 mask = count - 1;
836 /* mask = roundup_pow_of_two(count) - 1;
837 * without overflows...
838 */
839 while ((mask | (mask >> 1)) != mask)
840 mask |= (mask >> 1);
841 /* On 64 bit arches, must check mask fits in table->mask (u32),
stephen hemminger8e3bff92013-12-08 12:15:44 -0800842 * and on 32bit arches, must check
843 * RPS_DEV_FLOW_TABLE_SIZE(mask + 1) doesn't overflow.
Eric Dumazet60b778c2011-12-24 06:56:49 +0000844 */
845#if BITS_PER_LONG > 32
846 if (mask > (unsigned long)(u32)mask)
Xi Wanga0a129f2011-12-22 13:35:22 +0000847 return -EINVAL;
Eric Dumazet60b778c2011-12-24 06:56:49 +0000848#else
849 if (mask > (ULONG_MAX - RPS_DEV_FLOW_TABLE_SIZE(1))
Xi Wanga0a129f2011-12-22 13:35:22 +0000850 / sizeof(struct rps_dev_flow)) {
Tom Herbertfec5e652010-04-16 16:01:27 -0700851 /* Enforce a limit to prevent overflow */
852 return -EINVAL;
853 }
Eric Dumazet60b778c2011-12-24 06:56:49 +0000854#endif
855 table = vmalloc(RPS_DEV_FLOW_TABLE_SIZE(mask + 1));
Tom Herbertfec5e652010-04-16 16:01:27 -0700856 if (!table)
857 return -ENOMEM;
858
Eric Dumazet60b778c2011-12-24 06:56:49 +0000859 table->mask = mask;
860 for (count = 0; count <= mask; count++)
861 table->flows[count].cpu = RPS_NO_CPU;
stephen hemminger6648c652017-08-18 13:46:28 -0700862 } else {
Tom Herbertfec5e652010-04-16 16:01:27 -0700863 table = NULL;
stephen hemminger6648c652017-08-18 13:46:28 -0700864 }
Tom Herbertfec5e652010-04-16 16:01:27 -0700865
866 spin_lock(&rps_dev_flow_lock);
Eric Dumazet6e3f7fa2010-10-25 03:02:02 +0000867 old_table = rcu_dereference_protected(queue->rps_flow_table,
868 lockdep_is_held(&rps_dev_flow_lock));
Tom Herbertfec5e652010-04-16 16:01:27 -0700869 rcu_assign_pointer(queue->rps_flow_table, table);
870 spin_unlock(&rps_dev_flow_lock);
871
872 if (old_table)
873 call_rcu(&old_table->rcu, rps_dev_flow_table_release);
874
875 return len;
876}
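
/* rps_flow_cnt sizes the per-queue flow table used by accelerated RFS;
 * the count is rounded up to a power of two and 0 frees the table, e.g.
 * (placeholder names):
 *
 *	echo 4096 > /sys/class/net/eth0/queues/rx-0/rps_flow_cnt
 */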
877
stephen hemminger667e4272017-08-18 13:46:27 -0700878static struct rx_queue_attribute rps_cpus_attribute __ro_after_init
Joe Perchesd6444062018-03-23 15:54:38 -0700879 = __ATTR(rps_cpus, 0644, show_rps_map, store_rps_map);
Tom Herbert0a9627f2010-03-16 08:03:29 +0000880
stephen hemminger667e4272017-08-18 13:46:27 -0700881static struct rx_queue_attribute rps_dev_flow_table_cnt_attribute __ro_after_init
Joe Perchesd6444062018-03-23 15:54:38 -0700882 = __ATTR(rps_flow_cnt, 0644,
stephen hemminger667e4272017-08-18 13:46:27 -0700883 show_rps_dev_flow_table_cnt, store_rps_dev_flow_table_cnt);
Michael Daltona953be52014-01-16 22:23:28 -0800884#endif /* CONFIG_RPS */
Tom Herbertfec5e652010-04-16 16:01:27 -0700885
stephen hemminger667e4272017-08-18 13:46:27 -0700886static struct attribute *rx_queue_default_attrs[] __ro_after_init = {
Michael Daltona953be52014-01-16 22:23:28 -0800887#ifdef CONFIG_RPS
Tom Herbert0a9627f2010-03-16 08:03:29 +0000888 &rps_cpus_attribute.attr,
Tom Herbertfec5e652010-04-16 16:01:27 -0700889 &rps_dev_flow_table_cnt_attribute.attr,
Michael Daltona953be52014-01-16 22:23:28 -0800890#endif
Tom Herbert0a9627f2010-03-16 08:03:29 +0000891 NULL
892};
Kimberly Brownbe0d6922019-04-01 22:51:35 -0400893ATTRIBUTE_GROUPS(rx_queue_default);
Tom Herbert0a9627f2010-03-16 08:03:29 +0000894
895static void rx_queue_release(struct kobject *kobj)
896{
897 struct netdev_rx_queue *queue = to_rx_queue(kobj);
Michael Daltona953be52014-01-16 22:23:28 -0800898#ifdef CONFIG_RPS
Eric Dumazet6e3f7fa2010-10-25 03:02:02 +0000899 struct rps_map *map;
900 struct rps_dev_flow_table *flow_table;
Tom Herbert0a9627f2010-03-16 08:03:29 +0000901
Eric Dumazet33d480c2011-08-11 19:30:52 +0000902 map = rcu_dereference_protected(queue->rps_map, 1);
John Fastabend9ea19482010-11-16 06:31:39 +0000903 if (map) {
904 RCU_INIT_POINTER(queue->rps_map, NULL);
Lai Jiangshanf6f80232011-03-18 12:01:31 +0800905 kfree_rcu(map, rcu);
John Fastabend9ea19482010-11-16 06:31:39 +0000906 }
Eric Dumazet6e3f7fa2010-10-25 03:02:02 +0000907
Eric Dumazet33d480c2011-08-11 19:30:52 +0000908 flow_table = rcu_dereference_protected(queue->rps_flow_table, 1);
John Fastabend9ea19482010-11-16 06:31:39 +0000909 if (flow_table) {
910 RCU_INIT_POINTER(queue->rps_flow_table, NULL);
Eric Dumazet6e3f7fa2010-10-25 03:02:02 +0000911 call_rcu(&flow_table->rcu, rps_dev_flow_table_release);
John Fastabend9ea19482010-11-16 06:31:39 +0000912 }
Michael Daltona953be52014-01-16 22:23:28 -0800913#endif
Tom Herbert0a9627f2010-03-16 08:03:29 +0000914
John Fastabend9ea19482010-11-16 06:31:39 +0000915 memset(kobj, 0, sizeof(*kobj));
Tom Herbertfe822242010-11-09 10:47:38 +0000916 dev_put(queue->dev);
Tom Herbert0a9627f2010-03-16 08:03:29 +0000917}
918
Weilong Chen82ef3d52014-01-16 17:24:31 +0800919static const void *rx_queue_namespace(struct kobject *kobj)
920{
921 struct netdev_rx_queue *queue = to_rx_queue(kobj);
922 struct device *dev = &queue->dev->dev;
923 const void *ns = NULL;
924
925 if (dev->class && dev->class->ns_type)
926 ns = dev->class->namespace(dev);
927
928 return ns;
929}
930
Dmitry Torokhovb0e37c02018-07-20 21:56:52 +0000931static void rx_queue_get_ownership(struct kobject *kobj,
932 kuid_t *uid, kgid_t *gid)
933{
934 const struct net *net = rx_queue_namespace(kobj);
935
936 net_ns_get_ownership(net, uid, gid);
937}
938
stephen hemminger667e4272017-08-18 13:46:27 -0700939static struct kobj_type rx_queue_ktype __ro_after_init = {
Tom Herbert0a9627f2010-03-16 08:03:29 +0000940 .sysfs_ops = &rx_queue_sysfs_ops,
941 .release = rx_queue_release,
Kimberly Brownbe0d6922019-04-01 22:51:35 -0400942 .default_groups = rx_queue_default_groups,
Dmitry Torokhovb0e37c02018-07-20 21:56:52 +0000943 .namespace = rx_queue_namespace,
944 .get_ownership = rx_queue_get_ownership,
Tom Herbert0a9627f2010-03-16 08:03:29 +0000945};
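
/* Each RX queue is a kobject under /sys/class/net/<iface>/queues/rx-<n>/;
 * the namespace/get_ownership hooks above let those files be owned by the
 * user namespace that owns the device's network namespace, e.g. inside
 * containers.
 */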

static int rx_queue_add_kobject(struct net_device *dev, int index)
{
	struct netdev_rx_queue *queue = dev->_rx + index;
	struct kobject *kobj = &queue->kobj;
	int error = 0;

	/* Kobject_put later will trigger rx_queue_release call which
	 * decreases dev refcount: Take that reference here
	 */
	dev_hold(queue->dev);

	kobj->kset = dev->queues_kset;
	error = kobject_init_and_add(kobj, &rx_queue_ktype, NULL,
				     "rx-%u", index);
	if (error)
		goto err;

	if (dev->sysfs_rx_queue_group) {
		error = sysfs_create_group(kobj, dev->sysfs_rx_queue_group);
		if (error)
			goto err;
	}

	kobject_uevent(kobj, KOBJ_ADD);

	return error;

err:
	kobject_put(kobj);
	return error;
}

static int rx_queue_change_owner(struct net_device *dev, int index, kuid_t kuid,
				 kgid_t kgid)
{
	struct netdev_rx_queue *queue = dev->_rx + index;
	struct kobject *kobj = &queue->kobj;
	int error;

	error = sysfs_change_owner(kobj, kuid, kgid);
	if (error)
		return error;

	if (dev->sysfs_rx_queue_group)
		error = sysfs_group_change_owner(
			kobj, dev->sysfs_rx_queue_group, kuid, kgid);

	return error;
}
#endif /* CONFIG_SYSFS */

int
net_rx_queue_update_kobjects(struct net_device *dev, int old_num, int new_num)
{
#ifdef CONFIG_SYSFS
	int i;
	int error = 0;

#ifndef CONFIG_RPS
	if (!dev->sysfs_rx_queue_group)
		return 0;
#endif
	for (i = old_num; i < new_num; i++) {
		error = rx_queue_add_kobject(dev, i);
		if (error) {
			new_num = old_num;
			break;
		}
	}

	while (--i >= new_num) {
		struct kobject *kobj = &dev->_rx[i].kobj;

		if (!refcount_read(&dev_net(dev)->count))
			kobj->uevent_suppress = 1;
		if (dev->sysfs_rx_queue_group)
			sysfs_remove_group(kobj, dev->sysfs_rx_queue_group);
		kobject_put(kobj);
	}

	return error;
#else
	return 0;
#endif
}

static int net_rx_queue_change_owner(struct net_device *dev, int num,
				     kuid_t kuid, kgid_t kgid)
{
#ifdef CONFIG_SYSFS
	int error = 0;
	int i;

#ifndef CONFIG_RPS
	if (!dev->sysfs_rx_queue_group)
		return 0;
#endif
	for (i = 0; i < num; i++) {
		error = rx_queue_change_owner(dev, i, kuid, kgid);
		if (error)
			break;
	}

	return error;
#else
	return 0;
#endif
}

#ifdef CONFIG_SYSFS
/*
 * netdev_queue sysfs structures and functions.
 */
struct netdev_queue_attribute {
	struct attribute attr;
	ssize_t (*show)(struct netdev_queue *queue, char *buf);
	ssize_t (*store)(struct netdev_queue *queue,
			 const char *buf, size_t len);
};
#define to_netdev_queue_attr(_attr) \
	container_of(_attr, struct netdev_queue_attribute, attr)

#define to_netdev_queue(obj) container_of(obj, struct netdev_queue, kobj)

static ssize_t netdev_queue_attr_show(struct kobject *kobj,
				      struct attribute *attr, char *buf)
{
	const struct netdev_queue_attribute *attribute
		= to_netdev_queue_attr(attr);
	struct netdev_queue *queue = to_netdev_queue(kobj);

	if (!attribute->show)
		return -EIO;

	return attribute->show(queue, buf);
}

static ssize_t netdev_queue_attr_store(struct kobject *kobj,
				       struct attribute *attr,
				       const char *buf, size_t count)
{
	const struct netdev_queue_attribute *attribute
		= to_netdev_queue_attr(attr);
	struct netdev_queue *queue = to_netdev_queue(kobj);

	if (!attribute->store)
		return -EIO;

	return attribute->store(queue, buf, count);
}

static const struct sysfs_ops netdev_queue_sysfs_ops = {
	.show = netdev_queue_attr_show,
	.store = netdev_queue_attr_store,
};

static ssize_t tx_timeout_show(struct netdev_queue *queue, char *buf)
{
	unsigned long trans_timeout;

	spin_lock_irq(&queue->_xmit_lock);
	trans_timeout = queue->trans_timeout;
	spin_unlock_irq(&queue->_xmit_lock);

	return sprintf(buf, "%lu", trans_timeout);
}
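
/* tx_timeout reports how many times the netdev watchdog declared this
 * queue hung (queues/tx-<n>/tx_timeout); note the count is printed
 * without a trailing newline here.
 */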

static unsigned int get_netdev_queue_index(struct netdev_queue *queue)
{
	struct net_device *dev = queue->dev;
	unsigned int i;

	i = queue - dev->_tx;
	BUG_ON(i >= dev->num_tx_queues);

	return i;
}

static ssize_t traffic_class_show(struct netdev_queue *queue,
				  char *buf)
{
	struct net_device *dev = queue->dev;
	int index;
	int tc;

	if (!netif_is_multiqueue(dev))
		return -ENOENT;

	index = get_netdev_queue_index(queue);

	/* If queue belongs to subordinate dev use its TC mapping */
	dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev;

	tc = netdev_txq_to_tc(dev, index);
	if (tc < 0)
		return -EINVAL;

	/* We can report the traffic class one of two ways:
	 * Subordinate device traffic classes are reported with the traffic
	 * class first, and then the subordinate class so for example TC0 on
	 * subordinate device 2 will be reported as "0-2". If the queue
	 * belongs to the root device it will be reported with just the
	 * traffic class, so just "0" for TC 0 for example.
	 */
	return dev->num_tc < 0 ? sprintf(buf, "%u%d\n", tc, dev->num_tc) :
				 sprintf(buf, "%u\n", tc);
}

#ifdef CONFIG_XPS
static ssize_t tx_maxrate_show(struct netdev_queue *queue,
			       char *buf)
{
	return sprintf(buf, "%lu\n", queue->tx_maxrate);
}

static ssize_t tx_maxrate_store(struct netdev_queue *queue,
				const char *buf, size_t len)
{
	struct net_device *dev = queue->dev;
	int err, index = get_netdev_queue_index(queue);
	u32 rate = 0;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	err = kstrtou32(buf, 10, &rate);
	if (err < 0)
		return err;

	if (!rtnl_trylock())
		return restart_syscall();

	err = -EOPNOTSUPP;
	if (dev->netdev_ops->ndo_set_tx_maxrate)
		err = dev->netdev_ops->ndo_set_tx_maxrate(dev, index, rate);

	rtnl_unlock();
	if (!err) {
		queue->tx_maxrate = rate;
		return len;
	}
	return err;
}
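
/* tx_maxrate is a per-queue rate limit in Mbps handed to the driver's
 * ndo_set_tx_maxrate hook; e.g. (placeholder names)
 *
 *	echo 1000 > /sys/class/net/eth0/queues/tx-0/tx_maxrate
 *
 * and 0 restores the default, unlimited setting.
 */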

static struct netdev_queue_attribute queue_tx_maxrate __ro_after_init
	= __ATTR_RW(tx_maxrate);
#endif

static struct netdev_queue_attribute queue_trans_timeout __ro_after_init
	= __ATTR_RO(tx_timeout);

static struct netdev_queue_attribute queue_traffic_class __ro_after_init
	= __ATTR_RO(traffic_class);

#ifdef CONFIG_BQL
/*
 * Byte queue limits sysfs structures and functions.
 */
static ssize_t bql_show(char *buf, unsigned int value)
{
	return sprintf(buf, "%u\n", value);
}

static ssize_t bql_set(const char *buf, const size_t count,
		       unsigned int *pvalue)
{
	unsigned int value;
	int err;

	if (!strcmp(buf, "max") || !strcmp(buf, "max\n")) {
		value = DQL_MAX_LIMIT;
	} else {
		err = kstrtouint(buf, 10, &value);
		if (err < 0)
			return err;
		if (value > DQL_MAX_LIMIT)
			return -EINVAL;
	}

	*pvalue = value;

	return count;
}
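
/* The BQL knobs live under queues/tx-<n>/byte_queue_limits/; values are
 * in bytes and the literal string "max" maps to DQL_MAX_LIMIT, e.g.
 * (placeholder names):
 *
 *	echo 100000 > /sys/class/net/eth0/queues/tx-0/byte_queue_limits/limit_max
 */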
1230
1231static ssize_t bql_show_hold_time(struct netdev_queue *queue,
Tom Herbert114cf582011-11-28 16:33:09 +00001232 char *buf)
1233{
1234 struct dql *dql = &queue->dql;
1235
1236 return sprintf(buf, "%u\n", jiffies_to_msecs(dql->slack_hold_time));
1237}
1238
1239static ssize_t bql_set_hold_time(struct netdev_queue *queue,
Tom Herbert114cf582011-11-28 16:33:09 +00001240 const char *buf, size_t len)
1241{
1242 struct dql *dql = &queue->dql;
Eric Dumazet95c96172012-04-15 05:58:06 +00001243 unsigned int value;
Tom Herbert114cf582011-11-28 16:33:09 +00001244 int err;
1245
1246 err = kstrtouint(buf, 10, &value);
1247 if (err < 0)
1248 return err;
1249
1250 dql->slack_hold_time = msecs_to_jiffies(value);
1251
1252 return len;
1253}
1254
stephen hemminger170c6582017-08-18 13:46:25 -07001255static struct netdev_queue_attribute bql_hold_time_attribute __ro_after_init
Joe Perchesd6444062018-03-23 15:54:38 -07001256 = __ATTR(hold_time, 0644,
stephen hemminger170c6582017-08-18 13:46:25 -07001257 bql_show_hold_time, bql_set_hold_time);
Tom Herbert114cf582011-11-28 16:33:09 +00001258
1259static ssize_t bql_show_inflight(struct netdev_queue *queue,
Tom Herbert114cf582011-11-28 16:33:09 +00001260 char *buf)
1261{
1262 struct dql *dql = &queue->dql;
1263
1264 return sprintf(buf, "%u\n", dql->num_queued - dql->num_completed);
1265}
1266
stephen hemminger170c6582017-08-18 13:46:25 -07001267static struct netdev_queue_attribute bql_inflight_attribute __ro_after_init =
Joe Perchesd6444062018-03-23 15:54:38 -07001268 __ATTR(inflight, 0444, bql_show_inflight, NULL);
Tom Herbert114cf582011-11-28 16:33:09 +00001269
1270#define BQL_ATTR(NAME, FIELD) \
1271static ssize_t bql_show_ ## NAME(struct netdev_queue *queue, \
Tom Herbert114cf582011-11-28 16:33:09 +00001272 char *buf) \
1273{ \
1274 return bql_show(buf, queue->dql.FIELD); \
1275} \
1276 \
1277static ssize_t bql_set_ ## NAME(struct netdev_queue *queue, \
Tom Herbert114cf582011-11-28 16:33:09 +00001278 const char *buf, size_t len) \
1279{ \
1280 return bql_set(buf, len, &queue->dql.FIELD); \
1281} \
1282 \
stephen hemminger170c6582017-08-18 13:46:25 -07001283static struct netdev_queue_attribute bql_ ## NAME ## _attribute __ro_after_init \
Joe Perchesd6444062018-03-23 15:54:38 -07001284 = __ATTR(NAME, 0644, \
stephen hemminger170c6582017-08-18 13:46:25 -07001285 bql_show_ ## NAME, bql_set_ ## NAME)
Tom Herbert114cf582011-11-28 16:33:09 +00001286
stephen hemminger170c6582017-08-18 13:46:25 -07001287BQL_ATTR(limit, limit);
1288BQL_ATTR(limit_max, max_limit);
1289BQL_ATTR(limit_min, min_limit);
Tom Herbert114cf582011-11-28 16:33:09 +00001290
stephen hemminger170c6582017-08-18 13:46:25 -07001291static struct attribute *dql_attrs[] __ro_after_init = {
Tom Herbert114cf582011-11-28 16:33:09 +00001292 &bql_limit_attribute.attr,
1293 &bql_limit_max_attribute.attr,
1294 &bql_limit_min_attribute.attr,
1295 &bql_hold_time_attribute.attr,
1296 &bql_inflight_attribute.attr,
1297 NULL
1298};
1299
Arvind Yadav38ef00c2017-06-29 16:31:26 +05301300static const struct attribute_group dql_group = {
Tom Herbert114cf582011-11-28 16:33:09 +00001301 .name = "byte_queue_limits",
1302 .attrs = dql_attrs,
1303};
1304#endif /* CONFIG_BQL */
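
/* Illustrative layout (hedged; device name hypothetical): with CONFIG_BQL
 * set, dql_group above gives every tx queue a byte_queue_limits directory:
 *
 *   /sys/class/net/eth0/queues/tx-0/byte_queue_limits/{limit,limit_max,
 *                                    limit_min,hold_time,inflight}
 *
 *   # echo max > /sys/class/net/eth0/queues/tx-0/byte_queue_limits/limit_max
 *
 * where "max" is translated to DQL_MAX_LIMIT by bql_set() above.
 */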
1305
david decotignyccf5ff62011-11-16 12:15:10 +00001306#ifdef CONFIG_XPS
stephen hemminger2b9c7582017-08-18 13:46:26 -07001307static ssize_t xps_cpus_show(struct netdev_queue *queue,
1308 char *buf)
Tom Herbert1d24eb42010-11-21 13:17:27 +00001309{
1310 struct net_device *dev = queue->dev;
Alexander Duyck184c4492016-10-28 11:50:13 -04001311 int cpu, len, num_tc = 1, tc = 0;
Tom Herbert1d24eb42010-11-21 13:17:27 +00001312 struct xps_dev_maps *dev_maps;
1313 cpumask_var_t mask;
1314 unsigned long index;
Tom Herbert1d24eb42010-11-21 13:17:27 +00001315
Alexander Duyckd7be9772018-07-09 12:19:32 -04001316 if (!netif_is_multiqueue(dev))
1317 return -ENOENT;
1318
Tom Herbert1d24eb42010-11-21 13:17:27 +00001319 index = get_netdev_queue_index(queue);
1320
Alexander Duyck184c4492016-10-28 11:50:13 -04001321 if (dev->num_tc) {
Alexander Duyckffcfe252018-07-09 12:19:38 -04001322 /* Do not allow XPS on subordinate device directly */
Alexander Duyck184c4492016-10-28 11:50:13 -04001323 num_tc = dev->num_tc;
Alexander Duyckffcfe252018-07-09 12:19:38 -04001324 if (num_tc < 0)
1325 return -EINVAL;
1326
1327 /* If queue belongs to subordinate dev use its map */
1328 dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev;
1329
Alexander Duyck184c4492016-10-28 11:50:13 -04001330 tc = netdev_txq_to_tc(dev, index);
1331 if (tc < 0)
1332 return -EINVAL;
1333 }
1334
Alexander Duyck664088f2018-05-31 15:59:46 -04001335 if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
1336 return -ENOMEM;
1337
Tom Herbert1d24eb42010-11-21 13:17:27 +00001338 rcu_read_lock();
Amritha Nambiar80d19662018-06-29 21:26:41 -07001339 dev_maps = rcu_dereference(dev->xps_cpus_map);
Tom Herbert1d24eb42010-11-21 13:17:27 +00001340 if (dev_maps) {
Alexander Duyck184c4492016-10-28 11:50:13 -04001341 for_each_possible_cpu(cpu) {
1342 int i, tci = cpu * num_tc + tc;
1343 struct xps_map *map;
1344
Amritha Nambiar80d19662018-06-29 21:26:41 -07001345 map = rcu_dereference(dev_maps->attr_map[tci]);
Alexander Duyck184c4492016-10-28 11:50:13 -04001346 if (!map)
1347 continue;
1348
1349 for (i = map->len; i--;) {
1350 if (map->queues[i] == index) {
1351 cpumask_set_cpu(cpu, mask);
1352 break;
Tom Herbert1d24eb42010-11-21 13:17:27 +00001353 }
1354 }
1355 }
1356 }
1357 rcu_read_unlock();
1358
Tejun Heof0906822015-02-13 14:37:42 -08001359 len = snprintf(buf, PAGE_SIZE, "%*pb\n", cpumask_pr_args(mask));
Tom Herbert1d24eb42010-11-21 13:17:27 +00001360 free_cpumask_var(mask);
Tejun Heof0906822015-02-13 14:37:42 -08001361 return len < PAGE_SIZE ? len : -EINVAL;
Tom Herbert1d24eb42010-11-21 13:17:27 +00001362}
1363
stephen hemminger2b9c7582017-08-18 13:46:26 -07001364static ssize_t xps_cpus_store(struct netdev_queue *queue,
1365 const char *buf, size_t len)
Tom Herbert1d24eb42010-11-21 13:17:27 +00001366{
1367 struct net_device *dev = queue->dev;
Tom Herbert1d24eb42010-11-21 13:17:27 +00001368 unsigned long index;
Alexander Duyck537c00d2013-01-10 08:57:02 +00001369 cpumask_var_t mask;
1370 int err;
Tom Herbert1d24eb42010-11-21 13:17:27 +00001371
Alexander Duyckd7be9772018-07-09 12:19:32 -04001372 if (!netif_is_multiqueue(dev))
1373 return -ENOENT;
1374
Tom Herbert1d24eb42010-11-21 13:17:27 +00001375 if (!capable(CAP_NET_ADMIN))
1376 return -EPERM;
1377
1378 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
1379 return -ENOMEM;
1380
1381 index = get_netdev_queue_index(queue);
1382
1383 err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
1384 if (err) {
1385 free_cpumask_var(mask);
1386 return err;
1387 }
1388
Alexander Duyck537c00d2013-01-10 08:57:02 +00001389 err = netif_set_xps_queue(dev, mask, index);
Tom Herbert1d24eb42010-11-21 13:17:27 +00001390
1391 free_cpumask_var(mask);
Tom Herbert1d24eb42010-11-21 13:17:27 +00001392
Alexander Duyck537c00d2013-01-10 08:57:02 +00001393 return err ? : len;
Tom Herbert1d24eb42010-11-21 13:17:27 +00001394}
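
/* Illustrative example (hedged; interface name and mask hypothetical):
 * the value is a hex CPU mask parsed by bitmap_parse(), so pinning tx
 * queue 0 of "eth0" to CPUs 0-3 could look like:
 *
 *   # echo f > /sys/class/net/eth0/queues/tx-0/xps_cpus
 */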
1395
stephen hemminger2b9c7582017-08-18 13:46:26 -07001396static struct netdev_queue_attribute xps_cpus_attribute __ro_after_init
1397 = __ATTR_RW(xps_cpus);
Amritha Nambiar8af2c062018-06-29 21:27:07 -07001398
1399static ssize_t xps_rxqs_show(struct netdev_queue *queue, char *buf)
1400{
1401 struct net_device *dev = queue->dev;
1402 struct xps_dev_maps *dev_maps;
1403 unsigned long *mask, index;
1404 int j, len, num_tc = 1, tc = 0;
1405
1406 index = get_netdev_queue_index(queue);
1407
1408 if (dev->num_tc) {
1409 num_tc = dev->num_tc;
1410 tc = netdev_txq_to_tc(dev, index);
1411 if (tc < 0)
1412 return -EINVAL;
1413 }
Andy Shevchenko29ca1c52019-03-04 11:48:56 +02001414 mask = bitmap_zalloc(dev->num_rx_queues, GFP_KERNEL);
Amritha Nambiar8af2c062018-06-29 21:27:07 -07001415 if (!mask)
1416 return -ENOMEM;
1417
1418 rcu_read_lock();
1419 dev_maps = rcu_dereference(dev->xps_rxqs_map);
1420 if (!dev_maps)
1421 goto out_no_maps;
1422
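	/* Walk every rx queue index in [0, num_rx_queues): with a NULL mask,
	 * netif_attrmask_next() simply visits each index in turn.
	 */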
1423 for (j = -1; j = netif_attrmask_next(j, NULL, dev->num_rx_queues),
1424 j < dev->num_rx_queues;) {
1425 int i, tci = j * num_tc + tc;
1426 struct xps_map *map;
1427
1428 map = rcu_dereference(dev_maps->attr_map[tci]);
1429 if (!map)
1430 continue;
1431
1432 for (i = map->len; i--;) {
1433 if (map->queues[i] == index) {
1434 set_bit(j, mask);
1435 break;
1436 }
1437 }
1438 }
1439out_no_maps:
1440 rcu_read_unlock();
1441
1442 len = bitmap_print_to_pagebuf(false, buf, mask, dev->num_rx_queues);
Andy Shevchenko29ca1c52019-03-04 11:48:56 +02001443 bitmap_free(mask);
Amritha Nambiar8af2c062018-06-29 21:27:07 -07001444
1445 return len < PAGE_SIZE ? len : -EINVAL;
1446}
1447
1448static ssize_t xps_rxqs_store(struct netdev_queue *queue, const char *buf,
1449 size_t len)
1450{
1451 struct net_device *dev = queue->dev;
1452 struct net *net = dev_net(dev);
1453 unsigned long *mask, index;
1454 int err;
1455
1456 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
1457 return -EPERM;
1458
Andy Shevchenko29ca1c52019-03-04 11:48:56 +02001459 mask = bitmap_zalloc(dev->num_rx_queues, GFP_KERNEL);
Amritha Nambiar8af2c062018-06-29 21:27:07 -07001460 if (!mask)
1461 return -ENOMEM;
1462
1463 index = get_netdev_queue_index(queue);
1464
1465 err = bitmap_parse(buf, len, mask, dev->num_rx_queues);
1466 if (err) {
Andy Shevchenko29ca1c52019-03-04 11:48:56 +02001467 bitmap_free(mask);
Amritha Nambiar8af2c062018-06-29 21:27:07 -07001468 return err;
1469 }
1470
Andrei Vagin4d99f662018-08-08 20:07:35 -07001471 cpus_read_lock();
Amritha Nambiar8af2c062018-06-29 21:27:07 -07001472 err = __netif_set_xps_queue(dev, mask, index, true);
Andrei Vagin4d99f662018-08-08 20:07:35 -07001473 cpus_read_unlock();
1474
Andy Shevchenko29ca1c52019-03-04 11:48:56 +02001475 bitmap_free(mask);
Amritha Nambiar8af2c062018-06-29 21:27:07 -07001476 return err ? : len;
1477}
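
/* Illustrative example (hedged; interface name and mask hypothetical):
 * the value is a hex mask of rx queue indices, so selecting tx queue 0
 * of "eth0" for traffic received on rx queues 0 and 1 could look like:
 *
 *   # echo 3 > /sys/class/net/eth0/queues/tx-0/xps_rxqs
 */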
1478
1479static struct netdev_queue_attribute xps_rxqs_attribute __ro_after_init
1480 = __ATTR_RW(xps_rxqs);
david decotignyccf5ff62011-11-16 12:15:10 +00001481#endif /* CONFIG_XPS */
Tom Herbert1d24eb42010-11-21 13:17:27 +00001482
stephen hemminger2b9c7582017-08-18 13:46:26 -07001483static struct attribute *netdev_queue_default_attrs[] __ro_after_init = {
david decotignyccf5ff62011-11-16 12:15:10 +00001484 &queue_trans_timeout.attr,
Alexander Duyck8d059b02016-10-28 11:43:49 -04001485 &queue_traffic_class.attr,
david decotignyccf5ff62011-11-16 12:15:10 +00001486#ifdef CONFIG_XPS
Tom Herbert1d24eb42010-11-21 13:17:27 +00001487 &xps_cpus_attribute.attr,
Amritha Nambiar8af2c062018-06-29 21:27:07 -07001488 &xps_rxqs_attribute.attr,
John Fastabend822b3b22015-03-18 14:57:33 +02001489 &queue_tx_maxrate.attr,
david decotignyccf5ff62011-11-16 12:15:10 +00001490#endif
Tom Herbert1d24eb42010-11-21 13:17:27 +00001491 NULL
1492};
Kimberly Brownbe0d6922019-04-01 22:51:35 -04001493ATTRIBUTE_GROUPS(netdev_queue_default);
Tom Herbert1d24eb42010-11-21 13:17:27 +00001494
1495static void netdev_queue_release(struct kobject *kobj)
1496{
1497 struct netdev_queue *queue = to_netdev_queue(kobj);
Tom Herbert1d24eb42010-11-21 13:17:27 +00001498
Tom Herbert1d24eb42010-11-21 13:17:27 +00001499 memset(kobj, 0, sizeof(*kobj));
1500 dev_put(queue->dev);
1501}
1502
Weilong Chen82ef3d52014-01-16 17:24:31 +08001503static const void *netdev_queue_namespace(struct kobject *kobj)
1504{
1505 struct netdev_queue *queue = to_netdev_queue(kobj);
1506 struct device *dev = &queue->dev->dev;
1507 const void *ns = NULL;
1508
1509 if (dev->class && dev->class->ns_type)
1510 ns = dev->class->namespace(dev);
1511
1512 return ns;
1513}
1514
Dmitry Torokhovb0e37c02018-07-20 21:56:52 +00001515static void netdev_queue_get_ownership(struct kobject *kobj,
1516 kuid_t *uid, kgid_t *gid)
1517{
1518 const struct net *net = netdev_queue_namespace(kobj);
1519
1520 net_ns_get_ownership(net, uid, gid);
1521}
1522
stephen hemminger2b9c7582017-08-18 13:46:26 -07001523static struct kobj_type netdev_queue_ktype __ro_after_init = {
Tom Herbert1d24eb42010-11-21 13:17:27 +00001524 .sysfs_ops = &netdev_queue_sysfs_ops,
1525 .release = netdev_queue_release,
Kimberly Brownbe0d6922019-04-01 22:51:35 -04001526 .default_groups = netdev_queue_default_groups,
Weilong Chen82ef3d52014-01-16 17:24:31 +08001527 .namespace = netdev_queue_namespace,
Dmitry Torokhovb0e37c02018-07-20 21:56:52 +00001528 .get_ownership = netdev_queue_get_ownership,
Tom Herbert1d24eb42010-11-21 13:17:27 +00001529};
1530
WANG Cong6b53daf2014-07-23 16:09:10 -07001531static int netdev_queue_add_kobject(struct net_device *dev, int index)
Tom Herbert1d24eb42010-11-21 13:17:27 +00001532{
WANG Cong6b53daf2014-07-23 16:09:10 -07001533 struct netdev_queue *queue = dev->_tx + index;
Tom Herbert1d24eb42010-11-21 13:17:27 +00001534 struct kobject *kobj = &queue->kobj;
1535 int error = 0;
1536
Jouni Hogandere0b609032019-12-05 15:57:07 +02001537	/* A later kobject_put() will trigger a netdev_queue_release() call,
1538	 * which drops the dev refcount: take that reference here.
1539	 */
1540 dev_hold(queue->dev);
1541
WANG Cong6b53daf2014-07-23 16:09:10 -07001542 kobj->kset = dev->queues_kset;
Tom Herbert1d24eb42010-11-21 13:17:27 +00001543 error = kobject_init_and_add(kobj, &netdev_queue_ktype, NULL,
stephen hemminger6648c652017-08-18 13:46:28 -07001544 "tx-%u", index);
Tom Herbert114cf582011-11-28 16:33:09 +00001545 if (error)
Jouni Hoganderb8eb7182019-11-20 09:08:16 +02001546 goto err;
Tom Herbert114cf582011-11-28 16:33:09 +00001547
1548#ifdef CONFIG_BQL
1549 error = sysfs_create_group(kobj, &dql_group);
Jouni Hoganderb8eb7182019-11-20 09:08:16 +02001550 if (error)
1551 goto err;
Tom Herbert114cf582011-11-28 16:33:09 +00001552#endif
Tom Herbert1d24eb42010-11-21 13:17:27 +00001553
1554 kobject_uevent(kobj, KOBJ_ADD);
Eric Dumazet48a322b2019-11-20 19:19:07 -08001555 return 0;
Tom Herbert1d24eb42010-11-21 13:17:27 +00001556
Jouni Hoganderb8eb7182019-11-20 09:08:16 +02001557err:
1558 kobject_put(kobj);
1559 return error;
Tom Herbert1d24eb42010-11-21 13:17:27 +00001560}
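
/* Illustrative result (hedged): for tx queue <n> of device <dev>, the
 * kobject added above appears as /sys/class/net/<dev>/queues/tx-<n>/,
 * populated with the default attributes and, under CONFIG_BQL, the
 * byte_queue_limits group.
 */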
Christian Braunerd7554072020-02-27 04:37:18 +01001561
1562static int tx_queue_change_owner(struct net_device *ndev, int index,
1563 kuid_t kuid, kgid_t kgid)
1564{
1565 struct netdev_queue *queue = ndev->_tx + index;
1566 struct kobject *kobj = &queue->kobj;
1567 int error;
1568
1569 error = sysfs_change_owner(kobj, kuid, kgid);
1570 if (error)
1571 return error;
1572
1573#ifdef CONFIG_BQL
1574 error = sysfs_group_change_owner(kobj, &dql_group, kuid, kgid);
1575#endif
1576 return error;
1577}
david decotignyccf5ff62011-11-16 12:15:10 +00001578#endif /* CONFIG_SYSFS */
Tom Herbert1d24eb42010-11-21 13:17:27 +00001579
1580int
WANG Cong6b53daf2014-07-23 16:09:10 -07001581netdev_queue_update_kobjects(struct net_device *dev, int old_num, int new_num)
Tom Herbert1d24eb42010-11-21 13:17:27 +00001582{
david decotignyccf5ff62011-11-16 12:15:10 +00001583#ifdef CONFIG_SYSFS
Tom Herbert1d24eb42010-11-21 13:17:27 +00001584 int i;
1585 int error = 0;
1586
1587 for (i = old_num; i < new_num; i++) {
WANG Cong6b53daf2014-07-23 16:09:10 -07001588 error = netdev_queue_add_kobject(dev, i);
Tom Herbert1d24eb42010-11-21 13:17:27 +00001589 if (error) {
1590 new_num = old_num;
1591 break;
1592 }
1593 }
1594
Tom Herbert114cf582011-11-28 16:33:09 +00001595 while (--i >= new_num) {
WANG Cong6b53daf2014-07-23 16:09:10 -07001596 struct netdev_queue *queue = dev->_tx + i;
Tom Herbert114cf582011-11-28 16:33:09 +00001597
Kirill Tkhai273c28b2018-01-12 18:28:31 +03001598 if (!refcount_read(&dev_net(dev)->count))
Andrey Vagin002d8a12016-10-24 19:09:53 -07001599 queue->kobj.uevent_suppress = 1;
Tom Herbert114cf582011-11-28 16:33:09 +00001600#ifdef CONFIG_BQL
1601 sysfs_remove_group(&queue->kobj, &dql_group);
1602#endif
1603 kobject_put(&queue->kobj);
1604 }
Tom Herbert1d24eb42010-11-21 13:17:27 +00001605
1606 return error;
Tom Herbertbf264142010-11-26 08:36:09 +00001607#else
1608 return 0;
david decotignyccf5ff62011-11-16 12:15:10 +00001609#endif /* CONFIG_SYSFS */
Tom Herbert1d24eb42010-11-21 13:17:27 +00001610}
1611
Christian Braunerd7554072020-02-27 04:37:18 +01001612static int net_tx_queue_change_owner(struct net_device *dev, int num,
1613 kuid_t kuid, kgid_t kgid)
1614{
1615#ifdef CONFIG_SYSFS
1616 int error = 0;
1617 int i;
1618
1619 for (i = 0; i < num; i++) {
1620 error = tx_queue_change_owner(dev, i, kuid, kgid);
1621 if (error)
1622 break;
1623 }
1624
1625 return error;
1626#else
1627 return 0;
1628#endif /* CONFIG_SYSFS */
1629}
1630
WANG Cong6b53daf2014-07-23 16:09:10 -07001631static int register_queue_kobjects(struct net_device *dev)
Tom Herbert1d24eb42010-11-21 13:17:27 +00001632{
Tom Herbertbf264142010-11-26 08:36:09 +00001633 int error = 0, txq = 0, rxq = 0, real_rx = 0, real_tx = 0;
Tom Herbert1d24eb42010-11-21 13:17:27 +00001634
david decotignyccf5ff62011-11-16 12:15:10 +00001635#ifdef CONFIG_SYSFS
WANG Cong6b53daf2014-07-23 16:09:10 -07001636 dev->queues_kset = kset_create_and_add("queues",
stephen hemminger6648c652017-08-18 13:46:28 -07001637 NULL, &dev->dev.kobj);
WANG Cong6b53daf2014-07-23 16:09:10 -07001638 if (!dev->queues_kset)
Ben Hutchings62fe0b42010-09-27 08:24:33 +00001639 return -ENOMEM;
WANG Cong6b53daf2014-07-23 16:09:10 -07001640 real_rx = dev->real_num_rx_queues;
Tom Herbertbf264142010-11-26 08:36:09 +00001641#endif
WANG Cong6b53daf2014-07-23 16:09:10 -07001642 real_tx = dev->real_num_tx_queues;
Tom Herbertbf264142010-11-26 08:36:09 +00001643
WANG Cong6b53daf2014-07-23 16:09:10 -07001644 error = net_rx_queue_update_kobjects(dev, 0, real_rx);
Tom Herbert1d24eb42010-11-21 13:17:27 +00001645 if (error)
1646 goto error;
Tom Herbertbf264142010-11-26 08:36:09 +00001647 rxq = real_rx;
Tom Herbert1d24eb42010-11-21 13:17:27 +00001648
WANG Cong6b53daf2014-07-23 16:09:10 -07001649 error = netdev_queue_update_kobjects(dev, 0, real_tx);
Tom Herbert1d24eb42010-11-21 13:17:27 +00001650 if (error)
1651 goto error;
Tom Herbertbf264142010-11-26 08:36:09 +00001652 txq = real_tx;
Tom Herbert1d24eb42010-11-21 13:17:27 +00001653
1654 return 0;
1655
1656error:
WANG Cong6b53daf2014-07-23 16:09:10 -07001657 netdev_queue_update_kobjects(dev, txq, 0);
1658 net_rx_queue_update_kobjects(dev, rxq, 0);
YueHaibing895a5e92019-03-02 10:34:55 +08001659#ifdef CONFIG_SYSFS
1660 kset_unregister(dev->queues_kset);
1661#endif
Tom Herbert1d24eb42010-11-21 13:17:27 +00001662 return error;
Ben Hutchings62fe0b42010-09-27 08:24:33 +00001663}
1664
Christian Braunerd7554072020-02-27 04:37:18 +01001665static int queue_change_owner(struct net_device *ndev, kuid_t kuid, kgid_t kgid)
1666{
1667 int error = 0, real_rx = 0, real_tx = 0;
1668
1669#ifdef CONFIG_SYSFS
1670 if (ndev->queues_kset) {
1671 error = sysfs_change_owner(&ndev->queues_kset->kobj, kuid, kgid);
1672 if (error)
1673 return error;
1674 }
1675 real_rx = ndev->real_num_rx_queues;
1676#endif
1677 real_tx = ndev->real_num_tx_queues;
1678
1679 error = net_rx_queue_change_owner(ndev, real_rx, kuid, kgid);
1680 if (error)
1681 return error;
1682
1683 error = net_tx_queue_change_owner(ndev, real_tx, kuid, kgid);
1684 if (error)
1685 return error;
1686
1687 return 0;
1688}
1689
WANG Cong6b53daf2014-07-23 16:09:10 -07001690static void remove_queue_kobjects(struct net_device *dev)
Tom Herbert0a9627f2010-03-16 08:03:29 +00001691{
Tom Herbertbf264142010-11-26 08:36:09 +00001692 int real_rx = 0, real_tx = 0;
1693
Michael Daltona953be52014-01-16 22:23:28 -08001694#ifdef CONFIG_SYSFS
WANG Cong6b53daf2014-07-23 16:09:10 -07001695 real_rx = dev->real_num_rx_queues;
Tom Herbertbf264142010-11-26 08:36:09 +00001696#endif
WANG Cong6b53daf2014-07-23 16:09:10 -07001697 real_tx = dev->real_num_tx_queues;
Tom Herbertbf264142010-11-26 08:36:09 +00001698
WANG Cong6b53daf2014-07-23 16:09:10 -07001699 net_rx_queue_update_kobjects(dev, real_rx, 0);
1700 netdev_queue_update_kobjects(dev, real_tx, 0);
david decotignyccf5ff62011-11-16 12:15:10 +00001701#ifdef CONFIG_SYSFS
WANG Cong6b53daf2014-07-23 16:09:10 -07001702 kset_unregister(dev->queues_kset);
Tom Herbertbf264142010-11-26 08:36:09 +00001703#endif
Tom Herbert0a9627f2010-03-16 08:03:29 +00001704}
Eric W. Biederman608b4b92010-05-04 17:36:45 -07001705
Eric W. Biederman7dc5dbc2013-03-25 20:07:01 -07001706static bool net_current_may_mount(void)
1707{
1708 struct net *net = current->nsproxy->net_ns;
1709
1710 return ns_capable(net->user_ns, CAP_SYS_ADMIN);
1711}
1712
Al Viroa685e082011-06-08 21:13:01 -04001713static void *net_grab_current_ns(void)
Eric W. Biederman608b4b92010-05-04 17:36:45 -07001714{
Al Viroa685e082011-06-08 21:13:01 -04001715 struct net *ns = current->nsproxy->net_ns;
1716#ifdef CONFIG_NET_NS
1717 if (ns)
Reshetova, Elenac122e142017-06-30 13:08:08 +03001718 refcount_inc(&ns->passive);
Al Viroa685e082011-06-08 21:13:01 -04001719#endif
1720 return ns;
Eric W. Biederman608b4b92010-05-04 17:36:45 -07001721}
1722
1723static const void *net_initial_ns(void)
1724{
1725 return &init_net;
1726}
1727
1728static const void *net_netlink_ns(struct sock *sk)
1729{
1730 return sock_net(sk);
1731}
1732
stephen hemminger737aec52017-08-18 13:46:22 -07001733const struct kobj_ns_type_operations net_ns_type_operations = {
Eric W. Biederman608b4b92010-05-04 17:36:45 -07001734 .type = KOBJ_NS_TYPE_NET,
Eric W. Biederman7dc5dbc2013-03-25 20:07:01 -07001735 .current_may_mount = net_current_may_mount,
Al Viroa685e082011-06-08 21:13:01 -04001736 .grab_current_ns = net_grab_current_ns,
Eric W. Biederman608b4b92010-05-04 17:36:45 -07001737 .netlink_ns = net_netlink_ns,
1738 .initial_ns = net_initial_ns,
Al Viroa685e082011-06-08 21:13:01 -04001739 .drop_ns = net_drop_ns,
Eric W. Biederman608b4b92010-05-04 17:36:45 -07001740};
Johannes Berg04600792010-08-05 17:45:15 +02001741EXPORT_SYMBOL_GPL(net_ns_type_operations);
Eric W. Biederman608b4b92010-05-04 17:36:45 -07001742
Kay Sievers7eff2e72007-08-14 15:15:12 +02001743static int netdev_uevent(struct device *d, struct kobj_uevent_env *env)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001744{
Greg Kroah-Hartman43cb76d2002-04-09 12:14:34 -07001745 struct net_device *dev = to_net_dev(d);
Kay Sievers7eff2e72007-08-14 15:15:12 +02001746 int retval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001747
Kay Sievers312c0042005-11-16 09:00:00 +01001748 /* pass interface to uevent. */
Kay Sievers7eff2e72007-08-14 15:15:12 +02001749 retval = add_uevent_var(env, "INTERFACE=%s", dev->name);
Eric Rannaudbf624562007-03-30 22:23:12 -07001750 if (retval)
1751 goto exit;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001752
Jean Tourrilhesca2f37d2007-03-07 10:49:30 -08001753	/* pass ifindex to uevent.
1754	 * ifindex is useful as it won't change (the interface name may change)
stephen hemminger6648c652017-08-18 13:46:28 -07001755	 * and is what rtnetlink uses natively.
1756	 */
Kay Sievers7eff2e72007-08-14 15:15:12 +02001757 retval = add_uevent_var(env, "IFINDEX=%d", dev->ifindex);
Jean Tourrilhesca2f37d2007-03-07 10:49:30 -08001758
Eric Rannaudbf624562007-03-30 22:23:12 -07001759exit:
Eric Rannaudbf624562007-03-30 22:23:12 -07001760 return retval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001761}
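
/* Illustrative uevent payload (hedged; names hypothetical): for "eth0"
 * with ifindex 2, the variables added above reach userspace as:
 *
 *   INTERFACE=eth0
 *   IFINDEX=2
 */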
Linus Torvalds1da177e2005-04-16 15:20:36 -07001762
1763/*
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001764 * netdev_release -- destroy and free a dead device.
Greg Kroah-Hartman43cb76d2002-04-09 12:14:34 -07001765 * Called when last reference to device kobject is gone.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001766 */
Greg Kroah-Hartman43cb76d2002-04-09 12:14:34 -07001767static void netdev_release(struct device *d)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001768{
Greg Kroah-Hartman43cb76d2002-04-09 12:14:34 -07001769 struct net_device *dev = to_net_dev(d);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001770
1771 BUG_ON(dev->reg_state != NETREG_RELEASED);
1772
Florian Westphal6c557002017-10-02 23:50:05 +02001773 /* no need to wait for rcu grace period:
1774 * device is dead and about to be freed.
1775 */
1776 kfree(rcu_access_pointer(dev->ifalias));
Eric Dumazet74d332c2013-10-30 13:10:44 -07001777 netdev_freemem(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001778}
1779
Eric W. Biederman608b4b92010-05-04 17:36:45 -07001780static const void *net_namespace(struct device *d)
1781{
Geliang Tang5c294822015-12-22 23:11:49 +08001782 struct net_device *dev = to_net_dev(d);
1783
Eric W. Biederman608b4b92010-05-04 17:36:45 -07001784 return dev_net(dev);
1785}
1786
Dmitry Torokhovb0e37c02018-07-20 21:56:52 +00001787static void net_get_ownership(struct device *d, kuid_t *uid, kgid_t *gid)
1788{
1789 struct net_device *dev = to_net_dev(d);
1790 const struct net *net = dev_net(dev);
1791
1792 net_ns_get_ownership(net, uid, gid);
1793}
1794
stephen hemmingere6d473e2017-08-18 13:46:21 -07001795static struct class net_class __ro_after_init = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001796 .name = "net",
Greg Kroah-Hartman43cb76d2002-04-09 12:14:34 -07001797 .dev_release = netdev_release,
Greg Kroah-Hartman6be8aee2013-07-24 15:05:33 -07001798 .dev_groups = net_class_groups,
Greg Kroah-Hartman43cb76d2002-04-09 12:14:34 -07001799 .dev_uevent = netdev_uevent,
Eric W. Biederman608b4b92010-05-04 17:36:45 -07001800 .ns_type = &net_ns_type_operations,
1801 .namespace = net_namespace,
Dmitry Torokhovb0e37c02018-07-20 21:56:52 +00001802 .get_ownership = net_get_ownership,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001803};
1804
Florian Fainelliaa836df2015-03-09 14:31:20 -07001805#ifdef CONFIG_OF_NET
1806static int of_dev_node_match(struct device *dev, const void *data)
1807{
Tobias Waldekranz2e186a22020-05-15 11:52:52 +02001808 for (; dev; dev = dev->parent) {
1809 if (dev->of_node == data)
1810 return 1;
1811 }
Florian Fainelliaa836df2015-03-09 14:31:20 -07001812
Tobias Waldekranz2e186a22020-05-15 11:52:52 +02001813 return 0;
Florian Fainelliaa836df2015-03-09 14:31:20 -07001814}
1815
Russell King9861f722015-09-24 20:36:33 +01001816/**
1817 * of_find_net_device_by_node - lookup the net device for the device node
1818 * @np: OF device node
1819 *
1820 * Looks up the net_device structure corresponding with the device node.
1821 * If successful, returns a pointer to the net_device with the embedded
1822 * struct device refcount incremented by one, or NULL on failure. The
1823 * refcount must be dropped when done with the net_device.
1824 */
Florian Fainelliaa836df2015-03-09 14:31:20 -07001825struct net_device *of_find_net_device_by_node(struct device_node *np)
1826{
1827 struct device *dev;
1828
1829 dev = class_find_device(&net_class, NULL, np, of_dev_node_match);
1830 if (!dev)
1831 return NULL;
1832
1833 return to_net_dev(dev);
1834}
1835EXPORT_SYMBOL(of_find_net_device_by_node);
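
/* Illustrative usage (hedged): a caller holding an OF node "np" must drop
 * the embedded struct device reference once done with the result:
 *
 *   struct net_device *ndev = of_find_net_device_by_node(np);
 *   if (ndev) {
 *           // ... use ndev ...
 *           put_device(&ndev->dev);
 *   }
 */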
1836#endif
1837
Stephen Hemminger9093bbb2007-05-19 15:39:25 -07001838/* Delete sysfs entries but hold kobject reference until after all
1839 * netdev references are gone.
1840 */
WANG Cong6b53daf2014-07-23 16:09:10 -07001841void netdev_unregister_kobject(struct net_device *ndev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001842{
stephen hemminger6648c652017-08-18 13:46:28 -07001843 struct device *dev = &ndev->dev;
Stephen Hemminger9093bbb2007-05-19 15:39:25 -07001844
Kirill Tkhai273c28b2018-01-12 18:28:31 +03001845 if (!refcount_read(&dev_net(ndev)->count))
Andrey Vagin002d8a12016-10-24 19:09:53 -07001846 dev_set_uevent_suppress(dev, 1);
1847
Stephen Hemminger9093bbb2007-05-19 15:39:25 -07001848 kobject_get(&dev->kobj);
Eric W. Biederman38918452008-10-27 17:51:47 -07001849
WANG Cong6b53daf2014-07-23 16:09:10 -07001850 remove_queue_kobjects(ndev);
Tom Herbert0a9627f2010-03-16 08:03:29 +00001851
Ming Lei9802c8e2013-02-22 16:34:16 -08001852 pm_runtime_set_memalloc_noio(dev, false);
1853
Stephen Hemminger9093bbb2007-05-19 15:39:25 -07001854 device_del(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001855}
1856
1857/* Create sysfs entries for network device. */
WANG Cong6b53daf2014-07-23 16:09:10 -07001858int netdev_register_kobject(struct net_device *ndev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001859{
stephen hemminger6648c652017-08-18 13:46:28 -07001860 struct device *dev = &ndev->dev;
WANG Cong6b53daf2014-07-23 16:09:10 -07001861 const struct attribute_group **groups = ndev->sysfs_groups;
Tom Herbert0a9627f2010-03-16 08:03:29 +00001862 int error = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001863
Eric W. Biedermana1b3f592010-05-04 17:36:49 -07001864 device_initialize(dev);
Greg Kroah-Hartman43cb76d2002-04-09 12:14:34 -07001865 dev->class = &net_class;
WANG Cong6b53daf2014-07-23 16:09:10 -07001866 dev->platform_data = ndev;
Greg Kroah-Hartman43cb76d2002-04-09 12:14:34 -07001867 dev->groups = groups;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001868
WANG Cong6b53daf2014-07-23 16:09:10 -07001869 dev_set_name(dev, "%s", ndev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001870
Eric W. Biederman8b41d182007-09-26 22:02:53 -07001871#ifdef CONFIG_SYSFS
Eric W. Biederman0c509a62009-10-29 14:18:21 +00001872 /* Allow for a device specific group */
1873 if (*groups)
1874 groups++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001875
Eric W. Biederman0c509a62009-10-29 14:18:21 +00001876 *groups++ = &netstat_group;
Johannes Berg38c1a012012-11-16 20:46:19 +01001877
1878#if IS_ENABLED(CONFIG_WIRELESS_EXT) || IS_ENABLED(CONFIG_CFG80211)
WANG Cong6b53daf2014-07-23 16:09:10 -07001879 if (ndev->ieee80211_ptr)
Johannes Berg38c1a012012-11-16 20:46:19 +01001880 *groups++ = &wireless_group;
1881#if IS_ENABLED(CONFIG_WIRELESS_EXT)
WANG Cong6b53daf2014-07-23 16:09:10 -07001882 else if (ndev->wireless_handlers)
Johannes Berg38c1a012012-11-16 20:46:19 +01001883 *groups++ = &wireless_group;
1884#endif
1885#endif
Eric W. Biederman8b41d182007-09-26 22:02:53 -07001886#endif /* CONFIG_SYSFS */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001887
Tom Herbert0a9627f2010-03-16 08:03:29 +00001888 error = device_add(dev);
1889 if (error)
Wang Hai8ed633b2019-04-12 16:36:33 -04001890 return error;
Tom Herbert0a9627f2010-03-16 08:03:29 +00001891
WANG Cong6b53daf2014-07-23 16:09:10 -07001892 error = register_queue_kobjects(ndev);
Wang Hai8ed633b2019-04-12 16:36:33 -04001893 if (error) {
1894 device_del(dev);
1895 return error;
1896 }
Tom Herbert0a9627f2010-03-16 08:03:29 +00001897
Ming Lei9802c8e2013-02-22 16:34:16 -08001898 pm_runtime_set_memalloc_noio(dev, true);
1899
Tom Herbert0a9627f2010-03-16 08:03:29 +00001900 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001901}
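
/* Illustrative result (hedged): successful registration creates
 * /sys/class/net/<dev> carrying the device-specific groups, the netstat
 * group, and the per-queue kobjects registered above.
 */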
1902
Christian Braunere6dee9f2020-02-27 04:37:17 +01001903/* Change owner for sysfs entries when moving network devices across network
1904 * namespaces owned by different user namespaces.
1905 */
1906int netdev_change_owner(struct net_device *ndev, const struct net *net_old,
1907 const struct net *net_new)
1908{
1909 struct device *dev = &ndev->dev;
1910 kuid_t old_uid, new_uid;
1911 kgid_t old_gid, new_gid;
1912 int error;
1913
1914 net_ns_get_ownership(net_old, &old_uid, &old_gid);
1915 net_ns_get_ownership(net_new, &new_uid, &new_gid);
1916
1917 /* The network namespace was changed but the owning user namespace is
1918 * identical so there's no need to change the owner of sysfs entries.
1919 */
1920 if (uid_eq(old_uid, new_uid) && gid_eq(old_gid, new_gid))
1921 return 0;
1922
1923 error = device_change_owner(dev, new_uid, new_gid);
1924 if (error)
1925 return error;
1926
Christian Braunerd7554072020-02-27 04:37:18 +01001927 error = queue_change_owner(ndev, new_uid, new_gid);
1928 if (error)
1929 return error;
1930
Christian Braunere6dee9f2020-02-27 04:37:17 +01001931 return 0;
1932}
1933
stephen hemmingerb793dc52017-08-18 13:46:20 -07001934int netdev_class_create_file_ns(const struct class_attribute *class_attr,
Tejun Heo58292cbe2013-09-11 22:29:04 -04001935 const void *ns)
Jay Vosburghb8a97872008-06-13 18:12:04 -07001936{
Tejun Heo58292cbe2013-09-11 22:29:04 -04001937 return class_create_file_ns(&net_class, class_attr, ns);
Jay Vosburghb8a97872008-06-13 18:12:04 -07001938}
Tejun Heo58292cbe2013-09-11 22:29:04 -04001939EXPORT_SYMBOL(netdev_class_create_file_ns);
Jay Vosburghb8a97872008-06-13 18:12:04 -07001940
stephen hemmingerb793dc52017-08-18 13:46:20 -07001941void netdev_class_remove_file_ns(const struct class_attribute *class_attr,
Tejun Heo58292cbe2013-09-11 22:29:04 -04001942 const void *ns)
Jay Vosburghb8a97872008-06-13 18:12:04 -07001943{
Tejun Heo58292cbe2013-09-11 22:29:04 -04001944 class_remove_file_ns(&net_class, class_attr, ns);
Jay Vosburghb8a97872008-06-13 18:12:04 -07001945}
Tejun Heo58292cbe2013-09-11 22:29:04 -04001946EXPORT_SYMBOL(netdev_class_remove_file_ns);
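
/* Illustrative usage (hedged): a driver can register a per-netns class
 * attribute with the netns as the tag, as the bonding driver does for
 * its "bonding_masters" attribute:
 *
 *   err = netdev_class_create_file_ns(&class_attr_bonding_masters, net);
 */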
Jay Vosburghb8a97872008-06-13 18:12:04 -07001947
Daniel Borkmanna48d4bb2014-01-06 01:20:11 +01001948int __init netdev_kobject_init(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001949{
Eric W. Biederman608b4b92010-05-04 17:36:45 -07001950 kobj_ns_type_register(&net_ns_type_operations);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001951 return class_register(&net_class);
1952}