// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net-sysfs.c - network device class and attributes
 *
 * Copyright (c) 2003 Stephen Hemminger <shemminger@osdl.org>
 */

#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/sched/isolation.h>
#include <linux/nsproxy.h>
#include <net/sock.h>
#include <net/net_namespace.h>
#include <linux/rtnetlink.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <linux/jiffies.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/cpu.h>

#include "net-sysfs.h"

#ifdef CONFIG_SYSFS
static const char fmt_hex[] = "%#x\n";
static const char fmt_dec[] = "%d\n";
static const char fmt_ulong[] = "%lu\n";
static const char fmt_u64[] = "%llu\n";

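/* A netdev is visible through these attributes only while its reg_state is
 * at or before NETREG_REGISTERED; once unregistration starts, the show/store
 * helpers below return -EINVAL instead of touching a half-torn-down device.
 */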
static inline int dev_isalive(const struct net_device *dev)
{
	return dev->reg_state <= NETREG_REGISTERED;
}

/* use same locking rules as GIF* ioctl's */
static ssize_t netdev_show(const struct device *dev,
			   struct device_attribute *attr, char *buf,
			   ssize_t (*format)(const struct net_device *, char *))
{
	struct net_device *ndev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	read_lock(&dev_base_lock);
	if (dev_isalive(ndev))
		ret = (*format)(ndev, buf);
	read_unlock(&dev_base_lock);

	return ret;
}

/* generate a show function for simple field */
#define NETDEVICE_SHOW(field, format_string)				\
static ssize_t format_##field(const struct net_device *dev, char *buf)	\
{									\
	return sprintf(buf, format_string, dev->field);			\
}									\
static ssize_t field##_show(struct device *dev,				\
			    struct device_attribute *attr, char *buf)	\
{									\
	return netdev_show(dev, attr, buf, format_##field);		\
}									\

#define NETDEVICE_SHOW_RO(field, format_string)				\
NETDEVICE_SHOW(field, format_string);					\
static DEVICE_ATTR_RO(field)

#define NETDEVICE_SHOW_RW(field, format_string)				\
NETDEVICE_SHOW(field, format_string);					\
static DEVICE_ATTR_RW(field)

/* use same locking and permission rules as SIF* ioctl's */
static ssize_t netdev_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t len,
			    int (*set)(struct net_device *, unsigned long))
{
	struct net_device *netdev = to_net_dev(dev);
	struct net *net = dev_net(netdev);
	unsigned long new;
	int ret;

	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	ret = kstrtoul(buf, 0, &new);
	if (ret)
		goto err;

	if (!rtnl_trylock())
		return restart_syscall();

	if (dev_isalive(netdev)) {
		ret = (*set)(netdev, new);
		if (ret == 0)
			ret = len;
	}
	rtnl_unlock();
 err:
	return ret;
}

NETDEVICE_SHOW_RO(dev_id, fmt_hex);
NETDEVICE_SHOW_RO(dev_port, fmt_dec);
NETDEVICE_SHOW_RO(addr_assign_type, fmt_dec);
NETDEVICE_SHOW_RO(addr_len, fmt_dec);
NETDEVICE_SHOW_RO(ifindex, fmt_dec);
NETDEVICE_SHOW_RO(type, fmt_dec);
NETDEVICE_SHOW_RO(link_mode, fmt_dec);

static ssize_t iflink_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct net_device *ndev = to_net_dev(dev);

	return sprintf(buf, fmt_dec, dev_get_iflink(ndev));
}
static DEVICE_ATTR_RO(iflink);

static ssize_t format_name_assign_type(const struct net_device *dev, char *buf)
{
	return sprintf(buf, fmt_dec, dev->name_assign_type);
}

static ssize_t name_assign_type_show(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct net_device *ndev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	if (ndev->name_assign_type != NET_NAME_UNKNOWN)
		ret = netdev_show(dev, attr, buf, format_name_assign_type);

	return ret;
}
static DEVICE_ATTR_RO(name_assign_type);

/* use same locking rules as GIFHWADDR ioctl's */
static ssize_t address_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct net_device *ndev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	read_lock(&dev_base_lock);
	if (dev_isalive(ndev))
		ret = sysfs_format_mac(buf, ndev->dev_addr, ndev->addr_len);
	read_unlock(&dev_base_lock);
	return ret;
}
static DEVICE_ATTR_RO(address);

static ssize_t broadcast_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct net_device *ndev = to_net_dev(dev);

	if (dev_isalive(ndev))
		return sysfs_format_mac(buf, ndev->broadcast, ndev->addr_len);
	return -EINVAL;
}
static DEVICE_ATTR_RO(broadcast);

static int change_carrier(struct net_device *dev, unsigned long new_carrier)
{
	if (!netif_running(dev))
		return -EINVAL;
	return dev_change_carrier(dev, (bool)new_carrier);
}

static ssize_t carrier_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_carrier);
}

static ssize_t carrier_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	if (netif_running(netdev))
		return sprintf(buf, fmt_dec, !!netif_carrier_ok(netdev));

	return -EINVAL;
}
static DEVICE_ATTR_RW(carrier);

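/* speed and duplex are queried through ethtool under the RTNL lock and are
 * only reported while the interface is running.
 */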
static ssize_t speed_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	int ret = -EINVAL;

	if (!rtnl_trylock())
		return restart_syscall();

	if (netif_running(netdev)) {
		struct ethtool_link_ksettings cmd;

		if (!__ethtool_get_link_ksettings(netdev, &cmd))
			ret = sprintf(buf, fmt_dec, cmd.base.speed);
	}
	rtnl_unlock();
	return ret;
}
static DEVICE_ATTR_RO(speed);

static ssize_t duplex_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	int ret = -EINVAL;

	if (!rtnl_trylock())
		return restart_syscall();

	if (netif_running(netdev)) {
		struct ethtool_link_ksettings cmd;

		if (!__ethtool_get_link_ksettings(netdev, &cmd)) {
			const char *duplex;

			switch (cmd.base.duplex) {
			case DUPLEX_HALF:
				duplex = "half";
				break;
			case DUPLEX_FULL:
				duplex = "full";
				break;
			default:
				duplex = "unknown";
				break;
			}
			ret = sprintf(buf, "%s\n", duplex);
		}
	}
	rtnl_unlock();
	return ret;
}
static DEVICE_ATTR_RO(duplex);

static ssize_t testing_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	if (netif_running(netdev))
		return sprintf(buf, fmt_dec, !!netif_testing(netdev));

	return -EINVAL;
}
static DEVICE_ATTR_RO(testing);

static ssize_t dormant_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	if (netif_running(netdev))
		return sprintf(buf, fmt_dec, !!netif_dormant(netdev));

	return -EINVAL;
}
static DEVICE_ATTR_RO(dormant);

static const char *const operstates[] = {
	"unknown",
	"notpresent", /* currently unused */
	"down",
	"lowerlayerdown",
	"testing",
	"dormant",
	"up"
};

static ssize_t operstate_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	const struct net_device *netdev = to_net_dev(dev);
	unsigned char operstate;

	read_lock(&dev_base_lock);
	operstate = netdev->operstate;
	if (!netif_running(netdev))
		operstate = IF_OPER_DOWN;
	read_unlock(&dev_base_lock);

	if (operstate >= ARRAY_SIZE(operstates))
		return -EINVAL; /* should not happen */

	return sprintf(buf, "%s\n", operstates[operstate]);
}
static DEVICE_ATTR_RO(operstate);

static ssize_t carrier_changes_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	return sprintf(buf, fmt_dec,
		       atomic_read(&netdev->carrier_up_count) +
		       atomic_read(&netdev->carrier_down_count));
}
static DEVICE_ATTR_RO(carrier_changes);

static ssize_t carrier_up_count_show(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	return sprintf(buf, fmt_dec, atomic_read(&netdev->carrier_up_count));
}
static DEVICE_ATTR_RO(carrier_up_count);

static ssize_t carrier_down_count_show(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	return sprintf(buf, fmt_dec, atomic_read(&netdev->carrier_down_count));
}
static DEVICE_ATTR_RO(carrier_down_count);

/* read-write attributes */

static int change_mtu(struct net_device *dev, unsigned long new_mtu)
{
	return dev_set_mtu(dev, (int)new_mtu);
}

static ssize_t mtu_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_mtu);
}
NETDEVICE_SHOW_RW(mtu, fmt_dec);

static int change_flags(struct net_device *dev, unsigned long new_flags)
{
	return dev_change_flags(dev, (unsigned int)new_flags, NULL);
}

static ssize_t flags_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_flags);
}
NETDEVICE_SHOW_RW(flags, fmt_hex);

static ssize_t tx_queue_len_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t len)
{
	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	return netdev_store(dev, attr, buf, len, dev_change_tx_queue_len);
}
NETDEVICE_SHOW_RW(tx_queue_len, fmt_dec);

static int change_gro_flush_timeout(struct net_device *dev, unsigned long val)
{
	WRITE_ONCE(dev->gro_flush_timeout, val);
	return 0;
}

static ssize_t gro_flush_timeout_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t len)
{
	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	return netdev_store(dev, attr, buf, len, change_gro_flush_timeout);
}
NETDEVICE_SHOW_RW(gro_flush_timeout, fmt_ulong);

static int change_napi_defer_hard_irqs(struct net_device *dev, unsigned long val)
{
	WRITE_ONCE(dev->napi_defer_hard_irqs, val);
	return 0;
}

static ssize_t napi_defer_hard_irqs_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t len)
{
	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	return netdev_store(dev, attr, buf, len, change_napi_defer_hard_irqs);
}
NETDEVICE_SHOW_RW(napi_defer_hard_irqs, fmt_dec);

static ssize_t ifalias_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t len)
{
	struct net_device *netdev = to_net_dev(dev);
	struct net *net = dev_net(netdev);
	size_t count = len;
	ssize_t ret = 0;

	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	/* ignore trailing newline */
	if (len > 0 && buf[len - 1] == '\n')
		--count;

	if (!rtnl_trylock())
		return restart_syscall();

	if (dev_isalive(netdev)) {
		ret = dev_set_alias(netdev, buf, count);
		if (ret < 0)
			goto err;
		ret = len;
		netdev_state_change(netdev);
	}
err:
	rtnl_unlock();

	return ret;
}

static ssize_t ifalias_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	const struct net_device *netdev = to_net_dev(dev);
	char tmp[IFALIASZ];
	ssize_t ret = 0;

	ret = dev_get_alias(netdev, tmp, sizeof(tmp));
	if (ret > 0)
		ret = sprintf(buf, "%s\n", tmp);
	return ret;
}
static DEVICE_ATTR_RW(ifalias);

static int change_group(struct net_device *dev, unsigned long new_group)
{
	dev_set_group(dev, (int)new_group);
	return 0;
}

static ssize_t group_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_group);
}
NETDEVICE_SHOW(group, fmt_dec);
static DEVICE_ATTR(netdev_group, 0644, group_show, group_store);

static int change_proto_down(struct net_device *dev, unsigned long proto_down)
{
	return dev_change_proto_down(dev, (bool)proto_down);
}

static ssize_t proto_down_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_proto_down);
}
NETDEVICE_SHOW_RW(proto_down, fmt_dec);

static ssize_t phys_port_id_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	if (!rtnl_trylock())
		return restart_syscall();

	if (dev_isalive(netdev)) {
		struct netdev_phys_item_id ppid;

		ret = dev_get_phys_port_id(netdev, &ppid);
		if (!ret)
			ret = sprintf(buf, "%*phN\n", ppid.id_len, ppid.id);
	}
	rtnl_unlock();

	return ret;
}
static DEVICE_ATTR_RO(phys_port_id);

static ssize_t phys_port_name_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	if (!rtnl_trylock())
		return restart_syscall();

	if (dev_isalive(netdev)) {
		char name[IFNAMSIZ];

		ret = dev_get_phys_port_name(netdev, name, sizeof(name));
		if (!ret)
			ret = sprintf(buf, "%s\n", name);
	}
	rtnl_unlock();

	return ret;
}
static DEVICE_ATTR_RO(phys_port_name);

static ssize_t phys_switch_id_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	if (!rtnl_trylock())
		return restart_syscall();

	if (dev_isalive(netdev)) {
		struct netdev_phys_item_id ppid = { };

		ret = dev_get_port_parent_id(netdev, &ppid, false);
		if (!ret)
			ret = sprintf(buf, "%*phN\n", ppid.id_len, ppid.id);
	}
	rtnl_unlock();

	return ret;
}
static DEVICE_ATTR_RO(phys_switch_id);

static struct attribute *net_class_attrs[] __ro_after_init = {
	&dev_attr_netdev_group.attr,
	&dev_attr_type.attr,
	&dev_attr_dev_id.attr,
	&dev_attr_dev_port.attr,
	&dev_attr_iflink.attr,
	&dev_attr_ifindex.attr,
	&dev_attr_name_assign_type.attr,
	&dev_attr_addr_assign_type.attr,
	&dev_attr_addr_len.attr,
	&dev_attr_link_mode.attr,
	&dev_attr_address.attr,
	&dev_attr_broadcast.attr,
	&dev_attr_speed.attr,
	&dev_attr_duplex.attr,
	&dev_attr_dormant.attr,
	&dev_attr_testing.attr,
	&dev_attr_operstate.attr,
	&dev_attr_carrier_changes.attr,
	&dev_attr_ifalias.attr,
	&dev_attr_carrier.attr,
	&dev_attr_mtu.attr,
	&dev_attr_flags.attr,
	&dev_attr_tx_queue_len.attr,
	&dev_attr_gro_flush_timeout.attr,
	&dev_attr_napi_defer_hard_irqs.attr,
	&dev_attr_phys_port_id.attr,
	&dev_attr_phys_port_name.attr,
	&dev_attr_phys_switch_id.attr,
	&dev_attr_proto_down.attr,
	&dev_attr_carrier_up_count.attr,
	&dev_attr_carrier_down_count.attr,
	NULL,
};
ATTRIBUTE_GROUPS(net_class);

/* Show a given attribute in the statistics group */
static ssize_t netstat_show(const struct device *d,
			    struct device_attribute *attr, char *buf,
			    unsigned long offset)
{
	struct net_device *dev = to_net_dev(d);
	ssize_t ret = -EINVAL;

	WARN_ON(offset > sizeof(struct rtnl_link_stats64) ||
		offset % sizeof(u64) != 0);

	read_lock(&dev_base_lock);
	if (dev_isalive(dev)) {
		struct rtnl_link_stats64 temp;
		const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);

		ret = sprintf(buf, fmt_u64, *(u64 *)(((u8 *)stats) + offset));
	}
	read_unlock(&dev_base_lock);
	return ret;
}

/* generate a read-only statistics attribute */
#define NETSTAT_ENTRY(name)						\
static ssize_t name##_show(struct device *d,				\
			   struct device_attribute *attr, char *buf)	\
{									\
	return netstat_show(d, attr, buf,				\
			    offsetof(struct rtnl_link_stats64, name));	\
}									\
static DEVICE_ATTR_RO(name)

NETSTAT_ENTRY(rx_packets);
NETSTAT_ENTRY(tx_packets);
NETSTAT_ENTRY(rx_bytes);
NETSTAT_ENTRY(tx_bytes);
NETSTAT_ENTRY(rx_errors);
NETSTAT_ENTRY(tx_errors);
NETSTAT_ENTRY(rx_dropped);
NETSTAT_ENTRY(tx_dropped);
NETSTAT_ENTRY(multicast);
NETSTAT_ENTRY(collisions);
NETSTAT_ENTRY(rx_length_errors);
NETSTAT_ENTRY(rx_over_errors);
NETSTAT_ENTRY(rx_crc_errors);
NETSTAT_ENTRY(rx_frame_errors);
NETSTAT_ENTRY(rx_fifo_errors);
NETSTAT_ENTRY(rx_missed_errors);
NETSTAT_ENTRY(tx_aborted_errors);
NETSTAT_ENTRY(tx_carrier_errors);
NETSTAT_ENTRY(tx_fifo_errors);
NETSTAT_ENTRY(tx_heartbeat_errors);
NETSTAT_ENTRY(tx_window_errors);
NETSTAT_ENTRY(rx_compressed);
NETSTAT_ENTRY(tx_compressed);
NETSTAT_ENTRY(rx_nohandler);

static struct attribute *netstat_attrs[] __ro_after_init = {
	&dev_attr_rx_packets.attr,
	&dev_attr_tx_packets.attr,
	&dev_attr_rx_bytes.attr,
	&dev_attr_tx_bytes.attr,
	&dev_attr_rx_errors.attr,
	&dev_attr_tx_errors.attr,
	&dev_attr_rx_dropped.attr,
	&dev_attr_tx_dropped.attr,
	&dev_attr_multicast.attr,
	&dev_attr_collisions.attr,
	&dev_attr_rx_length_errors.attr,
	&dev_attr_rx_over_errors.attr,
	&dev_attr_rx_crc_errors.attr,
	&dev_attr_rx_frame_errors.attr,
	&dev_attr_rx_fifo_errors.attr,
	&dev_attr_rx_missed_errors.attr,
	&dev_attr_tx_aborted_errors.attr,
	&dev_attr_tx_carrier_errors.attr,
	&dev_attr_tx_fifo_errors.attr,
	&dev_attr_tx_heartbeat_errors.attr,
	&dev_attr_tx_window_errors.attr,
	&dev_attr_rx_compressed.attr,
	&dev_attr_tx_compressed.attr,
	&dev_attr_rx_nohandler.attr,
	NULL
};

static const struct attribute_group netstat_group = {
	.name = "statistics",
	.attrs = netstat_attrs,
};

#if IS_ENABLED(CONFIG_WIRELESS_EXT) || IS_ENABLED(CONFIG_CFG80211)
static struct attribute *wireless_attrs[] = {
	NULL
};

static const struct attribute_group wireless_group = {
	.name = "wireless",
	.attrs = wireless_attrs,
};
#endif

#else /* CONFIG_SYSFS */
#define net_class_groups	NULL
#endif /* CONFIG_SYSFS */

#ifdef CONFIG_SYSFS
#define to_rx_queue_attr(_attr) \
	container_of(_attr, struct rx_queue_attribute, attr)

#define to_rx_queue(obj) container_of(obj, struct netdev_rx_queue, kobj)

static ssize_t rx_queue_attr_show(struct kobject *kobj, struct attribute *attr,
				  char *buf)
{
	const struct rx_queue_attribute *attribute = to_rx_queue_attr(attr);
	struct netdev_rx_queue *queue = to_rx_queue(kobj);

	if (!attribute->show)
		return -EIO;

	return attribute->show(queue, buf);
}

static ssize_t rx_queue_attr_store(struct kobject *kobj, struct attribute *attr,
				   const char *buf, size_t count)
{
	const struct rx_queue_attribute *attribute = to_rx_queue_attr(attr);
	struct netdev_rx_queue *queue = to_rx_queue(kobj);

	if (!attribute->store)
		return -EIO;

	return attribute->store(queue, buf, count);
}

static const struct sysfs_ops rx_queue_sysfs_ops = {
	.show = rx_queue_attr_show,
	.store = rx_queue_attr_store,
};

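/* Per-RX-queue attributes for Receive Packet Steering: rps_cpus is the CPU
 * mask packets from this queue may be steered to, rps_flow_cnt the size of
 * the per-queue flow table.
 */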
#ifdef CONFIG_RPS
static ssize_t show_rps_map(struct netdev_rx_queue *queue, char *buf)
{
	struct rps_map *map;
	cpumask_var_t mask;
	int i, len;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	rcu_read_lock();
	map = rcu_dereference(queue->rps_map);
	if (map)
		for (i = 0; i < map->len; i++)
			cpumask_set_cpu(map->cpus[i], mask);

	len = snprintf(buf, PAGE_SIZE, "%*pb\n", cpumask_pr_args(mask));
	rcu_read_unlock();
	free_cpumask_var(mask);

	return len < PAGE_SIZE ? len : -EINVAL;
}

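/* Install a new RPS CPU map for this queue. The mask written from user space
 * is restricted to housekeeping CPUs and published via RCU; the rps_needed
 * static key is incremented while a map is installed.
 */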
static ssize_t store_rps_map(struct netdev_rx_queue *queue,
			     const char *buf, size_t len)
{
	struct rps_map *old_map, *map;
	cpumask_var_t mask;
	int err, cpu, i, hk_flags;
	static DEFINE_MUTEX(rps_map_mutex);

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
	if (err) {
		free_cpumask_var(mask);
		return err;
	}

	if (!cpumask_empty(mask)) {
		hk_flags = HK_FLAG_DOMAIN | HK_FLAG_WQ;
		cpumask_and(mask, mask, housekeeping_cpumask(hk_flags));
		if (cpumask_empty(mask)) {
			free_cpumask_var(mask);
			return -EINVAL;
		}
	}

	map = kzalloc(max_t(unsigned int,
			    RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES),
		      GFP_KERNEL);
	if (!map) {
		free_cpumask_var(mask);
		return -ENOMEM;
	}

	i = 0;
	for_each_cpu_and(cpu, mask, cpu_online_mask)
		map->cpus[i++] = cpu;

	if (i) {
		map->len = i;
	} else {
		kfree(map);
		map = NULL;
	}

	mutex_lock(&rps_map_mutex);
	old_map = rcu_dereference_protected(queue->rps_map,
					    mutex_is_locked(&rps_map_mutex));
	rcu_assign_pointer(queue->rps_map, map);

	if (map)
		static_branch_inc(&rps_needed);
	if (old_map)
		static_branch_dec(&rps_needed);

	mutex_unlock(&rps_map_mutex);

	if (old_map)
		kfree_rcu(old_map, rcu);

	free_cpumask_var(mask);
	return len;
}

static ssize_t show_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
					   char *buf)
{
	struct rps_dev_flow_table *flow_table;
	unsigned long val = 0;

	rcu_read_lock();
	flow_table = rcu_dereference(queue->rps_flow_table);
	if (flow_table)
		val = (unsigned long)flow_table->mask + 1;
	rcu_read_unlock();

	return sprintf(buf, "%lu\n", val);
}

static void rps_dev_flow_table_release(struct rcu_head *rcu)
{
	struct rps_dev_flow_table *table = container_of(rcu,
	    struct rps_dev_flow_table, rcu);
	vfree(table);
}

static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
					    const char *buf, size_t len)
{
	unsigned long mask, count;
	struct rps_dev_flow_table *table, *old_table;
	static DEFINE_SPINLOCK(rps_dev_flow_lock);
	int rc;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	rc = kstrtoul(buf, 0, &count);
	if (rc < 0)
		return rc;

	if (count) {
		mask = count - 1;
		/* mask = roundup_pow_of_two(count) - 1;
		 * without overflows...
		 */
		while ((mask | (mask >> 1)) != mask)
			mask |= (mask >> 1);
		/* On 64 bit arches, must check mask fits in table->mask (u32),
		 * and on 32bit arches, must check
		 * RPS_DEV_FLOW_TABLE_SIZE(mask + 1) doesn't overflow.
		 */
#if BITS_PER_LONG > 32
		if (mask > (unsigned long)(u32)mask)
			return -EINVAL;
#else
		if (mask > (ULONG_MAX - RPS_DEV_FLOW_TABLE_SIZE(1))
				/ sizeof(struct rps_dev_flow)) {
			/* Enforce a limit to prevent overflow */
			return -EINVAL;
		}
#endif
		table = vmalloc(RPS_DEV_FLOW_TABLE_SIZE(mask + 1));
		if (!table)
			return -ENOMEM;

		table->mask = mask;
		for (count = 0; count <= mask; count++)
			table->flows[count].cpu = RPS_NO_CPU;
	} else {
		table = NULL;
	}

	spin_lock(&rps_dev_flow_lock);
	old_table = rcu_dereference_protected(queue->rps_flow_table,
					      lockdep_is_held(&rps_dev_flow_lock));
	rcu_assign_pointer(queue->rps_flow_table, table);
	spin_unlock(&rps_dev_flow_lock);

	if (old_table)
		call_rcu(&old_table->rcu, rps_dev_flow_table_release);

	return len;
}

static struct rx_queue_attribute rps_cpus_attribute __ro_after_init
	= __ATTR(rps_cpus, 0644, show_rps_map, store_rps_map);

static struct rx_queue_attribute rps_dev_flow_table_cnt_attribute __ro_after_init
	= __ATTR(rps_flow_cnt, 0644,
		 show_rps_dev_flow_table_cnt, store_rps_dev_flow_table_cnt);
#endif /* CONFIG_RPS */

static struct attribute *rx_queue_default_attrs[] __ro_after_init = {
#ifdef CONFIG_RPS
	&rps_cpus_attribute.attr,
	&rps_dev_flow_table_cnt_attribute.attr,
#endif
	NULL
};
ATTRIBUTE_GROUPS(rx_queue_default);

static void rx_queue_release(struct kobject *kobj)
{
	struct netdev_rx_queue *queue = to_rx_queue(kobj);
#ifdef CONFIG_RPS
	struct rps_map *map;
	struct rps_dev_flow_table *flow_table;

	map = rcu_dereference_protected(queue->rps_map, 1);
	if (map) {
		RCU_INIT_POINTER(queue->rps_map, NULL);
		kfree_rcu(map, rcu);
	}

	flow_table = rcu_dereference_protected(queue->rps_flow_table, 1);
	if (flow_table) {
		RCU_INIT_POINTER(queue->rps_flow_table, NULL);
		call_rcu(&flow_table->rcu, rps_dev_flow_table_release);
	}
#endif

	memset(kobj, 0, sizeof(*kobj));
	dev_put(queue->dev);
}

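/* sysfs namespace and ownership callbacks: rx-queue kobjects belong to the
 * device's network namespace, so their ownership can follow the user
 * namespace that owns that network namespace.
 */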
static const void *rx_queue_namespace(struct kobject *kobj)
{
	struct netdev_rx_queue *queue = to_rx_queue(kobj);
	struct device *dev = &queue->dev->dev;
	const void *ns = NULL;

	if (dev->class && dev->class->ns_type)
		ns = dev->class->namespace(dev);

	return ns;
}

static void rx_queue_get_ownership(struct kobject *kobj,
				   kuid_t *uid, kgid_t *gid)
{
	const struct net *net = rx_queue_namespace(kobj);

	net_ns_get_ownership(net, uid, gid);
}

static struct kobj_type rx_queue_ktype __ro_after_init = {
	.sysfs_ops = &rx_queue_sysfs_ops,
	.release = rx_queue_release,
	.default_groups = rx_queue_default_groups,
	.namespace = rx_queue_namespace,
	.get_ownership = rx_queue_get_ownership,
};

static int rx_queue_add_kobject(struct net_device *dev, int index)
{
	struct netdev_rx_queue *queue = dev->_rx + index;
	struct kobject *kobj = &queue->kobj;
	int error = 0;

	/* Kobject_put later will trigger rx_queue_release call which
	 * decreases dev refcount: Take that reference here
	 */
	dev_hold(queue->dev);

	kobj->kset = dev->queues_kset;
	error = kobject_init_and_add(kobj, &rx_queue_ktype, NULL,
				     "rx-%u", index);
	if (error)
		goto err;

	if (dev->sysfs_rx_queue_group) {
		error = sysfs_create_group(kobj, dev->sysfs_rx_queue_group);
		if (error)
			goto err;
	}

	kobject_uevent(kobj, KOBJ_ADD);

	return error;

err:
	kobject_put(kobj);
	return error;
}

static int rx_queue_change_owner(struct net_device *dev, int index, kuid_t kuid,
				 kgid_t kgid)
{
	struct netdev_rx_queue *queue = dev->_rx + index;
	struct kobject *kobj = &queue->kobj;
	int error;

	error = sysfs_change_owner(kobj, kuid, kgid);
	if (error)
		return error;

	if (dev->sysfs_rx_queue_group)
		error = sysfs_group_change_owner(
			kobj, dev->sysfs_rx_queue_group, kuid, kgid);

	return error;
}
#endif /* CONFIG_SYSFS */

int
net_rx_queue_update_kobjects(struct net_device *dev, int old_num, int new_num)
{
#ifdef CONFIG_SYSFS
	int i;
	int error = 0;

#ifndef CONFIG_RPS
	if (!dev->sysfs_rx_queue_group)
		return 0;
#endif
	for (i = old_num; i < new_num; i++) {
		error = rx_queue_add_kobject(dev, i);
		if (error) {
			new_num = old_num;
			break;
		}
	}

	while (--i >= new_num) {
		struct kobject *kobj = &dev->_rx[i].kobj;

		if (!refcount_read(&dev_net(dev)->ns.count))
			kobj->uevent_suppress = 1;
		if (dev->sysfs_rx_queue_group)
			sysfs_remove_group(kobj, dev->sysfs_rx_queue_group);
		kobject_put(kobj);
	}

	return error;
#else
	return 0;
#endif
}

static int net_rx_queue_change_owner(struct net_device *dev, int num,
				     kuid_t kuid, kgid_t kgid)
{
#ifdef CONFIG_SYSFS
	int error = 0;
	int i;

#ifndef CONFIG_RPS
	if (!dev->sysfs_rx_queue_group)
		return 0;
#endif
	for (i = 0; i < num; i++) {
		error = rx_queue_change_owner(dev, i, kuid, kgid);
		if (error)
			break;
	}

	return error;
#else
	return 0;
#endif
}

#ifdef CONFIG_SYSFS
/*
 * netdev_queue sysfs structures and functions.
 */
struct netdev_queue_attribute {
	struct attribute attr;
	ssize_t (*show)(struct netdev_queue *queue, char *buf);
	ssize_t (*store)(struct netdev_queue *queue,
			 const char *buf, size_t len);
};
#define to_netdev_queue_attr(_attr) \
	container_of(_attr, struct netdev_queue_attribute, attr)

#define to_netdev_queue(obj) container_of(obj, struct netdev_queue, kobj)

static ssize_t netdev_queue_attr_show(struct kobject *kobj,
				      struct attribute *attr, char *buf)
{
	const struct netdev_queue_attribute *attribute
		= to_netdev_queue_attr(attr);
	struct netdev_queue *queue = to_netdev_queue(kobj);

	if (!attribute->show)
		return -EIO;

	return attribute->show(queue, buf);
}

static ssize_t netdev_queue_attr_store(struct kobject *kobj,
				       struct attribute *attr,
				       const char *buf, size_t count)
{
	const struct netdev_queue_attribute *attribute
		= to_netdev_queue_attr(attr);
	struct netdev_queue *queue = to_netdev_queue(kobj);

	if (!attribute->store)
		return -EIO;

	return attribute->store(queue, buf, count);
}

static const struct sysfs_ops netdev_queue_sysfs_ops = {
	.show = netdev_queue_attr_show,
	.store = netdev_queue_attr_store,
};

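/* tx_timeout: number of transmit timeouts recorded for this queue, sampled
 * under the queue's xmit lock.
 */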
static ssize_t tx_timeout_show(struct netdev_queue *queue, char *buf)
{
	unsigned long trans_timeout;

	spin_lock_irq(&queue->_xmit_lock);
	trans_timeout = queue->trans_timeout;
	spin_unlock_irq(&queue->_xmit_lock);

	return sprintf(buf, fmt_ulong, trans_timeout);
}

static unsigned int get_netdev_queue_index(struct netdev_queue *queue)
{
	struct net_device *dev = queue->dev;
	unsigned int i;

	i = queue - dev->_tx;
	BUG_ON(i >= dev->num_tx_queues);

	return i;
}

static ssize_t traffic_class_show(struct netdev_queue *queue,
				  char *buf)
{
	struct net_device *dev = queue->dev;
	int index;
	int tc;

	if (!netif_is_multiqueue(dev))
		return -ENOENT;

	index = get_netdev_queue_index(queue);

	/* If queue belongs to subordinate dev use its TC mapping */
	dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev;

	tc = netdev_txq_to_tc(dev, index);
	if (tc < 0)
		return -EINVAL;

	/* We can report the traffic class one of two ways:
	 * Subordinate device traffic classes are reported with the traffic
	 * class first, and then the subordinate class so for example TC0 on
	 * subordinate device 2 will be reported as "0-2". If the queue
	 * belongs to the root device it will be reported with just the
	 * traffic class, so just "0" for TC 0 for example.
	 */
	return dev->num_tc < 0 ? sprintf(buf, "%d%d\n", tc, dev->num_tc) :
				 sprintf(buf, "%d\n", tc);
}

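/* tx_maxrate: per-queue transmit rate limit handed to the driver via
 * ndo_set_tx_maxrate; a value of 0 is conventionally treated by drivers as
 * "no limit".
 */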
#ifdef CONFIG_XPS
static ssize_t tx_maxrate_show(struct netdev_queue *queue,
			       char *buf)
{
	return sprintf(buf, "%lu\n", queue->tx_maxrate);
}

static ssize_t tx_maxrate_store(struct netdev_queue *queue,
				const char *buf, size_t len)
{
	struct net_device *dev = queue->dev;
	int err, index = get_netdev_queue_index(queue);
	u32 rate = 0;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	err = kstrtou32(buf, 10, &rate);
	if (err < 0)
		return err;

	if (!rtnl_trylock())
		return restart_syscall();

	err = -EOPNOTSUPP;
	if (dev->netdev_ops->ndo_set_tx_maxrate)
		err = dev->netdev_ops->ndo_set_tx_maxrate(dev, index, rate);

	rtnl_unlock();
	if (!err) {
		queue->tx_maxrate = rate;
		return len;
	}
	return err;
}

static struct netdev_queue_attribute queue_tx_maxrate __ro_after_init
	= __ATTR_RW(tx_maxrate);
#endif

static struct netdev_queue_attribute queue_trans_timeout __ro_after_init
	= __ATTR_RO(tx_timeout);

static struct netdev_queue_attribute queue_traffic_class __ro_after_init
	= __ATTR_RO(traffic_class);

#ifdef CONFIG_BQL
/*
 * Byte queue limits sysfs structures and functions.
 */
static ssize_t bql_show(char *buf, unsigned int value)
{
	return sprintf(buf, "%u\n", value);
}

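/* Parse a byte-queue-limit value written from user space: the literal string
 * "max" selects DQL_MAX_LIMIT, otherwise a decimal value no larger than
 * DQL_MAX_LIMIT is accepted.
 */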
static ssize_t bql_set(const char *buf, const size_t count,
		       unsigned int *pvalue)
{
	unsigned int value;
	int err;

	if (!strcmp(buf, "max") || !strcmp(buf, "max\n")) {
		value = DQL_MAX_LIMIT;
	} else {
		err = kstrtouint(buf, 10, &value);
		if (err < 0)
			return err;
		if (value > DQL_MAX_LIMIT)
			return -EINVAL;
	}

	*pvalue = value;

	return count;
}

static ssize_t bql_show_hold_time(struct netdev_queue *queue,
				  char *buf)
{
	struct dql *dql = &queue->dql;

	return sprintf(buf, "%u\n", jiffies_to_msecs(dql->slack_hold_time));
}

static ssize_t bql_set_hold_time(struct netdev_queue *queue,
				 const char *buf, size_t len)
{
	struct dql *dql = &queue->dql;
	unsigned int value;
	int err;

	err = kstrtouint(buf, 10, &value);
	if (err < 0)
		return err;

	dql->slack_hold_time = msecs_to_jiffies(value);

	return len;
}

static struct netdev_queue_attribute bql_hold_time_attribute __ro_after_init
	= __ATTR(hold_time, 0644,
		 bql_show_hold_time, bql_set_hold_time);

static ssize_t bql_show_inflight(struct netdev_queue *queue,
				 char *buf)
{
	struct dql *dql = &queue->dql;

	return sprintf(buf, "%u\n", dql->num_queued - dql->num_completed);
}

static struct netdev_queue_attribute bql_inflight_attribute __ro_after_init =
	__ATTR(inflight, 0444, bql_show_inflight, NULL);

#define BQL_ATTR(NAME, FIELD)						\
static ssize_t bql_show_ ## NAME(struct netdev_queue *queue,		\
				 char *buf)				\
{									\
	return bql_show(buf, queue->dql.FIELD);				\
}									\
									\
static ssize_t bql_set_ ## NAME(struct netdev_queue *queue,		\
				const char *buf, size_t len)		\
{									\
	return bql_set(buf, len, &queue->dql.FIELD);			\
}									\
									\
static struct netdev_queue_attribute bql_ ## NAME ## _attribute __ro_after_init \
	= __ATTR(NAME, 0644,						\
		 bql_show_ ## NAME, bql_set_ ## NAME)

BQL_ATTR(limit, limit);
BQL_ATTR(limit_max, max_limit);
BQL_ATTR(limit_min, min_limit);

static struct attribute *dql_attrs[] __ro_after_init = {
	&bql_limit_attribute.attr,
	&bql_limit_max_attribute.attr,
	&bql_limit_min_attribute.attr,
	&bql_hold_time_attribute.attr,
	&bql_inflight_attribute.attr,
	NULL
};

static const struct attribute_group dql_group = {
	.name = "byte_queue_limits",
	.attrs = dql_attrs,
};
#endif /* CONFIG_BQL */

#ifdef CONFIG_XPS
static ssize_t xps_cpus_show(struct netdev_queue *queue,
			     char *buf)
{
Antoine Tenartfb250382020-12-23 22:23:21 +01001320 int cpu, len, ret, num_tc = 1, tc = 0;
Tom Herbert1d24eb42010-11-21 13:17:27 +00001321 struct net_device *dev = queue->dev;
1322 struct xps_dev_maps *dev_maps;
1323 cpumask_var_t mask;
1324 unsigned long index;
Tom Herbert1d24eb42010-11-21 13:17:27 +00001325
Alexander Duyckd7be9772018-07-09 12:19:32 -04001326 if (!netif_is_multiqueue(dev))
1327 return -ENOENT;
1328
Tom Herbert1d24eb42010-11-21 13:17:27 +00001329 index = get_netdev_queue_index(queue);
1330
Antoine Tenartfb250382020-12-23 22:23:21 +01001331 if (!rtnl_trylock())
1332 return restart_syscall();
1333
Alexander Duyck184c4492016-10-28 11:50:13 -04001334 if (dev->num_tc) {
Alexander Duyckffcfe252018-07-09 12:19:38 -04001335 /* Do not allow XPS on subordinate device directly */
Alexander Duyck184c4492016-10-28 11:50:13 -04001336 num_tc = dev->num_tc;
Antoine Tenartfb250382020-12-23 22:23:21 +01001337 if (num_tc < 0) {
1338 ret = -EINVAL;
1339 goto err_rtnl_unlock;
1340 }
Alexander Duyckffcfe252018-07-09 12:19:38 -04001341
1342 /* If queue belongs to subordinate dev use its map */
1343 dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev;
1344
Alexander Duyck184c4492016-10-28 11:50:13 -04001345 tc = netdev_txq_to_tc(dev, index);
Antoine Tenartfb250382020-12-23 22:23:21 +01001346 if (tc < 0) {
1347 ret = -EINVAL;
1348 goto err_rtnl_unlock;
1349 }
Alexander Duyck184c4492016-10-28 11:50:13 -04001350 }
1351
Antoine Tenartfb250382020-12-23 22:23:21 +01001352 if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
1353 ret = -ENOMEM;
1354 goto err_rtnl_unlock;
1355 }
Alexander Duyck664088f2018-05-31 15:59:46 -04001356
Tom Herbert1d24eb42010-11-21 13:17:27 +00001357 rcu_read_lock();
Amritha Nambiar80d19662018-06-29 21:26:41 -07001358 dev_maps = rcu_dereference(dev->xps_cpus_map);
Tom Herbert1d24eb42010-11-21 13:17:27 +00001359 if (dev_maps) {
Alexander Duyck184c4492016-10-28 11:50:13 -04001360 for_each_possible_cpu(cpu) {
1361 int i, tci = cpu * num_tc + tc;
1362 struct xps_map *map;
1363
Amritha Nambiar80d19662018-06-29 21:26:41 -07001364 map = rcu_dereference(dev_maps->attr_map[tci]);
Alexander Duyck184c4492016-10-28 11:50:13 -04001365 if (!map)
1366 continue;
1367
1368 for (i = map->len; i--;) {
1369 if (map->queues[i] == index) {
1370 cpumask_set_cpu(cpu, mask);
1371 break;
Tom Herbert1d24eb42010-11-21 13:17:27 +00001372 }
1373 }
1374 }
1375 }
1376 rcu_read_unlock();
1377
Antoine Tenartfb250382020-12-23 22:23:21 +01001378 rtnl_unlock();
1379
Tejun Heof0906822015-02-13 14:37:42 -08001380 len = snprintf(buf, PAGE_SIZE, "%*pb\n", cpumask_pr_args(mask));
Tom Herbert1d24eb42010-11-21 13:17:27 +00001381 free_cpumask_var(mask);
Tejun Heof0906822015-02-13 14:37:42 -08001382 return len < PAGE_SIZE ? len : -EINVAL;
Antoine Tenartfb250382020-12-23 22:23:21 +01001383
1384err_rtnl_unlock:
1385 rtnl_unlock();
1386 return ret;
Tom Herbert1d24eb42010-11-21 13:17:27 +00001387}
1388
stephen hemminger2b9c7582017-08-18 13:46:26 -07001389static ssize_t xps_cpus_store(struct netdev_queue *queue,
1390 const char *buf, size_t len)
Tom Herbert1d24eb42010-11-21 13:17:27 +00001391{
1392 struct net_device *dev = queue->dev;
Tom Herbert1d24eb42010-11-21 13:17:27 +00001393 unsigned long index;
Alexander Duyck537c00d2013-01-10 08:57:02 +00001394 cpumask_var_t mask;
1395 int err;
Tom Herbert1d24eb42010-11-21 13:17:27 +00001396
Alexander Duyckd7be9772018-07-09 12:19:32 -04001397 if (!netif_is_multiqueue(dev))
1398 return -ENOENT;
1399
Tom Herbert1d24eb42010-11-21 13:17:27 +00001400 if (!capable(CAP_NET_ADMIN))
1401 return -EPERM;
1402
1403 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
1404 return -ENOMEM;
1405
1406 index = get_netdev_queue_index(queue);
1407
1408 err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
1409 if (err) {
1410 free_cpumask_var(mask);
1411 return err;
1412 }
1413
Antoine Tenart1ad582252020-12-23 22:23:20 +01001414 if (!rtnl_trylock()) {
1415 free_cpumask_var(mask);
1416 return restart_syscall();
1417 }
1418
Alexander Duyck537c00d2013-01-10 08:57:02 +00001419 err = netif_set_xps_queue(dev, mask, index);
Antoine Tenart1ad582252020-12-23 22:23:20 +01001420 rtnl_unlock();
Tom Herbert1d24eb42010-11-21 13:17:27 +00001421
1422 free_cpumask_var(mask);
Tom Herbert1d24eb42010-11-21 13:17:27 +00001423
Alexander Duyck537c00d2013-01-10 08:57:02 +00001424 return err ? : len;
Tom Herbert1d24eb42010-11-21 13:17:27 +00001425}
1426
stephen hemminger2b9c7582017-08-18 13:46:26 -07001427static struct netdev_queue_attribute xps_cpus_attribute __ro_after_init
1428 = __ATTR_RW(xps_cpus);
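/* Usage sketch (interface, queue and mask are illustrative): with
 * CAP_NET_ADMIN, writing a hexadecimal CPU bitmap such as
 *   echo 0c > /sys/class/net/eth0/queues/tx-1/xps_cpus
 * maps CPUs 2 and 3 to transmit queue 1 via netif_set_xps_queue(); reading
 * the file prints the CPU mask currently mapped to this queue.
 */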
Amritha Nambiar8af2c062018-06-29 21:27:07 -07001429
1430static ssize_t xps_rxqs_show(struct netdev_queue *queue, char *buf)
1431{
1432 struct net_device *dev = queue->dev;
1433 struct xps_dev_maps *dev_maps;
1434 unsigned long *mask, index;
1435 int j, len, num_tc = 1, tc = 0;
1436
1437 index = get_netdev_queue_index(queue);
1438
1439 if (dev->num_tc) {
1440 num_tc = dev->num_tc;
1441 tc = netdev_txq_to_tc(dev, index);
1442 if (tc < 0)
1443 return -EINVAL;
1444 }
Andy Shevchenko29ca1c52019-03-04 11:48:56 +02001445 mask = bitmap_zalloc(dev->num_rx_queues, GFP_KERNEL);
Amritha Nambiar8af2c062018-06-29 21:27:07 -07001446 if (!mask)
1447 return -ENOMEM;
1448
1449 rcu_read_lock();
1450 dev_maps = rcu_dereference(dev->xps_rxqs_map);
1451 if (!dev_maps)
1452 goto out_no_maps;
1453
1454 for (j = -1; j = netif_attrmask_next(j, NULL, dev->num_rx_queues),
1455 j < dev->num_rx_queues;) {
1456 int i, tci = j * num_tc + tc;
1457 struct xps_map *map;
1458
1459 map = rcu_dereference(dev_maps->attr_map[tci]);
1460 if (!map)
1461 continue;
1462
1463 for (i = map->len; i--;) {
1464 if (map->queues[i] == index) {
1465 set_bit(j, mask);
1466 break;
1467 }
1468 }
1469 }
1470out_no_maps:
1471 rcu_read_unlock();
1472
1473 len = bitmap_print_to_pagebuf(false, buf, mask, dev->num_rx_queues);
Andy Shevchenko29ca1c52019-03-04 11:48:56 +02001474 bitmap_free(mask);
Amritha Nambiar8af2c062018-06-29 21:27:07 -07001475
1476 return len < PAGE_SIZE ? len : -EINVAL;
1477}
1478
1479static ssize_t xps_rxqs_store(struct netdev_queue *queue, const char *buf,
1480 size_t len)
1481{
1482 struct net_device *dev = queue->dev;
1483 struct net *net = dev_net(dev);
1484 unsigned long *mask, index;
1485 int err;
1486
1487 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
1488 return -EPERM;
1489
Andy Shevchenko29ca1c52019-03-04 11:48:56 +02001490 mask = bitmap_zalloc(dev->num_rx_queues, GFP_KERNEL);
Amritha Nambiar8af2c062018-06-29 21:27:07 -07001491 if (!mask)
1492 return -ENOMEM;
1493
1494 index = get_netdev_queue_index(queue);
1495
1496 err = bitmap_parse(buf, len, mask, dev->num_rx_queues);
1497 if (err) {
Andy Shevchenko29ca1c52019-03-04 11:48:56 +02001498 bitmap_free(mask);
Amritha Nambiar8af2c062018-06-29 21:27:07 -07001499 return err;
1500 }
1501
Antoine Tenart2d57b4f2020-12-23 22:23:22 +01001502 if (!rtnl_trylock()) {
1503 bitmap_free(mask);
1504 return restart_syscall();
1505 }
1506
Andrei Vagin4d99f662018-08-08 20:07:35 -07001507 cpus_read_lock();
Amritha Nambiar8af2c062018-06-29 21:27:07 -07001508 err = __netif_set_xps_queue(dev, mask, index, true);
Andrei Vagin4d99f662018-08-08 20:07:35 -07001509 cpus_read_unlock();
1510
Antoine Tenart2d57b4f2020-12-23 22:23:22 +01001511 rtnl_unlock();
1512
Andy Shevchenko29ca1c52019-03-04 11:48:56 +02001513 bitmap_free(mask);
Amritha Nambiar8af2c062018-06-29 21:27:07 -07001514 return err ? : len;
1515}
1516
1517static struct netdev_queue_attribute xps_rxqs_attribute __ro_after_init
1518 = __ATTR_RW(xps_rxqs);
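/* Usage sketch (names and mask are illustrative): xps_rxqs takes a
 * hexadecimal bitmap of receive queues rather than CPUs, e.g.
 *   echo 2 > /sys/class/net/eth0/queues/tx-1/xps_rxqs
 * associates rx queue 1 with this transmit queue through
 * __netif_set_xps_queue() with its rx-queue map flag set (see the store
 * handler above).
 */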
david decotignyccf5ff62011-11-16 12:15:10 +00001519#endif /* CONFIG_XPS */
Tom Herbert1d24eb42010-11-21 13:17:27 +00001520
stephen hemminger2b9c7582017-08-18 13:46:26 -07001521static struct attribute *netdev_queue_default_attrs[] __ro_after_init = {
david decotignyccf5ff62011-11-16 12:15:10 +00001522 &queue_trans_timeout.attr,
Alexander Duyck8d059b02016-10-28 11:43:49 -04001523 &queue_traffic_class.attr,
david decotignyccf5ff62011-11-16 12:15:10 +00001524#ifdef CONFIG_XPS
Tom Herbert1d24eb42010-11-21 13:17:27 +00001525 &xps_cpus_attribute.attr,
Amritha Nambiar8af2c062018-06-29 21:27:07 -07001526 &xps_rxqs_attribute.attr,
John Fastabend822b3b22015-03-18 14:57:33 +02001527 &queue_tx_maxrate.attr,
david decotignyccf5ff62011-11-16 12:15:10 +00001528#endif
Tom Herbert1d24eb42010-11-21 13:17:27 +00001529 NULL
1530};
Kimberly Brownbe0d6922019-04-01 22:51:35 -04001531ATTRIBUTE_GROUPS(netdev_queue_default);
Tom Herbert1d24eb42010-11-21 13:17:27 +00001532
1533static void netdev_queue_release(struct kobject *kobj)
1534{
1535 struct netdev_queue *queue = to_netdev_queue(kobj);
Tom Herbert1d24eb42010-11-21 13:17:27 +00001536
Tom Herbert1d24eb42010-11-21 13:17:27 +00001537 memset(kobj, 0, sizeof(*kobj));
1538 dev_put(queue->dev);
1539}
1540
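/* Queue kobjects are tagged with, and owned by, the network namespace of
 * their underlying net device, so their sysfs visibility and ownership
 * follow the device.
 */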
Weilong Chen82ef3d52014-01-16 17:24:31 +08001541static const void *netdev_queue_namespace(struct kobject *kobj)
1542{
1543 struct netdev_queue *queue = to_netdev_queue(kobj);
1544 struct device *dev = &queue->dev->dev;
1545 const void *ns = NULL;
1546
1547 if (dev->class && dev->class->ns_type)
1548 ns = dev->class->namespace(dev);
1549
1550 return ns;
1551}
1552
Dmitry Torokhovb0e37c02018-07-20 21:56:52 +00001553static void netdev_queue_get_ownership(struct kobject *kobj,
1554 kuid_t *uid, kgid_t *gid)
1555{
1556 const struct net *net = netdev_queue_namespace(kobj);
1557
1558 net_ns_get_ownership(net, uid, gid);
1559}
1560
stephen hemminger2b9c7582017-08-18 13:46:26 -07001561static struct kobj_type netdev_queue_ktype __ro_after_init = {
Tom Herbert1d24eb42010-11-21 13:17:27 +00001562 .sysfs_ops = &netdev_queue_sysfs_ops,
1563 .release = netdev_queue_release,
Kimberly Brownbe0d6922019-04-01 22:51:35 -04001564 .default_groups = netdev_queue_default_groups,
Weilong Chen82ef3d52014-01-16 17:24:31 +08001565 .namespace = netdev_queue_namespace,
Dmitry Torokhovb0e37c02018-07-20 21:56:52 +00001566 .get_ownership = netdev_queue_get_ownership,
Tom Herbert1d24eb42010-11-21 13:17:27 +00001567};
1568
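/* Register the kobject for a single tx queue as "tx-<index>" under the
 * device's "queues" kset and, with CONFIG_BQL, attach the byte_queue_limits
 * attribute group to it.
 */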
WANG Cong6b53daf2014-07-23 16:09:10 -07001569static int netdev_queue_add_kobject(struct net_device *dev, int index)
Tom Herbert1d24eb42010-11-21 13:17:27 +00001570{
WANG Cong6b53daf2014-07-23 16:09:10 -07001571 struct netdev_queue *queue = dev->_tx + index;
Tom Herbert1d24eb42010-11-21 13:17:27 +00001572 struct kobject *kobj = &queue->kobj;
1573 int error = 0;
1574
Jouni Hogandere0b609032019-12-05 15:57:07 +02001575	/* kobject_put() later will trigger the netdev_queue_release() call,
1576	 * which decreases the dev refcount: take that reference here.
1577	 */
1578 dev_hold(queue->dev);
1579
WANG Cong6b53daf2014-07-23 16:09:10 -07001580 kobj->kset = dev->queues_kset;
Tom Herbert1d24eb42010-11-21 13:17:27 +00001581 error = kobject_init_and_add(kobj, &netdev_queue_ktype, NULL,
stephen hemminger6648c652017-08-18 13:46:28 -07001582 "tx-%u", index);
Tom Herbert114cf582011-11-28 16:33:09 +00001583 if (error)
Jouni Hoganderb8eb7182019-11-20 09:08:16 +02001584 goto err;
Tom Herbert114cf582011-11-28 16:33:09 +00001585
1586#ifdef CONFIG_BQL
1587 error = sysfs_create_group(kobj, &dql_group);
Jouni Hoganderb8eb7182019-11-20 09:08:16 +02001588 if (error)
1589 goto err;
Tom Herbert114cf582011-11-28 16:33:09 +00001590#endif
Tom Herbert1d24eb42010-11-21 13:17:27 +00001591
1592 kobject_uevent(kobj, KOBJ_ADD);
Eric Dumazet48a322b2019-11-20 19:19:07 -08001593 return 0;
Tom Herbert1d24eb42010-11-21 13:17:27 +00001594
Jouni Hoganderb8eb7182019-11-20 09:08:16 +02001595err:
1596 kobject_put(kobj);
1597 return error;
Tom Herbert1d24eb42010-11-21 13:17:27 +00001598}
Christian Braunerd7554072020-02-27 04:37:18 +01001599
1600static int tx_queue_change_owner(struct net_device *ndev, int index,
1601 kuid_t kuid, kgid_t kgid)
1602{
1603 struct netdev_queue *queue = ndev->_tx + index;
1604 struct kobject *kobj = &queue->kobj;
1605 int error;
1606
1607 error = sysfs_change_owner(kobj, kuid, kgid);
1608 if (error)
1609 return error;
1610
1611#ifdef CONFIG_BQL
1612 error = sysfs_group_change_owner(kobj, &dql_group, kuid, kgid);
1613#endif
1614 return error;
1615}
david decotignyccf5ff62011-11-16 12:15:10 +00001616#endif /* CONFIG_SYSFS */
Tom Herbert1d24eb42010-11-21 13:17:27 +00001617
1618int
WANG Cong6b53daf2014-07-23 16:09:10 -07001619netdev_queue_update_kobjects(struct net_device *dev, int old_num, int new_num)
Tom Herbert1d24eb42010-11-21 13:17:27 +00001620{
david decotignyccf5ff62011-11-16 12:15:10 +00001621#ifdef CONFIG_SYSFS
Tom Herbert1d24eb42010-11-21 13:17:27 +00001622 int i;
1623 int error = 0;
1624
1625 for (i = old_num; i < new_num; i++) {
WANG Cong6b53daf2014-07-23 16:09:10 -07001626 error = netdev_queue_add_kobject(dev, i);
Tom Herbert1d24eb42010-11-21 13:17:27 +00001627 if (error) {
1628 new_num = old_num;
1629 break;
1630 }
1631 }
1632
Tom Herbert114cf582011-11-28 16:33:09 +00001633 while (--i >= new_num) {
WANG Cong6b53daf2014-07-23 16:09:10 -07001634 struct netdev_queue *queue = dev->_tx + i;
Tom Herbert114cf582011-11-28 16:33:09 +00001635
Christian Brauner8b8f3e62020-08-19 14:06:36 +02001636 if (!refcount_read(&dev_net(dev)->ns.count))
Andrey Vagin002d8a12016-10-24 19:09:53 -07001637 queue->kobj.uevent_suppress = 1;
Tom Herbert114cf582011-11-28 16:33:09 +00001638#ifdef CONFIG_BQL
1639 sysfs_remove_group(&queue->kobj, &dql_group);
1640#endif
1641 kobject_put(&queue->kobj);
1642 }
Tom Herbert1d24eb42010-11-21 13:17:27 +00001643
1644 return error;
Tom Herbertbf264142010-11-26 08:36:09 +00001645#else
1646 return 0;
david decotignyccf5ff62011-11-16 12:15:10 +00001647#endif /* CONFIG_SYSFS */
Tom Herbert1d24eb42010-11-21 13:17:27 +00001648}
1649
Christian Braunerd7554072020-02-27 04:37:18 +01001650static int net_tx_queue_change_owner(struct net_device *dev, int num,
1651 kuid_t kuid, kgid_t kgid)
1652{
1653#ifdef CONFIG_SYSFS
1654 int error = 0;
1655 int i;
1656
1657 for (i = 0; i < num; i++) {
1658 error = tx_queue_change_owner(dev, i, kuid, kgid);
1659 if (error)
1660 break;
1661 }
1662
1663 return error;
1664#else
1665 return 0;
1666#endif /* CONFIG_SYSFS */
1667}
1668
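/* Create the per-device "queues" kset and populate it with rx-* and tx-*
 * kobjects for every real queue. On failure, any queues registered so far
 * are torn down again before the kset itself is unregistered.
 */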
WANG Cong6b53daf2014-07-23 16:09:10 -07001669static int register_queue_kobjects(struct net_device *dev)
Tom Herbert1d24eb42010-11-21 13:17:27 +00001670{
Tom Herbertbf264142010-11-26 08:36:09 +00001671 int error = 0, txq = 0, rxq = 0, real_rx = 0, real_tx = 0;
Tom Herbert1d24eb42010-11-21 13:17:27 +00001672
david decotignyccf5ff62011-11-16 12:15:10 +00001673#ifdef CONFIG_SYSFS
WANG Cong6b53daf2014-07-23 16:09:10 -07001674 dev->queues_kset = kset_create_and_add("queues",
stephen hemminger6648c652017-08-18 13:46:28 -07001675 NULL, &dev->dev.kobj);
WANG Cong6b53daf2014-07-23 16:09:10 -07001676 if (!dev->queues_kset)
Ben Hutchings62fe0b42010-09-27 08:24:33 +00001677 return -ENOMEM;
WANG Cong6b53daf2014-07-23 16:09:10 -07001678 real_rx = dev->real_num_rx_queues;
Tom Herbertbf264142010-11-26 08:36:09 +00001679#endif
WANG Cong6b53daf2014-07-23 16:09:10 -07001680 real_tx = dev->real_num_tx_queues;
Tom Herbertbf264142010-11-26 08:36:09 +00001681
WANG Cong6b53daf2014-07-23 16:09:10 -07001682 error = net_rx_queue_update_kobjects(dev, 0, real_rx);
Tom Herbert1d24eb42010-11-21 13:17:27 +00001683 if (error)
1684 goto error;
Tom Herbertbf264142010-11-26 08:36:09 +00001685 rxq = real_rx;
Tom Herbert1d24eb42010-11-21 13:17:27 +00001686
WANG Cong6b53daf2014-07-23 16:09:10 -07001687 error = netdev_queue_update_kobjects(dev, 0, real_tx);
Tom Herbert1d24eb42010-11-21 13:17:27 +00001688 if (error)
1689 goto error;
Tom Herbertbf264142010-11-26 08:36:09 +00001690 txq = real_tx;
Tom Herbert1d24eb42010-11-21 13:17:27 +00001691
1692 return 0;
1693
1694error:
WANG Cong6b53daf2014-07-23 16:09:10 -07001695 netdev_queue_update_kobjects(dev, txq, 0);
1696 net_rx_queue_update_kobjects(dev, rxq, 0);
YueHaibing895a5e92019-03-02 10:34:55 +08001697#ifdef CONFIG_SYSFS
1698 kset_unregister(dev->queues_kset);
1699#endif
Tom Herbert1d24eb42010-11-21 13:17:27 +00001700 return error;
Ben Hutchings62fe0b42010-09-27 08:24:33 +00001701}
1702
Christian Braunerd7554072020-02-27 04:37:18 +01001703static int queue_change_owner(struct net_device *ndev, kuid_t kuid, kgid_t kgid)
1704{
1705 int error = 0, real_rx = 0, real_tx = 0;
1706
1707#ifdef CONFIG_SYSFS
1708 if (ndev->queues_kset) {
1709 error = sysfs_change_owner(&ndev->queues_kset->kobj, kuid, kgid);
1710 if (error)
1711 return error;
1712 }
1713 real_rx = ndev->real_num_rx_queues;
1714#endif
1715 real_tx = ndev->real_num_tx_queues;
1716
1717 error = net_rx_queue_change_owner(ndev, real_rx, kuid, kgid);
1718 if (error)
1719 return error;
1720
1721 error = net_tx_queue_change_owner(ndev, real_tx, kuid, kgid);
1722 if (error)
1723 return error;
1724
1725 return 0;
1726}
1727
WANG Cong6b53daf2014-07-23 16:09:10 -07001728static void remove_queue_kobjects(struct net_device *dev)
Tom Herbert0a9627f2010-03-16 08:03:29 +00001729{
Tom Herbertbf264142010-11-26 08:36:09 +00001730 int real_rx = 0, real_tx = 0;
1731
Michael Daltona953be52014-01-16 22:23:28 -08001732#ifdef CONFIG_SYSFS
WANG Cong6b53daf2014-07-23 16:09:10 -07001733 real_rx = dev->real_num_rx_queues;
Tom Herbertbf264142010-11-26 08:36:09 +00001734#endif
WANG Cong6b53daf2014-07-23 16:09:10 -07001735 real_tx = dev->real_num_tx_queues;
Tom Herbertbf264142010-11-26 08:36:09 +00001736
WANG Cong6b53daf2014-07-23 16:09:10 -07001737 net_rx_queue_update_kobjects(dev, real_rx, 0);
1738 netdev_queue_update_kobjects(dev, real_tx, 0);
david decotignyccf5ff62011-11-16 12:15:10 +00001739#ifdef CONFIG_SYSFS
WANG Cong6b53daf2014-07-23 16:09:10 -07001740 kset_unregister(dev->queues_kset);
Tom Herbertbf264142010-11-26 08:36:09 +00001741#endif
Tom Herbert0a9627f2010-03-16 08:03:29 +00001742}
Eric W. Biederman608b4b92010-05-04 17:36:45 -07001743
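/* kobj_ns_type hooks for KOBJ_NS_TYPE_NET: these let sysfs tag network-class
 * kobjects with a network namespace so that each namespace only sees its own
 * devices under /sys/class/net.
 */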
Eric W. Biederman7dc5dbc2013-03-25 20:07:01 -07001744static bool net_current_may_mount(void)
1745{
1746 struct net *net = current->nsproxy->net_ns;
1747
1748 return ns_capable(net->user_ns, CAP_SYS_ADMIN);
1749}
1750
Al Viroa685e082011-06-08 21:13:01 -04001751static void *net_grab_current_ns(void)
Eric W. Biederman608b4b92010-05-04 17:36:45 -07001752{
Al Viroa685e082011-06-08 21:13:01 -04001753 struct net *ns = current->nsproxy->net_ns;
1754#ifdef CONFIG_NET_NS
1755 if (ns)
Reshetova, Elenac122e142017-06-30 13:08:08 +03001756 refcount_inc(&ns->passive);
Al Viroa685e082011-06-08 21:13:01 -04001757#endif
1758 return ns;
Eric W. Biederman608b4b92010-05-04 17:36:45 -07001759}
1760
1761static const void *net_initial_ns(void)
1762{
1763 return &init_net;
1764}
1765
1766static const void *net_netlink_ns(struct sock *sk)
1767{
1768 return sock_net(sk);
1769}
1770
stephen hemminger737aec52017-08-18 13:46:22 -07001771const struct kobj_ns_type_operations net_ns_type_operations = {
Eric W. Biederman608b4b92010-05-04 17:36:45 -07001772 .type = KOBJ_NS_TYPE_NET,
Eric W. Biederman7dc5dbc2013-03-25 20:07:01 -07001773 .current_may_mount = net_current_may_mount,
Al Viroa685e082011-06-08 21:13:01 -04001774 .grab_current_ns = net_grab_current_ns,
Eric W. Biederman608b4b92010-05-04 17:36:45 -07001775 .netlink_ns = net_netlink_ns,
1776 .initial_ns = net_initial_ns,
Al Viroa685e082011-06-08 21:13:01 -04001777 .drop_ns = net_drop_ns,
Eric W. Biederman608b4b92010-05-04 17:36:45 -07001778};
Johannes Berg04600792010-08-05 17:45:15 +02001779EXPORT_SYMBOL_GPL(net_ns_type_operations);
Eric W. Biederman608b4b92010-05-04 17:36:45 -07001780
Kay Sievers7eff2e72007-08-14 15:15:12 +02001781static int netdev_uevent(struct device *d, struct kobj_uevent_env *env)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001782{
Greg Kroah-Hartman43cb76d2002-04-09 12:14:34 -07001783 struct net_device *dev = to_net_dev(d);
Kay Sievers7eff2e72007-08-14 15:15:12 +02001784 int retval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001785
Kay Sievers312c0042005-11-16 09:00:00 +01001786 /* pass interface to uevent. */
Kay Sievers7eff2e72007-08-14 15:15:12 +02001787 retval = add_uevent_var(env, "INTERFACE=%s", dev->name);
Eric Rannaudbf624562007-03-30 22:23:12 -07001788 if (retval)
1789 goto exit;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001790
Jean Tourrilhesca2f37d2007-03-07 10:49:30 -08001791 /* pass ifindex to uevent.
1792 * ifindex is useful as it won't change (interface name may change)
stephen hemminger6648c652017-08-18 13:46:28 -07001793	 * and is what rtnetlink uses natively.
1794 */
Kay Sievers7eff2e72007-08-14 15:15:12 +02001795 retval = add_uevent_var(env, "IFINDEX=%d", dev->ifindex);
Jean Tourrilhesca2f37d2007-03-07 10:49:30 -08001796
Eric Rannaudbf624562007-03-30 22:23:12 -07001797exit:
Eric Rannaudbf624562007-03-30 22:23:12 -07001798 return retval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001799}
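/* Illustrative result: for a device named eth0 with ifindex 2, the uevent
 * environment gains "INTERFACE=eth0" and "IFINDEX=2" (the actual values
 * depend on the device, of course).
 */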
Linus Torvalds1da177e2005-04-16 15:20:36 -07001800
1801/*
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001802 * netdev_release -- destroy and free a dead device.
Greg Kroah-Hartman43cb76d2002-04-09 12:14:34 -07001803 * Called when last reference to device kobject is gone.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001804 */
Greg Kroah-Hartman43cb76d2002-04-09 12:14:34 -07001805static void netdev_release(struct device *d)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001806{
Greg Kroah-Hartman43cb76d2002-04-09 12:14:34 -07001807 struct net_device *dev = to_net_dev(d);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001808
1809 BUG_ON(dev->reg_state != NETREG_RELEASED);
1810
Florian Westphal6c557002017-10-02 23:50:05 +02001811 /* no need to wait for rcu grace period:
1812 * device is dead and about to be freed.
1813 */
1814 kfree(rcu_access_pointer(dev->ifalias));
Eric Dumazet74d332c2013-10-30 13:10:44 -07001815 netdev_freemem(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001816}
1817
Eric W. Biederman608b4b92010-05-04 17:36:45 -07001818static const void *net_namespace(struct device *d)
1819{
Geliang Tang5c294822015-12-22 23:11:49 +08001820 struct net_device *dev = to_net_dev(d);
1821
Eric W. Biederman608b4b92010-05-04 17:36:45 -07001822 return dev_net(dev);
1823}
1824
Dmitry Torokhovb0e37c02018-07-20 21:56:52 +00001825static void net_get_ownership(struct device *d, kuid_t *uid, kgid_t *gid)
1826{
1827 struct net_device *dev = to_net_dev(d);
1828 const struct net *net = dev_net(dev);
1829
1830 net_ns_get_ownership(net, uid, gid);
1831}
1832
stephen hemmingere6d473e2017-08-18 13:46:21 -07001833static struct class net_class __ro_after_init = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001834 .name = "net",
Greg Kroah-Hartman43cb76d2002-04-09 12:14:34 -07001835 .dev_release = netdev_release,
Greg Kroah-Hartman6be8aee2013-07-24 15:05:33 -07001836 .dev_groups = net_class_groups,
Greg Kroah-Hartman43cb76d2002-04-09 12:14:34 -07001837 .dev_uevent = netdev_uevent,
Eric W. Biederman608b4b92010-05-04 17:36:45 -07001838 .ns_type = &net_ns_type_operations,
1839 .namespace = net_namespace,
Dmitry Torokhovb0e37c02018-07-20 21:56:52 +00001840 .get_ownership = net_get_ownership,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001841};
1842
Florian Fainelliaa836df2015-03-09 14:31:20 -07001843#ifdef CONFIG_OF_NET
1844static int of_dev_node_match(struct device *dev, const void *data)
1845{
Tobias Waldekranz2e186a22020-05-15 11:52:52 +02001846 for (; dev; dev = dev->parent) {
1847 if (dev->of_node == data)
1848 return 1;
1849 }
Florian Fainelliaa836df2015-03-09 14:31:20 -07001850
Tobias Waldekranz2e186a22020-05-15 11:52:52 +02001851 return 0;
Florian Fainelliaa836df2015-03-09 14:31:20 -07001852}
1853
Russell King9861f722015-09-24 20:36:33 +01001854/*
1855 * of_find_net_device_by_node - lookup the net device for the device node
1856 * @np: OF device node
1857 *
1858 * Looks up the net_device structure corresponding with the device node.
1859 * If successful, returns a pointer to the net_device with the embedded
1860 * struct device refcount incremented by one, or NULL on failure. The
1861 * refcount must be dropped when done with the net_device.
1862 */
Florian Fainelliaa836df2015-03-09 14:31:20 -07001863struct net_device *of_find_net_device_by_node(struct device_node *np)
1864{
1865 struct device *dev;
1866
1867 dev = class_find_device(&net_class, NULL, np, of_dev_node_match);
1868 if (!dev)
1869 return NULL;
1870
1871 return to_net_dev(dev);
1872}
1873EXPORT_SYMBOL(of_find_net_device_by_node);
1874#endif
1875
Stephen Hemminger9093bbb2007-05-19 15:39:25 -07001876/* Delete sysfs entries but hold kobject reference until after all
1877 * netdev references are gone.
1878 */
WANG Cong6b53daf2014-07-23 16:09:10 -07001879void netdev_unregister_kobject(struct net_device *ndev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001880{
stephen hemminger6648c652017-08-18 13:46:28 -07001881 struct device *dev = &ndev->dev;
Stephen Hemminger9093bbb2007-05-19 15:39:25 -07001882
Christian Brauner8b8f3e62020-08-19 14:06:36 +02001883 if (!refcount_read(&dev_net(ndev)->ns.count))
Andrey Vagin002d8a12016-10-24 19:09:53 -07001884 dev_set_uevent_suppress(dev, 1);
1885
Stephen Hemminger9093bbb2007-05-19 15:39:25 -07001886 kobject_get(&dev->kobj);
Eric W. Biederman38918452008-10-27 17:51:47 -07001887
WANG Cong6b53daf2014-07-23 16:09:10 -07001888 remove_queue_kobjects(ndev);
Tom Herbert0a9627f2010-03-16 08:03:29 +00001889
Ming Lei9802c8e2013-02-22 16:34:16 -08001890 pm_runtime_set_memalloc_noio(dev, false);
1891
Stephen Hemminger9093bbb2007-05-19 15:39:25 -07001892 device_del(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001893}
1894
1895/* Create sysfs entries for network device. */
WANG Cong6b53daf2014-07-23 16:09:10 -07001896int netdev_register_kobject(struct net_device *ndev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001897{
stephen hemminger6648c652017-08-18 13:46:28 -07001898 struct device *dev = &ndev->dev;
WANG Cong6b53daf2014-07-23 16:09:10 -07001899 const struct attribute_group **groups = ndev->sysfs_groups;
Tom Herbert0a9627f2010-03-16 08:03:29 +00001900 int error = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001901
Eric W. Biedermana1b3f592010-05-04 17:36:49 -07001902 device_initialize(dev);
Greg Kroah-Hartman43cb76d2002-04-09 12:14:34 -07001903 dev->class = &net_class;
WANG Cong6b53daf2014-07-23 16:09:10 -07001904 dev->platform_data = ndev;
Greg Kroah-Hartman43cb76d2002-04-09 12:14:34 -07001905 dev->groups = groups;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001906
WANG Cong6b53daf2014-07-23 16:09:10 -07001907 dev_set_name(dev, "%s", ndev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001908
Eric W. Biederman8b41d182007-09-26 22:02:53 -07001909#ifdef CONFIG_SYSFS
Eric W. Biederman0c509a62009-10-29 14:18:21 +00001910 /* Allow for a device specific group */
1911 if (*groups)
1912 groups++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001913
Eric W. Biederman0c509a62009-10-29 14:18:21 +00001914 *groups++ = &netstat_group;
Johannes Berg38c1a012012-11-16 20:46:19 +01001915
1916#if IS_ENABLED(CONFIG_WIRELESS_EXT) || IS_ENABLED(CONFIG_CFG80211)
WANG Cong6b53daf2014-07-23 16:09:10 -07001917 if (ndev->ieee80211_ptr)
Johannes Berg38c1a012012-11-16 20:46:19 +01001918 *groups++ = &wireless_group;
1919#if IS_ENABLED(CONFIG_WIRELESS_EXT)
WANG Cong6b53daf2014-07-23 16:09:10 -07001920 else if (ndev->wireless_handlers)
Johannes Berg38c1a012012-11-16 20:46:19 +01001921 *groups++ = &wireless_group;
1922#endif
1923#endif
Eric W. Biederman8b41d182007-09-26 22:02:53 -07001924#endif /* CONFIG_SYSFS */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001925
Tom Herbert0a9627f2010-03-16 08:03:29 +00001926 error = device_add(dev);
1927 if (error)
Wang Hai8ed633b2019-04-12 16:36:33 -04001928 return error;
Tom Herbert0a9627f2010-03-16 08:03:29 +00001929
WANG Cong6b53daf2014-07-23 16:09:10 -07001930 error = register_queue_kobjects(ndev);
Wang Hai8ed633b2019-04-12 16:36:33 -04001931 if (error) {
1932 device_del(dev);
1933 return error;
1934 }
Tom Herbert0a9627f2010-03-16 08:03:29 +00001935
Ming Lei9802c8e2013-02-22 16:34:16 -08001936 pm_runtime_set_memalloc_noio(dev, true);
1937
Tom Herbert0a9627f2010-03-16 08:03:29 +00001938 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001939}
1940
Christian Braunere6dee9f2020-02-27 04:37:17 +01001941/* Change owner for sysfs entries when moving network devices across network
1942 * namespaces owned by different user namespaces.
1943 */
1944int netdev_change_owner(struct net_device *ndev, const struct net *net_old,
1945 const struct net *net_new)
1946{
1947 struct device *dev = &ndev->dev;
1948 kuid_t old_uid, new_uid;
1949 kgid_t old_gid, new_gid;
1950 int error;
1951
1952 net_ns_get_ownership(net_old, &old_uid, &old_gid);
1953 net_ns_get_ownership(net_new, &new_uid, &new_gid);
1954
1955 /* The network namespace was changed but the owning user namespace is
1956 * identical so there's no need to change the owner of sysfs entries.
1957 */
1958 if (uid_eq(old_uid, new_uid) && gid_eq(old_gid, new_gid))
1959 return 0;
1960
1961 error = device_change_owner(dev, new_uid, new_gid);
1962 if (error)
1963 return error;
1964
Christian Braunerd7554072020-02-27 04:37:18 +01001965 error = queue_change_owner(ndev, new_uid, new_gid);
1966 if (error)
1967 return error;
1968
Christian Braunere6dee9f2020-02-27 04:37:17 +01001969 return 0;
1970}
1971
stephen hemmingerb793dc52017-08-18 13:46:20 -07001972int netdev_class_create_file_ns(const struct class_attribute *class_attr,
Tejun Heo58292cbe2013-09-11 22:29:04 -04001973 const void *ns)
Jay Vosburghb8a97872008-06-13 18:12:04 -07001974{
Tejun Heo58292cbe2013-09-11 22:29:04 -04001975 return class_create_file_ns(&net_class, class_attr, ns);
Jay Vosburghb8a97872008-06-13 18:12:04 -07001976}
Tejun Heo58292cbe2013-09-11 22:29:04 -04001977EXPORT_SYMBOL(netdev_class_create_file_ns);
Jay Vosburghb8a97872008-06-13 18:12:04 -07001978
stephen hemmingerb793dc52017-08-18 13:46:20 -07001979void netdev_class_remove_file_ns(const struct class_attribute *class_attr,
Tejun Heo58292cbe2013-09-11 22:29:04 -04001980 const void *ns)
Jay Vosburghb8a97872008-06-13 18:12:04 -07001981{
Tejun Heo58292cbe2013-09-11 22:29:04 -04001982 class_remove_file_ns(&net_class, class_attr, ns);
Jay Vosburghb8a97872008-06-13 18:12:04 -07001983}
Tejun Heo58292cbe2013-09-11 22:29:04 -04001984EXPORT_SYMBOL(netdev_class_remove_file_ns);
Jay Vosburghb8a97872008-06-13 18:12:04 -07001985
Daniel Borkmanna48d4bb2014-01-06 01:20:11 +01001986int __init netdev_kobject_init(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001987{
Eric W. Biederman608b4b92010-05-04 17:36:45 -07001988 kobj_ns_type_register(&net_ns_type_operations);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001989 return class_register(&net_class);
1990}