// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net-sysfs.c - network device class and attributes
 *
 * Copyright (c) 2003 Stephen Hemminger <shemminger@osdl.org>
 */

#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/sched/isolation.h>
#include <linux/nsproxy.h>
#include <net/sock.h>
#include <net/net_namespace.h>
#include <linux/rtnetlink.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <linux/jiffies.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/cpu.h>

#include "net-sysfs.h"

#ifdef CONFIG_SYSFS
static const char fmt_hex[] = "%#x\n";
static const char fmt_dec[] = "%d\n";
static const char fmt_ulong[] = "%lu\n";
static const char fmt_u64[] = "%llu\n";

static inline int dev_isalive(const struct net_device *dev)
{
	return dev->reg_state <= NETREG_REGISTERED;
}

/* use same locking rules as GIF* ioctl's */
static ssize_t netdev_show(const struct device *dev,
			   struct device_attribute *attr, char *buf,
			   ssize_t (*format)(const struct net_device *, char *))
{
	struct net_device *ndev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	read_lock(&dev_base_lock);
	if (dev_isalive(ndev))
		ret = (*format)(ndev, buf);
	read_unlock(&dev_base_lock);

	return ret;
}

/* generate a show function for simple field */
#define NETDEVICE_SHOW(field, format_string)				\
static ssize_t format_##field(const struct net_device *dev, char *buf)	\
{									\
	return sprintf(buf, format_string, dev->field);			\
}									\
static ssize_t field##_show(struct device *dev,				\
			    struct device_attribute *attr, char *buf)	\
{									\
	return netdev_show(dev, attr, buf, format_##field);		\
}									\

#define NETDEVICE_SHOW_RO(field, format_string)				\
NETDEVICE_SHOW(field, format_string);					\
static DEVICE_ATTR_RO(field)

#define NETDEVICE_SHOW_RW(field, format_string)				\
NETDEVICE_SHOW(field, format_string);					\
static DEVICE_ATTR_RW(field)
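
/*
 * Illustration only (a sketch of the preprocessor output, not part of the
 * build): NETDEVICE_SHOW_RO(ifindex, fmt_dec) expands to roughly
 *
 *	static ssize_t format_ifindex(const struct net_device *dev, char *buf)
 *	{
 *		return sprintf(buf, fmt_dec, dev->ifindex);
 *	}
 *	static ssize_t ifindex_show(struct device *dev,
 *				    struct device_attribute *attr, char *buf)
 *	{
 *		return netdev_show(dev, attr, buf, format_ifindex);
 *	}
 *	static DEVICE_ATTR_RO(ifindex);
 *
 * i.e. a formatting helper plus a show method routed through netdev_show(),
 * which supplies the dev_base_lock/dev_isalive() handling.
 */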

/* use same locking and permission rules as SIF* ioctl's */
static ssize_t netdev_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t len,
			    int (*set)(struct net_device *, unsigned long))
{
	struct net_device *netdev = to_net_dev(dev);
	struct net *net = dev_net(netdev);
	unsigned long new;
	int ret;

	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	ret = kstrtoul(buf, 0, &new);
	if (ret)
		goto err;

	if (!rtnl_trylock())
		return restart_syscall();

	if (dev_isalive(netdev)) {
		ret = (*set)(netdev, new);
		if (ret == 0)
			ret = len;
	}
	rtnl_unlock();
 err:
	return ret;
}
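
/*
 * Example usage from userspace (a sketch; assumes a device named "eth0").
 * Every writable attribute below funnels through netdev_store():
 *
 *	# echo 1500 > /sys/class/net/eth0/mtu
 *
 * Note the rtnl_trylock()/restart_syscall() pattern: instead of sleeping on
 * RTNL from a sysfs write, the handler backs out and lets the syscall be
 * restarted transparently.
 */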

NETDEVICE_SHOW_RO(dev_id, fmt_hex);
NETDEVICE_SHOW_RO(dev_port, fmt_dec);
NETDEVICE_SHOW_RO(addr_assign_type, fmt_dec);
NETDEVICE_SHOW_RO(addr_len, fmt_dec);
NETDEVICE_SHOW_RO(ifindex, fmt_dec);
NETDEVICE_SHOW_RO(type, fmt_dec);
NETDEVICE_SHOW_RO(link_mode, fmt_dec);

static ssize_t iflink_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct net_device *ndev = to_net_dev(dev);

	return sprintf(buf, fmt_dec, dev_get_iflink(ndev));
}
static DEVICE_ATTR_RO(iflink);

static ssize_t format_name_assign_type(const struct net_device *dev, char *buf)
{
	return sprintf(buf, fmt_dec, dev->name_assign_type);
}

static ssize_t name_assign_type_show(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct net_device *ndev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	if (ndev->name_assign_type != NET_NAME_UNKNOWN)
		ret = netdev_show(dev, attr, buf, format_name_assign_type);

	return ret;
}
static DEVICE_ATTR_RO(name_assign_type);

/* use same locking rules as GIFHWADDR ioctl's */
static ssize_t address_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct net_device *ndev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	read_lock(&dev_base_lock);
	if (dev_isalive(ndev))
		ret = sysfs_format_mac(buf, ndev->dev_addr, ndev->addr_len);
	read_unlock(&dev_base_lock);
	return ret;
}
static DEVICE_ATTR_RO(address);

static ssize_t broadcast_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct net_device *ndev = to_net_dev(dev);

	if (dev_isalive(ndev))
		return sysfs_format_mac(buf, ndev->broadcast, ndev->addr_len);
	return -EINVAL;
}
static DEVICE_ATTR_RO(broadcast);

static int change_carrier(struct net_device *dev, unsigned long new_carrier)
{
	if (!netif_running(dev))
		return -EINVAL;
	return dev_change_carrier(dev, (bool)new_carrier);
}

static ssize_t carrier_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t len)
{
	struct net_device *netdev = to_net_dev(dev);

	/* The check is also done in change_carrier; this helps returning early
	 * without hitting the trylock/restart in netdev_store.
	 */
	if (!netdev->netdev_ops->ndo_change_carrier)
		return -EOPNOTSUPP;

	return netdev_store(dev, attr, buf, len, change_carrier);
}

static ssize_t carrier_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	if (netif_running(netdev))
		return sprintf(buf, fmt_dec, !!netif_carrier_ok(netdev));

	return -EINVAL;
}
static DEVICE_ATTR_RW(carrier);
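
/*
 * Example (sketch, assuming "eth0"): carrier reads as 0/1 while the
 * interface is up; writing it requires the driver to implement
 * ndo_change_carrier:
 *
 *	# cat /sys/class/net/eth0/carrier
 *	1
 *	# echo 0 > /sys/class/net/eth0/carrier
 */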

static ssize_t speed_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	int ret = -EINVAL;

	/* The check is also done in __ethtool_get_link_ksettings; this helps
	 * returning early without hitting the trylock/restart below.
	 */
	if (!netdev->ethtool_ops->get_link_ksettings)
		return ret;

	if (!rtnl_trylock())
		return restart_syscall();

	if (netif_running(netdev)) {
		struct ethtool_link_ksettings cmd;

		if (!__ethtool_get_link_ksettings(netdev, &cmd))
			ret = sprintf(buf, fmt_dec, cmd.base.speed);
	}
	rtnl_unlock();
	return ret;
}
static DEVICE_ATTR_RO(speed);

static ssize_t duplex_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	int ret = -EINVAL;

	/* The check is also done in __ethtool_get_link_ksettings; this helps
	 * returning early without hitting the trylock/restart below.
	 */
	if (!netdev->ethtool_ops->get_link_ksettings)
		return ret;

	if (!rtnl_trylock())
		return restart_syscall();

	if (netif_running(netdev)) {
		struct ethtool_link_ksettings cmd;

		if (!__ethtool_get_link_ksettings(netdev, &cmd)) {
			const char *duplex;

			switch (cmd.base.duplex) {
			case DUPLEX_HALF:
				duplex = "half";
				break;
			case DUPLEX_FULL:
				duplex = "full";
				break;
			default:
				duplex = "unknown";
				break;
			}
			ret = sprintf(buf, "%s\n", duplex);
		}
	}
	rtnl_unlock();
	return ret;
}
static DEVICE_ATTR_RO(duplex);

static ssize_t testing_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	if (netif_running(netdev))
		return sprintf(buf, fmt_dec, !!netif_testing(netdev));

	return -EINVAL;
}
static DEVICE_ATTR_RO(testing);

static ssize_t dormant_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	if (netif_running(netdev))
		return sprintf(buf, fmt_dec, !!netif_dormant(netdev));

	return -EINVAL;
}
static DEVICE_ATTR_RO(dormant);

static const char *const operstates[] = {
	"unknown",
	"notpresent", /* currently unused */
	"down",
	"lowerlayerdown",
	"testing",
	"dormant",
	"up"
};

static ssize_t operstate_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	const struct net_device *netdev = to_net_dev(dev);
	unsigned char operstate;

	read_lock(&dev_base_lock);
	operstate = netdev->operstate;
	if (!netif_running(netdev))
		operstate = IF_OPER_DOWN;
	read_unlock(&dev_base_lock);

	if (operstate >= ARRAY_SIZE(operstates))
		return -EINVAL; /* should not happen */

	return sprintf(buf, "%s\n", operstates[operstate]);
}
static DEVICE_ATTR_RO(operstate);
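
/*
 * Example (sketch): operstate reports the RFC 2863 operational state by
 * name:
 *
 *	# cat /sys/class/net/eth0/operstate
 *	up
 */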

static ssize_t carrier_changes_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	return sprintf(buf, fmt_dec,
		       atomic_read(&netdev->carrier_up_count) +
		       atomic_read(&netdev->carrier_down_count));
}
static DEVICE_ATTR_RO(carrier_changes);

static ssize_t carrier_up_count_show(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	return sprintf(buf, fmt_dec, atomic_read(&netdev->carrier_up_count));
}
static DEVICE_ATTR_RO(carrier_up_count);

static ssize_t carrier_down_count_show(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	return sprintf(buf, fmt_dec, atomic_read(&netdev->carrier_down_count));
}
static DEVICE_ATTR_RO(carrier_down_count);

/* read-write attributes */

static int change_mtu(struct net_device *dev, unsigned long new_mtu)
{
	return dev_set_mtu(dev, (int)new_mtu);
}

static ssize_t mtu_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_mtu);
}
NETDEVICE_SHOW_RW(mtu, fmt_dec);

static int change_flags(struct net_device *dev, unsigned long new_flags)
{
	return dev_change_flags(dev, (unsigned int)new_flags, NULL);
}

static ssize_t flags_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_flags);
}
NETDEVICE_SHOW_RW(flags, fmt_hex);

static ssize_t tx_queue_len_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t len)
{
	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	return netdev_store(dev, attr, buf, len, dev_change_tx_queue_len);
}
NETDEVICE_SHOW_RW(tx_queue_len, fmt_dec);

static int change_gro_flush_timeout(struct net_device *dev, unsigned long val)
{
	WRITE_ONCE(dev->gro_flush_timeout, val);
	return 0;
}

static ssize_t gro_flush_timeout_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t len)
{
	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	return netdev_store(dev, attr, buf, len, change_gro_flush_timeout);
}
NETDEVICE_SHOW_RW(gro_flush_timeout, fmt_ulong);

static int change_napi_defer_hard_irqs(struct net_device *dev, unsigned long val)
{
	WRITE_ONCE(dev->napi_defer_hard_irqs, val);
	return 0;
}

static ssize_t napi_defer_hard_irqs_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t len)
{
	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	return netdev_store(dev, attr, buf, len, change_napi_defer_hard_irqs);
}
NETDEVICE_SHOW_RW(napi_defer_hard_irqs, fmt_dec);
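
/*
 * Example (sketch): the two attributes above cooperate. With
 *
 *	# echo 20000 > /sys/class/net/eth0/gro_flush_timeout
 *	# echo 2 > /sys/class/net/eth0/napi_defer_hard_irqs
 *
 * NAPI keeps device IRQs masked across up to two idle polls, re-polling
 * from a 20 usec hrtimer instead of re-arming the interrupt immediately.
 */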

static ssize_t ifalias_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t len)
{
	struct net_device *netdev = to_net_dev(dev);
	struct net *net = dev_net(netdev);
	size_t count = len;
	ssize_t ret = 0;

	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	/* ignore trailing newline */
	if (len > 0 && buf[len - 1] == '\n')
		--count;

	if (!rtnl_trylock())
		return restart_syscall();

	if (dev_isalive(netdev)) {
		ret = dev_set_alias(netdev, buf, count);
		if (ret < 0)
			goto err;
		ret = len;
		netdev_state_change(netdev);
	}
err:
	rtnl_unlock();

	return ret;
}

static ssize_t ifalias_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	const struct net_device *netdev = to_net_dev(dev);
	char tmp[IFALIASZ];
	ssize_t ret = 0;

	ret = dev_get_alias(netdev, tmp, sizeof(tmp));
	if (ret > 0)
		ret = sprintf(buf, "%s\n", tmp);
	return ret;
}
static DEVICE_ATTR_RW(ifalias);
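
/*
 * Example (sketch): ifalias holds a free-form description of up to
 * IFALIASZ - 1 characters:
 *
 *	# echo "uplink to core switch" > /sys/class/net/eth0/ifalias
 *	# cat /sys/class/net/eth0/ifalias
 *	uplink to core switch
 */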

static int change_group(struct net_device *dev, unsigned long new_group)
{
	dev_set_group(dev, (int)new_group);
	return 0;
}

static ssize_t group_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_group);
}
NETDEVICE_SHOW(group, fmt_dec);
static DEVICE_ATTR(netdev_group, 0644, group_show, group_store);

static int change_proto_down(struct net_device *dev, unsigned long proto_down)
{
	return dev_change_proto_down(dev, (bool)proto_down);
}

static ssize_t proto_down_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_proto_down);
}
NETDEVICE_SHOW_RW(proto_down, fmt_dec);

static ssize_t phys_port_id_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	/* The check is also done in dev_get_phys_port_id; this helps returning
	 * early without hitting the trylock/restart below.
	 */
	if (!netdev->netdev_ops->ndo_get_phys_port_id)
		return -EOPNOTSUPP;

	if (!rtnl_trylock())
		return restart_syscall();

	if (dev_isalive(netdev)) {
		struct netdev_phys_item_id ppid;

		ret = dev_get_phys_port_id(netdev, &ppid);
		if (!ret)
			ret = sprintf(buf, "%*phN\n", ppid.id_len, ppid.id);
	}
	rtnl_unlock();

	return ret;
}
static DEVICE_ATTR_RO(phys_port_id);

static ssize_t phys_port_name_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	/* The checks are also done in dev_get_phys_port_name; this helps
	 * returning early without hitting the trylock/restart below.
	 */
	if (!netdev->netdev_ops->ndo_get_phys_port_name &&
	    !netdev->netdev_ops->ndo_get_devlink_port)
		return -EOPNOTSUPP;

	if (!rtnl_trylock())
		return restart_syscall();

	if (dev_isalive(netdev)) {
		char name[IFNAMSIZ];

		ret = dev_get_phys_port_name(netdev, name, sizeof(name));
		if (!ret)
			ret = sprintf(buf, "%s\n", name);
	}
	rtnl_unlock();

	return ret;
}
static DEVICE_ATTR_RO(phys_port_name);

static ssize_t phys_switch_id_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	/* The checks are also done in dev_get_port_parent_id; this helps
	 * returning early without hitting the trylock/restart below. This works
	 * because recurse is false when calling dev_get_port_parent_id.
	 */
562 if (!netdev->netdev_ops->ndo_get_port_parent_id &&
563 !netdev->netdev_ops->ndo_get_devlink_port)
564 return -EOPNOTSUPP;
565
Jiri Pirkoaecbe012014-11-28 14:34:19 +0100566 if (!rtnl_trylock())
567 return restart_syscall();
568
569 if (dev_isalive(netdev)) {
Florian Fainellibccb3022019-02-06 09:45:46 -0800570 struct netdev_phys_item_id ppid = { };
Jiri Pirkoaecbe012014-11-28 14:34:19 +0100571
Florian Fainellibccb3022019-02-06 09:45:46 -0800572 ret = dev_get_port_parent_id(netdev, &ppid, false);
Jiri Pirkoaecbe012014-11-28 14:34:19 +0100573 if (!ret)
Florian Fainellibccb3022019-02-06 09:45:46 -0800574 ret = sprintf(buf, "%*phN\n", ppid.id_len, ppid.id);
Jiri Pirkoaecbe012014-11-28 14:34:19 +0100575 }
576 rtnl_unlock();
577
578 return ret;
579}
580static DEVICE_ATTR_RO(phys_switch_id);
581
Wei Wang5fdd2f02021-02-08 11:34:10 -0800582static ssize_t threaded_show(struct device *dev,
583 struct device_attribute *attr, char *buf)
584{
585 struct net_device *netdev = to_net_dev(dev);
586 ssize_t ret = -EINVAL;
587
588 if (!rtnl_trylock())
589 return restart_syscall();
590
591 if (dev_isalive(netdev))
592 ret = sprintf(buf, fmt_dec, netdev->threaded);
593
594 rtnl_unlock();
595 return ret;
596}
597
598static int modify_napi_threaded(struct net_device *dev, unsigned long val)
599{
600 int ret;
601
602 if (list_empty(&dev->napi_list))
603 return -EOPNOTSUPP;
604
605 if (val != 0 && val != 1)
606 return -EOPNOTSUPP;
607
608 ret = dev_set_threaded(dev, val);
609
610 return ret;
611}
612
613static ssize_t threaded_store(struct device *dev,
614 struct device_attribute *attr,
615 const char *buf, size_t len)
616{
617 return netdev_store(dev, attr, buf, len, modify_napi_threaded);
618}
619static DEVICE_ATTR_RW(threaded);
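
/*
 * Example (sketch): switching a device's NAPI instances to threaded mode,
 * where each NAPI polls from its own kthread rather than from softirq
 * context:
 *
 *	# echo 1 > /sys/class/net/eth0/threaded
 */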

static struct attribute *net_class_attrs[] __ro_after_init = {
	&dev_attr_netdev_group.attr,
	&dev_attr_type.attr,
	&dev_attr_dev_id.attr,
	&dev_attr_dev_port.attr,
	&dev_attr_iflink.attr,
	&dev_attr_ifindex.attr,
	&dev_attr_name_assign_type.attr,
	&dev_attr_addr_assign_type.attr,
	&dev_attr_addr_len.attr,
	&dev_attr_link_mode.attr,
	&dev_attr_address.attr,
	&dev_attr_broadcast.attr,
	&dev_attr_speed.attr,
	&dev_attr_duplex.attr,
	&dev_attr_dormant.attr,
	&dev_attr_testing.attr,
	&dev_attr_operstate.attr,
	&dev_attr_carrier_changes.attr,
	&dev_attr_ifalias.attr,
	&dev_attr_carrier.attr,
	&dev_attr_mtu.attr,
	&dev_attr_flags.attr,
	&dev_attr_tx_queue_len.attr,
	&dev_attr_gro_flush_timeout.attr,
	&dev_attr_napi_defer_hard_irqs.attr,
	&dev_attr_phys_port_id.attr,
	&dev_attr_phys_port_name.attr,
	&dev_attr_phys_switch_id.attr,
	&dev_attr_proto_down.attr,
	&dev_attr_carrier_up_count.attr,
	&dev_attr_carrier_down_count.attr,
	&dev_attr_threaded.attr,
	NULL,
};
ATTRIBUTE_GROUPS(net_class);

/* Show a given attribute in the statistics group */
static ssize_t netstat_show(const struct device *d,
			    struct device_attribute *attr, char *buf,
			    unsigned long offset)
{
	struct net_device *dev = to_net_dev(d);
	ssize_t ret = -EINVAL;

	WARN_ON(offset > sizeof(struct rtnl_link_stats64) ||
		offset % sizeof(u64) != 0);

	read_lock(&dev_base_lock);
	if (dev_isalive(dev)) {
		struct rtnl_link_stats64 temp;
		const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);

		ret = sprintf(buf, fmt_u64, *(u64 *)(((u8 *)stats) + offset));
	}
	read_unlock(&dev_base_lock);
	return ret;
}

/* generate a read-only statistics attribute */
#define NETSTAT_ENTRY(name)						\
static ssize_t name##_show(struct device *d,				\
			   struct device_attribute *attr, char *buf)	\
{									\
	return netstat_show(d, attr, buf,				\
			    offsetof(struct rtnl_link_stats64, name));	\
}									\
static DEVICE_ATTR_RO(name)
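
/*
 * Example (sketch): every NETSTAT_ENTRY() below becomes a read-only file in
 * the per-device "statistics" group, read by byte offset straight out of
 * struct rtnl_link_stats64:
 *
 *	# cat /sys/class/net/eth0/statistics/rx_packets
 *	1234567
 */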

NETSTAT_ENTRY(rx_packets);
NETSTAT_ENTRY(tx_packets);
NETSTAT_ENTRY(rx_bytes);
NETSTAT_ENTRY(tx_bytes);
NETSTAT_ENTRY(rx_errors);
NETSTAT_ENTRY(tx_errors);
NETSTAT_ENTRY(rx_dropped);
NETSTAT_ENTRY(tx_dropped);
NETSTAT_ENTRY(multicast);
NETSTAT_ENTRY(collisions);
NETSTAT_ENTRY(rx_length_errors);
NETSTAT_ENTRY(rx_over_errors);
NETSTAT_ENTRY(rx_crc_errors);
NETSTAT_ENTRY(rx_frame_errors);
NETSTAT_ENTRY(rx_fifo_errors);
NETSTAT_ENTRY(rx_missed_errors);
NETSTAT_ENTRY(tx_aborted_errors);
NETSTAT_ENTRY(tx_carrier_errors);
NETSTAT_ENTRY(tx_fifo_errors);
NETSTAT_ENTRY(tx_heartbeat_errors);
NETSTAT_ENTRY(tx_window_errors);
NETSTAT_ENTRY(rx_compressed);
NETSTAT_ENTRY(tx_compressed);
NETSTAT_ENTRY(rx_nohandler);

static struct attribute *netstat_attrs[] __ro_after_init = {
	&dev_attr_rx_packets.attr,
	&dev_attr_tx_packets.attr,
	&dev_attr_rx_bytes.attr,
	&dev_attr_tx_bytes.attr,
	&dev_attr_rx_errors.attr,
	&dev_attr_tx_errors.attr,
	&dev_attr_rx_dropped.attr,
	&dev_attr_tx_dropped.attr,
	&dev_attr_multicast.attr,
	&dev_attr_collisions.attr,
	&dev_attr_rx_length_errors.attr,
	&dev_attr_rx_over_errors.attr,
	&dev_attr_rx_crc_errors.attr,
	&dev_attr_rx_frame_errors.attr,
	&dev_attr_rx_fifo_errors.attr,
	&dev_attr_rx_missed_errors.attr,
	&dev_attr_tx_aborted_errors.attr,
	&dev_attr_tx_carrier_errors.attr,
	&dev_attr_tx_fifo_errors.attr,
	&dev_attr_tx_heartbeat_errors.attr,
	&dev_attr_tx_window_errors.attr,
	&dev_attr_rx_compressed.attr,
	&dev_attr_tx_compressed.attr,
	&dev_attr_rx_nohandler.attr,
	NULL
};

static const struct attribute_group netstat_group = {
	.name = "statistics",
	.attrs = netstat_attrs,
};

#if IS_ENABLED(CONFIG_WIRELESS_EXT) || IS_ENABLED(CONFIG_CFG80211)
static struct attribute *wireless_attrs[] = {
	NULL
};

static const struct attribute_group wireless_group = {
	.name = "wireless",
	.attrs = wireless_attrs,
};
#endif

#else /* CONFIG_SYSFS */
#define net_class_groups	NULL
#endif /* CONFIG_SYSFS */

#ifdef CONFIG_SYSFS
#define to_rx_queue_attr(_attr) \
	container_of(_attr, struct rx_queue_attribute, attr)

#define to_rx_queue(obj) container_of(obj, struct netdev_rx_queue, kobj)

static ssize_t rx_queue_attr_show(struct kobject *kobj, struct attribute *attr,
				  char *buf)
{
	const struct rx_queue_attribute *attribute = to_rx_queue_attr(attr);
	struct netdev_rx_queue *queue = to_rx_queue(kobj);

	if (!attribute->show)
		return -EIO;

	return attribute->show(queue, buf);
}

static ssize_t rx_queue_attr_store(struct kobject *kobj, struct attribute *attr,
				   const char *buf, size_t count)
{
	const struct rx_queue_attribute *attribute = to_rx_queue_attr(attr);
	struct netdev_rx_queue *queue = to_rx_queue(kobj);

	if (!attribute->store)
		return -EIO;

	return attribute->store(queue, buf, count);
}

static const struct sysfs_ops rx_queue_sysfs_ops = {
	.show = rx_queue_attr_show,
	.store = rx_queue_attr_store,
};

#ifdef CONFIG_RPS
static ssize_t show_rps_map(struct netdev_rx_queue *queue, char *buf)
{
	struct rps_map *map;
	cpumask_var_t mask;
	int i, len;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	rcu_read_lock();
	map = rcu_dereference(queue->rps_map);
	if (map)
		for (i = 0; i < map->len; i++)
			cpumask_set_cpu(map->cpus[i], mask);

	len = snprintf(buf, PAGE_SIZE, "%*pb\n", cpumask_pr_args(mask));
	rcu_read_unlock();
	free_cpumask_var(mask);

	return len < PAGE_SIZE ? len : -EINVAL;
}

static ssize_t store_rps_map(struct netdev_rx_queue *queue,
			     const char *buf, size_t len)
{
	struct rps_map *old_map, *map;
	cpumask_var_t mask;
	int err, cpu, i, hk_flags;
	static DEFINE_MUTEX(rps_map_mutex);

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
	if (err) {
		free_cpumask_var(mask);
		return err;
	}

	if (!cpumask_empty(mask)) {
		hk_flags = HK_FLAG_DOMAIN | HK_FLAG_WQ;
		cpumask_and(mask, mask, housekeeping_cpumask(hk_flags));
		if (cpumask_empty(mask)) {
			free_cpumask_var(mask);
			return -EINVAL;
		}
	}

	map = kzalloc(max_t(unsigned int,
			    RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES),
		      GFP_KERNEL);
	if (!map) {
		free_cpumask_var(mask);
		return -ENOMEM;
	}

	i = 0;
	for_each_cpu_and(cpu, mask, cpu_online_mask)
		map->cpus[i++] = cpu;

	if (i) {
		map->len = i;
	} else {
		kfree(map);
		map = NULL;
	}

	mutex_lock(&rps_map_mutex);
	old_map = rcu_dereference_protected(queue->rps_map,
					    mutex_is_locked(&rps_map_mutex));
	rcu_assign_pointer(queue->rps_map, map);

	if (map)
		static_branch_inc(&rps_needed);
	if (old_map)
		static_branch_dec(&rps_needed);

	mutex_unlock(&rps_map_mutex);

	if (old_map)
		kfree_rcu(old_map, rcu);

	free_cpumask_var(mask);
	return len;
}
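
/*
 * Example (sketch): steering receive processing for rx queue 0 onto CPUs
 * 0-3 by writing a hex CPU bitmask (parsed by bitmap_parse() above):
 *
 *	# echo f > /sys/class/net/eth0/queues/rx-0/rps_cpus
 */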

static ssize_t show_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
					   char *buf)
{
	struct rps_dev_flow_table *flow_table;
	unsigned long val = 0;

	rcu_read_lock();
	flow_table = rcu_dereference(queue->rps_flow_table);
	if (flow_table)
		val = (unsigned long)flow_table->mask + 1;
	rcu_read_unlock();

	return sprintf(buf, "%lu\n", val);
}

static void rps_dev_flow_table_release(struct rcu_head *rcu)
{
	struct rps_dev_flow_table *table = container_of(rcu,
	    struct rps_dev_flow_table, rcu);
	vfree(table);
}

static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
					    const char *buf, size_t len)
{
	unsigned long mask, count;
	struct rps_dev_flow_table *table, *old_table;
	static DEFINE_SPINLOCK(rps_dev_flow_lock);
	int rc;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	rc = kstrtoul(buf, 0, &count);
	if (rc < 0)
		return rc;

	if (count) {
		mask = count - 1;
		/* mask = roundup_pow_of_two(count) - 1;
		 * without overflows...
		 */
		while ((mask | (mask >> 1)) != mask)
			mask |= (mask >> 1);
		/* On 64 bit arches, must check mask fits in table->mask (u32),
		 * and on 32bit arches, must check
		 * RPS_DEV_FLOW_TABLE_SIZE(mask + 1) doesn't overflow.
		 */
#if BITS_PER_LONG > 32
		if (mask > (unsigned long)(u32)mask)
			return -EINVAL;
#else
		if (mask > (ULONG_MAX - RPS_DEV_FLOW_TABLE_SIZE(1))
				/ sizeof(struct rps_dev_flow)) {
			/* Enforce a limit to prevent overflow */
			return -EINVAL;
		}
#endif
		table = vmalloc(RPS_DEV_FLOW_TABLE_SIZE(mask + 1));
		if (!table)
			return -ENOMEM;

		table->mask = mask;
		for (count = 0; count <= mask; count++)
			table->flows[count].cpu = RPS_NO_CPU;
	} else {
		table = NULL;
	}

	spin_lock(&rps_dev_flow_lock);
	old_table = rcu_dereference_protected(queue->rps_flow_table,
					      lockdep_is_held(&rps_dev_flow_lock));
	rcu_assign_pointer(queue->rps_flow_table, table);
	spin_unlock(&rps_dev_flow_lock);

	if (old_table)
		call_rcu(&old_table->rcu, rps_dev_flow_table_release);

	return len;
}
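
/*
 * Example (sketch): sizing the per-queue RFS flow table. The bit-smearing
 * loop above rounds the count up to a power of two, so a request of 3000
 * yields mask = 4095, i.e. a 4096-entry table:
 *
 *	# echo 3000 > /sys/class/net/eth0/queues/rx-0/rps_flow_cnt
 *	# cat /sys/class/net/eth0/queues/rx-0/rps_flow_cnt
 *	4096
 */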
968
stephen hemminger667e4272017-08-18 13:46:27 -0700969static struct rx_queue_attribute rps_cpus_attribute __ro_after_init
Joe Perchesd6444062018-03-23 15:54:38 -0700970 = __ATTR(rps_cpus, 0644, show_rps_map, store_rps_map);
Tom Herbert0a9627f2010-03-16 08:03:29 +0000971
stephen hemminger667e4272017-08-18 13:46:27 -0700972static struct rx_queue_attribute rps_dev_flow_table_cnt_attribute __ro_after_init
Joe Perchesd6444062018-03-23 15:54:38 -0700973 = __ATTR(rps_flow_cnt, 0644,
stephen hemminger667e4272017-08-18 13:46:27 -0700974 show_rps_dev_flow_table_cnt, store_rps_dev_flow_table_cnt);
Michael Daltona953be52014-01-16 22:23:28 -0800975#endif /* CONFIG_RPS */
Tom Herbertfec5e652010-04-16 16:01:27 -0700976
stephen hemminger667e4272017-08-18 13:46:27 -0700977static struct attribute *rx_queue_default_attrs[] __ro_after_init = {
Michael Daltona953be52014-01-16 22:23:28 -0800978#ifdef CONFIG_RPS
Tom Herbert0a9627f2010-03-16 08:03:29 +0000979 &rps_cpus_attribute.attr,
Tom Herbertfec5e652010-04-16 16:01:27 -0700980 &rps_dev_flow_table_cnt_attribute.attr,
Michael Daltona953be52014-01-16 22:23:28 -0800981#endif
Tom Herbert0a9627f2010-03-16 08:03:29 +0000982 NULL
983};
Kimberly Brownbe0d6922019-04-01 22:51:35 -0400984ATTRIBUTE_GROUPS(rx_queue_default);
Tom Herbert0a9627f2010-03-16 08:03:29 +0000985
986static void rx_queue_release(struct kobject *kobj)
987{
988 struct netdev_rx_queue *queue = to_rx_queue(kobj);
Michael Daltona953be52014-01-16 22:23:28 -0800989#ifdef CONFIG_RPS
Eric Dumazet6e3f7fa2010-10-25 03:02:02 +0000990 struct rps_map *map;
991 struct rps_dev_flow_table *flow_table;
Tom Herbert0a9627f2010-03-16 08:03:29 +0000992
Eric Dumazet33d480c2011-08-11 19:30:52 +0000993 map = rcu_dereference_protected(queue->rps_map, 1);
John Fastabend9ea19482010-11-16 06:31:39 +0000994 if (map) {
995 RCU_INIT_POINTER(queue->rps_map, NULL);
Lai Jiangshanf6f80232011-03-18 12:01:31 +0800996 kfree_rcu(map, rcu);
John Fastabend9ea19482010-11-16 06:31:39 +0000997 }
Eric Dumazet6e3f7fa2010-10-25 03:02:02 +0000998
Eric Dumazet33d480c2011-08-11 19:30:52 +0000999 flow_table = rcu_dereference_protected(queue->rps_flow_table, 1);
John Fastabend9ea19482010-11-16 06:31:39 +00001000 if (flow_table) {
1001 RCU_INIT_POINTER(queue->rps_flow_table, NULL);
Eric Dumazet6e3f7fa2010-10-25 03:02:02 +00001002 call_rcu(&flow_table->rcu, rps_dev_flow_table_release);
John Fastabend9ea19482010-11-16 06:31:39 +00001003 }
Michael Daltona953be52014-01-16 22:23:28 -08001004#endif
Tom Herbert0a9627f2010-03-16 08:03:29 +00001005
John Fastabend9ea19482010-11-16 06:31:39 +00001006 memset(kobj, 0, sizeof(*kobj));
Eric Dumazet80e89212021-12-04 20:21:58 -08001007 dev_put_track(queue->dev, &queue->dev_tracker);
Tom Herbert0a9627f2010-03-16 08:03:29 +00001008}
1009
Weilong Chen82ef3d52014-01-16 17:24:31 +08001010static const void *rx_queue_namespace(struct kobject *kobj)
1011{
1012 struct netdev_rx_queue *queue = to_rx_queue(kobj);
1013 struct device *dev = &queue->dev->dev;
1014 const void *ns = NULL;
1015
1016 if (dev->class && dev->class->ns_type)
1017 ns = dev->class->namespace(dev);
1018
1019 return ns;
1020}
1021
Dmitry Torokhovb0e37c02018-07-20 21:56:52 +00001022static void rx_queue_get_ownership(struct kobject *kobj,
1023 kuid_t *uid, kgid_t *gid)
1024{
1025 const struct net *net = rx_queue_namespace(kobj);
1026
1027 net_ns_get_ownership(net, uid, gid);
1028}
1029
stephen hemminger667e4272017-08-18 13:46:27 -07001030static struct kobj_type rx_queue_ktype __ro_after_init = {
Tom Herbert0a9627f2010-03-16 08:03:29 +00001031 .sysfs_ops = &rx_queue_sysfs_ops,
1032 .release = rx_queue_release,
Kimberly Brownbe0d6922019-04-01 22:51:35 -04001033 .default_groups = rx_queue_default_groups,
Dmitry Torokhovb0e37c02018-07-20 21:56:52 +00001034 .namespace = rx_queue_namespace,
1035 .get_ownership = rx_queue_get_ownership,
Tom Herbert0a9627f2010-03-16 08:03:29 +00001036};
1037
WANG Cong6b53daf2014-07-23 16:09:10 -07001038static int rx_queue_add_kobject(struct net_device *dev, int index)
Tom Herbert0a9627f2010-03-16 08:03:29 +00001039{
WANG Cong6b53daf2014-07-23 16:09:10 -07001040 struct netdev_rx_queue *queue = dev->_rx + index;
Tom Herbert0a9627f2010-03-16 08:03:29 +00001041 struct kobject *kobj = &queue->kobj;
1042 int error = 0;
1043
Jouni Hoganderddd9b5e2019-12-17 13:46:34 +02001044 /* Kobject_put later will trigger rx_queue_release call which
1045 * decreases dev refcount: Take that reference here
1046 */
Eric Dumazet80e89212021-12-04 20:21:58 -08001047 dev_hold_track(queue->dev, &queue->dev_tracker, GFP_KERNEL);
Jouni Hoganderddd9b5e2019-12-17 13:46:34 +02001048
WANG Cong6b53daf2014-07-23 16:09:10 -07001049 kobj->kset = dev->queues_kset;
Tom Herbert0a9627f2010-03-16 08:03:29 +00001050 error = kobject_init_and_add(kobj, &rx_queue_ktype, NULL,
stephen hemminger6648c652017-08-18 13:46:28 -07001051 "rx-%u", index);
Michael Daltona953be52014-01-16 22:23:28 -08001052 if (error)
Jouni Hoganderb8eb7182019-11-20 09:08:16 +02001053 goto err;
Michael Daltona953be52014-01-16 22:23:28 -08001054
WANG Cong6b53daf2014-07-23 16:09:10 -07001055 if (dev->sysfs_rx_queue_group) {
1056 error = sysfs_create_group(kobj, dev->sysfs_rx_queue_group);
Jouni Hoganderb8eb7182019-11-20 09:08:16 +02001057 if (error)
1058 goto err;
Tom Herbert0a9627f2010-03-16 08:03:29 +00001059 }
1060
1061 kobject_uevent(kobj, KOBJ_ADD);
1062
1063 return error;
Jouni Hoganderb8eb7182019-11-20 09:08:16 +02001064
1065err:
1066 kobject_put(kobj);
1067 return error;
Tom Herbert0a9627f2010-03-16 08:03:29 +00001068}
Christian Braunerd7554072020-02-27 04:37:18 +01001069
1070static int rx_queue_change_owner(struct net_device *dev, int index, kuid_t kuid,
1071 kgid_t kgid)
1072{
1073 struct netdev_rx_queue *queue = dev->_rx + index;
1074 struct kobject *kobj = &queue->kobj;
1075 int error;
1076
1077 error = sysfs_change_owner(kobj, kuid, kgid);
1078 if (error)
1079 return error;
1080
1081 if (dev->sysfs_rx_queue_group)
1082 error = sysfs_group_change_owner(
1083 kobj, dev->sysfs_rx_queue_group, kuid, kgid);
1084
1085 return error;
1086}
Paul Bolle80dd6ea2014-02-09 14:07:11 +01001087#endif /* CONFIG_SYSFS */
Tom Herbert0a9627f2010-03-16 08:03:29 +00001088
Ben Hutchings62fe0b42010-09-27 08:24:33 +00001089int
WANG Cong6b53daf2014-07-23 16:09:10 -07001090net_rx_queue_update_kobjects(struct net_device *dev, int old_num, int new_num)
Tom Herbert0a9627f2010-03-16 08:03:29 +00001091{
Michael Daltona953be52014-01-16 22:23:28 -08001092#ifdef CONFIG_SYSFS
Tom Herbert0a9627f2010-03-16 08:03:29 +00001093 int i;
1094 int error = 0;
1095
Michael Daltona953be52014-01-16 22:23:28 -08001096#ifndef CONFIG_RPS
WANG Cong6b53daf2014-07-23 16:09:10 -07001097 if (!dev->sysfs_rx_queue_group)
Michael Daltona953be52014-01-16 22:23:28 -08001098 return 0;
1099#endif
Ben Hutchings62fe0b42010-09-27 08:24:33 +00001100 for (i = old_num; i < new_num; i++) {
WANG Cong6b53daf2014-07-23 16:09:10 -07001101 error = rx_queue_add_kobject(dev, i);
Ben Hutchings62fe0b42010-09-27 08:24:33 +00001102 if (error) {
1103 new_num = old_num;
Tom Herbert0a9627f2010-03-16 08:03:29 +00001104 break;
Ben Hutchings62fe0b42010-09-27 08:24:33 +00001105 }
Tom Herbert0a9627f2010-03-16 08:03:29 +00001106 }
1107
Michael Daltona953be52014-01-16 22:23:28 -08001108 while (--i >= new_num) {
Andrey Vagin002d8a12016-10-24 19:09:53 -07001109 struct kobject *kobj = &dev->_rx[i].kobj;
1110
Christian Brauner8b8f3e62020-08-19 14:06:36 +02001111 if (!refcount_read(&dev_net(dev)->ns.count))
Andrey Vagin002d8a12016-10-24 19:09:53 -07001112 kobj->uevent_suppress = 1;
WANG Cong6b53daf2014-07-23 16:09:10 -07001113 if (dev->sysfs_rx_queue_group)
Andrey Vagin002d8a12016-10-24 19:09:53 -07001114 sysfs_remove_group(kobj, dev->sysfs_rx_queue_group);
1115 kobject_put(kobj);
Michael Daltona953be52014-01-16 22:23:28 -08001116 }
Tom Herbert0a9627f2010-03-16 08:03:29 +00001117
1118 return error;
Tom Herbertbf264142010-11-26 08:36:09 +00001119#else
1120 return 0;
1121#endif
Tom Herbert0a9627f2010-03-16 08:03:29 +00001122}
1123
Christian Braunerd7554072020-02-27 04:37:18 +01001124static int net_rx_queue_change_owner(struct net_device *dev, int num,
1125 kuid_t kuid, kgid_t kgid)
1126{
1127#ifdef CONFIG_SYSFS
1128 int error = 0;
1129 int i;
1130
1131#ifndef CONFIG_RPS
1132 if (!dev->sysfs_rx_queue_group)
1133 return 0;
1134#endif
1135 for (i = 0; i < num; i++) {
1136 error = rx_queue_change_owner(dev, i, kuid, kgid);
1137 if (error)
1138 break;
1139 }
1140
1141 return error;
1142#else
1143 return 0;
1144#endif
1145}
1146
david decotignyccf5ff62011-11-16 12:15:10 +00001147#ifdef CONFIG_SYSFS
Tom Herbert1d24eb42010-11-21 13:17:27 +00001148/*
1149 * netdev_queue sysfs structures and functions.
1150 */
1151struct netdev_queue_attribute {
1152 struct attribute attr;
stephen hemminger718ad682017-08-18 13:46:24 -07001153 ssize_t (*show)(struct netdev_queue *queue, char *buf);
Tom Herbert1d24eb42010-11-21 13:17:27 +00001154 ssize_t (*store)(struct netdev_queue *queue,
stephen hemminger718ad682017-08-18 13:46:24 -07001155 const char *buf, size_t len);
Tom Herbert1d24eb42010-11-21 13:17:27 +00001156};
stephen hemminger6648c652017-08-18 13:46:28 -07001157#define to_netdev_queue_attr(_attr) \
1158 container_of(_attr, struct netdev_queue_attribute, attr)
Tom Herbert1d24eb42010-11-21 13:17:27 +00001159
1160#define to_netdev_queue(obj) container_of(obj, struct netdev_queue, kobj)
1161
1162static ssize_t netdev_queue_attr_show(struct kobject *kobj,
1163 struct attribute *attr, char *buf)
Ben Hutchings62fe0b42010-09-27 08:24:33 +00001164{
stephen hemminger667e4272017-08-18 13:46:27 -07001165 const struct netdev_queue_attribute *attribute
1166 = to_netdev_queue_attr(attr);
Tom Herbert1d24eb42010-11-21 13:17:27 +00001167 struct netdev_queue *queue = to_netdev_queue(kobj);
1168
1169 if (!attribute->show)
1170 return -EIO;
1171
stephen hemminger718ad682017-08-18 13:46:24 -07001172 return attribute->show(queue, buf);
Tom Herbert1d24eb42010-11-21 13:17:27 +00001173}
1174
1175static ssize_t netdev_queue_attr_store(struct kobject *kobj,
1176 struct attribute *attr,
1177 const char *buf, size_t count)
1178{
stephen hemminger667e4272017-08-18 13:46:27 -07001179 const struct netdev_queue_attribute *attribute
1180 = to_netdev_queue_attr(attr);
Tom Herbert1d24eb42010-11-21 13:17:27 +00001181 struct netdev_queue *queue = to_netdev_queue(kobj);
1182
1183 if (!attribute->store)
1184 return -EIO;
1185
stephen hemminger718ad682017-08-18 13:46:24 -07001186 return attribute->store(queue, buf, count);
Tom Herbert1d24eb42010-11-21 13:17:27 +00001187}
1188
1189static const struct sysfs_ops netdev_queue_sysfs_ops = {
1190 .show = netdev_queue_attr_show,
1191 .store = netdev_queue_attr_store,
1192};
1193
stephen hemminger2b9c7582017-08-18 13:46:26 -07001194static ssize_t tx_timeout_show(struct netdev_queue *queue, char *buf)
david decotignyccf5ff62011-11-16 12:15:10 +00001195{
Eric Dumazet8160fb42021-11-16 19:29:21 -08001196 unsigned long trans_timeout = atomic_long_read(&queue->trans_timeout);
david decotignyccf5ff62011-11-16 12:15:10 +00001197
Xiongfeng Wang9bb5fbe2020-07-21 15:02:57 +08001198 return sprintf(buf, fmt_ulong, trans_timeout);
david decotignyccf5ff62011-11-16 12:15:10 +00001199}

static unsigned int get_netdev_queue_index(struct netdev_queue *queue)
{
	struct net_device *dev = queue->dev;
	unsigned int i;

	i = queue - dev->_tx;
	BUG_ON(i >= dev->num_tx_queues);

	return i;
}

static ssize_t traffic_class_show(struct netdev_queue *queue,
				  char *buf)
{
	struct net_device *dev = queue->dev;
	int num_tc, tc;
	int index;

	if (!netif_is_multiqueue(dev))
		return -ENOENT;

	if (!rtnl_trylock())
		return restart_syscall();

	index = get_netdev_queue_index(queue);

	/* If queue belongs to subordinate dev use its TC mapping */
	dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev;

	num_tc = dev->num_tc;
	tc = netdev_txq_to_tc(dev, index);

	rtnl_unlock();

	if (tc < 0)
		return -EINVAL;

	/* We can report the traffic class one of two ways:
	 * Subordinate device traffic classes are reported with the traffic
	 * class first, then the subordinate class, so for example TC0 on
	 * subordinate device 2 is reported as "0-2". If the queue belongs
	 * to the root device it is reported with just the traffic class,
	 * so just "0" for TC 0. (A subordinate device stores the negated
	 * subordinate channel in num_tc, which is why the "%d%d" format
	 * below prints the "-" separator for free.)
	 */
	return num_tc < 0 ? sprintf(buf, "%d%d\n", tc, num_tc) :
			    sprintf(buf, "%d\n", tc);
}
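
/* Example output, assuming an mqprio-style TC config on eth0: a queue in
 * TC 0 on the root device reads back as "0"; the same TC on subordinate
 * device 2 reads back as "0-2":
 *	cat /sys/class/net/eth0/queues/tx-0/traffic_class
 */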

#ifdef CONFIG_XPS
static ssize_t tx_maxrate_show(struct netdev_queue *queue,
			       char *buf)
{
	return sprintf(buf, "%lu\n", queue->tx_maxrate);
}

static ssize_t tx_maxrate_store(struct netdev_queue *queue,
				const char *buf, size_t len)
{
	struct net_device *dev = queue->dev;
	int err, index = get_netdev_queue_index(queue);
	u32 rate = 0;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	/* The check is repeated below under the rtnl lock; doing it here
	 * first lets us return early without hitting the trylock/restart
	 * path.
	 */
	if (!dev->netdev_ops->ndo_set_tx_maxrate)
		return -EOPNOTSUPP;

	err = kstrtou32(buf, 10, &rate);
	if (err < 0)
		return err;

	if (!rtnl_trylock())
		return restart_syscall();

	err = -EOPNOTSUPP;
	if (dev->netdev_ops->ndo_set_tx_maxrate)
		err = dev->netdev_ops->ndo_set_tx_maxrate(dev, index, rate);

	rtnl_unlock();
	if (!err) {
		queue->tx_maxrate = rate;
		return len;
	}
	return err;
}

static struct netdev_queue_attribute queue_tx_maxrate __ro_after_init
	= __ATTR_RW(tx_maxrate);
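
/* Usage sketch, assuming a driver that implements ndo_set_tx_maxrate and
 * an interface named eth0; the value is a per-queue rate in Mbps, with 0
 * conventionally meaning "no limit":
 *	echo 100 > /sys/class/net/eth0/queues/tx-0/tx_maxrate
 */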
#endif

static struct netdev_queue_attribute queue_trans_timeout __ro_after_init
	= __ATTR_RO(tx_timeout);

static struct netdev_queue_attribute queue_traffic_class __ro_after_init
	= __ATTR_RO(traffic_class);

#ifdef CONFIG_BQL
/*
 * Byte queue limits sysfs structures and functions.
 */
static ssize_t bql_show(char *buf, unsigned int value)
{
	return sprintf(buf, "%u\n", value);
}

static ssize_t bql_set(const char *buf, const size_t count,
		       unsigned int *pvalue)
{
	unsigned int value;
	int err;

	if (!strcmp(buf, "max") || !strcmp(buf, "max\n")) {
		value = DQL_MAX_LIMIT;
	} else {
		err = kstrtouint(buf, 10, &value);
		if (err < 0)
			return err;
		if (value > DQL_MAX_LIMIT)
			return -EINVAL;
	}

	*pvalue = value;

	return count;
}

static ssize_t bql_show_hold_time(struct netdev_queue *queue,
				  char *buf)
{
	struct dql *dql = &queue->dql;

	return sprintf(buf, "%u\n", jiffies_to_msecs(dql->slack_hold_time));
}

static ssize_t bql_set_hold_time(struct netdev_queue *queue,
				 const char *buf, size_t len)
{
	struct dql *dql = &queue->dql;
	unsigned int value;
	int err;

	err = kstrtouint(buf, 10, &value);
	if (err < 0)
		return err;

	dql->slack_hold_time = msecs_to_jiffies(value);

	return len;
}

static struct netdev_queue_attribute bql_hold_time_attribute __ro_after_init
	= __ATTR(hold_time, 0644,
		 bql_show_hold_time, bql_set_hold_time);

static ssize_t bql_show_inflight(struct netdev_queue *queue,
				 char *buf)
{
	struct dql *dql = &queue->dql;

	return sprintf(buf, "%u\n", dql->num_queued - dql->num_completed);
}

static struct netdev_queue_attribute bql_inflight_attribute __ro_after_init =
	__ATTR(inflight, 0444, bql_show_inflight, NULL);

#define BQL_ATTR(NAME, FIELD)						\
static ssize_t bql_show_ ## NAME(struct netdev_queue *queue,		\
				 char *buf)				\
{									\
	return bql_show(buf, queue->dql.FIELD);				\
}									\
									\
static ssize_t bql_set_ ## NAME(struct netdev_queue *queue,		\
				const char *buf, size_t len)		\
{									\
	return bql_set(buf, len, &queue->dql.FIELD);			\
}									\
									\
static struct netdev_queue_attribute bql_ ## NAME ## _attribute __ro_after_init \
	= __ATTR(NAME, 0644,						\
		 bql_show_ ## NAME, bql_set_ ## NAME)

BQL_ATTR(limit, limit);
BQL_ATTR(limit_max, max_limit);
BQL_ATTR(limit_min, min_limit);

static struct attribute *dql_attrs[] __ro_after_init = {
	&bql_limit_attribute.attr,
	&bql_limit_max_attribute.attr,
	&bql_limit_min_attribute.attr,
	&bql_hold_time_attribute.attr,
	&bql_inflight_attribute.attr,
	NULL
};

static const struct attribute_group dql_group = {
	.name = "byte_queue_limits",
	.attrs = dql_attrs,
};
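
/* This group appears as a byte_queue_limits/ directory under each Tx
 * queue. A minimal usage sketch (hypothetical interface name):
 *	cat /sys/class/net/eth0/queues/tx-0/byte_queue_limits/inflight
 *	echo 30000 > /sys/class/net/eth0/queues/tx-0/byte_queue_limits/limit_max
 *	echo max > /sys/class/net/eth0/queues/tx-0/byte_queue_limits/limit_min
 * bql_set() accepts either a decimal byte count or the literal "max",
 * which maps to DQL_MAX_LIMIT.
 */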
#endif /* CONFIG_BQL */

#ifdef CONFIG_XPS
static ssize_t xps_queue_show(struct net_device *dev, unsigned int index,
			      int tc, char *buf, enum xps_map_type type)
{
	struct xps_dev_maps *dev_maps;
	unsigned long *mask;
	unsigned int nr_ids;
	int j, len;

	rcu_read_lock();
	dev_maps = rcu_dereference(dev->xps_maps[type]);

	/* When dev_maps hasn't been allocated yet, default to nr_cpu_ids
	 * (XPS_CPUS) or dev->num_rx_queues (XPS_RXQS) instead of just
	 * returning 0, to stay backward compatible.
	 */
	nr_ids = dev_maps ? dev_maps->nr_ids :
		 (type == XPS_CPUS ? nr_cpu_ids : dev->num_rx_queues);

	mask = bitmap_zalloc(nr_ids, GFP_NOWAIT);
	if (!mask) {
		rcu_read_unlock();
		return -ENOMEM;
	}

	if (!dev_maps || tc >= dev_maps->num_tc)
		goto out_no_maps;

	for (j = 0; j < nr_ids; j++) {
		int i, tci = j * dev_maps->num_tc + tc;
		struct xps_map *map;

		map = rcu_dereference(dev_maps->attr_map[tci]);
		if (!map)
			continue;

		for (i = map->len; i--;) {
			if (map->queues[i] == index) {
				__set_bit(j, mask);
				break;
			}
		}
	}
out_no_maps:
	rcu_read_unlock();

	len = bitmap_print_to_pagebuf(false, buf, mask, nr_ids);
	bitmap_free(mask);

	return len < PAGE_SIZE ? len : -EINVAL;
}
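
/* bitmap_print_to_pagebuf(false, ...) renders the result in the usual
 * comma-separated hex mask format (the same format as
 * /proc/irq/<n>/smp_affinity), not as a CPU/queue list.
 */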

static ssize_t xps_cpus_show(struct netdev_queue *queue, char *buf)
{
	struct net_device *dev = queue->dev;
	unsigned int index;
	int len, tc;

	if (!netif_is_multiqueue(dev))
		return -ENOENT;

	index = get_netdev_queue_index(queue);

	if (!rtnl_trylock())
		return restart_syscall();

	/* If queue belongs to subordinate dev use its map */
	dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev;

	tc = netdev_txq_to_tc(dev, index);
	if (tc < 0) {
		rtnl_unlock();
		return -EINVAL;
	}

	/* Make sure the subordinate device can't be freed */
	get_device(&dev->dev);
	rtnl_unlock();

	len = xps_queue_show(dev, index, tc, buf, XPS_CPUS);

	put_device(&dev->dev);
	return len;
}

static ssize_t xps_cpus_store(struct netdev_queue *queue,
			      const char *buf, size_t len)
{
	struct net_device *dev = queue->dev;
	unsigned int index;
	cpumask_var_t mask;
	int err;

	if (!netif_is_multiqueue(dev))
		return -ENOENT;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	index = get_netdev_queue_index(queue);

	err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
	if (err) {
		free_cpumask_var(mask);
		return err;
	}

	if (!rtnl_trylock()) {
		free_cpumask_var(mask);
		return restart_syscall();
	}

	err = netif_set_xps_queue(dev, mask, index);
	rtnl_unlock();

	free_cpumask_var(mask);

	return err ? : len;
}

static struct netdev_queue_attribute xps_cpus_attribute __ro_after_init
	= __ATTR_RW(xps_cpus);
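
/* Example XPS configuration (hypothetical interface name): steer
 * transmissions from CPUs 0-3 to Tx queue 0 by writing a CPU mask:
 *	echo f > /sys/class/net/eth0/queues/tx-0/xps_cpus
 */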

static ssize_t xps_rxqs_show(struct netdev_queue *queue, char *buf)
{
	struct net_device *dev = queue->dev;
	unsigned int index;
	int tc;

	index = get_netdev_queue_index(queue);

	if (!rtnl_trylock())
		return restart_syscall();

	tc = netdev_txq_to_tc(dev, index);
	rtnl_unlock();
	if (tc < 0)
		return -EINVAL;

	return xps_queue_show(dev, index, tc, buf, XPS_RXQS);
}

static ssize_t xps_rxqs_store(struct netdev_queue *queue, const char *buf,
			      size_t len)
{
	struct net_device *dev = queue->dev;
	struct net *net = dev_net(dev);
	unsigned long *mask;
	unsigned int index;
	int err;

	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	mask = bitmap_zalloc(dev->num_rx_queues, GFP_KERNEL);
	if (!mask)
		return -ENOMEM;

	index = get_netdev_queue_index(queue);

	err = bitmap_parse(buf, len, mask, dev->num_rx_queues);
	if (err) {
		bitmap_free(mask);
		return err;
	}

	if (!rtnl_trylock()) {
		bitmap_free(mask);
		return restart_syscall();
	}

	cpus_read_lock();
	err = __netif_set_xps_queue(dev, mask, index, XPS_RXQS);
	cpus_read_unlock();

	rtnl_unlock();

	bitmap_free(mask);
	return err ? : len;
}

static struct netdev_queue_attribute xps_rxqs_attribute __ro_after_init
	= __ATTR_RW(xps_rxqs);
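
/* Example (hypothetical interface name): with receive-queue-based XPS,
 * writing a bitmap of Rx queues ties this Tx queue to flows that were
 * received on those queues:
 *	echo 1 > /sys/class/net/eth0/queues/tx-0/xps_rxqs
 */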
#endif /* CONFIG_XPS */

static struct attribute *netdev_queue_default_attrs[] __ro_after_init = {
	&queue_trans_timeout.attr,
	&queue_traffic_class.attr,
#ifdef CONFIG_XPS
	&xps_cpus_attribute.attr,
	&xps_rxqs_attribute.attr,
	&queue_tx_maxrate.attr,
#endif
	NULL
};
ATTRIBUTE_GROUPS(netdev_queue_default);

static void netdev_queue_release(struct kobject *kobj)
{
	struct netdev_queue *queue = to_netdev_queue(kobj);

	memset(kobj, 0, sizeof(*kobj));
	dev_put_track(queue->dev, &queue->dev_tracker);
}

static const void *netdev_queue_namespace(struct kobject *kobj)
{
	struct netdev_queue *queue = to_netdev_queue(kobj);
	struct device *dev = &queue->dev->dev;
	const void *ns = NULL;

	if (dev->class && dev->class->ns_type)
		ns = dev->class->namespace(dev);

	return ns;
}

static void netdev_queue_get_ownership(struct kobject *kobj,
				       kuid_t *uid, kgid_t *gid)
{
	const struct net *net = netdev_queue_namespace(kobj);

	net_ns_get_ownership(net, uid, gid);
}

static struct kobj_type netdev_queue_ktype __ro_after_init = {
	.sysfs_ops = &netdev_queue_sysfs_ops,
	.release = netdev_queue_release,
	.default_groups = netdev_queue_default_groups,
	.namespace = netdev_queue_namespace,
	.get_ownership = netdev_queue_get_ownership,
};

static int netdev_queue_add_kobject(struct net_device *dev, int index)
{
	struct netdev_queue *queue = dev->_tx + index;
	struct kobject *kobj = &queue->kobj;
	int error = 0;

	/* A later kobject_put() will trigger a netdev_queue_release() call,
	 * which decreases the dev refcount: take that reference here.
	 */
	dev_hold_track(queue->dev, &queue->dev_tracker, GFP_KERNEL);

	kobj->kset = dev->queues_kset;
	error = kobject_init_and_add(kobj, &netdev_queue_ktype, NULL,
				     "tx-%u", index);
	if (error)
		goto err;

#ifdef CONFIG_BQL
	error = sysfs_create_group(kobj, &dql_group);
	if (error)
		goto err;
#endif

	kobject_uevent(kobj, KOBJ_ADD);
	return 0;

err:
	kobject_put(kobj);
	return error;
}

static int tx_queue_change_owner(struct net_device *ndev, int index,
				 kuid_t kuid, kgid_t kgid)
{
	struct netdev_queue *queue = ndev->_tx + index;
	struct kobject *kobj = &queue->kobj;
	int error;

	error = sysfs_change_owner(kobj, kuid, kgid);
	if (error)
		return error;

#ifdef CONFIG_BQL
	error = sysfs_group_change_owner(kobj, &dql_group, kuid, kgid);
#endif
	return error;
}
#endif /* CONFIG_SYSFS */

int
netdev_queue_update_kobjects(struct net_device *dev, int old_num, int new_num)
{
#ifdef CONFIG_SYSFS
	int i;
	int error = 0;

	/* Tx queue kobjects are allowed to be updated when a device is being
	 * unregistered, but solely to remove queues from qdiscs. Any path
	 * adding queues should be fixed.
	 */
	WARN(dev->reg_state == NETREG_UNREGISTERING && new_num > old_num,
	     "New queues can't be registered after device unregistration.");

	for (i = old_num; i < new_num; i++) {
		error = netdev_queue_add_kobject(dev, i);
		if (error) {
			new_num = old_num;
			break;
		}
	}

	while (--i >= new_num) {
		struct netdev_queue *queue = dev->_tx + i;

		if (!refcount_read(&dev_net(dev)->ns.count))
			queue->kobj.uevent_suppress = 1;
#ifdef CONFIG_BQL
		sysfs_remove_group(&queue->kobj, &dql_group);
#endif
		kobject_put(&queue->kobj);
	}

	return error;
#else
	return 0;
#endif /* CONFIG_SYSFS */
}

static int net_tx_queue_change_owner(struct net_device *dev, int num,
				     kuid_t kuid, kgid_t kgid)
{
#ifdef CONFIG_SYSFS
	int error = 0;
	int i;

	for (i = 0; i < num; i++) {
		error = tx_queue_change_owner(dev, i, kuid, kgid);
		if (error)
			break;
	}

	return error;
#else
	return 0;
#endif /* CONFIG_SYSFS */
}

static int register_queue_kobjects(struct net_device *dev)
{
	int error = 0, txq = 0, rxq = 0, real_rx = 0, real_tx = 0;

#ifdef CONFIG_SYSFS
	dev->queues_kset = kset_create_and_add("queues",
					       NULL, &dev->dev.kobj);
	if (!dev->queues_kset)
		return -ENOMEM;
	real_rx = dev->real_num_rx_queues;
#endif
	real_tx = dev->real_num_tx_queues;

	error = net_rx_queue_update_kobjects(dev, 0, real_rx);
	if (error)
		goto error;
	rxq = real_rx;

	error = netdev_queue_update_kobjects(dev, 0, real_tx);
	if (error)
		goto error;
	txq = real_tx;

	return 0;

error:
	netdev_queue_update_kobjects(dev, txq, 0);
	net_rx_queue_update_kobjects(dev, rxq, 0);
#ifdef CONFIG_SYSFS
	kset_unregister(dev->queues_kset);
#endif
	return error;
}
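
/* After successful registration the per-queue directories show up as
 *	/sys/class/net/<dev>/queues/rx-0 ... rx-<real_num_rx_queues - 1>
 *	/sys/class/net/<dev>/queues/tx-0 ... tx-<real_num_tx_queues - 1>
 */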

static int queue_change_owner(struct net_device *ndev, kuid_t kuid, kgid_t kgid)
{
	int error = 0, real_rx = 0, real_tx = 0;

#ifdef CONFIG_SYSFS
	if (ndev->queues_kset) {
		error = sysfs_change_owner(&ndev->queues_kset->kobj, kuid, kgid);
		if (error)
			return error;
	}
	real_rx = ndev->real_num_rx_queues;
#endif
	real_tx = ndev->real_num_tx_queues;

	error = net_rx_queue_change_owner(ndev, real_rx, kuid, kgid);
	if (error)
		return error;

	error = net_tx_queue_change_owner(ndev, real_tx, kuid, kgid);
	if (error)
		return error;

	return 0;
}

static void remove_queue_kobjects(struct net_device *dev)
{
	int real_rx = 0, real_tx = 0;

#ifdef CONFIG_SYSFS
	real_rx = dev->real_num_rx_queues;
#endif
	real_tx = dev->real_num_tx_queues;

	net_rx_queue_update_kobjects(dev, real_rx, 0);
	netdev_queue_update_kobjects(dev, real_tx, 0);

	dev->real_num_rx_queues = 0;
	dev->real_num_tx_queues = 0;
#ifdef CONFIG_SYSFS
	kset_unregister(dev->queues_kset);
#endif
}

static bool net_current_may_mount(void)
{
	struct net *net = current->nsproxy->net_ns;

	return ns_capable(net->user_ns, CAP_SYS_ADMIN);
}

static void *net_grab_current_ns(void)
{
	struct net *ns = current->nsproxy->net_ns;
#ifdef CONFIG_NET_NS
	if (ns)
		refcount_inc(&ns->passive);
#endif
	return ns;
}

static const void *net_initial_ns(void)
{
	return &init_net;
}

static const void *net_netlink_ns(struct sock *sk)
{
	return sock_net(sk);
}

const struct kobj_ns_type_operations net_ns_type_operations = {
	.type = KOBJ_NS_TYPE_NET,
	.current_may_mount = net_current_may_mount,
	.grab_current_ns = net_grab_current_ns,
	.netlink_ns = net_netlink_ns,
	.initial_ns = net_initial_ns,
	.drop_ns = net_drop_ns,
};
EXPORT_SYMBOL_GPL(net_ns_type_operations);

static int netdev_uevent(struct device *d, struct kobj_uevent_env *env)
{
	struct net_device *dev = to_net_dev(d);
	int retval;

	/* pass interface to uevent. */
	retval = add_uevent_var(env, "INTERFACE=%s", dev->name);
	if (retval)
		goto exit;

	/* Pass ifindex to uevent.
	 * ifindex is useful as it won't change (the interface name may
	 * change) and is what rtnetlink uses natively.
	 */
	retval = add_uevent_var(env, "IFINDEX=%d", dev->ifindex);

exit:
	return retval;
}
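
/* The resulting uevent, as seen from userspace, carries for example
 * INTERFACE=eth0 and IFINDEX=2 in its environment (the name and index
 * here are illustrative).
 */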

/*
 * netdev_release -- destroy and free a dead device.
 * Called when last reference to device kobject is gone.
 */
static void netdev_release(struct device *d)
{
	struct net_device *dev = to_net_dev(d);

	BUG_ON(dev->reg_state != NETREG_RELEASED);

	/* no need to wait for rcu grace period:
	 * device is dead and about to be freed.
	 */
	kfree(rcu_access_pointer(dev->ifalias));
	netdev_freemem(dev);
}

static const void *net_namespace(struct device *d)
{
	struct net_device *dev = to_net_dev(d);

	return dev_net(dev);
}

static void net_get_ownership(struct device *d, kuid_t *uid, kgid_t *gid)
{
	struct net_device *dev = to_net_dev(d);
	const struct net *net = dev_net(dev);

	net_ns_get_ownership(net, uid, gid);
}

static struct class net_class __ro_after_init = {
	.name = "net",
	.dev_release = netdev_release,
	.dev_groups = net_class_groups,
	.dev_uevent = netdev_uevent,
	.ns_type = &net_ns_type_operations,
	.namespace = net_namespace,
	.get_ownership = net_get_ownership,
};

#ifdef CONFIG_OF
static int of_dev_node_match(struct device *dev, const void *data)
{
	for (; dev; dev = dev->parent) {
		if (dev->of_node == data)
			return 1;
	}

	return 0;
}

/*
 * of_find_net_device_by_node - lookup the net device for the device node
 * @np: OF device node
 *
 * Looks up the net_device structure corresponding with the device node.
 * If successful, returns a pointer to the net_device with the embedded
 * struct device refcount incremented by one, or NULL on failure. The
 * refcount must be dropped when done with the net_device.
 */
struct net_device *of_find_net_device_by_node(struct device_node *np)
{
	struct device *dev;

	dev = class_find_device(&net_class, NULL, np, of_dev_node_match);
	if (!dev)
		return NULL;

	return to_net_dev(dev);
}
EXPORT_SYMBOL(of_find_net_device_by_node);
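
/* Minimal usage sketch (np is an assumed, already-resolved device node);
 * the embedded device reference is dropped with put_device() when done:
 *
 *	struct net_device *ndev = of_find_net_device_by_node(np);
 *
 *	if (ndev) {
 *		...use ndev...
 *		put_device(&ndev->dev);
 *	}
 */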
#endif

/* Delete sysfs entries but hold kobject reference until after all
 * netdev references are gone.
 */
void netdev_unregister_kobject(struct net_device *ndev)
{
	struct device *dev = &ndev->dev;

	if (!refcount_read(&dev_net(ndev)->ns.count))
		dev_set_uevent_suppress(dev, 1);

	kobject_get(&dev->kobj);

	remove_queue_kobjects(ndev);

	pm_runtime_set_memalloc_noio(dev, false);

	device_del(dev);
}

/* Create sysfs entries for network device. */
int netdev_register_kobject(struct net_device *ndev)
{
	struct device *dev = &ndev->dev;
	const struct attribute_group **groups = ndev->sysfs_groups;
	int error = 0;

	device_initialize(dev);
	dev->class = &net_class;
	dev->platform_data = ndev;
	dev->groups = groups;

	dev_set_name(dev, "%s", ndev->name);

#ifdef CONFIG_SYSFS
	/* Allow for a device specific group */
	if (*groups)
		groups++;

	*groups++ = &netstat_group;

#if IS_ENABLED(CONFIG_WIRELESS_EXT) || IS_ENABLED(CONFIG_CFG80211)
	if (ndev->ieee80211_ptr)
		*groups++ = &wireless_group;
#if IS_ENABLED(CONFIG_WIRELESS_EXT)
	else if (ndev->wireless_handlers)
		*groups++ = &wireless_group;
#endif
#endif
#endif /* CONFIG_SYSFS */

	error = device_add(dev);
	if (error)
		return error;

	error = register_queue_kobjects(ndev);
	if (error) {
		device_del(dev);
		return error;
	}

	pm_runtime_set_memalloc_noio(dev, true);

	return error;
}

/* Change owner for sysfs entries when moving network devices across network
 * namespaces owned by different user namespaces.
 */
int netdev_change_owner(struct net_device *ndev, const struct net *net_old,
			const struct net *net_new)
{
	kuid_t old_uid = GLOBAL_ROOT_UID, new_uid = GLOBAL_ROOT_UID;
	kgid_t old_gid = GLOBAL_ROOT_GID, new_gid = GLOBAL_ROOT_GID;
	struct device *dev = &ndev->dev;
	int error;

	net_ns_get_ownership(net_old, &old_uid, &old_gid);
	net_ns_get_ownership(net_new, &new_uid, &new_gid);

	/* The network namespace was changed but the owning user namespace is
	 * identical so there's no need to change the owner of sysfs entries.
	 */
	if (uid_eq(old_uid, new_uid) && gid_eq(old_gid, new_gid))
		return 0;

	error = device_change_owner(dev, new_uid, new_gid);
	if (error)
		return error;

	error = queue_change_owner(ndev, new_uid, new_gid);
	if (error)
		return error;

	return 0;
}

int netdev_class_create_file_ns(const struct class_attribute *class_attr,
				const void *ns)
{
	return class_create_file_ns(&net_class, class_attr, ns);
}
EXPORT_SYMBOL(netdev_class_create_file_ns);

void netdev_class_remove_file_ns(const struct class_attribute *class_attr,
				 const void *ns)
{
	class_remove_file_ns(&net_class, class_attr, ns);
}
EXPORT_SYMBOL(netdev_class_remove_file_ns);

int __init netdev_kobject_init(void)
{
	kobj_ns_type_register(&net_ns_type_operations);
	return class_register(&net_class);
}