// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net-sysfs.c - network device class and attributes
 *
 * Copyright (c) 2003 Stephen Hemminger <shemminger@osdl.org>
 */

#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/sched/isolation.h>
#include <linux/nsproxy.h>
#include <net/sock.h>
#include <net/net_namespace.h>
#include <linux/rtnetlink.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <linux/jiffies.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/cpu.h>

#include "net-sysfs.h"

#ifdef CONFIG_SYSFS
static const char fmt_hex[] = "%#x\n";
static const char fmt_dec[] = "%d\n";
static const char fmt_ulong[] = "%lu\n";
static const char fmt_u64[] = "%llu\n";

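/* "Alive" means reg_state is NETREG_UNINITIALIZED or NETREG_REGISTERED,
 * i.e. unregistration of the device has not started yet.
 */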
static inline int dev_isalive(const struct net_device *dev)
{
	return dev->reg_state <= NETREG_REGISTERED;
}

/* use same locking rules as GIF* ioctl's */
static ssize_t netdev_show(const struct device *dev,
			   struct device_attribute *attr, char *buf,
			   ssize_t (*format)(const struct net_device *, char *))
{
	struct net_device *ndev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	read_lock(&dev_base_lock);
	if (dev_isalive(ndev))
		ret = (*format)(ndev, buf);
	read_unlock(&dev_base_lock);

	return ret;
}

/* generate a show function for simple field */
#define NETDEVICE_SHOW(field, format_string)				\
static ssize_t format_##field(const struct net_device *dev, char *buf)	\
{									\
	return sprintf(buf, format_string, dev->field);			\
}									\
static ssize_t field##_show(struct device *dev,				\
			    struct device_attribute *attr, char *buf)	\
{									\
	return netdev_show(dev, attr, buf, format_##field);		\
}									\

#define NETDEVICE_SHOW_RO(field, format_string)				\
NETDEVICE_SHOW(field, format_string);					\
static DEVICE_ATTR_RO(field)

#define NETDEVICE_SHOW_RW(field, format_string)				\
NETDEVICE_SHOW(field, format_string);					\
static DEVICE_ATTR_RW(field)

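/* For illustration only (not part of the build): NETDEVICE_SHOW_RW(mtu, fmt_dec)
 * expands roughly to
 *
 *	static ssize_t format_mtu(const struct net_device *dev, char *buf)
 *	{
 *		return sprintf(buf, "%d\n", dev->mtu);
 *	}
 *	static ssize_t mtu_show(struct device *dev,
 *				struct device_attribute *attr, char *buf)
 *	{
 *		return netdev_show(dev, attr, buf, format_mtu);
 *	}
 *	static DEVICE_ATTR_RW(mtu);
 *
 * i.e. one "mtu" sysfs file whose reads go through netdev_show().
 */
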
/* use same locking and permission rules as SIF* ioctl's */
static ssize_t netdev_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t len,
			    int (*set)(struct net_device *, unsigned long))
{
	struct net_device *netdev = to_net_dev(dev);
	struct net *net = dev_net(netdev);
	unsigned long new;
	int ret;

	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	ret = kstrtoul(buf, 0, &new);
	if (ret)
		goto err;

	if (!rtnl_trylock())
		return restart_syscall();

	if (dev_isalive(netdev)) {
		ret = (*set)(netdev, new);
		if (ret == 0)
			ret = len;
	}
	rtnl_unlock();
 err:
	return ret;
}

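/* A sketch of how the pattern above surfaces to userspace (interface name
 * and values hypothetical):
 *
 *	$ cat /sys/class/net/eth0/mtu         # mtu_show -> netdev_show()
 *	1500
 *	# echo 9000 > /sys/class/net/eth0/mtu # mtu_store -> netdev_store()
 *
 * netdev_store() parses the value, takes the RTNL lock (restarting the
 * syscall rather than sleeping on contention) and calls the per-field
 * setter, e.g. change_mtu() below.
 */
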
NETDEVICE_SHOW_RO(dev_id, fmt_hex);
NETDEVICE_SHOW_RO(dev_port, fmt_dec);
NETDEVICE_SHOW_RO(addr_assign_type, fmt_dec);
NETDEVICE_SHOW_RO(addr_len, fmt_dec);
NETDEVICE_SHOW_RO(ifindex, fmt_dec);
NETDEVICE_SHOW_RO(type, fmt_dec);
NETDEVICE_SHOW_RO(link_mode, fmt_dec);

static ssize_t iflink_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct net_device *ndev = to_net_dev(dev);

	return sprintf(buf, fmt_dec, dev_get_iflink(ndev));
}
static DEVICE_ATTR_RO(iflink);

static ssize_t format_name_assign_type(const struct net_device *dev, char *buf)
{
	return sprintf(buf, fmt_dec, dev->name_assign_type);
}

static ssize_t name_assign_type_show(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct net_device *ndev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	if (ndev->name_assign_type != NET_NAME_UNKNOWN)
		ret = netdev_show(dev, attr, buf, format_name_assign_type);

	return ret;
}
static DEVICE_ATTR_RO(name_assign_type);

/* use same locking rules as GIFHWADDR ioctl's */
static ssize_t address_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct net_device *ndev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	read_lock(&dev_base_lock);
	if (dev_isalive(ndev))
		ret = sysfs_format_mac(buf, ndev->dev_addr, ndev->addr_len);
	read_unlock(&dev_base_lock);
	return ret;
}
static DEVICE_ATTR_RO(address);

static ssize_t broadcast_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct net_device *ndev = to_net_dev(dev);

	if (dev_isalive(ndev))
		return sysfs_format_mac(buf, ndev->broadcast, ndev->addr_len);
	return -EINVAL;
}
static DEVICE_ATTR_RO(broadcast);

static int change_carrier(struct net_device *dev, unsigned long new_carrier)
{
	if (!netif_running(dev))
		return -EINVAL;
	return dev_change_carrier(dev, (bool)new_carrier);
}

static ssize_t carrier_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t len)
{
	struct net_device *netdev = to_net_dev(dev);

	/* The check is also done in change_carrier; this helps returning early
	 * without hitting the trylock/restart in netdev_store.
	 */
	if (!netdev->netdev_ops->ndo_change_carrier)
		return -EOPNOTSUPP;

	return netdev_store(dev, attr, buf, len, change_carrier);
}

static ssize_t carrier_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	if (netif_running(netdev))
		return sprintf(buf, fmt_dec, !!netif_carrier_ok(netdev));

	return -EINVAL;
}
static DEVICE_ATTR_RW(carrier);

static ssize_t speed_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	int ret = -EINVAL;

	/* The check is also done in __ethtool_get_link_ksettings; this helps
	 * returning early without hitting the trylock/restart below.
	 */
	if (!netdev->ethtool_ops->get_link_ksettings)
		return ret;

	if (!rtnl_trylock())
		return restart_syscall();

	if (netif_running(netdev)) {
		struct ethtool_link_ksettings cmd;

		if (!__ethtool_get_link_ksettings(netdev, &cmd))
			ret = sprintf(buf, fmt_dec, cmd.base.speed);
	}
	rtnl_unlock();
	return ret;
}
static DEVICE_ATTR_RO(speed);

static ssize_t duplex_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	int ret = -EINVAL;

	/* The check is also done in __ethtool_get_link_ksettings; this helps
	 * returning early without hitting the trylock/restart below.
	 */
	if (!netdev->ethtool_ops->get_link_ksettings)
		return ret;

	if (!rtnl_trylock())
		return restart_syscall();

	if (netif_running(netdev)) {
		struct ethtool_link_ksettings cmd;

		if (!__ethtool_get_link_ksettings(netdev, &cmd)) {
			const char *duplex;

			switch (cmd.base.duplex) {
			case DUPLEX_HALF:
				duplex = "half";
				break;
			case DUPLEX_FULL:
				duplex = "full";
				break;
			default:
				duplex = "unknown";
				break;
			}
			ret = sprintf(buf, "%s\n", duplex);
		}
	}
	rtnl_unlock();
	return ret;
}
static DEVICE_ATTR_RO(duplex);

static ssize_t testing_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	if (netif_running(netdev))
		return sprintf(buf, fmt_dec, !!netif_testing(netdev));

	return -EINVAL;
}
static DEVICE_ATTR_RO(testing);

static ssize_t dormant_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	if (netif_running(netdev))
		return sprintf(buf, fmt_dec, !!netif_dormant(netdev));

	return -EINVAL;
}
static DEVICE_ATTR_RO(dormant);

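/* Order must match the IF_OPER_* values from uapi/linux/if.h (the RFC 2863
 * operational states), which is what netdev->operstate stores.
 */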
static const char *const operstates[] = {
	"unknown",
	"notpresent", /* currently unused */
	"down",
	"lowerlayerdown",
	"testing",
	"dormant",
	"up"
};

static ssize_t operstate_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	const struct net_device *netdev = to_net_dev(dev);
	unsigned char operstate;

	read_lock(&dev_base_lock);
	operstate = netdev->operstate;
	if (!netif_running(netdev))
		operstate = IF_OPER_DOWN;
	read_unlock(&dev_base_lock);

	if (operstate >= ARRAY_SIZE(operstates))
		return -EINVAL; /* should not happen */

	return sprintf(buf, "%s\n", operstates[operstate]);
}
static DEVICE_ATTR_RO(operstate);

static ssize_t carrier_changes_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	return sprintf(buf, fmt_dec,
		       atomic_read(&netdev->carrier_up_count) +
		       atomic_read(&netdev->carrier_down_count));
}
static DEVICE_ATTR_RO(carrier_changes);

static ssize_t carrier_up_count_show(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	return sprintf(buf, fmt_dec, atomic_read(&netdev->carrier_up_count));
}
static DEVICE_ATTR_RO(carrier_up_count);

static ssize_t carrier_down_count_show(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	return sprintf(buf, fmt_dec, atomic_read(&netdev->carrier_down_count));
}
static DEVICE_ATTR_RO(carrier_down_count);

/* read-write attributes */

static int change_mtu(struct net_device *dev, unsigned long new_mtu)
{
	return dev_set_mtu(dev, (int)new_mtu);
}

static ssize_t mtu_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_mtu);
}
NETDEVICE_SHOW_RW(mtu, fmt_dec);

static int change_flags(struct net_device *dev, unsigned long new_flags)
{
	return dev_change_flags(dev, (unsigned int)new_flags, NULL);
}

static ssize_t flags_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_flags);
}
NETDEVICE_SHOW_RW(flags, fmt_hex);

static ssize_t tx_queue_len_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t len)
{
	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	return netdev_store(dev, attr, buf, len, dev_change_tx_queue_len);
}
NETDEVICE_SHOW_RW(tx_queue_len, fmt_dec);

static int change_gro_flush_timeout(struct net_device *dev, unsigned long val)
{
	WRITE_ONCE(dev->gro_flush_timeout, val);
	return 0;
}

static ssize_t gro_flush_timeout_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t len)
{
	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	return netdev_store(dev, attr, buf, len, change_gro_flush_timeout);
}
NETDEVICE_SHOW_RW(gro_flush_timeout, fmt_ulong);

static int change_napi_defer_hard_irqs(struct net_device *dev, unsigned long val)
{
	WRITE_ONCE(dev->napi_defer_hard_irqs, val);
	return 0;
}

static ssize_t napi_defer_hard_irqs_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t len)
{
	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	return netdev_store(dev, attr, buf, len, change_napi_defer_hard_irqs);
}
NETDEVICE_SHOW_RW(napi_defer_hard_irqs, fmt_dec);

static ssize_t ifalias_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t len)
{
	struct net_device *netdev = to_net_dev(dev);
	struct net *net = dev_net(netdev);
	size_t count = len;
	ssize_t ret = 0;

	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	/* ignore trailing newline */
	if (len > 0 && buf[len - 1] == '\n')
		--count;

	if (!rtnl_trylock())
		return restart_syscall();

	if (dev_isalive(netdev)) {
		ret = dev_set_alias(netdev, buf, count);
		if (ret < 0)
			goto err;
		ret = len;
		netdev_state_change(netdev);
	}
err:
	rtnl_unlock();

	return ret;
}

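/* Usage sketch (interface name hypothetical):
 *
 *	# echo "uplink to core switch" > /sys/class/net/eth0/ifalias
 *	$ cat /sys/class/net/eth0/ifalias
 *	uplink to core switch
 *
 * The trailing newline from echo is stripped above before the alias is
 * stored via dev_set_alias().
 */
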
static ssize_t ifalias_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	const struct net_device *netdev = to_net_dev(dev);
	char tmp[IFALIASZ];
	ssize_t ret = 0;

	ret = dev_get_alias(netdev, tmp, sizeof(tmp));
	if (ret > 0)
		ret = sprintf(buf, "%s\n", tmp);
	return ret;
}
static DEVICE_ATTR_RW(ifalias);

static int change_group(struct net_device *dev, unsigned long new_group)
{
	dev_set_group(dev, (int)new_group);
	return 0;
}

static ssize_t group_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_group);
}
NETDEVICE_SHOW(group, fmt_dec);
static DEVICE_ATTR(netdev_group, 0644, group_show, group_store);

static int change_proto_down(struct net_device *dev, unsigned long proto_down)
{
	return dev_change_proto_down(dev, (bool)proto_down);
}

static ssize_t proto_down_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t len)
{
	struct net_device *netdev = to_net_dev(dev);

	/* The check is also done in change_proto_down; this helps returning
	 * early without hitting the trylock/restart in netdev_store.
	 */
	if (!netdev->netdev_ops->ndo_change_proto_down)
		return -EOPNOTSUPP;

	return netdev_store(dev, attr, buf, len, change_proto_down);
}
NETDEVICE_SHOW_RW(proto_down, fmt_dec);

static ssize_t phys_port_id_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	/* The check is also done in dev_get_phys_port_id; this helps returning
	 * early without hitting the trylock/restart below.
	 */
	if (!netdev->netdev_ops->ndo_get_phys_port_id)
		return -EOPNOTSUPP;

	if (!rtnl_trylock())
		return restart_syscall();

	if (dev_isalive(netdev)) {
		struct netdev_phys_item_id ppid;

		ret = dev_get_phys_port_id(netdev, &ppid);
		if (!ret)
			ret = sprintf(buf, "%*phN\n", ppid.id_len, ppid.id);
	}
	rtnl_unlock();

	return ret;
}
static DEVICE_ATTR_RO(phys_port_id);

static ssize_t phys_port_name_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	/* The checks are also done in dev_get_phys_port_name; this helps
	 * returning early without hitting the trylock/restart below.
	 */
	if (!netdev->netdev_ops->ndo_get_phys_port_name &&
	    !netdev->netdev_ops->ndo_get_devlink_port)
		return -EOPNOTSUPP;

	if (!rtnl_trylock())
		return restart_syscall();

	if (dev_isalive(netdev)) {
		char name[IFNAMSIZ];

		ret = dev_get_phys_port_name(netdev, name, sizeof(name));
		if (!ret)
			ret = sprintf(buf, "%s\n", name);
	}
	rtnl_unlock();

	return ret;
}
static DEVICE_ATTR_RO(phys_port_name);

static ssize_t phys_switch_id_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	/* The checks are also done in dev_get_port_parent_id; this helps
	 * returning early without hitting the trylock/restart below. This works
	 * because recurse is false when calling dev_get_port_parent_id.
	 */
	if (!netdev->netdev_ops->ndo_get_port_parent_id &&
	    !netdev->netdev_ops->ndo_get_devlink_port)
		return -EOPNOTSUPP;

	if (!rtnl_trylock())
		return restart_syscall();

	if (dev_isalive(netdev)) {
		struct netdev_phys_item_id ppid = { };

		ret = dev_get_port_parent_id(netdev, &ppid, false);
		if (!ret)
			ret = sprintf(buf, "%*phN\n", ppid.id_len, ppid.id);
	}
	rtnl_unlock();

	return ret;
}
static DEVICE_ATTR_RO(phys_switch_id);

static ssize_t threaded_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	if (!rtnl_trylock())
		return restart_syscall();

	if (dev_isalive(netdev))
		ret = sprintf(buf, fmt_dec, netdev->threaded);

	rtnl_unlock();
	return ret;
}

static int modify_napi_threaded(struct net_device *dev, unsigned long val)
{
	int ret;

	if (list_empty(&dev->napi_list))
		return -EOPNOTSUPP;

	if (val != 0 && val != 1)
		return -EOPNOTSUPP;

	ret = dev_set_threaded(dev, val);

	return ret;
}

static ssize_t threaded_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, modify_napi_threaded);
}
static DEVICE_ATTR_RW(threaded);

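/* Usage sketch (interface name hypothetical): writing 1 moves every NAPI
 * instance of the device to a dedicated kernel thread, writing 0 reverts
 * to softirq polling:
 *
 *	# echo 1 > /sys/class/net/eth0/threaded
 *
 * Only 0 and 1 are accepted, and only for devices that have registered
 * NAPI instances (see modify_napi_threaded() above).
 */
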
static struct attribute *net_class_attrs[] __ro_after_init = {
	&dev_attr_netdev_group.attr,
	&dev_attr_type.attr,
	&dev_attr_dev_id.attr,
	&dev_attr_dev_port.attr,
	&dev_attr_iflink.attr,
	&dev_attr_ifindex.attr,
	&dev_attr_name_assign_type.attr,
	&dev_attr_addr_assign_type.attr,
	&dev_attr_addr_len.attr,
	&dev_attr_link_mode.attr,
	&dev_attr_address.attr,
	&dev_attr_broadcast.attr,
	&dev_attr_speed.attr,
	&dev_attr_duplex.attr,
	&dev_attr_dormant.attr,
	&dev_attr_testing.attr,
	&dev_attr_operstate.attr,
	&dev_attr_carrier_changes.attr,
	&dev_attr_ifalias.attr,
	&dev_attr_carrier.attr,
	&dev_attr_mtu.attr,
	&dev_attr_flags.attr,
	&dev_attr_tx_queue_len.attr,
	&dev_attr_gro_flush_timeout.attr,
	&dev_attr_napi_defer_hard_irqs.attr,
	&dev_attr_phys_port_id.attr,
	&dev_attr_phys_port_name.attr,
	&dev_attr_phys_switch_id.attr,
	&dev_attr_proto_down.attr,
	&dev_attr_carrier_up_count.attr,
	&dev_attr_carrier_down_count.attr,
	&dev_attr_threaded.attr,
	NULL,
};
ATTRIBUTE_GROUPS(net_class);

/* Show a given attribute in the statistics group */
static ssize_t netstat_show(const struct device *d,
			    struct device_attribute *attr, char *buf,
			    unsigned long offset)
{
	struct net_device *dev = to_net_dev(d);
	ssize_t ret = -EINVAL;

	WARN_ON(offset > sizeof(struct rtnl_link_stats64) ||
		offset % sizeof(u64) != 0);

	read_lock(&dev_base_lock);
	if (dev_isalive(dev)) {
		struct rtnl_link_stats64 temp;
		const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);

		ret = sprintf(buf, fmt_u64, *(u64 *)(((u8 *)stats) + offset));
	}
	read_unlock(&dev_base_lock);
	return ret;
}

/* generate a read-only statistics attribute */
#define NETSTAT_ENTRY(name)						\
static ssize_t name##_show(struct device *d,				\
			   struct device_attribute *attr, char *buf)	\
{									\
	return netstat_show(d, attr, buf,				\
			    offsetof(struct rtnl_link_stats64, name));	\
}									\
static DEVICE_ATTR_RO(name)

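/* For illustration only (not part of the build): NETSTAT_ENTRY(rx_packets)
 * expands roughly to
 *
 *	static ssize_t rx_packets_show(struct device *d,
 *				       struct device_attribute *attr, char *buf)
 *	{
 *		return netstat_show(d, attr, buf,
 *				    offsetof(struct rtnl_link_stats64, rx_packets));
 *	}
 *	static DEVICE_ATTR_RO(rx_packets);
 *
 * so each entry below reads one u64 counter out of dev_get_stats() by its
 * byte offset in struct rtnl_link_stats64.
 */
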
NETSTAT_ENTRY(rx_packets);
NETSTAT_ENTRY(tx_packets);
NETSTAT_ENTRY(rx_bytes);
NETSTAT_ENTRY(tx_bytes);
NETSTAT_ENTRY(rx_errors);
NETSTAT_ENTRY(tx_errors);
NETSTAT_ENTRY(rx_dropped);
NETSTAT_ENTRY(tx_dropped);
NETSTAT_ENTRY(multicast);
NETSTAT_ENTRY(collisions);
NETSTAT_ENTRY(rx_length_errors);
NETSTAT_ENTRY(rx_over_errors);
NETSTAT_ENTRY(rx_crc_errors);
NETSTAT_ENTRY(rx_frame_errors);
NETSTAT_ENTRY(rx_fifo_errors);
NETSTAT_ENTRY(rx_missed_errors);
NETSTAT_ENTRY(tx_aborted_errors);
NETSTAT_ENTRY(tx_carrier_errors);
NETSTAT_ENTRY(tx_fifo_errors);
NETSTAT_ENTRY(tx_heartbeat_errors);
NETSTAT_ENTRY(tx_window_errors);
NETSTAT_ENTRY(rx_compressed);
NETSTAT_ENTRY(tx_compressed);
NETSTAT_ENTRY(rx_nohandler);

static struct attribute *netstat_attrs[] __ro_after_init = {
	&dev_attr_rx_packets.attr,
	&dev_attr_tx_packets.attr,
	&dev_attr_rx_bytes.attr,
	&dev_attr_tx_bytes.attr,
	&dev_attr_rx_errors.attr,
	&dev_attr_tx_errors.attr,
	&dev_attr_rx_dropped.attr,
	&dev_attr_tx_dropped.attr,
	&dev_attr_multicast.attr,
	&dev_attr_collisions.attr,
	&dev_attr_rx_length_errors.attr,
	&dev_attr_rx_over_errors.attr,
	&dev_attr_rx_crc_errors.attr,
	&dev_attr_rx_frame_errors.attr,
	&dev_attr_rx_fifo_errors.attr,
	&dev_attr_rx_missed_errors.attr,
	&dev_attr_tx_aborted_errors.attr,
	&dev_attr_tx_carrier_errors.attr,
	&dev_attr_tx_fifo_errors.attr,
	&dev_attr_tx_heartbeat_errors.attr,
	&dev_attr_tx_window_errors.attr,
	&dev_attr_rx_compressed.attr,
	&dev_attr_tx_compressed.attr,
	&dev_attr_rx_nohandler.attr,
	NULL
};

static const struct attribute_group netstat_group = {
	.name = "statistics",
	.attrs = netstat_attrs,
};

#if IS_ENABLED(CONFIG_WIRELESS_EXT) || IS_ENABLED(CONFIG_CFG80211)
static struct attribute *wireless_attrs[] = {
	NULL
};

static const struct attribute_group wireless_group = {
	.name = "wireless",
	.attrs = wireless_attrs,
};
#endif

#else /* CONFIG_SYSFS */
#define net_class_groups	NULL
#endif /* CONFIG_SYSFS */

#ifdef CONFIG_SYSFS
#define to_rx_queue_attr(_attr) \
	container_of(_attr, struct rx_queue_attribute, attr)

#define to_rx_queue(obj) container_of(obj, struct netdev_rx_queue, kobj)

static ssize_t rx_queue_attr_show(struct kobject *kobj, struct attribute *attr,
				  char *buf)
{
	const struct rx_queue_attribute *attribute = to_rx_queue_attr(attr);
	struct netdev_rx_queue *queue = to_rx_queue(kobj);

	if (!attribute->show)
		return -EIO;

	return attribute->show(queue, buf);
}

static ssize_t rx_queue_attr_store(struct kobject *kobj, struct attribute *attr,
				   const char *buf, size_t count)
{
	const struct rx_queue_attribute *attribute = to_rx_queue_attr(attr);
	struct netdev_rx_queue *queue = to_rx_queue(kobj);

	if (!attribute->store)
		return -EIO;

	return attribute->store(queue, buf, count);
}

static const struct sysfs_ops rx_queue_sysfs_ops = {
	.show = rx_queue_attr_show,
	.store = rx_queue_attr_store,
};

#ifdef CONFIG_RPS
static ssize_t show_rps_map(struct netdev_rx_queue *queue, char *buf)
{
	struct rps_map *map;
	cpumask_var_t mask;
	int i, len;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	rcu_read_lock();
	map = rcu_dereference(queue->rps_map);
	if (map)
		for (i = 0; i < map->len; i++)
			cpumask_set_cpu(map->cpus[i], mask);

	len = snprintf(buf, PAGE_SIZE, "%*pb\n", cpumask_pr_args(mask));
	rcu_read_unlock();
	free_cpumask_var(mask);

	return len < PAGE_SIZE ? len : -EINVAL;
}

static ssize_t store_rps_map(struct netdev_rx_queue *queue,
			     const char *buf, size_t len)
{
	struct rps_map *old_map, *map;
	cpumask_var_t mask;
	int err, cpu, i, hk_flags;
	static DEFINE_MUTEX(rps_map_mutex);

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
	if (err) {
		free_cpumask_var(mask);
		return err;
	}

	if (!cpumask_empty(mask)) {
		hk_flags = HK_FLAG_DOMAIN | HK_FLAG_WQ;
		cpumask_and(mask, mask, housekeeping_cpumask(hk_flags));
		if (cpumask_empty(mask)) {
			free_cpumask_var(mask);
			return -EINVAL;
		}
	}

	map = kzalloc(max_t(unsigned int,
			    RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES),
		      GFP_KERNEL);
	if (!map) {
		free_cpumask_var(mask);
		return -ENOMEM;
	}

	i = 0;
	for_each_cpu_and(cpu, mask, cpu_online_mask)
		map->cpus[i++] = cpu;

	if (i) {
		map->len = i;
	} else {
		kfree(map);
		map = NULL;
	}

	mutex_lock(&rps_map_mutex);
	old_map = rcu_dereference_protected(queue->rps_map,
					    mutex_is_locked(&rps_map_mutex));
	rcu_assign_pointer(queue->rps_map, map);

	if (map)
		static_branch_inc(&rps_needed);
	if (old_map)
		static_branch_dec(&rps_needed);

	mutex_unlock(&rps_map_mutex);

	if (old_map)
		kfree_rcu(old_map, rcu);

	free_cpumask_var(mask);
	return len;
}

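/* Usage sketch (device and queue names hypothetical): steer RPS for rx
 * queue 0 of eth0 to CPUs 0-3 by writing a hex cpumask:
 *
 *	# echo f > /sys/class/net/eth0/queues/rx-0/rps_cpus
 *	$ cat /sys/class/net/eth0/queues/rx-0/rps_cpus
 *	f
 *
 * (The printed width depends on the number of possible CPUs.) The mask is
 * intersected with the housekeeping CPUs above, and a mask containing no
 * housekeeping CPU is rejected with -EINVAL.
 */
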
static ssize_t show_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
					   char *buf)
{
	struct rps_dev_flow_table *flow_table;
	unsigned long val = 0;

	rcu_read_lock();
	flow_table = rcu_dereference(queue->rps_flow_table);
	if (flow_table)
		val = (unsigned long)flow_table->mask + 1;
	rcu_read_unlock();

	return sprintf(buf, "%lu\n", val);
}

static void rps_dev_flow_table_release(struct rcu_head *rcu)
{
	struct rps_dev_flow_table *table = container_of(rcu,
	    struct rps_dev_flow_table, rcu);
	vfree(table);
}

static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
					    const char *buf, size_t len)
{
	unsigned long mask, count;
	struct rps_dev_flow_table *table, *old_table;
	static DEFINE_SPINLOCK(rps_dev_flow_lock);
	int rc;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	rc = kstrtoul(buf, 0, &count);
	if (rc < 0)
		return rc;

	if (count) {
		mask = count - 1;
		/* mask = roundup_pow_of_two(count) - 1;
		 * without overflows...
		 */
		while ((mask | (mask >> 1)) != mask)
			mask |= (mask >> 1);
		/* On 64 bit arches, must check mask fits in table->mask (u32),
		 * and on 32bit arches, must check
		 * RPS_DEV_FLOW_TABLE_SIZE(mask + 1) doesn't overflow.
		 */
#if BITS_PER_LONG > 32
		if (mask > (unsigned long)(u32)mask)
			return -EINVAL;
#else
		if (mask > (ULONG_MAX - RPS_DEV_FLOW_TABLE_SIZE(1))
				/ sizeof(struct rps_dev_flow)) {
			/* Enforce a limit to prevent overflow */
			return -EINVAL;
		}
#endif
		table = vmalloc(RPS_DEV_FLOW_TABLE_SIZE(mask + 1));
		if (!table)
			return -ENOMEM;

		table->mask = mask;
		for (count = 0; count <= mask; count++)
			table->flows[count].cpu = RPS_NO_CPU;
	} else {
		table = NULL;
	}

	spin_lock(&rps_dev_flow_lock);
	old_table = rcu_dereference_protected(queue->rps_flow_table,
					      lockdep_is_held(&rps_dev_flow_lock));
	rcu_assign_pointer(queue->rps_flow_table, table);
	spin_unlock(&rps_dev_flow_lock);

	if (old_table)
		call_rcu(&old_table->rcu, rps_dev_flow_table_release);

	return len;
}

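/* Worked example for the rounding above (value hypothetical): writing 100
 * to rps_flow_cnt gives mask = 99 = 0b1100011, which the OR-cascade widens
 * to 0b1111111 = 127, so a 128-entry flow table is allocated and the file
 * reads back as 128 (flow_table->mask + 1).
 */
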
static struct rx_queue_attribute rps_cpus_attribute __ro_after_init
	= __ATTR(rps_cpus, 0644, show_rps_map, store_rps_map);

static struct rx_queue_attribute rps_dev_flow_table_cnt_attribute __ro_after_init
	= __ATTR(rps_flow_cnt, 0644,
		 show_rps_dev_flow_table_cnt, store_rps_dev_flow_table_cnt);
#endif /* CONFIG_RPS */

static struct attribute *rx_queue_default_attrs[] __ro_after_init = {
#ifdef CONFIG_RPS
	&rps_cpus_attribute.attr,
	&rps_dev_flow_table_cnt_attribute.attr,
#endif
	NULL
};
ATTRIBUTE_GROUPS(rx_queue_default);

static void rx_queue_release(struct kobject *kobj)
{
	struct netdev_rx_queue *queue = to_rx_queue(kobj);
#ifdef CONFIG_RPS
	struct rps_map *map;
	struct rps_dev_flow_table *flow_table;

	map = rcu_dereference_protected(queue->rps_map, 1);
	if (map) {
		RCU_INIT_POINTER(queue->rps_map, NULL);
		kfree_rcu(map, rcu);
	}

	flow_table = rcu_dereference_protected(queue->rps_flow_table, 1);
	if (flow_table) {
		RCU_INIT_POINTER(queue->rps_flow_table, NULL);
		call_rcu(&flow_table->rcu, rps_dev_flow_table_release);
	}
#endif

	memset(kobj, 0, sizeof(*kobj));
	dev_put(queue->dev);
}

static const void *rx_queue_namespace(struct kobject *kobj)
{
	struct netdev_rx_queue *queue = to_rx_queue(kobj);
	struct device *dev = &queue->dev->dev;
	const void *ns = NULL;

	if (dev->class && dev->class->ns_type)
		ns = dev->class->namespace(dev);

	return ns;
}

static void rx_queue_get_ownership(struct kobject *kobj,
				   kuid_t *uid, kgid_t *gid)
{
	const struct net *net = rx_queue_namespace(kobj);

	net_ns_get_ownership(net, uid, gid);
}

static struct kobj_type rx_queue_ktype __ro_after_init = {
	.sysfs_ops = &rx_queue_sysfs_ops,
	.release = rx_queue_release,
	.default_groups = rx_queue_default_groups,
	.namespace = rx_queue_namespace,
	.get_ownership = rx_queue_get_ownership,
};

static int rx_queue_add_kobject(struct net_device *dev, int index)
{
	struct netdev_rx_queue *queue = dev->_rx + index;
	struct kobject *kobj = &queue->kobj;
	int error = 0;

	/* kobject_put() later will trigger rx_queue_release(), which
	 * decreases the dev refcount: take that reference here.
	 */
1055 dev_hold(queue->dev);
1056
WANG Cong6b53daf2014-07-23 16:09:10 -07001057 kobj->kset = dev->queues_kset;
Tom Herbert0a9627f2010-03-16 08:03:29 +00001058 error = kobject_init_and_add(kobj, &rx_queue_ktype, NULL,
stephen hemminger6648c652017-08-18 13:46:28 -07001059 "rx-%u", index);
Michael Daltona953be52014-01-16 22:23:28 -08001060 if (error)
Jouni Hoganderb8eb7182019-11-20 09:08:16 +02001061 goto err;
Michael Daltona953be52014-01-16 22:23:28 -08001062
WANG Cong6b53daf2014-07-23 16:09:10 -07001063 if (dev->sysfs_rx_queue_group) {
1064 error = sysfs_create_group(kobj, dev->sysfs_rx_queue_group);
Jouni Hoganderb8eb7182019-11-20 09:08:16 +02001065 if (error)
1066 goto err;
Tom Herbert0a9627f2010-03-16 08:03:29 +00001067 }
1068
1069 kobject_uevent(kobj, KOBJ_ADD);
1070
1071 return error;
Jouni Hoganderb8eb7182019-11-20 09:08:16 +02001072
1073err:
1074 kobject_put(kobj);
1075 return error;
Tom Herbert0a9627f2010-03-16 08:03:29 +00001076}
Christian Braunerd7554072020-02-27 04:37:18 +01001077
1078static int rx_queue_change_owner(struct net_device *dev, int index, kuid_t kuid,
1079 kgid_t kgid)
1080{
1081 struct netdev_rx_queue *queue = dev->_rx + index;
1082 struct kobject *kobj = &queue->kobj;
1083 int error;
1084
1085 error = sysfs_change_owner(kobj, kuid, kgid);
1086 if (error)
1087 return error;
1088
1089 if (dev->sysfs_rx_queue_group)
1090 error = sysfs_group_change_owner(
1091 kobj, dev->sysfs_rx_queue_group, kuid, kgid);
1092
1093 return error;
1094}
Paul Bolle80dd6ea2014-02-09 14:07:11 +01001095#endif /* CONFIG_SYSFS */
Tom Herbert0a9627f2010-03-16 08:03:29 +00001096
Ben Hutchings62fe0b42010-09-27 08:24:33 +00001097int
WANG Cong6b53daf2014-07-23 16:09:10 -07001098net_rx_queue_update_kobjects(struct net_device *dev, int old_num, int new_num)
Tom Herbert0a9627f2010-03-16 08:03:29 +00001099{
Michael Daltona953be52014-01-16 22:23:28 -08001100#ifdef CONFIG_SYSFS
Tom Herbert0a9627f2010-03-16 08:03:29 +00001101 int i;
1102 int error = 0;
1103
Michael Daltona953be52014-01-16 22:23:28 -08001104#ifndef CONFIG_RPS
WANG Cong6b53daf2014-07-23 16:09:10 -07001105 if (!dev->sysfs_rx_queue_group)
Michael Daltona953be52014-01-16 22:23:28 -08001106 return 0;
1107#endif
Ben Hutchings62fe0b42010-09-27 08:24:33 +00001108 for (i = old_num; i < new_num; i++) {
WANG Cong6b53daf2014-07-23 16:09:10 -07001109 error = rx_queue_add_kobject(dev, i);
Ben Hutchings62fe0b42010-09-27 08:24:33 +00001110 if (error) {
1111 new_num = old_num;
Tom Herbert0a9627f2010-03-16 08:03:29 +00001112 break;
Ben Hutchings62fe0b42010-09-27 08:24:33 +00001113 }
Tom Herbert0a9627f2010-03-16 08:03:29 +00001114 }
1115
Michael Daltona953be52014-01-16 22:23:28 -08001116 while (--i >= new_num) {
Andrey Vagin002d8a12016-10-24 19:09:53 -07001117 struct kobject *kobj = &dev->_rx[i].kobj;
1118
Christian Brauner8b8f3e62020-08-19 14:06:36 +02001119 if (!refcount_read(&dev_net(dev)->ns.count))
Andrey Vagin002d8a12016-10-24 19:09:53 -07001120 kobj->uevent_suppress = 1;
WANG Cong6b53daf2014-07-23 16:09:10 -07001121 if (dev->sysfs_rx_queue_group)
Andrey Vagin002d8a12016-10-24 19:09:53 -07001122 sysfs_remove_group(kobj, dev->sysfs_rx_queue_group);
1123 kobject_put(kobj);
Michael Daltona953be52014-01-16 22:23:28 -08001124 }
Tom Herbert0a9627f2010-03-16 08:03:29 +00001125
1126 return error;
Tom Herbertbf264142010-11-26 08:36:09 +00001127#else
1128 return 0;
1129#endif
Tom Herbert0a9627f2010-03-16 08:03:29 +00001130}
1131
Christian Braunerd7554072020-02-27 04:37:18 +01001132static int net_rx_queue_change_owner(struct net_device *dev, int num,
1133 kuid_t kuid, kgid_t kgid)
1134{
1135#ifdef CONFIG_SYSFS
1136 int error = 0;
1137 int i;
1138
1139#ifndef CONFIG_RPS
1140 if (!dev->sysfs_rx_queue_group)
1141 return 0;
1142#endif
1143 for (i = 0; i < num; i++) {
1144 error = rx_queue_change_owner(dev, i, kuid, kgid);
1145 if (error)
1146 break;
1147 }
1148
1149 return error;
1150#else
1151 return 0;
1152#endif
1153}
1154
david decotignyccf5ff62011-11-16 12:15:10 +00001155#ifdef CONFIG_SYSFS
Tom Herbert1d24eb42010-11-21 13:17:27 +00001156/*
1157 * netdev_queue sysfs structures and functions.
1158 */
1159struct netdev_queue_attribute {
1160 struct attribute attr;
stephen hemminger718ad682017-08-18 13:46:24 -07001161 ssize_t (*show)(struct netdev_queue *queue, char *buf);
Tom Herbert1d24eb42010-11-21 13:17:27 +00001162 ssize_t (*store)(struct netdev_queue *queue,
stephen hemminger718ad682017-08-18 13:46:24 -07001163 const char *buf, size_t len);
Tom Herbert1d24eb42010-11-21 13:17:27 +00001164};
stephen hemminger6648c652017-08-18 13:46:28 -07001165#define to_netdev_queue_attr(_attr) \
1166 container_of(_attr, struct netdev_queue_attribute, attr)
Tom Herbert1d24eb42010-11-21 13:17:27 +00001167
1168#define to_netdev_queue(obj) container_of(obj, struct netdev_queue, kobj)
1169
1170static ssize_t netdev_queue_attr_show(struct kobject *kobj,
1171 struct attribute *attr, char *buf)
Ben Hutchings62fe0b42010-09-27 08:24:33 +00001172{
stephen hemminger667e4272017-08-18 13:46:27 -07001173 const struct netdev_queue_attribute *attribute
1174 = to_netdev_queue_attr(attr);
Tom Herbert1d24eb42010-11-21 13:17:27 +00001175 struct netdev_queue *queue = to_netdev_queue(kobj);
1176
1177 if (!attribute->show)
1178 return -EIO;
1179
stephen hemminger718ad682017-08-18 13:46:24 -07001180 return attribute->show(queue, buf);
Tom Herbert1d24eb42010-11-21 13:17:27 +00001181}
1182
1183static ssize_t netdev_queue_attr_store(struct kobject *kobj,
1184 struct attribute *attr,
1185 const char *buf, size_t count)
1186{
stephen hemminger667e4272017-08-18 13:46:27 -07001187 const struct netdev_queue_attribute *attribute
1188 = to_netdev_queue_attr(attr);
Tom Herbert1d24eb42010-11-21 13:17:27 +00001189 struct netdev_queue *queue = to_netdev_queue(kobj);
1190
1191 if (!attribute->store)
1192 return -EIO;
1193
stephen hemminger718ad682017-08-18 13:46:24 -07001194 return attribute->store(queue, buf, count);
Tom Herbert1d24eb42010-11-21 13:17:27 +00001195}
1196
1197static const struct sysfs_ops netdev_queue_sysfs_ops = {
1198 .show = netdev_queue_attr_show,
1199 .store = netdev_queue_attr_store,
1200};
1201
stephen hemminger2b9c7582017-08-18 13:46:26 -07001202static ssize_t tx_timeout_show(struct netdev_queue *queue, char *buf)
david decotignyccf5ff62011-11-16 12:15:10 +00001203{
Eric Dumazet8160fb42021-11-16 19:29:21 -08001204 unsigned long trans_timeout = atomic_long_read(&queue->trans_timeout);
david decotignyccf5ff62011-11-16 12:15:10 +00001205
Xiongfeng Wang9bb5fbe2020-07-21 15:02:57 +08001206 return sprintf(buf, fmt_ulong, trans_timeout);
david decotignyccf5ff62011-11-16 12:15:10 +00001207}
1208
Thadeu Lima de Souza Cascardoc4047f52015-09-15 18:28:00 -03001209static unsigned int get_netdev_queue_index(struct netdev_queue *queue)
John Fastabend822b3b22015-03-18 14:57:33 +02001210{
1211 struct net_device *dev = queue->dev;
Thadeu Lima de Souza Cascardoc4047f52015-09-15 18:28:00 -03001212 unsigned int i;
John Fastabend822b3b22015-03-18 14:57:33 +02001213
Thadeu Lima de Souza Cascardoc4047f52015-09-15 18:28:00 -03001214 i = queue - dev->_tx;
John Fastabend822b3b22015-03-18 14:57:33 +02001215 BUG_ON(i >= dev->num_tx_queues);
1216
1217 return i;
1218}
1219
stephen hemminger2b9c7582017-08-18 13:46:26 -07001220static ssize_t traffic_class_show(struct netdev_queue *queue,
Alexander Duyck8d059b02016-10-28 11:43:49 -04001221 char *buf)
1222{
1223 struct net_device *dev = queue->dev;
Alexander Duyckb2f17562021-02-08 14:29:18 -08001224 int num_tc, tc;
Alexander Duyckd7be9772018-07-09 12:19:32 -04001225 int index;
Alexander Duyck8d059b02016-10-28 11:43:49 -04001226
Alexander Duyckd7be9772018-07-09 12:19:32 -04001227 if (!netif_is_multiqueue(dev))
1228 return -ENOENT;
1229
Alexander Duyckb2f17562021-02-08 14:29:18 -08001230 if (!rtnl_trylock())
1231 return restart_syscall();
1232
Alexander Duyckd7be9772018-07-09 12:19:32 -04001233 index = get_netdev_queue_index(queue);
Alexander Duyckffcfe252018-07-09 12:19:38 -04001234
1235 /* If queue belongs to subordinate dev use its TC mapping */
1236 dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev;
1237
Alexander Duyckb2f17562021-02-08 14:29:18 -08001238 num_tc = dev->num_tc;
Alexander Duyckd7be9772018-07-09 12:19:32 -04001239 tc = netdev_txq_to_tc(dev, index);
Alexander Duyckb2f17562021-02-08 14:29:18 -08001240
1241 rtnl_unlock();
1242
Alexander Duyck8d059b02016-10-28 11:43:49 -04001243 if (tc < 0)
1244 return -EINVAL;
1245
Alexander Duyckffcfe252018-07-09 12:19:38 -04001246	/* We can report the traffic class one of two ways:
 1247	 * Subordinate device traffic classes are reported with the traffic
 1248	 * class first, then the subordinate class, so TC0 on subordinate
 1249	 * device 2 is reported as "0-2" (num_tc is negative for a
 1250	 * subordinate device and encodes its index).  A queue on the root
 1251	 * device is reported with just the traffic class, so just "0" for TC 0.
 1252	 */
Alexander Duyckb2f17562021-02-08 14:29:18 -08001253 return num_tc < 0 ? sprintf(buf, "%d%d\n", tc, num_tc) :
1254 sprintf(buf, "%d\n", tc);
Alexander Duyck8d059b02016-10-28 11:43:49 -04001255}
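/*
 * Reading this file from userspace thus yields either "0" (TC 0 on the
 * root device) or "0-2" (TC 0 on subordinate device 2).  A minimal
 * userspace sketch, built as a separate program; the eth0/tx-0 path is
 * an assumption for illustration:
 */
#if 0	/* userspace example, not part of this file */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/class/net/eth0/queues/tx-0/traffic_class", "r");
	int tc, subdev;

	if (!f)
		return 1;
	if (fscanf(f, "%d-%d", &tc, &subdev) == 2)
		printf("TC %d on subordinate device %d\n", tc, subdev);
	else
		printf("TC %d on the root device\n", tc);
	fclose(f);
	return 0;
}
#endif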
1256
1257#ifdef CONFIG_XPS
stephen hemminger2b9c7582017-08-18 13:46:26 -07001258static ssize_t tx_maxrate_show(struct netdev_queue *queue,
John Fastabend822b3b22015-03-18 14:57:33 +02001259 char *buf)
1260{
1261 return sprintf(buf, "%lu\n", queue->tx_maxrate);
1262}
1263
stephen hemminger2b9c7582017-08-18 13:46:26 -07001264static ssize_t tx_maxrate_store(struct netdev_queue *queue,
1265 const char *buf, size_t len)
John Fastabend822b3b22015-03-18 14:57:33 +02001266{
1267 struct net_device *dev = queue->dev;
1268 int err, index = get_netdev_queue_index(queue);
1269 u32 rate = 0;
1270
Tyler Hicks3033fce2018-07-20 21:56:51 +00001271 if (!capable(CAP_NET_ADMIN))
1272 return -EPERM;
1273
Antoine Tenart146e5e72021-10-07 16:00:51 +02001274	/* The check is repeated under the rtnl lock below; doing it here
 1275	 * first lets us return early without the trylock/restart path.
 1276	 */
1277 if (!dev->netdev_ops->ndo_set_tx_maxrate)
1278 return -EOPNOTSUPP;
1279
John Fastabend822b3b22015-03-18 14:57:33 +02001280 err = kstrtou32(buf, 10, &rate);
1281 if (err < 0)
1282 return err;
1283
1284 if (!rtnl_trylock())
1285 return restart_syscall();
1286
1287 err = -EOPNOTSUPP;
1288 if (dev->netdev_ops->ndo_set_tx_maxrate)
1289 err = dev->netdev_ops->ndo_set_tx_maxrate(dev, index, rate);
1290
1291 rtnl_unlock();
1292 if (!err) {
1293 queue->tx_maxrate = rate;
1294 return len;
1295 }
1296 return err;
1297}
1298
stephen hemminger2b9c7582017-08-18 13:46:26 -07001299static struct netdev_queue_attribute queue_tx_maxrate __ro_after_init
1300 = __ATTR_RW(tx_maxrate);
John Fastabend822b3b22015-03-18 14:57:33 +02001301#endif
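/*
 * tx_maxrate takes a plain decimal; drivers implementing
 * ndo_set_tx_maxrate interpret it in Mbit/s, and 0 removes the cap.  A
 * minimal userspace sketch; the eth0/tx-0 path and the 1000 Mbit/s
 * value are assumptions for illustration:
 */
#if 0	/* userspace example, not part of this file */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/class/net/eth0/queues/tx-0/tx_maxrate", "w");

	if (!f)
		return 1;
	fprintf(f, "1000\n");	/* cap this queue at ~1000 Mbit/s */
	return fclose(f) ? 1 : 0;
}
#endif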
1302
stephen hemminger2b9c7582017-08-18 13:46:26 -07001303static struct netdev_queue_attribute queue_trans_timeout __ro_after_init
1304 = __ATTR_RO(tx_timeout);
david decotignyccf5ff62011-11-16 12:15:10 +00001305
stephen hemminger2b9c7582017-08-18 13:46:26 -07001306static struct netdev_queue_attribute queue_traffic_class __ro_after_init
1307 = __ATTR_RO(traffic_class);
Alexander Duyck8d059b02016-10-28 11:43:49 -04001308
Tom Herbert114cf582011-11-28 16:33:09 +00001309#ifdef CONFIG_BQL
1310/*
1311 * Byte queue limits sysfs structures and functions.
1312 */
1313static ssize_t bql_show(char *buf, unsigned int value)
1314{
1315 return sprintf(buf, "%u\n", value);
1316}
1317
1318static ssize_t bql_set(const char *buf, const size_t count,
1319 unsigned int *pvalue)
1320{
1321 unsigned int value;
1322 int err;
1323
stephen hemminger6648c652017-08-18 13:46:28 -07001324 if (!strcmp(buf, "max") || !strcmp(buf, "max\n")) {
Tom Herbert114cf582011-11-28 16:33:09 +00001325 value = DQL_MAX_LIMIT;
stephen hemminger6648c652017-08-18 13:46:28 -07001326 } else {
Tom Herbert114cf582011-11-28 16:33:09 +00001327 err = kstrtouint(buf, 10, &value);
1328 if (err < 0)
1329 return err;
1330 if (value > DQL_MAX_LIMIT)
1331 return -EINVAL;
1332 }
1333
1334 *pvalue = value;
1335
1336 return count;
1337}
1338
1339static ssize_t bql_show_hold_time(struct netdev_queue *queue,
Tom Herbert114cf582011-11-28 16:33:09 +00001340 char *buf)
1341{
1342 struct dql *dql = &queue->dql;
1343
1344 return sprintf(buf, "%u\n", jiffies_to_msecs(dql->slack_hold_time));
1345}
1346
1347static ssize_t bql_set_hold_time(struct netdev_queue *queue,
Tom Herbert114cf582011-11-28 16:33:09 +00001348 const char *buf, size_t len)
1349{
1350 struct dql *dql = &queue->dql;
Eric Dumazet95c96172012-04-15 05:58:06 +00001351 unsigned int value;
Tom Herbert114cf582011-11-28 16:33:09 +00001352 int err;
1353
1354 err = kstrtouint(buf, 10, &value);
1355 if (err < 0)
1356 return err;
1357
1358 dql->slack_hold_time = msecs_to_jiffies(value);
1359
1360 return len;
1361}
1362
stephen hemminger170c6582017-08-18 13:46:25 -07001363static struct netdev_queue_attribute bql_hold_time_attribute __ro_after_init
Joe Perchesd6444062018-03-23 15:54:38 -07001364 = __ATTR(hold_time, 0644,
stephen hemminger170c6582017-08-18 13:46:25 -07001365 bql_show_hold_time, bql_set_hold_time);
Tom Herbert114cf582011-11-28 16:33:09 +00001366
1367static ssize_t bql_show_inflight(struct netdev_queue *queue,
Tom Herbert114cf582011-11-28 16:33:09 +00001368 char *buf)
1369{
1370 struct dql *dql = &queue->dql;
1371
1372 return sprintf(buf, "%u\n", dql->num_queued - dql->num_completed);
1373}
1374
stephen hemminger170c6582017-08-18 13:46:25 -07001375static struct netdev_queue_attribute bql_inflight_attribute __ro_after_init =
Joe Perchesd6444062018-03-23 15:54:38 -07001376 __ATTR(inflight, 0444, bql_show_inflight, NULL);
Tom Herbert114cf582011-11-28 16:33:09 +00001377
1378#define BQL_ATTR(NAME, FIELD) \
1379static ssize_t bql_show_ ## NAME(struct netdev_queue *queue, \
Tom Herbert114cf582011-11-28 16:33:09 +00001380 char *buf) \
1381{ \
1382 return bql_show(buf, queue->dql.FIELD); \
1383} \
1384 \
1385static ssize_t bql_set_ ## NAME(struct netdev_queue *queue, \
Tom Herbert114cf582011-11-28 16:33:09 +00001386 const char *buf, size_t len) \
1387{ \
1388 return bql_set(buf, len, &queue->dql.FIELD); \
1389} \
1390 \
stephen hemminger170c6582017-08-18 13:46:25 -07001391static struct netdev_queue_attribute bql_ ## NAME ## _attribute __ro_after_init \
Joe Perchesd6444062018-03-23 15:54:38 -07001392 = __ATTR(NAME, 0644, \
stephen hemminger170c6582017-08-18 13:46:25 -07001393 bql_show_ ## NAME, bql_set_ ## NAME)
Tom Herbert114cf582011-11-28 16:33:09 +00001394
stephen hemminger170c6582017-08-18 13:46:25 -07001395BQL_ATTR(limit, limit);
1396BQL_ATTR(limit_max, max_limit);
1397BQL_ATTR(limit_min, min_limit);
Tom Herbert114cf582011-11-28 16:33:09 +00001398
stephen hemminger170c6582017-08-18 13:46:25 -07001399static struct attribute *dql_attrs[] __ro_after_init = {
Tom Herbert114cf582011-11-28 16:33:09 +00001400 &bql_limit_attribute.attr,
1401 &bql_limit_max_attribute.attr,
1402 &bql_limit_min_attribute.attr,
1403 &bql_hold_time_attribute.attr,
1404 &bql_inflight_attribute.attr,
1405 NULL
1406};
1407
Arvind Yadav38ef00c2017-06-29 16:31:26 +05301408static const struct attribute_group dql_group = {
Tom Herbert114cf582011-11-28 16:33:09 +00001409 .name = "byte_queue_limits",
1410 .attrs = dql_attrs,
1411};
1412#endif /* CONFIG_BQL */
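/*
 * The group above appears as
 * /sys/class/net/<dev>/queues/tx-<n>/byte_queue_limits/ holding limit,
 * limit_min, limit_max, hold_time and inflight.  A minimal userspace
 * sketch reading the in-flight byte count and pinning the upper limit;
 * the eth0/tx-0 path and 300000-byte value are assumptions:
 */
#if 0	/* userspace example, not part of this file */
#include <stdio.h>

int main(void)
{
	const char *dir = "/sys/class/net/eth0/queues/tx-0/byte_queue_limits";
	unsigned int inflight;
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path), "%s/inflight", dir);
	f = fopen(path, "r");
	if (!f || fscanf(f, "%u", &inflight) != 1)
		return 1;
	fclose(f);
	printf("%u bytes in flight\n", inflight);

	snprintf(path, sizeof(path), "%s/limit_max", dir);
	f = fopen(path, "w");
	if (!f)
		return 1;
	fprintf(f, "300000\n");	/* bql_set() also accepts the string "max" */
	return fclose(f) ? 1 : 0;
}
#endif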
1413
david decotignyccf5ff62011-11-16 12:15:10 +00001414#ifdef CONFIG_XPS
Antoine Tenart2db6cda2021-03-18 19:37:50 +01001415static ssize_t xps_queue_show(struct net_device *dev, unsigned int index,
1416 int tc, char *buf, enum xps_map_type type)
Tom Herbert1d24eb42010-11-21 13:17:27 +00001417{
Tom Herbert1d24eb42010-11-21 13:17:27 +00001418 struct xps_dev_maps *dev_maps;
Antoine Tenartd9a063d22021-03-18 19:37:41 +01001419 unsigned long *mask;
Antoine Tenart2db6cda2021-03-18 19:37:50 +01001420 unsigned int nr_ids;
1421 int j, len;
Antoine Tenartd7be87a2021-03-18 19:37:49 +01001422
Tom Herbert1d24eb42010-11-21 13:17:27 +00001423 rcu_read_lock();
Antoine Tenart2db6cda2021-03-18 19:37:50 +01001424 dev_maps = rcu_dereference(dev->xps_maps[type]);
1425
 1426	/* If dev_maps hasn't been allocated yet, default nr_ids to
 1427	 * nr_cpu_ids/dev->num_rx_queues instead of just returning 0, for
 1428	 * backward compatibility. */
1429 nr_ids = dev_maps ? dev_maps->nr_ids :
1430 (type == XPS_CPUS ? nr_cpu_ids : dev->num_rx_queues);
Antoine Tenart5478fcd2021-03-18 19:37:44 +01001431
Antoine Tenart7f08ec62021-03-22 16:43:29 +01001432 mask = bitmap_zalloc(nr_ids, GFP_NOWAIT);
Antoine Tenart5478fcd2021-03-18 19:37:44 +01001433 if (!mask) {
Antoine Tenart2db6cda2021-03-18 19:37:50 +01001434 rcu_read_unlock();
1435 return -ENOMEM;
Antoine Tenart5478fcd2021-03-18 19:37:44 +01001436 }
1437
Antoine Tenart255c04a2021-03-18 19:37:43 +01001438 if (!dev_maps || tc >= dev_maps->num_tc)
Antoine Tenart73f5e522021-03-18 19:37:42 +01001439 goto out_no_maps;
Alexander Duyck184c4492016-10-28 11:50:13 -04001440
Antoine Tenart6f361582021-03-18 19:37:45 +01001441 for (j = 0; j < nr_ids; j++) {
Antoine Tenart255c04a2021-03-18 19:37:43 +01001442 int i, tci = j * dev_maps->num_tc + tc;
Antoine Tenart73f5e522021-03-18 19:37:42 +01001443 struct xps_map *map;
Alexander Duyck184c4492016-10-28 11:50:13 -04001444
Antoine Tenart73f5e522021-03-18 19:37:42 +01001445 map = rcu_dereference(dev_maps->attr_map[tci]);
1446 if (!map)
1447 continue;
1448
1449 for (i = map->len; i--;) {
1450 if (map->queues[i] == index) {
Christophe JAILLET08a7abf2021-11-21 19:01:03 +01001451 __set_bit(j, mask);
Antoine Tenart73f5e522021-03-18 19:37:42 +01001452 break;
Tom Herbert1d24eb42010-11-21 13:17:27 +00001453 }
1454 }
1455 }
Antoine Tenart73f5e522021-03-18 19:37:42 +01001456out_no_maps:
Tom Herbert1d24eb42010-11-21 13:17:27 +00001457 rcu_read_unlock();
Antoine Tenartfb250382020-12-23 22:23:21 +01001458
Antoine Tenart5478fcd2021-03-18 19:37:44 +01001459 len = bitmap_print_to_pagebuf(false, buf, mask, nr_ids);
Antoine Tenartea4fe7e2021-03-18 19:37:40 +01001460 bitmap_free(mask);
Antoine Tenartfb250382020-12-23 22:23:21 +01001461
Antoine Tenart2db6cda2021-03-18 19:37:50 +01001462 return len < PAGE_SIZE ? len : -EINVAL;
1463}
1464
1465static ssize_t xps_cpus_show(struct netdev_queue *queue, char *buf)
1466{
1467 struct net_device *dev = queue->dev;
1468 unsigned int index;
1469 int len, tc;
1470
1471 if (!netif_is_multiqueue(dev))
1472 return -ENOENT;
1473
1474 index = get_netdev_queue_index(queue);
1475
1476 if (!rtnl_trylock())
1477 return restart_syscall();
1478
1479 /* If queue belongs to subordinate dev use its map */
1480 dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev;
1481
1482 tc = netdev_txq_to_tc(dev, index);
1483 if (tc < 0) {
1484 rtnl_unlock();
1485 return -EINVAL;
1486 }
1487
1488 /* Make sure the subordinate device can't be freed */
1489 get_device(&dev->dev);
1490 rtnl_unlock();
1491
1492 len = xps_queue_show(dev, index, tc, buf, XPS_CPUS);
1493
Antoine Tenartd7be87a2021-03-18 19:37:49 +01001494 put_device(&dev->dev);
Antoine Tenart2db6cda2021-03-18 19:37:50 +01001495 return len;
Tom Herbert1d24eb42010-11-21 13:17:27 +00001496}
1497
stephen hemminger2b9c7582017-08-18 13:46:26 -07001498static ssize_t xps_cpus_store(struct netdev_queue *queue,
1499 const char *buf, size_t len)
Tom Herbert1d24eb42010-11-21 13:17:27 +00001500{
1501 struct net_device *dev = queue->dev;
Antoine Tenartd9a063d22021-03-18 19:37:41 +01001502 unsigned int index;
Alexander Duyck537c00d2013-01-10 08:57:02 +00001503 cpumask_var_t mask;
1504 int err;
Tom Herbert1d24eb42010-11-21 13:17:27 +00001505
Alexander Duyckd7be9772018-07-09 12:19:32 -04001506 if (!netif_is_multiqueue(dev))
1507 return -ENOENT;
1508
Tom Herbert1d24eb42010-11-21 13:17:27 +00001509 if (!capable(CAP_NET_ADMIN))
1510 return -EPERM;
1511
1512 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
1513 return -ENOMEM;
1514
1515 index = get_netdev_queue_index(queue);
1516
1517 err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
1518 if (err) {
1519 free_cpumask_var(mask);
1520 return err;
1521 }
1522
Antoine Tenart1ad582252020-12-23 22:23:20 +01001523 if (!rtnl_trylock()) {
1524 free_cpumask_var(mask);
1525 return restart_syscall();
1526 }
1527
Alexander Duyck537c00d2013-01-10 08:57:02 +00001528 err = netif_set_xps_queue(dev, mask, index);
Antoine Tenart1ad582252020-12-23 22:23:20 +01001529 rtnl_unlock();
Tom Herbert1d24eb42010-11-21 13:17:27 +00001530
1531 free_cpumask_var(mask);
Tom Herbert1d24eb42010-11-21 13:17:27 +00001532
Alexander Duyck537c00d2013-01-10 08:57:02 +00001533 return err ? : len;
Tom Herbert1d24eb42010-11-21 13:17:27 +00001534}
1535
stephen hemminger2b9c7582017-08-18 13:46:26 -07001536static struct netdev_queue_attribute xps_cpus_attribute __ro_after_init
1537 = __ATTR_RW(xps_cpus);
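/*
 * xps_cpus takes the same comma-separated hex mask that bitmap_parse()
 * understands ("f" selects CPUs 0-3) and is printed back through
 * bitmap_print_to_pagebuf().  A minimal userspace sketch; the eth0/tx-0
 * path and the "f" mask are assumptions for illustration:
 */
#if 0	/* userspace example, not part of this file */
#include <stdio.h>

int main(void)
{
	const char *path = "/sys/class/net/eth0/queues/tx-0/xps_cpus";
	char mask[128];
	FILE *f = fopen(path, "w");

	if (!f)
		return 1;
	fprintf(f, "f\n");	/* steer this queue's transmits to CPUs 0-3 */
	fclose(f);

	f = fopen(path, "r");
	if (!f || !fgets(mask, sizeof(mask), f))
		return 1;
	printf("xps_cpus = %s", mask);
	fclose(f);
	return 0;
}
#endif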
Amritha Nambiar8af2c062018-06-29 21:27:07 -07001538
1539static ssize_t xps_rxqs_show(struct netdev_queue *queue, char *buf)
1540{
1541 struct net_device *dev = queue->dev;
Antoine Tenart2db6cda2021-03-18 19:37:50 +01001542 unsigned int index;
1543 int tc;
Amritha Nambiar8af2c062018-06-29 21:27:07 -07001544
1545 index = get_netdev_queue_index(queue);
1546
Antoine Tenart4ae2bb82020-12-23 22:23:23 +01001547 if (!rtnl_trylock())
1548 return restart_syscall();
1549
Antoine Tenart255c04a2021-03-18 19:37:43 +01001550 tc = netdev_txq_to_tc(dev, index);
Antoine Tenartd7be87a2021-03-18 19:37:49 +01001551 rtnl_unlock();
1552 if (tc < 0)
1553 return -EINVAL;
Antoine Tenart255c04a2021-03-18 19:37:43 +01001554
Antoine Tenart2db6cda2021-03-18 19:37:50 +01001555 return xps_queue_show(dev, index, tc, buf, XPS_RXQS);
Amritha Nambiar8af2c062018-06-29 21:27:07 -07001556}
1557
1558static ssize_t xps_rxqs_store(struct netdev_queue *queue, const char *buf,
1559 size_t len)
1560{
1561 struct net_device *dev = queue->dev;
1562 struct net *net = dev_net(dev);
Antoine Tenartd9a063d22021-03-18 19:37:41 +01001563 unsigned long *mask;
1564 unsigned int index;
Amritha Nambiar8af2c062018-06-29 21:27:07 -07001565 int err;
1566
1567 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
1568 return -EPERM;
1569
Andy Shevchenko29ca1c52019-03-04 11:48:56 +02001570 mask = bitmap_zalloc(dev->num_rx_queues, GFP_KERNEL);
Amritha Nambiar8af2c062018-06-29 21:27:07 -07001571 if (!mask)
1572 return -ENOMEM;
1573
1574 index = get_netdev_queue_index(queue);
1575
1576 err = bitmap_parse(buf, len, mask, dev->num_rx_queues);
1577 if (err) {
Andy Shevchenko29ca1c52019-03-04 11:48:56 +02001578 bitmap_free(mask);
Amritha Nambiar8af2c062018-06-29 21:27:07 -07001579 return err;
1580 }
1581
Antoine Tenart2d57b4f2020-12-23 22:23:22 +01001582 if (!rtnl_trylock()) {
1583 bitmap_free(mask);
1584 return restart_syscall();
1585 }
1586
Andrei Vagin4d99f662018-08-08 20:07:35 -07001587 cpus_read_lock();
Antoine Tenart044ab862021-03-18 19:37:46 +01001588 err = __netif_set_xps_queue(dev, mask, index, XPS_RXQS);
Andrei Vagin4d99f662018-08-08 20:07:35 -07001589 cpus_read_unlock();
1590
Antoine Tenart2d57b4f2020-12-23 22:23:22 +01001591 rtnl_unlock();
1592
Andy Shevchenko29ca1c52019-03-04 11:48:56 +02001593 bitmap_free(mask);
Amritha Nambiar8af2c062018-06-29 21:27:07 -07001594 return err ? : len;
1595}
1596
1597static struct netdev_queue_attribute xps_rxqs_attribute __ro_after_init
1598 = __ATTR_RW(xps_rxqs);
david decotignyccf5ff62011-11-16 12:15:10 +00001599#endif /* CONFIG_XPS */
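/*
 * xps_rxqs mirrors xps_cpus, but the written hex mask selects receive
 * queues instead of CPUs, enabling receive-queue-based transmit
 * steering.  A minimal userspace sketch mapping this TX queue to RX
 * queue 0; the eth0/tx-0 path is an assumption:
 */
#if 0	/* userspace example, not part of this file */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/class/net/eth0/queues/tx-0/xps_rxqs", "w");

	if (!f)
		return 1;
	fprintf(f, "1\n");	/* hex mask: bit 0 = rx queue 0 */
	return fclose(f) ? 1 : 0;
}
#endif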
Tom Herbert1d24eb42010-11-21 13:17:27 +00001600
stephen hemminger2b9c7582017-08-18 13:46:26 -07001601static struct attribute *netdev_queue_default_attrs[] __ro_after_init = {
david decotignyccf5ff62011-11-16 12:15:10 +00001602 &queue_trans_timeout.attr,
Alexander Duyck8d059b02016-10-28 11:43:49 -04001603 &queue_traffic_class.attr,
david decotignyccf5ff62011-11-16 12:15:10 +00001604#ifdef CONFIG_XPS
Tom Herbert1d24eb42010-11-21 13:17:27 +00001605 &xps_cpus_attribute.attr,
Amritha Nambiar8af2c062018-06-29 21:27:07 -07001606 &xps_rxqs_attribute.attr,
John Fastabend822b3b22015-03-18 14:57:33 +02001607 &queue_tx_maxrate.attr,
david decotignyccf5ff62011-11-16 12:15:10 +00001608#endif
Tom Herbert1d24eb42010-11-21 13:17:27 +00001609 NULL
1610};
Kimberly Brownbe0d6922019-04-01 22:51:35 -04001611ATTRIBUTE_GROUPS(netdev_queue_default);
Tom Herbert1d24eb42010-11-21 13:17:27 +00001612
1613static void netdev_queue_release(struct kobject *kobj)
1614{
1615 struct netdev_queue *queue = to_netdev_queue(kobj);
Tom Herbert1d24eb42010-11-21 13:17:27 +00001616
Tom Herbert1d24eb42010-11-21 13:17:27 +00001617 memset(kobj, 0, sizeof(*kobj));
1618 dev_put(queue->dev);
1619}
1620
Weilong Chen82ef3d52014-01-16 17:24:31 +08001621static const void *netdev_queue_namespace(struct kobject *kobj)
1622{
1623 struct netdev_queue *queue = to_netdev_queue(kobj);
1624 struct device *dev = &queue->dev->dev;
1625 const void *ns = NULL;
1626
1627 if (dev->class && dev->class->ns_type)
1628 ns = dev->class->namespace(dev);
1629
1630 return ns;
1631}
1632
Dmitry Torokhovb0e37c02018-07-20 21:56:52 +00001633static void netdev_queue_get_ownership(struct kobject *kobj,
1634 kuid_t *uid, kgid_t *gid)
1635{
1636 const struct net *net = netdev_queue_namespace(kobj);
1637
1638 net_ns_get_ownership(net, uid, gid);
1639}
1640
stephen hemminger2b9c7582017-08-18 13:46:26 -07001641static struct kobj_type netdev_queue_ktype __ro_after_init = {
Tom Herbert1d24eb42010-11-21 13:17:27 +00001642 .sysfs_ops = &netdev_queue_sysfs_ops,
1643 .release = netdev_queue_release,
Kimberly Brownbe0d6922019-04-01 22:51:35 -04001644 .default_groups = netdev_queue_default_groups,
Weilong Chen82ef3d52014-01-16 17:24:31 +08001645 .namespace = netdev_queue_namespace,
Dmitry Torokhovb0e37c02018-07-20 21:56:52 +00001646 .get_ownership = netdev_queue_get_ownership,
Tom Herbert1d24eb42010-11-21 13:17:27 +00001647};
1648
WANG Cong6b53daf2014-07-23 16:09:10 -07001649static int netdev_queue_add_kobject(struct net_device *dev, int index)
Tom Herbert1d24eb42010-11-21 13:17:27 +00001650{
WANG Cong6b53daf2014-07-23 16:09:10 -07001651 struct netdev_queue *queue = dev->_tx + index;
Tom Herbert1d24eb42010-11-21 13:17:27 +00001652 struct kobject *kobj = &queue->kobj;
1653 int error = 0;
1654
Jouni Hogandere0b609032019-12-05 15:57:07 +02001655	/* The kobject_put() below triggers netdev_queue_release(), which
 1656	 * drops a dev refcount: take that reference here.
 1657	 */
1658 dev_hold(queue->dev);
1659
WANG Cong6b53daf2014-07-23 16:09:10 -07001660 kobj->kset = dev->queues_kset;
Tom Herbert1d24eb42010-11-21 13:17:27 +00001661 error = kobject_init_and_add(kobj, &netdev_queue_ktype, NULL,
stephen hemminger6648c652017-08-18 13:46:28 -07001662 "tx-%u", index);
Tom Herbert114cf582011-11-28 16:33:09 +00001663 if (error)
Jouni Hoganderb8eb7182019-11-20 09:08:16 +02001664 goto err;
Tom Herbert114cf582011-11-28 16:33:09 +00001665
1666#ifdef CONFIG_BQL
1667 error = sysfs_create_group(kobj, &dql_group);
Jouni Hoganderb8eb7182019-11-20 09:08:16 +02001668 if (error)
1669 goto err;
Tom Herbert114cf582011-11-28 16:33:09 +00001670#endif
Tom Herbert1d24eb42010-11-21 13:17:27 +00001671
1672 kobject_uevent(kobj, KOBJ_ADD);
Eric Dumazet48a322b2019-11-20 19:19:07 -08001673 return 0;
Tom Herbert1d24eb42010-11-21 13:17:27 +00001674
Jouni Hoganderb8eb7182019-11-20 09:08:16 +02001675err:
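	/* kobject_init_and_add() initializes the kobject's refcount even on
	 * failure, so the error path must use kobject_put(), never kfree():
	 * the final put runs netdev_queue_release(), which also drops the
	 * dev reference taken above.
	 */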
1676 kobject_put(kobj);
1677 return error;
Tom Herbert1d24eb42010-11-21 13:17:27 +00001678}
Christian Braunerd7554072020-02-27 04:37:18 +01001679
1680static int tx_queue_change_owner(struct net_device *ndev, int index,
1681 kuid_t kuid, kgid_t kgid)
1682{
1683 struct netdev_queue *queue = ndev->_tx + index;
1684 struct kobject *kobj = &queue->kobj;
1685 int error;
1686
1687 error = sysfs_change_owner(kobj, kuid, kgid);
1688 if (error)
1689 return error;
1690
1691#ifdef CONFIG_BQL
1692 error = sysfs_group_change_owner(kobj, &dql_group, kuid, kgid);
1693#endif
1694 return error;
1695}
david decotignyccf5ff62011-11-16 12:15:10 +00001696#endif /* CONFIG_SYSFS */
Tom Herbert1d24eb42010-11-21 13:17:27 +00001697
1698int
WANG Cong6b53daf2014-07-23 16:09:10 -07001699netdev_queue_update_kobjects(struct net_device *dev, int old_num, int new_num)
Tom Herbert1d24eb42010-11-21 13:17:27 +00001700{
david decotignyccf5ff62011-11-16 12:15:10 +00001701#ifdef CONFIG_SYSFS
Tom Herbert1d24eb42010-11-21 13:17:27 +00001702 int i;
1703 int error = 0;
1704
1705 for (i = old_num; i < new_num; i++) {
WANG Cong6b53daf2014-07-23 16:09:10 -07001706 error = netdev_queue_add_kobject(dev, i);
Tom Herbert1d24eb42010-11-21 13:17:27 +00001707 if (error) {
1708 new_num = old_num;
1709 break;
1710 }
1711 }
1712
Tom Herbert114cf582011-11-28 16:33:09 +00001713 while (--i >= new_num) {
WANG Cong6b53daf2014-07-23 16:09:10 -07001714 struct netdev_queue *queue = dev->_tx + i;
Tom Herbert114cf582011-11-28 16:33:09 +00001715
Christian Brauner8b8f3e62020-08-19 14:06:36 +02001716 if (!refcount_read(&dev_net(dev)->ns.count))
Andrey Vagin002d8a12016-10-24 19:09:53 -07001717 queue->kobj.uevent_suppress = 1;
Tom Herbert114cf582011-11-28 16:33:09 +00001718#ifdef CONFIG_BQL
1719 sysfs_remove_group(&queue->kobj, &dql_group);
1720#endif
1721 kobject_put(&queue->kobj);
1722 }
Tom Herbert1d24eb42010-11-21 13:17:27 +00001723
1724 return error;
Tom Herbertbf264142010-11-26 08:36:09 +00001725#else
1726 return 0;
david decotignyccf5ff62011-11-16 12:15:10 +00001727#endif /* CONFIG_SYSFS */
Tom Herbert1d24eb42010-11-21 13:17:27 +00001728}
1729
Christian Braunerd7554072020-02-27 04:37:18 +01001730static int net_tx_queue_change_owner(struct net_device *dev, int num,
1731 kuid_t kuid, kgid_t kgid)
1732{
1733#ifdef CONFIG_SYSFS
1734 int error = 0;
1735 int i;
1736
1737 for (i = 0; i < num; i++) {
1738 error = tx_queue_change_owner(dev, i, kuid, kgid);
1739 if (error)
1740 break;
1741 }
1742
1743 return error;
1744#else
1745 return 0;
1746#endif /* CONFIG_SYSFS */
1747}
1748
WANG Cong6b53daf2014-07-23 16:09:10 -07001749static int register_queue_kobjects(struct net_device *dev)
Tom Herbert1d24eb42010-11-21 13:17:27 +00001750{
Tom Herbertbf264142010-11-26 08:36:09 +00001751 int error = 0, txq = 0, rxq = 0, real_rx = 0, real_tx = 0;
Tom Herbert1d24eb42010-11-21 13:17:27 +00001752
david decotignyccf5ff62011-11-16 12:15:10 +00001753#ifdef CONFIG_SYSFS
WANG Cong6b53daf2014-07-23 16:09:10 -07001754 dev->queues_kset = kset_create_and_add("queues",
stephen hemminger6648c652017-08-18 13:46:28 -07001755 NULL, &dev->dev.kobj);
WANG Cong6b53daf2014-07-23 16:09:10 -07001756 if (!dev->queues_kset)
Ben Hutchings62fe0b42010-09-27 08:24:33 +00001757 return -ENOMEM;
WANG Cong6b53daf2014-07-23 16:09:10 -07001758 real_rx = dev->real_num_rx_queues;
Tom Herbertbf264142010-11-26 08:36:09 +00001759#endif
WANG Cong6b53daf2014-07-23 16:09:10 -07001760 real_tx = dev->real_num_tx_queues;
Tom Herbertbf264142010-11-26 08:36:09 +00001761
WANG Cong6b53daf2014-07-23 16:09:10 -07001762 error = net_rx_queue_update_kobjects(dev, 0, real_rx);
Tom Herbert1d24eb42010-11-21 13:17:27 +00001763 if (error)
1764 goto error;
Tom Herbertbf264142010-11-26 08:36:09 +00001765 rxq = real_rx;
Tom Herbert1d24eb42010-11-21 13:17:27 +00001766
WANG Cong6b53daf2014-07-23 16:09:10 -07001767 error = netdev_queue_update_kobjects(dev, 0, real_tx);
Tom Herbert1d24eb42010-11-21 13:17:27 +00001768 if (error)
1769 goto error;
Tom Herbertbf264142010-11-26 08:36:09 +00001770 txq = real_tx;
Tom Herbert1d24eb42010-11-21 13:17:27 +00001771
1772 return 0;
1773
1774error:
WANG Cong6b53daf2014-07-23 16:09:10 -07001775 netdev_queue_update_kobjects(dev, txq, 0);
1776 net_rx_queue_update_kobjects(dev, rxq, 0);
YueHaibing895a5e92019-03-02 10:34:55 +08001777#ifdef CONFIG_SYSFS
1778 kset_unregister(dev->queues_kset);
1779#endif
Tom Herbert1d24eb42010-11-21 13:17:27 +00001780 return error;
Ben Hutchings62fe0b42010-09-27 08:24:33 +00001781}
1782
Christian Braunerd7554072020-02-27 04:37:18 +01001783static int queue_change_owner(struct net_device *ndev, kuid_t kuid, kgid_t kgid)
1784{
1785 int error = 0, real_rx = 0, real_tx = 0;
1786
1787#ifdef CONFIG_SYSFS
1788 if (ndev->queues_kset) {
1789 error = sysfs_change_owner(&ndev->queues_kset->kobj, kuid, kgid);
1790 if (error)
1791 return error;
1792 }
1793 real_rx = ndev->real_num_rx_queues;
1794#endif
1795 real_tx = ndev->real_num_tx_queues;
1796
1797 error = net_rx_queue_change_owner(ndev, real_rx, kuid, kgid);
1798 if (error)
1799 return error;
1800
1801 error = net_tx_queue_change_owner(ndev, real_tx, kuid, kgid);
1802 if (error)
1803 return error;
1804
1805 return 0;
1806}
1807
WANG Cong6b53daf2014-07-23 16:09:10 -07001808static void remove_queue_kobjects(struct net_device *dev)
Tom Herbert0a9627f2010-03-16 08:03:29 +00001809{
Tom Herbertbf264142010-11-26 08:36:09 +00001810 int real_rx = 0, real_tx = 0;
1811
Michael Daltona953be52014-01-16 22:23:28 -08001812#ifdef CONFIG_SYSFS
WANG Cong6b53daf2014-07-23 16:09:10 -07001813 real_rx = dev->real_num_rx_queues;
Tom Herbertbf264142010-11-26 08:36:09 +00001814#endif
WANG Cong6b53daf2014-07-23 16:09:10 -07001815 real_tx = dev->real_num_tx_queues;
Tom Herbertbf264142010-11-26 08:36:09 +00001816
WANG Cong6b53daf2014-07-23 16:09:10 -07001817 net_rx_queue_update_kobjects(dev, real_rx, 0);
1818 netdev_queue_update_kobjects(dev, real_tx, 0);
david decotignyccf5ff62011-11-16 12:15:10 +00001819#ifdef CONFIG_SYSFS
WANG Cong6b53daf2014-07-23 16:09:10 -07001820 kset_unregister(dev->queues_kset);
Tom Herbertbf264142010-11-26 08:36:09 +00001821#endif
Tom Herbert0a9627f2010-03-16 08:03:29 +00001822}
Eric W. Biederman608b4b92010-05-04 17:36:45 -07001823
Eric W. Biederman7dc5dbc2013-03-25 20:07:01 -07001824static bool net_current_may_mount(void)
1825{
1826 struct net *net = current->nsproxy->net_ns;
1827
1828 return ns_capable(net->user_ns, CAP_SYS_ADMIN);
1829}
1830
Al Viroa685e082011-06-08 21:13:01 -04001831static void *net_grab_current_ns(void)
Eric W. Biederman608b4b92010-05-04 17:36:45 -07001832{
Al Viroa685e082011-06-08 21:13:01 -04001833 struct net *ns = current->nsproxy->net_ns;
1834#ifdef CONFIG_NET_NS
1835 if (ns)
Reshetova, Elenac122e142017-06-30 13:08:08 +03001836 refcount_inc(&ns->passive);
Al Viroa685e082011-06-08 21:13:01 -04001837#endif
1838 return ns;
Eric W. Biederman608b4b92010-05-04 17:36:45 -07001839}
1840
1841static const void *net_initial_ns(void)
1842{
1843 return &init_net;
1844}
1845
1846static const void *net_netlink_ns(struct sock *sk)
1847{
1848 return sock_net(sk);
1849}
1850
stephen hemminger737aec52017-08-18 13:46:22 -07001851const struct kobj_ns_type_operations net_ns_type_operations = {
Eric W. Biederman608b4b92010-05-04 17:36:45 -07001852 .type = KOBJ_NS_TYPE_NET,
Eric W. Biederman7dc5dbc2013-03-25 20:07:01 -07001853 .current_may_mount = net_current_may_mount,
Al Viroa685e082011-06-08 21:13:01 -04001854 .grab_current_ns = net_grab_current_ns,
Eric W. Biederman608b4b92010-05-04 17:36:45 -07001855 .netlink_ns = net_netlink_ns,
1856 .initial_ns = net_initial_ns,
Al Viroa685e082011-06-08 21:13:01 -04001857 .drop_ns = net_drop_ns,
Eric W. Biederman608b4b92010-05-04 17:36:45 -07001858};
Johannes Berg04600792010-08-05 17:45:15 +02001859EXPORT_SYMBOL_GPL(net_ns_type_operations);
Eric W. Biederman608b4b92010-05-04 17:36:45 -07001860
Kay Sievers7eff2e72007-08-14 15:15:12 +02001861static int netdev_uevent(struct device *d, struct kobj_uevent_env *env)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001862{
Greg Kroah-Hartman43cb76d2002-04-09 12:14:34 -07001863 struct net_device *dev = to_net_dev(d);
Kay Sievers7eff2e72007-08-14 15:15:12 +02001864 int retval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001865
Kay Sievers312c0042005-11-16 09:00:00 +01001866 /* pass interface to uevent. */
Kay Sievers7eff2e72007-08-14 15:15:12 +02001867 retval = add_uevent_var(env, "INTERFACE=%s", dev->name);
Eric Rannaudbf624562007-03-30 22:23:12 -07001868 if (retval)
1869 goto exit;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001870
Jean Tourrilhesca2f37d2007-03-07 10:49:30 -08001871 /* pass ifindex to uevent.
1872 * ifindex is useful as it won't change (interface name may change)
stephen hemminger6648c652017-08-18 13:46:28 -07001873 * and is what RtNetlink uses natively.
1874 */
Kay Sievers7eff2e72007-08-14 15:15:12 +02001875 retval = add_uevent_var(env, "IFINDEX=%d", dev->ifindex);
Jean Tourrilhesca2f37d2007-03-07 10:49:30 -08001876
Eric Rannaudbf624562007-03-30 22:23:12 -07001877exit:
Eric Rannaudbf624562007-03-30 22:23:12 -07001878 return retval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001879}
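/*
 * With the two variables added above, a netdev uevent as delivered to
 * udev or a NETLINK_KOBJECT_UEVENT listener looks roughly like this
 * (values are illustrative):
 *
 *	ACTION=add
 *	DEVPATH=/devices/virtual/net/eth0
 *	SUBSYSTEM=net
 *	INTERFACE=eth0
 *	IFINDEX=2
 */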
Linus Torvalds1da177e2005-04-16 15:20:36 -07001880
1881/*
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001882 * netdev_release -- destroy and free a dead device.
Greg Kroah-Hartman43cb76d2002-04-09 12:14:34 -07001883 * Called when last reference to device kobject is gone.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001884 */
Greg Kroah-Hartman43cb76d2002-04-09 12:14:34 -07001885static void netdev_release(struct device *d)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001886{
Greg Kroah-Hartman43cb76d2002-04-09 12:14:34 -07001887 struct net_device *dev = to_net_dev(d);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001888
1889 BUG_ON(dev->reg_state != NETREG_RELEASED);
1890
Florian Westphal6c557002017-10-02 23:50:05 +02001891 /* no need to wait for rcu grace period:
1892 * device is dead and about to be freed.
1893 */
1894 kfree(rcu_access_pointer(dev->ifalias));
Eric Dumazet74d332c2013-10-30 13:10:44 -07001895 netdev_freemem(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001896}
1897
Eric W. Biederman608b4b92010-05-04 17:36:45 -07001898static const void *net_namespace(struct device *d)
1899{
Geliang Tang5c294822015-12-22 23:11:49 +08001900 struct net_device *dev = to_net_dev(d);
1901
Eric W. Biederman608b4b92010-05-04 17:36:45 -07001902 return dev_net(dev);
1903}
1904
Dmitry Torokhovb0e37c02018-07-20 21:56:52 +00001905static void net_get_ownership(struct device *d, kuid_t *uid, kgid_t *gid)
1906{
1907 struct net_device *dev = to_net_dev(d);
1908 const struct net *net = dev_net(dev);
1909
1910 net_ns_get_ownership(net, uid, gid);
1911}
1912
stephen hemmingere6d473e2017-08-18 13:46:21 -07001913static struct class net_class __ro_after_init = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001914 .name = "net",
Greg Kroah-Hartman43cb76d2002-04-09 12:14:34 -07001915 .dev_release = netdev_release,
Greg Kroah-Hartman6be8aee2013-07-24 15:05:33 -07001916 .dev_groups = net_class_groups,
Greg Kroah-Hartman43cb76d2002-04-09 12:14:34 -07001917 .dev_uevent = netdev_uevent,
Eric W. Biederman608b4b92010-05-04 17:36:45 -07001918 .ns_type = &net_ns_type_operations,
1919 .namespace = net_namespace,
Dmitry Torokhovb0e37c02018-07-20 21:56:52 +00001920 .get_ownership = net_get_ownership,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001921};
1922
Jakub Kicinskie330fb12021-10-06 18:06:54 -07001923#ifdef CONFIG_OF
Florian Fainelliaa836df2015-03-09 14:31:20 -07001924static int of_dev_node_match(struct device *dev, const void *data)
1925{
Tobias Waldekranz2e186a22020-05-15 11:52:52 +02001926 for (; dev; dev = dev->parent) {
1927 if (dev->of_node == data)
1928 return 1;
1929 }
Florian Fainelliaa836df2015-03-09 14:31:20 -07001930
Tobias Waldekranz2e186a22020-05-15 11:52:52 +02001931 return 0;
Florian Fainelliaa836df2015-03-09 14:31:20 -07001932}
1933
Russell King9861f722015-09-24 20:36:33 +01001934/*
1935 * of_find_net_device_by_node - lookup the net device for the device node
1936 * @np: OF device node
1937 *
1938 * Looks up the net_device structure corresponding with the device node.
1939 * If successful, returns a pointer to the net_device with the embedded
1940 * struct device refcount incremented by one, or NULL on failure. The
1941 * refcount must be dropped when done with the net_device.
1942 */
Florian Fainelliaa836df2015-03-09 14:31:20 -07001943struct net_device *of_find_net_device_by_node(struct device_node *np)
1944{
1945 struct device *dev;
1946
1947 dev = class_find_device(&net_class, NULL, np, of_dev_node_match);
1948 if (!dev)
1949 return NULL;
1950
1951 return to_net_dev(dev);
1952}
1953EXPORT_SYMBOL(of_find_net_device_by_node);
1954#endif
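/*
 * A hedged usage sketch for the lookup above, in the style of a DSA or
 * switch driver probe path (the function and its "port_np" argument
 * are hypothetical, not part of this file):
 */
#if 0	/* illustrative sketch, not built */
static int example_bind_port(struct device_node *port_np)
{
	struct net_device *ndev = of_find_net_device_by_node(port_np);

	if (!ndev)
		return -EPROBE_DEFER;	/* netdev not registered yet */

	/* ... use ndev ..., then drop the embedded device reference: */
	put_device(&ndev->dev);
	return 0;
}
#endif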
1955
Stephen Hemminger9093bbb2007-05-19 15:39:25 -07001956/* Delete sysfs entries but hold kobject reference until after all
1957 * netdev references are gone.
1958 */
WANG Cong6b53daf2014-07-23 16:09:10 -07001959void netdev_unregister_kobject(struct net_device *ndev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001960{
stephen hemminger6648c652017-08-18 13:46:28 -07001961 struct device *dev = &ndev->dev;
Stephen Hemminger9093bbb2007-05-19 15:39:25 -07001962
Christian Brauner8b8f3e62020-08-19 14:06:36 +02001963 if (!refcount_read(&dev_net(ndev)->ns.count))
Andrey Vagin002d8a12016-10-24 19:09:53 -07001964 dev_set_uevent_suppress(dev, 1);
1965
Stephen Hemminger9093bbb2007-05-19 15:39:25 -07001966 kobject_get(&dev->kobj);
Eric W. Biederman38918452008-10-27 17:51:47 -07001967
WANG Cong6b53daf2014-07-23 16:09:10 -07001968 remove_queue_kobjects(ndev);
Tom Herbert0a9627f2010-03-16 08:03:29 +00001969
Ming Lei9802c8e2013-02-22 16:34:16 -08001970 pm_runtime_set_memalloc_noio(dev, false);
1971
Stephen Hemminger9093bbb2007-05-19 15:39:25 -07001972 device_del(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001973}
1974
1975/* Create sysfs entries for network device. */
WANG Cong6b53daf2014-07-23 16:09:10 -07001976int netdev_register_kobject(struct net_device *ndev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001977{
stephen hemminger6648c652017-08-18 13:46:28 -07001978 struct device *dev = &ndev->dev;
WANG Cong6b53daf2014-07-23 16:09:10 -07001979 const struct attribute_group **groups = ndev->sysfs_groups;
Tom Herbert0a9627f2010-03-16 08:03:29 +00001980 int error = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001981
Eric W. Biedermana1b3f592010-05-04 17:36:49 -07001982 device_initialize(dev);
Greg Kroah-Hartman43cb76d2002-04-09 12:14:34 -07001983 dev->class = &net_class;
WANG Cong6b53daf2014-07-23 16:09:10 -07001984 dev->platform_data = ndev;
Greg Kroah-Hartman43cb76d2002-04-09 12:14:34 -07001985 dev->groups = groups;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001986
WANG Cong6b53daf2014-07-23 16:09:10 -07001987 dev_set_name(dev, "%s", ndev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001988
Eric W. Biederman8b41d182007-09-26 22:02:53 -07001989#ifdef CONFIG_SYSFS
Eric W. Biederman0c509a62009-10-29 14:18:21 +00001990 /* Allow for a device specific group */
1991 if (*groups)
1992 groups++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001993
Eric W. Biederman0c509a62009-10-29 14:18:21 +00001994 *groups++ = &netstat_group;
Johannes Berg38c1a012012-11-16 20:46:19 +01001995
1996#if IS_ENABLED(CONFIG_WIRELESS_EXT) || IS_ENABLED(CONFIG_CFG80211)
WANG Cong6b53daf2014-07-23 16:09:10 -07001997 if (ndev->ieee80211_ptr)
Johannes Berg38c1a012012-11-16 20:46:19 +01001998 *groups++ = &wireless_group;
1999#if IS_ENABLED(CONFIG_WIRELESS_EXT)
WANG Cong6b53daf2014-07-23 16:09:10 -07002000 else if (ndev->wireless_handlers)
Johannes Berg38c1a012012-11-16 20:46:19 +01002001 *groups++ = &wireless_group;
2002#endif
2003#endif
Eric W. Biederman8b41d182007-09-26 22:02:53 -07002004#endif /* CONFIG_SYSFS */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002005
Tom Herbert0a9627f2010-03-16 08:03:29 +00002006 error = device_add(dev);
2007 if (error)
Wang Hai8ed633b2019-04-12 16:36:33 -04002008 return error;
Tom Herbert0a9627f2010-03-16 08:03:29 +00002009
WANG Cong6b53daf2014-07-23 16:09:10 -07002010 error = register_queue_kobjects(ndev);
Wang Hai8ed633b2019-04-12 16:36:33 -04002011 if (error) {
2012 device_del(dev);
2013 return error;
2014 }
Tom Herbert0a9627f2010-03-16 08:03:29 +00002015
Ming Lei9802c8e2013-02-22 16:34:16 -08002016 pm_runtime_set_memalloc_noio(dev, true);
2017
Tom Herbert0a9627f2010-03-16 08:03:29 +00002018 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002019}
2020
Christian Braunere6dee9f2020-02-27 04:37:17 +01002021/* Change owner for sysfs entries when moving network devices across network
2022 * namespaces owned by different user namespaces.
2023 */
2024int netdev_change_owner(struct net_device *ndev, const struct net *net_old,
2025 const struct net *net_new)
2026{
Xin Longf7a1e762021-10-25 02:31:48 -04002027 kuid_t old_uid = GLOBAL_ROOT_UID, new_uid = GLOBAL_ROOT_UID;
2028 kgid_t old_gid = GLOBAL_ROOT_GID, new_gid = GLOBAL_ROOT_GID;
Christian Braunere6dee9f2020-02-27 04:37:17 +01002029 struct device *dev = &ndev->dev;
Christian Braunere6dee9f2020-02-27 04:37:17 +01002030 int error;
2031
2032 net_ns_get_ownership(net_old, &old_uid, &old_gid);
2033 net_ns_get_ownership(net_new, &new_uid, &new_gid);
2034
2035 /* The network namespace was changed but the owning user namespace is
2036 * identical so there's no need to change the owner of sysfs entries.
2037 */
2038 if (uid_eq(old_uid, new_uid) && gid_eq(old_gid, new_gid))
2039 return 0;
2040
2041 error = device_change_owner(dev, new_uid, new_gid);
2042 if (error)
2043 return error;
2044
Christian Braunerd7554072020-02-27 04:37:18 +01002045 error = queue_change_owner(ndev, new_uid, new_gid);
2046 if (error)
2047 return error;
2048
Christian Braunere6dee9f2020-02-27 04:37:17 +01002049 return 0;
2050}
2051
stephen hemmingerb793dc52017-08-18 13:46:20 -07002052int netdev_class_create_file_ns(const struct class_attribute *class_attr,
Tejun Heo58292cbe2013-09-11 22:29:04 -04002053 const void *ns)
Jay Vosburghb8a97872008-06-13 18:12:04 -07002054{
Tejun Heo58292cbe2013-09-11 22:29:04 -04002055 return class_create_file_ns(&net_class, class_attr, ns);
Jay Vosburghb8a97872008-06-13 18:12:04 -07002056}
Tejun Heo58292cbe2013-09-11 22:29:04 -04002057EXPORT_SYMBOL(netdev_class_create_file_ns);
Jay Vosburghb8a97872008-06-13 18:12:04 -07002058
stephen hemmingerb793dc52017-08-18 13:46:20 -07002059void netdev_class_remove_file_ns(const struct class_attribute *class_attr,
Tejun Heo58292cbe2013-09-11 22:29:04 -04002060 const void *ns)
Jay Vosburghb8a97872008-06-13 18:12:04 -07002061{
Tejun Heo58292cbe2013-09-11 22:29:04 -04002062 class_remove_file_ns(&net_class, class_attr, ns);
Jay Vosburghb8a97872008-06-13 18:12:04 -07002063}
Tejun Heo58292cbe2013-09-11 22:29:04 -04002064EXPORT_SYMBOL(netdev_class_remove_file_ns);
Jay Vosburghb8a97872008-06-13 18:12:04 -07002065
Daniel Borkmanna48d4bb2014-01-06 01:20:11 +01002066int __init netdev_kobject_init(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002067{
Eric W. Biederman608b4b92010-05-04 17:36:45 -07002068 kobj_ns_type_register(&net_ns_type_operations);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002069 return class_register(&net_class);
2070}