// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net-sysfs.c - network device class and attributes
 *
 * Copyright (c) 2003 Stephen Hemminger <shemminger@osdl.org>
 */

#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/nsproxy.h>
#include <net/sock.h>
#include <net/net_namespace.h>
#include <linux/rtnetlink.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <linux/jiffies.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/cpu.h>

#include "net-sysfs.h"

#ifdef CONFIG_SYSFS
static const char fmt_hex[] = "%#x\n";
static const char fmt_dec[] = "%d\n";
static const char fmt_ulong[] = "%lu\n";
static const char fmt_u64[] = "%llu\n";

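/* A device is "alive" here as long as it has not started unregistering:
 * reg_state is either NETREG_UNINITIALIZED or NETREG_REGISTERED.
 */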
static inline int dev_isalive(const struct net_device *dev)
{
	return dev->reg_state <= NETREG_REGISTERED;
}

/* use same locking rules as GIF* ioctl's */
static ssize_t netdev_show(const struct device *dev,
			   struct device_attribute *attr, char *buf,
			   ssize_t (*format)(const struct net_device *, char *))
{
	struct net_device *ndev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	read_lock(&dev_base_lock);
	if (dev_isalive(ndev))
		ret = (*format)(ndev, buf);
	read_unlock(&dev_base_lock);

	return ret;
}

/* generate a show function for simple field */
#define NETDEVICE_SHOW(field, format_string)				\
static ssize_t format_##field(const struct net_device *dev, char *buf)	\
{									\
	return sprintf(buf, format_string, dev->field);			\
}									\
static ssize_t field##_show(struct device *dev,				\
			    struct device_attribute *attr, char *buf)	\
{									\
	return netdev_show(dev, attr, buf, format_##field);		\
}									\

#define NETDEVICE_SHOW_RO(field, format_string)				\
NETDEVICE_SHOW(field, format_string);					\
static DEVICE_ATTR_RO(field)

#define NETDEVICE_SHOW_RW(field, format_string)				\
NETDEVICE_SHOW(field, format_string);					\
static DEVICE_ATTR_RW(field)

/* use same locking and permission rules as SIF* ioctl's */
static ssize_t netdev_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t len,
			    int (*set)(struct net_device *, unsigned long))
{
	struct net_device *netdev = to_net_dev(dev);
	struct net *net = dev_net(netdev);
	unsigned long new;
	int ret = -EINVAL;

	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	ret = kstrtoul(buf, 0, &new);
	if (ret)
		goto err;

	if (!rtnl_trylock())
		return restart_syscall();

	if (dev_isalive(netdev)) {
		ret = (*set)(netdev, new);
		if (ret == 0)
			ret = len;
	}
	rtnl_unlock();
 err:
	return ret;
}
104
Greg Kroah-Hartman6be8aee2013-07-24 15:05:33 -0700105NETDEVICE_SHOW_RO(dev_id, fmt_hex);
Amir Vadai3f859442014-02-25 18:17:50 +0200106NETDEVICE_SHOW_RO(dev_port, fmt_dec);
Greg Kroah-Hartman6be8aee2013-07-24 15:05:33 -0700107NETDEVICE_SHOW_RO(addr_assign_type, fmt_dec);
108NETDEVICE_SHOW_RO(addr_len, fmt_dec);
Greg Kroah-Hartman6be8aee2013-07-24 15:05:33 -0700109NETDEVICE_SHOW_RO(ifindex, fmt_dec);
110NETDEVICE_SHOW_RO(type, fmt_dec);
111NETDEVICE_SHOW_RO(link_mode, fmt_dec);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700112
Nicolas Dichtela54acb32015-04-02 17:07:00 +0200113static ssize_t iflink_show(struct device *dev, struct device_attribute *attr,
114 char *buf)
115{
116 struct net_device *ndev = to_net_dev(dev);
117
118 return sprintf(buf, fmt_dec, dev_get_iflink(ndev));
119}
120static DEVICE_ATTR_RO(iflink);
121
WANG Cong6b53daf2014-07-23 16:09:10 -0700122static ssize_t format_name_assign_type(const struct net_device *dev, char *buf)
Tom Gundersen685343f2014-07-14 16:37:22 +0200123{
WANG Cong6b53daf2014-07-23 16:09:10 -0700124 return sprintf(buf, fmt_dec, dev->name_assign_type);
Tom Gundersen685343f2014-07-14 16:37:22 +0200125}
126
127static ssize_t name_assign_type_show(struct device *dev,
128 struct device_attribute *attr,
129 char *buf)
130{
WANG Cong6b53daf2014-07-23 16:09:10 -0700131 struct net_device *ndev = to_net_dev(dev);
Tom Gundersen685343f2014-07-14 16:37:22 +0200132 ssize_t ret = -EINVAL;
133
WANG Cong6b53daf2014-07-23 16:09:10 -0700134 if (ndev->name_assign_type != NET_NAME_UNKNOWN)
Tom Gundersen685343f2014-07-14 16:37:22 +0200135 ret = netdev_show(dev, attr, buf, format_name_assign_type);
136
137 return ret;
138}
139static DEVICE_ATTR_RO(name_assign_type);
140
Linus Torvalds1da177e2005-04-16 15:20:36 -0700141/* use same locking rules as GIFHWADDR ioctl's */
Greg Kroah-Hartman6be8aee2013-07-24 15:05:33 -0700142static ssize_t address_show(struct device *dev, struct device_attribute *attr,
Greg Kroah-Hartman43cb76d2002-04-09 12:14:34 -0700143 char *buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700144{
WANG Cong6b53daf2014-07-23 16:09:10 -0700145 struct net_device *ndev = to_net_dev(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700146 ssize_t ret = -EINVAL;
147
148 read_lock(&dev_base_lock);
WANG Cong6b53daf2014-07-23 16:09:10 -0700149 if (dev_isalive(ndev))
150 ret = sysfs_format_mac(buf, ndev->dev_addr, ndev->addr_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700151 read_unlock(&dev_base_lock);
152 return ret;
153}
Greg Kroah-Hartman6be8aee2013-07-24 15:05:33 -0700154static DEVICE_ATTR_RO(address);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700155
Greg Kroah-Hartman6be8aee2013-07-24 15:05:33 -0700156static ssize_t broadcast_show(struct device *dev,
157 struct device_attribute *attr, char *buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700158{
WANG Cong6b53daf2014-07-23 16:09:10 -0700159 struct net_device *ndev = to_net_dev(dev);
stephen hemminger6648c652017-08-18 13:46:28 -0700160
WANG Cong6b53daf2014-07-23 16:09:10 -0700161 if (dev_isalive(ndev))
162 return sysfs_format_mac(buf, ndev->broadcast, ndev->addr_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700163 return -EINVAL;
164}
Greg Kroah-Hartman6be8aee2013-07-24 15:05:33 -0700165static DEVICE_ATTR_RO(broadcast);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700166
WANG Cong6b53daf2014-07-23 16:09:10 -0700167static int change_carrier(struct net_device *dev, unsigned long new_carrier)
Jiri Pirkofdae0fd2012-12-27 23:49:38 +0000168{
WANG Cong6b53daf2014-07-23 16:09:10 -0700169 if (!netif_running(dev))
Jiri Pirkofdae0fd2012-12-27 23:49:38 +0000170 return -EINVAL;
stephen hemminger6648c652017-08-18 13:46:28 -0700171 return dev_change_carrier(dev, (bool)new_carrier);
Jiri Pirkofdae0fd2012-12-27 23:49:38 +0000172}
173
Greg Kroah-Hartman6be8aee2013-07-24 15:05:33 -0700174static ssize_t carrier_store(struct device *dev, struct device_attribute *attr,
175 const char *buf, size_t len)
Jiri Pirkofdae0fd2012-12-27 23:49:38 +0000176{
177 return netdev_store(dev, attr, buf, len, change_carrier);
178}
179
Greg Kroah-Hartman6be8aee2013-07-24 15:05:33 -0700180static ssize_t carrier_show(struct device *dev,
Greg Kroah-Hartman43cb76d2002-04-09 12:14:34 -0700181 struct device_attribute *attr, char *buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700182{
183 struct net_device *netdev = to_net_dev(dev);
stephen hemminger6648c652017-08-18 13:46:28 -0700184
185 if (netif_running(netdev))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700186 return sprintf(buf, fmt_dec, !!netif_carrier_ok(netdev));
stephen hemminger6648c652017-08-18 13:46:28 -0700187
Linus Torvalds1da177e2005-04-16 15:20:36 -0700188 return -EINVAL;
189}
Greg Kroah-Hartman6be8aee2013-07-24 15:05:33 -0700190static DEVICE_ATTR_RW(carrier);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700191
Greg Kroah-Hartman6be8aee2013-07-24 15:05:33 -0700192static ssize_t speed_show(struct device *dev,
Andy Gospodarekd519e172009-10-02 09:26:12 +0000193 struct device_attribute *attr, char *buf)
194{
195 struct net_device *netdev = to_net_dev(dev);
196 int ret = -EINVAL;
197
198 if (!rtnl_trylock())
199 return restart_syscall();
200
David Decotigny8ae6daca2011-04-27 18:32:38 +0000201 if (netif_running(netdev)) {
David Decotigny7cad1ba2016-02-24 10:58:10 -0800202 struct ethtool_link_ksettings cmd;
203
204 if (!__ethtool_get_link_ksettings(netdev, &cmd))
205 ret = sprintf(buf, fmt_dec, cmd.base.speed);
Andy Gospodarekd519e172009-10-02 09:26:12 +0000206 }
207 rtnl_unlock();
208 return ret;
209}
Greg Kroah-Hartman6be8aee2013-07-24 15:05:33 -0700210static DEVICE_ATTR_RO(speed);
Andy Gospodarekd519e172009-10-02 09:26:12 +0000211
Greg Kroah-Hartman6be8aee2013-07-24 15:05:33 -0700212static ssize_t duplex_show(struct device *dev,
Andy Gospodarekd519e172009-10-02 09:26:12 +0000213 struct device_attribute *attr, char *buf)
214{
215 struct net_device *netdev = to_net_dev(dev);
216 int ret = -EINVAL;
217
218 if (!rtnl_trylock())
219 return restart_syscall();
220
David Decotigny8ae6daca2011-04-27 18:32:38 +0000221 if (netif_running(netdev)) {
David Decotigny7cad1ba2016-02-24 10:58:10 -0800222 struct ethtool_link_ksettings cmd;
223
224 if (!__ethtool_get_link_ksettings(netdev, &cmd)) {
Nikolay Aleksandrovc6c13962012-09-05 04:11:28 +0000225 const char *duplex;
David Decotigny7cad1ba2016-02-24 10:58:10 -0800226
227 switch (cmd.base.duplex) {
Nikolay Aleksandrovc6c13962012-09-05 04:11:28 +0000228 case DUPLEX_HALF:
229 duplex = "half";
230 break;
231 case DUPLEX_FULL:
232 duplex = "full";
233 break;
234 default:
235 duplex = "unknown";
236 break;
237 }
238 ret = sprintf(buf, "%s\n", duplex);
239 }
Andy Gospodarekd519e172009-10-02 09:26:12 +0000240 }
241 rtnl_unlock();
242 return ret;
243}
Greg Kroah-Hartman6be8aee2013-07-24 15:05:33 -0700244static DEVICE_ATTR_RO(duplex);
Andy Gospodarekd519e172009-10-02 09:26:12 +0000245
Greg Kroah-Hartman6be8aee2013-07-24 15:05:33 -0700246static ssize_t dormant_show(struct device *dev,
Greg Kroah-Hartman43cb76d2002-04-09 12:14:34 -0700247 struct device_attribute *attr, char *buf)
Stefan Rompfb00055a2006-03-20 17:09:11 -0800248{
249 struct net_device *netdev = to_net_dev(dev);
250
251 if (netif_running(netdev))
252 return sprintf(buf, fmt_dec, !!netif_dormant(netdev));
253
254 return -EINVAL;
255}
Greg Kroah-Hartman6be8aee2013-07-24 15:05:33 -0700256static DEVICE_ATTR_RO(dormant);
Stefan Rompfb00055a2006-03-20 17:09:11 -0800257
Jan Engelhardt36cbd3d2009-08-05 10:42:58 -0700258static const char *const operstates[] = {
Stefan Rompfb00055a2006-03-20 17:09:11 -0800259 "unknown",
260 "notpresent", /* currently unused */
261 "down",
262 "lowerlayerdown",
263 "testing", /* currently unused */
264 "dormant",
265 "up"
266};
267
Greg Kroah-Hartman6be8aee2013-07-24 15:05:33 -0700268static ssize_t operstate_show(struct device *dev,
Greg Kroah-Hartman43cb76d2002-04-09 12:14:34 -0700269 struct device_attribute *attr, char *buf)
Stefan Rompfb00055a2006-03-20 17:09:11 -0800270{
271 const struct net_device *netdev = to_net_dev(dev);
272 unsigned char operstate;
273
274 read_lock(&dev_base_lock);
275 operstate = netdev->operstate;
276 if (!netif_running(netdev))
277 operstate = IF_OPER_DOWN;
278 read_unlock(&dev_base_lock);
279
Adrian Bunke3a5cd92006-04-05 22:19:47 -0700280 if (operstate >= ARRAY_SIZE(operstates))
Stefan Rompfb00055a2006-03-20 17:09:11 -0800281 return -EINVAL; /* should not happen */
282
283 return sprintf(buf, "%s\n", operstates[operstate]);
284}
Greg Kroah-Hartman6be8aee2013-07-24 15:05:33 -0700285static DEVICE_ATTR_RO(operstate);
Stefan Rompfb00055a2006-03-20 17:09:11 -0800286
david decotigny2d3b4792014-03-29 09:48:35 -0700287static ssize_t carrier_changes_show(struct device *dev,
288 struct device_attribute *attr,
289 char *buf)
290{
291 struct net_device *netdev = to_net_dev(dev);
stephen hemminger6648c652017-08-18 13:46:28 -0700292
david decotigny2d3b4792014-03-29 09:48:35 -0700293 return sprintf(buf, fmt_dec,
David Decotignyb2d3bcf2018-01-18 09:59:13 -0800294 atomic_read(&netdev->carrier_up_count) +
295 atomic_read(&netdev->carrier_down_count));
david decotigny2d3b4792014-03-29 09:48:35 -0700296}
297static DEVICE_ATTR_RO(carrier_changes);
298
David Decotignyb2d3bcf2018-01-18 09:59:13 -0800299static ssize_t carrier_up_count_show(struct device *dev,
300 struct device_attribute *attr,
301 char *buf)
302{
303 struct net_device *netdev = to_net_dev(dev);
304
305 return sprintf(buf, fmt_dec, atomic_read(&netdev->carrier_up_count));
306}
307static DEVICE_ATTR_RO(carrier_up_count);
308
309static ssize_t carrier_down_count_show(struct device *dev,
310 struct device_attribute *attr,
311 char *buf)
312{
313 struct net_device *netdev = to_net_dev(dev);
314
315 return sprintf(buf, fmt_dec, atomic_read(&netdev->carrier_down_count));
316}
317static DEVICE_ATTR_RO(carrier_down_count);
318
Linus Torvalds1da177e2005-04-16 15:20:36 -0700319/* read-write attributes */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700320
WANG Cong6b53daf2014-07-23 16:09:10 -0700321static int change_mtu(struct net_device *dev, unsigned long new_mtu)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700322{
stephen hemminger6648c652017-08-18 13:46:28 -0700323 return dev_set_mtu(dev, (int)new_mtu);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700324}
325
Greg Kroah-Hartman6be8aee2013-07-24 15:05:33 -0700326static ssize_t mtu_store(struct device *dev, struct device_attribute *attr,
Greg Kroah-Hartman43cb76d2002-04-09 12:14:34 -0700327 const char *buf, size_t len)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700328{
Greg Kroah-Hartman43cb76d2002-04-09 12:14:34 -0700329 return netdev_store(dev, attr, buf, len, change_mtu);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700330}
Greg Kroah-Hartman6be8aee2013-07-24 15:05:33 -0700331NETDEVICE_SHOW_RW(mtu, fmt_dec);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700332
WANG Cong6b53daf2014-07-23 16:09:10 -0700333static int change_flags(struct net_device *dev, unsigned long new_flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700334{
Petr Machata567c5e12018-12-06 17:05:42 +0000335 return dev_change_flags(dev, (unsigned int)new_flags, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700336}
337
Greg Kroah-Hartman6be8aee2013-07-24 15:05:33 -0700338static ssize_t flags_store(struct device *dev, struct device_attribute *attr,
Greg Kroah-Hartman43cb76d2002-04-09 12:14:34 -0700339 const char *buf, size_t len)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700340{
Greg Kroah-Hartman43cb76d2002-04-09 12:14:34 -0700341 return netdev_store(dev, attr, buf, len, change_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700342}
Greg Kroah-Hartman6be8aee2013-07-24 15:05:33 -0700343NETDEVICE_SHOW_RW(flags, fmt_hex);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700344
Greg Kroah-Hartman6be8aee2013-07-24 15:05:33 -0700345static ssize_t tx_queue_len_store(struct device *dev,
Greg Kroah-Hartman43cb76d2002-04-09 12:14:34 -0700346 struct device_attribute *attr,
347 const char *buf, size_t len)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700348{
Eric W. Biederman5e1fccc2012-11-16 03:03:04 +0000349 if (!capable(CAP_NET_ADMIN))
350 return -EPERM;
351
Cong Wang6a643dd2018-01-25 18:26:22 -0800352 return netdev_store(dev, attr, buf, len, dev_change_tx_queue_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700353}
Alexey Dobriyan0cd29502017-05-17 13:30:44 +0300354NETDEVICE_SHOW_RW(tx_queue_len, fmt_dec);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700355
Eric Dumazet3b47d302014-11-06 21:09:44 -0800356static int change_gro_flush_timeout(struct net_device *dev, unsigned long val)
357{
358 dev->gro_flush_timeout = val;
359 return 0;
360}
361
362static ssize_t gro_flush_timeout_store(struct device *dev,
stephen hemminger6648c652017-08-18 13:46:28 -0700363 struct device_attribute *attr,
364 const char *buf, size_t len)
Eric Dumazet3b47d302014-11-06 21:09:44 -0800365{
366 if (!capable(CAP_NET_ADMIN))
367 return -EPERM;
368
369 return netdev_store(dev, attr, buf, len, change_gro_flush_timeout);
370}
371NETDEVICE_SHOW_RW(gro_flush_timeout, fmt_ulong);
372
Greg Kroah-Hartman6be8aee2013-07-24 15:05:33 -0700373static ssize_t ifalias_store(struct device *dev, struct device_attribute *attr,
Stephen Hemminger0b815a12008-09-22 21:28:11 -0700374 const char *buf, size_t len)
375{
376 struct net_device *netdev = to_net_dev(dev);
Eric W. Biederman5e1fccc2012-11-16 03:03:04 +0000377 struct net *net = dev_net(netdev);
Stephen Hemminger0b815a12008-09-22 21:28:11 -0700378 size_t count = len;
Roopa Prabhuc92eb772017-11-13 23:21:36 -0800379 ssize_t ret = 0;
Stephen Hemminger0b815a12008-09-22 21:28:11 -0700380
Eric W. Biederman5e1fccc2012-11-16 03:03:04 +0000381 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
Stephen Hemminger0b815a12008-09-22 21:28:11 -0700382 return -EPERM;
383
384 /* ignore trailing newline */
385 if (len > 0 && buf[len - 1] == '\n')
386 --count;
387
Roopa Prabhuc92eb772017-11-13 23:21:36 -0800388 if (!rtnl_trylock())
389 return restart_syscall();
Stephen Hemminger0b815a12008-09-22 21:28:11 -0700390
Roopa Prabhuc92eb772017-11-13 23:21:36 -0800391 if (dev_isalive(netdev)) {
392 ret = dev_set_alias(netdev, buf, count);
393 if (ret < 0)
394 goto err;
395 ret = len;
396 netdev_state_change(netdev);
397 }
398err:
399 rtnl_unlock();
400
401 return ret;
Stephen Hemminger0b815a12008-09-22 21:28:11 -0700402}
403
Greg Kroah-Hartman6be8aee2013-07-24 15:05:33 -0700404static ssize_t ifalias_show(struct device *dev,
Stephen Hemminger0b815a12008-09-22 21:28:11 -0700405 struct device_attribute *attr, char *buf)
406{
407 const struct net_device *netdev = to_net_dev(dev);
Florian Westphal6c557002017-10-02 23:50:05 +0200408 char tmp[IFALIASZ];
Stephen Hemminger0b815a12008-09-22 21:28:11 -0700409 ssize_t ret = 0;
410
Florian Westphal6c557002017-10-02 23:50:05 +0200411 ret = dev_get_alias(netdev, tmp, sizeof(tmp));
412 if (ret > 0)
413 ret = sprintf(buf, "%s\n", tmp);
Stephen Hemminger0b815a12008-09-22 21:28:11 -0700414 return ret;
415}
Greg Kroah-Hartman6be8aee2013-07-24 15:05:33 -0700416static DEVICE_ATTR_RW(ifalias);
Vlad Dogarua512b922011-01-24 03:37:29 +0000417
WANG Cong6b53daf2014-07-23 16:09:10 -0700418static int change_group(struct net_device *dev, unsigned long new_group)
Vlad Dogarua512b922011-01-24 03:37:29 +0000419{
stephen hemminger6648c652017-08-18 13:46:28 -0700420 dev_set_group(dev, (int)new_group);
Vlad Dogarua512b922011-01-24 03:37:29 +0000421 return 0;
422}
423
Greg Kroah-Hartman6be8aee2013-07-24 15:05:33 -0700424static ssize_t group_store(struct device *dev, struct device_attribute *attr,
425 const char *buf, size_t len)
Vlad Dogarua512b922011-01-24 03:37:29 +0000426{
427 return netdev_store(dev, attr, buf, len, change_group);
428}
Greg Kroah-Hartman6be8aee2013-07-24 15:05:33 -0700429NETDEVICE_SHOW(group, fmt_dec);
Joe Perchesd6444062018-03-23 15:54:38 -0700430static DEVICE_ATTR(netdev_group, 0644, group_show, group_store);
Vlad Dogarua512b922011-01-24 03:37:29 +0000431
Anuradha Karuppiahd746d702015-07-14 13:43:19 -0700432static int change_proto_down(struct net_device *dev, unsigned long proto_down)
433{
stephen hemminger6648c652017-08-18 13:46:28 -0700434 return dev_change_proto_down(dev, (bool)proto_down);
Anuradha Karuppiahd746d702015-07-14 13:43:19 -0700435}
436
437static ssize_t proto_down_store(struct device *dev,
438 struct device_attribute *attr,
439 const char *buf, size_t len)
440{
441 return netdev_store(dev, attr, buf, len, change_proto_down);
442}
443NETDEVICE_SHOW_RW(proto_down, fmt_dec);
444
Linus Torvaldscc998ff2013-09-05 14:54:29 -0700445static ssize_t phys_port_id_show(struct device *dev,
Jiri Pirkoff80e512013-07-29 18:16:51 +0200446 struct device_attribute *attr, char *buf)
447{
448 struct net_device *netdev = to_net_dev(dev);
449 ssize_t ret = -EINVAL;
450
451 if (!rtnl_trylock())
452 return restart_syscall();
453
454 if (dev_isalive(netdev)) {
Jiri Pirko02637fc2014-11-28 14:34:16 +0100455 struct netdev_phys_item_id ppid;
Jiri Pirkoff80e512013-07-29 18:16:51 +0200456
457 ret = dev_get_phys_port_id(netdev, &ppid);
458 if (!ret)
459 ret = sprintf(buf, "%*phN\n", ppid.id_len, ppid.id);
460 }
461 rtnl_unlock();
462
463 return ret;
464}
Linus Torvaldscc998ff2013-09-05 14:54:29 -0700465static DEVICE_ATTR_RO(phys_port_id);
Jiri Pirkoff80e512013-07-29 18:16:51 +0200466
David Aherndb24a902015-03-17 20:23:15 -0600467static ssize_t phys_port_name_show(struct device *dev,
468 struct device_attribute *attr, char *buf)
469{
470 struct net_device *netdev = to_net_dev(dev);
471 ssize_t ret = -EINVAL;
472
473 if (!rtnl_trylock())
474 return restart_syscall();
475
476 if (dev_isalive(netdev)) {
477 char name[IFNAMSIZ];
478
479 ret = dev_get_phys_port_name(netdev, name, sizeof(name));
480 if (!ret)
481 ret = sprintf(buf, "%s\n", name);
482 }
483 rtnl_unlock();
484
485 return ret;
486}
487static DEVICE_ATTR_RO(phys_port_name);
488
Jiri Pirkoaecbe012014-11-28 14:34:19 +0100489static ssize_t phys_switch_id_show(struct device *dev,
490 struct device_attribute *attr, char *buf)
491{
492 struct net_device *netdev = to_net_dev(dev);
493 ssize_t ret = -EINVAL;
494
495 if (!rtnl_trylock())
496 return restart_syscall();
497
498 if (dev_isalive(netdev)) {
Florian Fainellibccb3022019-02-06 09:45:46 -0800499 struct netdev_phys_item_id ppid = { };
Jiri Pirkoaecbe012014-11-28 14:34:19 +0100500
Florian Fainellibccb3022019-02-06 09:45:46 -0800501 ret = dev_get_port_parent_id(netdev, &ppid, false);
Jiri Pirkoaecbe012014-11-28 14:34:19 +0100502 if (!ret)
Florian Fainellibccb3022019-02-06 09:45:46 -0800503 ret = sprintf(buf, "%*phN\n", ppid.id_len, ppid.id);
Jiri Pirkoaecbe012014-11-28 14:34:19 +0100504 }
505 rtnl_unlock();
506
507 return ret;
508}
509static DEVICE_ATTR_RO(phys_switch_id);
510
stephen hemmingerec6cc592017-08-18 13:46:23 -0700511static struct attribute *net_class_attrs[] __ro_after_init = {
Greg Kroah-Hartman6be8aee2013-07-24 15:05:33 -0700512 &dev_attr_netdev_group.attr,
513 &dev_attr_type.attr,
514 &dev_attr_dev_id.attr,
Amir Vadai3f859442014-02-25 18:17:50 +0200515 &dev_attr_dev_port.attr,
Greg Kroah-Hartman6be8aee2013-07-24 15:05:33 -0700516 &dev_attr_iflink.attr,
517 &dev_attr_ifindex.attr,
Tom Gundersen685343f2014-07-14 16:37:22 +0200518 &dev_attr_name_assign_type.attr,
Greg Kroah-Hartman6be8aee2013-07-24 15:05:33 -0700519 &dev_attr_addr_assign_type.attr,
520 &dev_attr_addr_len.attr,
521 &dev_attr_link_mode.attr,
522 &dev_attr_address.attr,
523 &dev_attr_broadcast.attr,
524 &dev_attr_speed.attr,
525 &dev_attr_duplex.attr,
526 &dev_attr_dormant.attr,
527 &dev_attr_operstate.attr,
david decotigny2d3b4792014-03-29 09:48:35 -0700528 &dev_attr_carrier_changes.attr,
Greg Kroah-Hartman6be8aee2013-07-24 15:05:33 -0700529 &dev_attr_ifalias.attr,
530 &dev_attr_carrier.attr,
531 &dev_attr_mtu.attr,
532 &dev_attr_flags.attr,
533 &dev_attr_tx_queue_len.attr,
Eric Dumazet3b47d302014-11-06 21:09:44 -0800534 &dev_attr_gro_flush_timeout.attr,
Linus Torvaldscc998ff2013-09-05 14:54:29 -0700535 &dev_attr_phys_port_id.attr,
David Aherndb24a902015-03-17 20:23:15 -0600536 &dev_attr_phys_port_name.attr,
Jiri Pirkoaecbe012014-11-28 14:34:19 +0100537 &dev_attr_phys_switch_id.attr,
Anuradha Karuppiahd746d702015-07-14 13:43:19 -0700538 &dev_attr_proto_down.attr,
David Decotignyb2d3bcf2018-01-18 09:59:13 -0800539 &dev_attr_carrier_up_count.attr,
540 &dev_attr_carrier_down_count.attr,
Greg Kroah-Hartman6be8aee2013-07-24 15:05:33 -0700541 NULL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700542};
Greg Kroah-Hartman6be8aee2013-07-24 15:05:33 -0700543ATTRIBUTE_GROUPS(net_class);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700544
/* Show a given attribute in the statistics group */
static ssize_t netstat_show(const struct device *d,
			    struct device_attribute *attr, char *buf,
			    unsigned long offset)
{
	struct net_device *dev = to_net_dev(d);
	ssize_t ret = -EINVAL;

	WARN_ON(offset > sizeof(struct rtnl_link_stats64) ||
		offset % sizeof(u64) != 0);

	read_lock(&dev_base_lock);
	if (dev_isalive(dev)) {
		struct rtnl_link_stats64 temp;
		const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);

		ret = sprintf(buf, fmt_u64, *(u64 *)(((u8 *)stats) + offset));
	}
	read_unlock(&dev_base_lock);
	return ret;
}

/* generate a read-only statistics attribute */
#define NETSTAT_ENTRY(name)						\
static ssize_t name##_show(struct device *d,				\
			   struct device_attribute *attr, char *buf)	\
{									\
	return netstat_show(d, attr, buf,				\
			    offsetof(struct rtnl_link_stats64, name));	\
}									\
static DEVICE_ATTR_RO(name)

NETSTAT_ENTRY(rx_packets);
NETSTAT_ENTRY(tx_packets);
NETSTAT_ENTRY(rx_bytes);
NETSTAT_ENTRY(tx_bytes);
NETSTAT_ENTRY(rx_errors);
NETSTAT_ENTRY(tx_errors);
NETSTAT_ENTRY(rx_dropped);
NETSTAT_ENTRY(tx_dropped);
NETSTAT_ENTRY(multicast);
NETSTAT_ENTRY(collisions);
NETSTAT_ENTRY(rx_length_errors);
NETSTAT_ENTRY(rx_over_errors);
NETSTAT_ENTRY(rx_crc_errors);
NETSTAT_ENTRY(rx_frame_errors);
NETSTAT_ENTRY(rx_fifo_errors);
NETSTAT_ENTRY(rx_missed_errors);
NETSTAT_ENTRY(tx_aborted_errors);
NETSTAT_ENTRY(tx_carrier_errors);
NETSTAT_ENTRY(tx_fifo_errors);
NETSTAT_ENTRY(tx_heartbeat_errors);
NETSTAT_ENTRY(tx_window_errors);
NETSTAT_ENTRY(rx_compressed);
NETSTAT_ENTRY(tx_compressed);
NETSTAT_ENTRY(rx_nohandler);
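/* Each NETSTAT_ENTRY above becomes a read-only file in the "statistics"
 * group, e.g. /sys/class/net/<iface>/statistics/rx_packets.
 */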

static struct attribute *netstat_attrs[] __ro_after_init = {
	&dev_attr_rx_packets.attr,
	&dev_attr_tx_packets.attr,
	&dev_attr_rx_bytes.attr,
	&dev_attr_tx_bytes.attr,
	&dev_attr_rx_errors.attr,
	&dev_attr_tx_errors.attr,
	&dev_attr_rx_dropped.attr,
	&dev_attr_tx_dropped.attr,
	&dev_attr_multicast.attr,
	&dev_attr_collisions.attr,
	&dev_attr_rx_length_errors.attr,
	&dev_attr_rx_over_errors.attr,
	&dev_attr_rx_crc_errors.attr,
	&dev_attr_rx_frame_errors.attr,
	&dev_attr_rx_fifo_errors.attr,
	&dev_attr_rx_missed_errors.attr,
	&dev_attr_tx_aborted_errors.attr,
	&dev_attr_tx_carrier_errors.attr,
	&dev_attr_tx_fifo_errors.attr,
	&dev_attr_tx_heartbeat_errors.attr,
	&dev_attr_tx_window_errors.attr,
	&dev_attr_rx_compressed.attr,
	&dev_attr_tx_compressed.attr,
	&dev_attr_rx_nohandler.attr,
	NULL
};

static const struct attribute_group netstat_group = {
	.name = "statistics",
	.attrs = netstat_attrs,
};

#if IS_ENABLED(CONFIG_WIRELESS_EXT) || IS_ENABLED(CONFIG_CFG80211)
static struct attribute *wireless_attrs[] = {
	NULL
};

static const struct attribute_group wireless_group = {
	.name = "wireless",
	.attrs = wireless_attrs,
};
#endif

#else /* CONFIG_SYSFS */
#define net_class_groups	NULL
#endif /* CONFIG_SYSFS */

#ifdef CONFIG_SYSFS
#define to_rx_queue_attr(_attr) \
	container_of(_attr, struct rx_queue_attribute, attr)

#define to_rx_queue(obj) container_of(obj, struct netdev_rx_queue, kobj)

static ssize_t rx_queue_attr_show(struct kobject *kobj, struct attribute *attr,
				  char *buf)
{
	const struct rx_queue_attribute *attribute = to_rx_queue_attr(attr);
	struct netdev_rx_queue *queue = to_rx_queue(kobj);

	if (!attribute->show)
		return -EIO;

	return attribute->show(queue, buf);
}

static ssize_t rx_queue_attr_store(struct kobject *kobj, struct attribute *attr,
				   const char *buf, size_t count)
{
	const struct rx_queue_attribute *attribute = to_rx_queue_attr(attr);
	struct netdev_rx_queue *queue = to_rx_queue(kobj);

	if (!attribute->store)
		return -EIO;

	return attribute->store(queue, buf, count);
}

static const struct sysfs_ops rx_queue_sysfs_ops = {
	.show = rx_queue_attr_show,
	.store = rx_queue_attr_store,
};

#ifdef CONFIG_RPS
static ssize_t show_rps_map(struct netdev_rx_queue *queue, char *buf)
{
	struct rps_map *map;
	cpumask_var_t mask;
	int i, len;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	rcu_read_lock();
	map = rcu_dereference(queue->rps_map);
	if (map)
		for (i = 0; i < map->len; i++)
			cpumask_set_cpu(map->cpus[i], mask);

	len = snprintf(buf, PAGE_SIZE, "%*pb\n", cpumask_pr_args(mask));
	rcu_read_unlock();
	free_cpumask_var(mask);

	return len < PAGE_SIZE ? len : -EINVAL;
}

static ssize_t store_rps_map(struct netdev_rx_queue *queue,
			     const char *buf, size_t len)
{
	struct rps_map *old_map, *map;
	cpumask_var_t mask;
	int err, cpu, i;
	static DEFINE_MUTEX(rps_map_mutex);

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
	if (err) {
		free_cpumask_var(mask);
		return err;
	}

	map = kzalloc(max_t(unsigned int,
			    RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES),
		      GFP_KERNEL);
	if (!map) {
		free_cpumask_var(mask);
		return -ENOMEM;
	}

	i = 0;
	for_each_cpu_and(cpu, mask, cpu_online_mask)
		map->cpus[i++] = cpu;

	if (i) {
		map->len = i;
	} else {
		kfree(map);
		map = NULL;
	}

	mutex_lock(&rps_map_mutex);
	old_map = rcu_dereference_protected(queue->rps_map,
					    mutex_is_locked(&rps_map_mutex));
	rcu_assign_pointer(queue->rps_map, map);

	if (map)
		static_branch_inc(&rps_needed);
	if (old_map)
		static_branch_dec(&rps_needed);

	mutex_unlock(&rps_map_mutex);

	if (old_map)
		kfree_rcu(old_map, rcu);

	free_cpumask_var(mask);
	return len;
}
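/* Writing a hex CPU mask here selects the CPUs eligible for RPS on this
 * RX queue, for example:
 *	echo f > /sys/class/net/eth0/queues/rx-0/rps_cpus
 * steers packets from rx-0 to CPUs 0-3; writing 0 disables RPS for the queue.
 */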

static ssize_t show_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
					   char *buf)
{
	struct rps_dev_flow_table *flow_table;
	unsigned long val = 0;

	rcu_read_lock();
	flow_table = rcu_dereference(queue->rps_flow_table);
	if (flow_table)
		val = (unsigned long)flow_table->mask + 1;
	rcu_read_unlock();

	return sprintf(buf, "%lu\n", val);
}

static void rps_dev_flow_table_release(struct rcu_head *rcu)
{
	struct rps_dev_flow_table *table = container_of(rcu,
	    struct rps_dev_flow_table, rcu);
	vfree(table);
}

static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
					    const char *buf, size_t len)
{
	unsigned long mask, count;
	struct rps_dev_flow_table *table, *old_table;
	static DEFINE_SPINLOCK(rps_dev_flow_lock);
	int rc;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	rc = kstrtoul(buf, 0, &count);
	if (rc < 0)
		return rc;

	if (count) {
		mask = count - 1;
		/* mask = roundup_pow_of_two(count) - 1;
		 * without overflows...
		 */
		while ((mask | (mask >> 1)) != mask)
			mask |= (mask >> 1);
		/* On 64 bit arches, must check mask fits in table->mask (u32),
		 * and on 32bit arches, must check
		 * RPS_DEV_FLOW_TABLE_SIZE(mask + 1) doesn't overflow.
		 */
#if BITS_PER_LONG > 32
		if (mask > (unsigned long)(u32)mask)
			return -EINVAL;
#else
		if (mask > (ULONG_MAX - RPS_DEV_FLOW_TABLE_SIZE(1))
				/ sizeof(struct rps_dev_flow)) {
			/* Enforce a limit to prevent overflow */
			return -EINVAL;
		}
#endif
		table = vmalloc(RPS_DEV_FLOW_TABLE_SIZE(mask + 1));
		if (!table)
			return -ENOMEM;

		table->mask = mask;
		for (count = 0; count <= mask; count++)
			table->flows[count].cpu = RPS_NO_CPU;
	} else {
		table = NULL;
	}

	spin_lock(&rps_dev_flow_lock);
	old_table = rcu_dereference_protected(queue->rps_flow_table,
					      lockdep_is_held(&rps_dev_flow_lock));
	rcu_assign_pointer(queue->rps_flow_table, table);
	spin_unlock(&rps_dev_flow_lock);

	if (old_table)
		call_rcu(&old_table->rcu, rps_dev_flow_table_release);

	return len;
}
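/* The value written to rps_flow_cnt is rounded up to a power of two and
 * becomes the number of entries in this queue's RFS flow table, e.g.
 *	echo 2048 > /sys/class/net/eth0/queues/rx-0/rps_flow_cnt
 * Writing 0 frees the table and disables accelerated flow steering for
 * the queue.
 */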

static struct rx_queue_attribute rps_cpus_attribute __ro_after_init
	= __ATTR(rps_cpus, 0644, show_rps_map, store_rps_map);

static struct rx_queue_attribute rps_dev_flow_table_cnt_attribute __ro_after_init
	= __ATTR(rps_flow_cnt, 0644,
		 show_rps_dev_flow_table_cnt, store_rps_dev_flow_table_cnt);
#endif /* CONFIG_RPS */

static struct attribute *rx_queue_default_attrs[] __ro_after_init = {
#ifdef CONFIG_RPS
	&rps_cpus_attribute.attr,
	&rps_dev_flow_table_cnt_attribute.attr,
#endif
	NULL
};
ATTRIBUTE_GROUPS(rx_queue_default);

static void rx_queue_release(struct kobject *kobj)
{
	struct netdev_rx_queue *queue = to_rx_queue(kobj);
#ifdef CONFIG_RPS
	struct rps_map *map;
	struct rps_dev_flow_table *flow_table;

	map = rcu_dereference_protected(queue->rps_map, 1);
	if (map) {
		RCU_INIT_POINTER(queue->rps_map, NULL);
		kfree_rcu(map, rcu);
	}

	flow_table = rcu_dereference_protected(queue->rps_flow_table, 1);
	if (flow_table) {
		RCU_INIT_POINTER(queue->rps_flow_table, NULL);
		call_rcu(&flow_table->rcu, rps_dev_flow_table_release);
	}
#endif

	memset(kobj, 0, sizeof(*kobj));
	dev_put(queue->dev);
}

static const void *rx_queue_namespace(struct kobject *kobj)
{
	struct netdev_rx_queue *queue = to_rx_queue(kobj);
	struct device *dev = &queue->dev->dev;
	const void *ns = NULL;

	if (dev->class && dev->class->ns_type)
		ns = dev->class->namespace(dev);

	return ns;
}

static void rx_queue_get_ownership(struct kobject *kobj,
				   kuid_t *uid, kgid_t *gid)
{
	const struct net *net = rx_queue_namespace(kobj);

	net_ns_get_ownership(net, uid, gid);
}

static struct kobj_type rx_queue_ktype __ro_after_init = {
	.sysfs_ops = &rx_queue_sysfs_ops,
	.release = rx_queue_release,
	.default_groups = rx_queue_default_groups,
	.namespace = rx_queue_namespace,
	.get_ownership = rx_queue_get_ownership,
};

static int rx_queue_add_kobject(struct net_device *dev, int index)
{
	struct netdev_rx_queue *queue = dev->_rx + index;
	struct kobject *kobj = &queue->kobj;
	int error = 0;

	kobj->kset = dev->queues_kset;
	error = kobject_init_and_add(kobj, &rx_queue_ktype, NULL,
				     "rx-%u", index);
	if (error)
		goto err;

	dev_hold(queue->dev);

	if (dev->sysfs_rx_queue_group) {
		error = sysfs_create_group(kobj, dev->sysfs_rx_queue_group);
		if (error)
			goto err;
	}

	kobject_uevent(kobj, KOBJ_ADD);

	return error;

err:
	kobject_put(kobj);
	return error;
}
#endif /* CONFIG_SYSFS */

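/* Called when the set of RX queues changes (e.g. from
 * netif_set_real_num_rx_queues()): adds "rx-<n>" kobjects for queues in
 * [old_num, new_num) and drops the kobjects of any queues beyond new_num.
 */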
int
net_rx_queue_update_kobjects(struct net_device *dev, int old_num, int new_num)
{
#ifdef CONFIG_SYSFS
	int i;
	int error = 0;

#ifndef CONFIG_RPS
	if (!dev->sysfs_rx_queue_group)
		return 0;
#endif
	for (i = old_num; i < new_num; i++) {
		error = rx_queue_add_kobject(dev, i);
		if (error) {
			new_num = old_num;
			break;
		}
	}

	while (--i >= new_num) {
		struct kobject *kobj = &dev->_rx[i].kobj;

		if (!refcount_read(&dev_net(dev)->count))
			kobj->uevent_suppress = 1;
		if (dev->sysfs_rx_queue_group)
			sysfs_remove_group(kobj, dev->sysfs_rx_queue_group);
		kobject_put(kobj);
	}

	return error;
#else
	return 0;
#endif
}

#ifdef CONFIG_SYSFS
/*
 * netdev_queue sysfs structures and functions.
 */
struct netdev_queue_attribute {
	struct attribute attr;
	ssize_t (*show)(struct netdev_queue *queue, char *buf);
	ssize_t (*store)(struct netdev_queue *queue,
			 const char *buf, size_t len);
};
#define to_netdev_queue_attr(_attr) \
	container_of(_attr, struct netdev_queue_attribute, attr)

#define to_netdev_queue(obj) container_of(obj, struct netdev_queue, kobj)

static ssize_t netdev_queue_attr_show(struct kobject *kobj,
				      struct attribute *attr, char *buf)
{
	const struct netdev_queue_attribute *attribute
		= to_netdev_queue_attr(attr);
	struct netdev_queue *queue = to_netdev_queue(kobj);

	if (!attribute->show)
		return -EIO;

	return attribute->show(queue, buf);
}

static ssize_t netdev_queue_attr_store(struct kobject *kobj,
				       struct attribute *attr,
				       const char *buf, size_t count)
{
	const struct netdev_queue_attribute *attribute
		= to_netdev_queue_attr(attr);
	struct netdev_queue *queue = to_netdev_queue(kobj);

	if (!attribute->store)
		return -EIO;

	return attribute->store(queue, buf, count);
}

static const struct sysfs_ops netdev_queue_sysfs_ops = {
	.show = netdev_queue_attr_show,
	.store = netdev_queue_attr_store,
};

static ssize_t tx_timeout_show(struct netdev_queue *queue, char *buf)
{
	unsigned long trans_timeout;

	spin_lock_irq(&queue->_xmit_lock);
	trans_timeout = queue->trans_timeout;
	spin_unlock_irq(&queue->_xmit_lock);

	return sprintf(buf, "%lu", trans_timeout);
}

static unsigned int get_netdev_queue_index(struct netdev_queue *queue)
{
	struct net_device *dev = queue->dev;
	unsigned int i;

	i = queue - dev->_tx;
	BUG_ON(i >= dev->num_tx_queues);

	return i;
}

static ssize_t traffic_class_show(struct netdev_queue *queue,
				  char *buf)
{
	struct net_device *dev = queue->dev;
	int index;
	int tc;

	if (!netif_is_multiqueue(dev))
		return -ENOENT;

	index = get_netdev_queue_index(queue);

	/* If queue belongs to subordinate dev use its TC mapping */
	dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev;

	tc = netdev_txq_to_tc(dev, index);
	if (tc < 0)
		return -EINVAL;

	/* We can report the traffic class one of two ways:
	 * Subordinate device traffic classes are reported with the traffic
	 * class first, and then the subordinate class so for example TC0 on
	 * subordinate device 2 will be reported as "0-2". If the queue
	 * belongs to the root device it will be reported with just the
	 * traffic class, so just "0" for TC 0 for example.
	 */
	return dev->num_tc < 0 ? sprintf(buf, "%u%d\n", tc, dev->num_tc) :
				 sprintf(buf, "%u\n", tc);
}

#ifdef CONFIG_XPS
static ssize_t tx_maxrate_show(struct netdev_queue *queue,
			       char *buf)
{
	return sprintf(buf, "%lu\n", queue->tx_maxrate);
}

static ssize_t tx_maxrate_store(struct netdev_queue *queue,
				const char *buf, size_t len)
{
	struct net_device *dev = queue->dev;
	int err, index = get_netdev_queue_index(queue);
	u32 rate = 0;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	err = kstrtou32(buf, 10, &rate);
	if (err < 0)
		return err;

	if (!rtnl_trylock())
		return restart_syscall();

	err = -EOPNOTSUPP;
	if (dev->netdev_ops->ndo_set_tx_maxrate)
		err = dev->netdev_ops->ndo_set_tx_maxrate(dev, index, rate);

	rtnl_unlock();
	if (!err) {
		queue->tx_maxrate = rate;
		return len;
	}
	return err;
}

static struct netdev_queue_attribute queue_tx_maxrate __ro_after_init
	= __ATTR_RW(tx_maxrate);
#endif

static struct netdev_queue_attribute queue_trans_timeout __ro_after_init
	= __ATTR_RO(tx_timeout);

static struct netdev_queue_attribute queue_traffic_class __ro_after_init
	= __ATTR_RO(traffic_class);

#ifdef CONFIG_BQL
/*
 * Byte queue limits sysfs structures and functions.
 */
static ssize_t bql_show(char *buf, unsigned int value)
{
	return sprintf(buf, "%u\n", value);
}

static ssize_t bql_set(const char *buf, const size_t count,
		       unsigned int *pvalue)
{
	unsigned int value;
	int err;

	if (!strcmp(buf, "max") || !strcmp(buf, "max\n")) {
		value = DQL_MAX_LIMIT;
	} else {
		err = kstrtouint(buf, 10, &value);
		if (err < 0)
			return err;
		if (value > DQL_MAX_LIMIT)
			return -EINVAL;
	}

	*pvalue = value;

	return count;
}

static ssize_t bql_show_hold_time(struct netdev_queue *queue,
				  char *buf)
{
	struct dql *dql = &queue->dql;

	return sprintf(buf, "%u\n", jiffies_to_msecs(dql->slack_hold_time));
}

static ssize_t bql_set_hold_time(struct netdev_queue *queue,
				 const char *buf, size_t len)
{
	struct dql *dql = &queue->dql;
	unsigned int value;
	int err;

	err = kstrtouint(buf, 10, &value);
	if (err < 0)
		return err;

	dql->slack_hold_time = msecs_to_jiffies(value);

	return len;
}

static struct netdev_queue_attribute bql_hold_time_attribute __ro_after_init
	= __ATTR(hold_time, 0644,
		 bql_show_hold_time, bql_set_hold_time);

static ssize_t bql_show_inflight(struct netdev_queue *queue,
				 char *buf)
{
	struct dql *dql = &queue->dql;

	return sprintf(buf, "%u\n", dql->num_queued - dql->num_completed);
}

static struct netdev_queue_attribute bql_inflight_attribute __ro_after_init =
	__ATTR(inflight, 0444, bql_show_inflight, NULL);

#define BQL_ATTR(NAME, FIELD)						\
static ssize_t bql_show_ ## NAME(struct netdev_queue *queue,		\
				 char *buf)				\
{									\
	return bql_show(buf, queue->dql.FIELD);				\
}									\
									\
static ssize_t bql_set_ ## NAME(struct netdev_queue *queue,		\
				const char *buf, size_t len)		\
{									\
	return bql_set(buf, len, &queue->dql.FIELD);			\
}									\
									\
static struct netdev_queue_attribute bql_ ## NAME ## _attribute __ro_after_init \
	= __ATTR(NAME, 0644,						\
		 bql_show_ ## NAME, bql_set_ ## NAME)

BQL_ATTR(limit, limit);
BQL_ATTR(limit_max, max_limit);
BQL_ATTR(limit_min, min_limit);

static struct attribute *dql_attrs[] __ro_after_init = {
	&bql_limit_attribute.attr,
	&bql_limit_max_attribute.attr,
	&bql_limit_min_attribute.attr,
	&bql_hold_time_attribute.attr,
	&bql_inflight_attribute.attr,
	NULL
};

static const struct attribute_group dql_group = {
	.name = "byte_queue_limits",
	.attrs = dql_attrs,
};
#endif /* CONFIG_BQL */
1230
david decotignyccf5ff62011-11-16 12:15:10 +00001231#ifdef CONFIG_XPS
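/*
 * xps_cpus reports the XPS CPU affinity of this tx queue: a hex cpumask
 * of the CPUs that may select the queue for transmit, rebuilt from
 * dev->xps_cpus_map under RCU.
 */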
static ssize_t xps_cpus_show(struct netdev_queue *queue,
			     char *buf)
{
	struct net_device *dev = queue->dev;
	int cpu, len, num_tc = 1, tc = 0;
	struct xps_dev_maps *dev_maps;
	cpumask_var_t mask;
	unsigned long index;

	if (!netif_is_multiqueue(dev))
		return -ENOENT;

	index = get_netdev_queue_index(queue);

	if (dev->num_tc) {
		/* Do not allow XPS on subordinate device directly */
		num_tc = dev->num_tc;
		if (num_tc < 0)
			return -EINVAL;

		/* If queue belongs to subordinate dev use its map */
		dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev;

		tc = netdev_txq_to_tc(dev, index);
		if (tc < 0)
			return -EINVAL;
	}

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	rcu_read_lock();
	dev_maps = rcu_dereference(dev->xps_cpus_map);
	if (dev_maps) {
		for_each_possible_cpu(cpu) {
			int i, tci = cpu * num_tc + tc;
			struct xps_map *map;

			map = rcu_dereference(dev_maps->attr_map[tci]);
			if (!map)
				continue;

			for (i = map->len; i--;) {
				if (map->queues[i] == index) {
					cpumask_set_cpu(cpu, mask);
					break;
				}
			}
		}
	}
	rcu_read_unlock();

	len = snprintf(buf, PAGE_SIZE, "%*pb\n", cpumask_pr_args(mask));
	free_cpumask_var(mask);
	return len < PAGE_SIZE ? len : -EINVAL;
}

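/*
 * Writing a hex cpumask installs a new XPS mapping via
 * netif_set_xps_queue().  A hypothetical example, steering tx-0 of
 * "eth0" to CPUs 2-3:
 *   echo c > /sys/class/net/eth0/queues/tx-0/xps_cpus
 */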
static ssize_t xps_cpus_store(struct netdev_queue *queue,
			      const char *buf, size_t len)
{
	struct net_device *dev = queue->dev;
	unsigned long index;
	cpumask_var_t mask;
	int err;

	if (!netif_is_multiqueue(dev))
		return -ENOENT;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	index = get_netdev_queue_index(queue);

	err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
	if (err) {
		free_cpumask_var(mask);
		return err;
	}

	err = netif_set_xps_queue(dev, mask, index);

	free_cpumask_var(mask);

	return err ? : len;
}

static struct netdev_queue_attribute xps_cpus_attribute __ro_after_init
	= __ATTR_RW(xps_cpus);

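/*
 * xps_rxqs is the receive-queue flavour of XPS: a bitmap of rx queues
 * whose flows should be transmitted on this tx queue, read from
 * dev->xps_rxqs_map.
 */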
static ssize_t xps_rxqs_show(struct netdev_queue *queue, char *buf)
{
	struct net_device *dev = queue->dev;
	struct xps_dev_maps *dev_maps;
	unsigned long *mask, index;
	int j, len, num_tc = 1, tc = 0;

	index = get_netdev_queue_index(queue);

	if (dev->num_tc) {
		num_tc = dev->num_tc;
		tc = netdev_txq_to_tc(dev, index);
		if (tc < 0)
			return -EINVAL;
	}
	mask = bitmap_zalloc(dev->num_rx_queues, GFP_KERNEL);
	if (!mask)
		return -ENOMEM;

	rcu_read_lock();
	dev_maps = rcu_dereference(dev->xps_rxqs_map);
	if (!dev_maps)
		goto out_no_maps;

	for (j = -1; j = netif_attrmask_next(j, NULL, dev->num_rx_queues),
	     j < dev->num_rx_queues;) {
		int i, tci = j * num_tc + tc;
		struct xps_map *map;

		map = rcu_dereference(dev_maps->attr_map[tci]);
		if (!map)
			continue;

		for (i = map->len; i--;) {
			if (map->queues[i] == index) {
				set_bit(j, mask);
				break;
			}
		}
	}
out_no_maps:
	rcu_read_unlock();

	len = bitmap_print_to_pagebuf(false, buf, mask, dev->num_rx_queues);
	bitmap_free(mask);

	return len < PAGE_SIZE ? len : -EINVAL;
}

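/*
 * Writing a bitmap of rx queues updates the rxqs map through
 * __netif_set_xps_queue(..., is_rxqs_map = true); this requires
 * CAP_NET_ADMIN in the device's network namespace.  A hypothetical
 * example, pairing rx-0 with this tx queue:
 *   echo 1 > /sys/class/net/eth0/queues/tx-0/xps_rxqs
 */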
static ssize_t xps_rxqs_store(struct netdev_queue *queue, const char *buf,
			      size_t len)
{
	struct net_device *dev = queue->dev;
	struct net *net = dev_net(dev);
	unsigned long *mask, index;
	int err;

	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	mask = bitmap_zalloc(dev->num_rx_queues, GFP_KERNEL);
	if (!mask)
		return -ENOMEM;

	index = get_netdev_queue_index(queue);

	err = bitmap_parse(buf, len, mask, dev->num_rx_queues);
	if (err) {
		bitmap_free(mask);
		return err;
	}

	cpus_read_lock();
	err = __netif_set_xps_queue(dev, mask, index, true);
	cpus_read_unlock();

	bitmap_free(mask);
	return err ? : len;
}

static struct netdev_queue_attribute xps_rxqs_attribute __ro_after_init
	= __ATTR_RW(xps_rxqs);
#endif /* CONFIG_XPS */

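/*
 * Attributes created directly in every tx-<n> directory; the XPS
 * entries exist only when CONFIG_XPS is enabled.
 */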
static struct attribute *netdev_queue_default_attrs[] __ro_after_init = {
	&queue_trans_timeout.attr,
	&queue_traffic_class.attr,
#ifdef CONFIG_XPS
	&xps_cpus_attribute.attr,
	&xps_rxqs_attribute.attr,
	&queue_tx_maxrate.attr,
#endif
	NULL
};
ATTRIBUTE_GROUPS(netdev_queue_default);

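/*
 * Called by kobject_put() once the last reference to a tx queue kobject
 * is gone; drops the device reference taken in netdev_queue_add_kobject().
 */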
static void netdev_queue_release(struct kobject *kobj)
{
	struct netdev_queue *queue = to_netdev_queue(kobj);

	memset(kobj, 0, sizeof(*kobj));
	dev_put(queue->dev);
}

static const void *netdev_queue_namespace(struct kobject *kobj)
{
	struct netdev_queue *queue = to_netdev_queue(kobj);
	struct device *dev = &queue->dev->dev;
	const void *ns = NULL;

	if (dev->class && dev->class->ns_type)
		ns = dev->class->namespace(dev);

	return ns;
}

static void netdev_queue_get_ownership(struct kobject *kobj,
				       kuid_t *uid, kgid_t *gid)
{
	const struct net *net = netdev_queue_namespace(kobj);

	net_ns_get_ownership(net, uid, gid);
}

static struct kobj_type netdev_queue_ktype __ro_after_init = {
	.sysfs_ops = &netdev_queue_sysfs_ops,
	.release = netdev_queue_release,
	.default_groups = netdev_queue_default_groups,
	.namespace = netdev_queue_namespace,
	.get_ownership = netdev_queue_get_ownership,
};

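/*
 * Create /sys/class/net/<dev>/queues/tx-<index> and, with CONFIG_BQL,
 * its byte_queue_limits group.  The device reference is taken up front
 * because the error path's kobject_put() ends in netdev_queue_release(),
 * which drops it.
 */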
static int netdev_queue_add_kobject(struct net_device *dev, int index)
{
	struct netdev_queue *queue = dev->_tx + index;
	struct kobject *kobj = &queue->kobj;
	int error = 0;

	/* Kobject_put later will trigger netdev_queue_release call
	 * which decreases dev refcount: take that reference here.
	 */
	dev_hold(queue->dev);

	kobj->kset = dev->queues_kset;
	error = kobject_init_and_add(kobj, &netdev_queue_ktype, NULL,
				     "tx-%u", index);
	if (error)
		goto err;

#ifdef CONFIG_BQL
	error = sysfs_create_group(kobj, &dql_group);
	if (error)
		goto err;
#endif

	kobject_uevent(kobj, KOBJ_ADD);
	return 0;

err:
	kobject_put(kobj);
	return error;
}
#endif /* CONFIG_SYSFS */

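/*
 * Bring the tx queue kobjects in line with a new queue count: add
 * kobjects for [old_num, new_num), or tear down the excess when
 * shrinking.  A failed add rolls back whatever this call created.
 */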
int
netdev_queue_update_kobjects(struct net_device *dev, int old_num, int new_num)
{
#ifdef CONFIG_SYSFS
	int i;
	int error = 0;

	for (i = old_num; i < new_num; i++) {
		error = netdev_queue_add_kobject(dev, i);
		if (error) {
			new_num = old_num;
			break;
		}
	}

	while (--i >= new_num) {
		struct netdev_queue *queue = dev->_tx + i;

		if (!refcount_read(&dev_net(dev)->count))
			queue->kobj.uevent_suppress = 1;
#ifdef CONFIG_BQL
		sysfs_remove_group(&queue->kobj, &dql_group);
#endif
		kobject_put(&queue->kobj);
	}

	return error;
#else
	return 0;
#endif /* CONFIG_SYSFS */
}

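/*
 * Create the per-device "queues" kset and populate it with rx-<n> and
 * tx-<n> kobjects for all real queues, e.g. (hypothetical device):
 *   /sys/class/net/eth0/queues/{rx-0,...,tx-0,...}
 * Everything is unwound if either step fails.
 */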
static int register_queue_kobjects(struct net_device *dev)
{
	int error = 0, txq = 0, rxq = 0, real_rx = 0, real_tx = 0;

#ifdef CONFIG_SYSFS
	dev->queues_kset = kset_create_and_add("queues",
					       NULL, &dev->dev.kobj);
	if (!dev->queues_kset)
		return -ENOMEM;
	real_rx = dev->real_num_rx_queues;
#endif
	real_tx = dev->real_num_tx_queues;

	error = net_rx_queue_update_kobjects(dev, 0, real_rx);
	if (error)
		goto error;
	rxq = real_rx;

	error = netdev_queue_update_kobjects(dev, 0, real_tx);
	if (error)
		goto error;
	txq = real_tx;

	return 0;

error:
	netdev_queue_update_kobjects(dev, txq, 0);
	net_rx_queue_update_kobjects(dev, rxq, 0);
#ifdef CONFIG_SYSFS
	kset_unregister(dev->queues_kset);
#endif
	return error;
}

static void remove_queue_kobjects(struct net_device *dev)
{
	int real_rx = 0, real_tx = 0;

#ifdef CONFIG_SYSFS
	real_rx = dev->real_num_rx_queues;
#endif
	real_tx = dev->real_num_tx_queues;

	net_rx_queue_update_kobjects(dev, real_rx, 0);
	netdev_queue_update_kobjects(dev, real_tx, 0);
#ifdef CONFIG_SYSFS
	kset_unregister(dev->queues_kset);
#endif
}

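/*
 * kobj_ns hooks for KOBJ_NS_TYPE_NET: they tag "net" class devices with
 * their owning network namespace so sysfs can show each namespace only
 * its own interfaces.
 */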
static bool net_current_may_mount(void)
{
	struct net *net = current->nsproxy->net_ns;

	return ns_capable(net->user_ns, CAP_SYS_ADMIN);
}

static void *net_grab_current_ns(void)
{
	struct net *ns = current->nsproxy->net_ns;
#ifdef CONFIG_NET_NS
	if (ns)
		refcount_inc(&ns->passive);
#endif
	return ns;
}

static const void *net_initial_ns(void)
{
	return &init_net;
}

static const void *net_netlink_ns(struct sock *sk)
{
	return sock_net(sk);
}

const struct kobj_ns_type_operations net_ns_type_operations = {
	.type = KOBJ_NS_TYPE_NET,
	.current_may_mount = net_current_may_mount,
	.grab_current_ns = net_grab_current_ns,
	.netlink_ns = net_netlink_ns,
	.initial_ns = net_initial_ns,
	.drop_ns = net_drop_ns,
};
EXPORT_SYMBOL_GPL(net_ns_type_operations);

static int netdev_uevent(struct device *d, struct kobj_uevent_env *env)
{
	struct net_device *dev = to_net_dev(d);
	int retval;

	/* pass interface to uevent. */
	retval = add_uevent_var(env, "INTERFACE=%s", dev->name);
	if (retval)
		goto exit;

	/* pass ifindex to uevent.
	 * ifindex is useful as it won't change (interface name may change)
	 * and is what RtNetlink uses natively.
	 */
	retval = add_uevent_var(env, "IFINDEX=%d", dev->ifindex);

exit:
	return retval;
}

/*
 *	netdev_release -- destroy and free a dead device.
 *	Called when last reference to device kobject is gone.
 */
static void netdev_release(struct device *d)
{
	struct net_device *dev = to_net_dev(d);

	BUG_ON(dev->reg_state != NETREG_RELEASED);

	/* no need to wait for rcu grace period:
	 * device is dead and about to be freed.
	 */
	kfree(rcu_access_pointer(dev->ifalias));
	netdev_freemem(dev);
}

static const void *net_namespace(struct device *d)
{
	struct net_device *dev = to_net_dev(d);

	return dev_net(dev);
}

static void net_get_ownership(struct device *d, kuid_t *uid, kgid_t *gid)
{
	struct net_device *dev = to_net_dev(d);
	const struct net *net = dev_net(dev);

	net_ns_get_ownership(net, uid, gid);
}

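/*
 * The "net" device class: every registered net_device shows up as
 * /sys/class/net/<name>, wired to the release/uevent/namespace/ownership
 * helpers above.
 */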
static struct class net_class __ro_after_init = {
	.name = "net",
	.dev_release = netdev_release,
	.dev_groups = net_class_groups,
	.dev_uevent = netdev_uevent,
	.ns_type = &net_ns_type_operations,
	.namespace = net_namespace,
	.get_ownership = net_get_ownership,
};

#ifdef CONFIG_OF_NET
static int of_dev_node_match(struct device *dev, const void *data)
{
	int ret = 0;

	if (dev->parent)
		ret = dev->parent->of_node == data;

	return ret == 0 ? dev->of_node == data : ret;
}

/*
 * of_find_net_device_by_node - lookup the net device for the device node
 * @np: OF device node
 *
 * Looks up the net_device structure corresponding with the device node.
 * If successful, returns a pointer to the net_device with the embedded
 * struct device refcount incremented by one, or NULL on failure. The
 * refcount must be dropped when done with the net_device.
 */
struct net_device *of_find_net_device_by_node(struct device_node *np)
{
	struct device *dev;

	dev = class_find_device(&net_class, NULL, np, of_dev_node_match);
	if (!dev)
		return NULL;

	return to_net_dev(dev);
}
EXPORT_SYMBOL(of_find_net_device_by_node);
#endif

/* Delete sysfs entries but hold kobject reference until after all
 * netdev references are gone.
 */
void netdev_unregister_kobject(struct net_device *ndev)
{
	struct device *dev = &ndev->dev;

	if (!refcount_read(&dev_net(ndev)->count))
		dev_set_uevent_suppress(dev, 1);

	kobject_get(&dev->kobj);

	remove_queue_kobjects(ndev);

	pm_runtime_set_memalloc_noio(dev, false);

	device_del(dev);
}

/* Create sysfs entries for network device. */
int netdev_register_kobject(struct net_device *ndev)
{
	struct device *dev = &ndev->dev;
	const struct attribute_group **groups = ndev->sysfs_groups;
	int error = 0;

	device_initialize(dev);
	dev->class = &net_class;
	dev->platform_data = ndev;
	dev->groups = groups;

	dev_set_name(dev, "%s", ndev->name);

#ifdef CONFIG_SYSFS
	/* Allow for a device specific group */
	if (*groups)
		groups++;

	*groups++ = &netstat_group;

#if IS_ENABLED(CONFIG_WIRELESS_EXT) || IS_ENABLED(CONFIG_CFG80211)
	if (ndev->ieee80211_ptr)
		*groups++ = &wireless_group;
#if IS_ENABLED(CONFIG_WIRELESS_EXT)
	else if (ndev->wireless_handlers)
		*groups++ = &wireless_group;
#endif
#endif
#endif /* CONFIG_SYSFS */

	error = device_add(dev);
	if (error)
		return error;

	error = register_queue_kobjects(ndev);
	if (error) {
		device_del(dev);
		return error;
	}

	pm_runtime_set_memalloc_noio(dev, true);

	return error;
}

int netdev_class_create_file_ns(const struct class_attribute *class_attr,
				const void *ns)
{
	return class_create_file_ns(&net_class, class_attr, ns);
}
EXPORT_SYMBOL(netdev_class_create_file_ns);

void netdev_class_remove_file_ns(const struct class_attribute *class_attr,
				 const void *ns)
{
	class_remove_file_ns(&net_class, class_attr, ns);
}
EXPORT_SYMBOL(netdev_class_remove_file_ns);

int __init netdev_kobject_init(void)
{
	kobj_ns_type_register(&net_ns_type_operations);
	return class_register(&net_class);
}