// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net-sysfs.c - network device class and attributes
 *
 * Copyright (c) 2003 Stephen Hemminger <shemminger@osdl.org>
 */

#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/sched/isolation.h>
#include <linux/nsproxy.h>
#include <net/sock.h>
#include <net/net_namespace.h>
#include <linux/rtnetlink.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <linux/jiffies.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/cpu.h>

#include "net-sysfs.h"

#ifdef CONFIG_SYSFS
static const char fmt_hex[] = "%#x\n";
static const char fmt_dec[] = "%d\n";
static const char fmt_ulong[] = "%lu\n";
static const char fmt_u64[] = "%llu\n";

static inline int dev_isalive(const struct net_device *dev)
{
	return dev->reg_state <= NETREG_REGISTERED;
}

/* use same locking rules as GIF* ioctl's */
static ssize_t netdev_show(const struct device *dev,
			   struct device_attribute *attr, char *buf,
			   ssize_t (*format)(const struct net_device *, char *))
{
	struct net_device *ndev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	read_lock(&dev_base_lock);
	if (dev_isalive(ndev))
		ret = (*format)(ndev, buf);
	read_unlock(&dev_base_lock);

	return ret;
}

/* generate a show function for simple field */
#define NETDEVICE_SHOW(field, format_string)				\
static ssize_t format_##field(const struct net_device *dev, char *buf)	\
{									\
	return sprintf(buf, format_string, dev->field);			\
}									\
static ssize_t field##_show(struct device *dev,				\
			    struct device_attribute *attr, char *buf)	\
{									\
	return netdev_show(dev, attr, buf, format_##field);		\
}									\

#define NETDEVICE_SHOW_RO(field, format_string)				\
NETDEVICE_SHOW(field, format_string);					\
static DEVICE_ATTR_RO(field)

#define NETDEVICE_SHOW_RW(field, format_string)				\
NETDEVICE_SHOW(field, format_string);					\
static DEVICE_ATTR_RW(field)
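
/*
 * For illustration, NETDEVICE_SHOW_RO(ifindex, fmt_dec) expands, roughly, to:
 *
 *	static ssize_t format_ifindex(const struct net_device *dev, char *buf)
 *	{
 *		return sprintf(buf, "%d\n", dev->ifindex);
 *	}
 *	static ssize_t ifindex_show(struct device *dev,
 *				    struct device_attribute *attr, char *buf)
 *	{
 *		return netdev_show(dev, attr, buf, format_ifindex);
 *	}
 *	static DEVICE_ATTR_RO(ifindex);
 *
 * which surfaces the field as /sys/class/net/<iface>/ifindex.
 */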

/* use same locking and permission rules as SIF* ioctl's */
static ssize_t netdev_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t len,
			    int (*set)(struct net_device *, unsigned long))
{
	struct net_device *netdev = to_net_dev(dev);
	struct net *net = dev_net(netdev);
	unsigned long new;
	int ret;

	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	ret = kstrtoul(buf, 0, &new);
	if (ret)
		goto err;

	if (!rtnl_trylock())
		return restart_syscall();

	if (dev_isalive(netdev)) {
		ret = (*set)(netdev, new);
		if (ret == 0)
			ret = len;
	}
	rtnl_unlock();
err:
	return ret;
}
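
/*
 * All of the writable attributes below funnel through netdev_store(), so a
 * write needs CAP_NET_ADMIN in the device's network namespace and runs with
 * the RTNL lock held.  kstrtoul() is called with base 0, so decimal, octal
 * ("0...") and hex ("0x...") values are all accepted, e.g. (interface name
 * and value illustrative):
 *
 *	# echo 0x1003 > /sys/class/net/eth0/flags
 */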

NETDEVICE_SHOW_RO(dev_id, fmt_hex);
NETDEVICE_SHOW_RO(dev_port, fmt_dec);
NETDEVICE_SHOW_RO(addr_assign_type, fmt_dec);
NETDEVICE_SHOW_RO(addr_len, fmt_dec);
NETDEVICE_SHOW_RO(ifindex, fmt_dec);
NETDEVICE_SHOW_RO(type, fmt_dec);
NETDEVICE_SHOW_RO(link_mode, fmt_dec);

static ssize_t iflink_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct net_device *ndev = to_net_dev(dev);

	return sprintf(buf, fmt_dec, dev_get_iflink(ndev));
}
static DEVICE_ATTR_RO(iflink);

static ssize_t format_name_assign_type(const struct net_device *dev, char *buf)
{
	return sprintf(buf, fmt_dec, dev->name_assign_type);
}

static ssize_t name_assign_type_show(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct net_device *ndev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	if (ndev->name_assign_type != NET_NAME_UNKNOWN)
		ret = netdev_show(dev, attr, buf, format_name_assign_type);

	return ret;
}
static DEVICE_ATTR_RO(name_assign_type);

/* use same locking rules as GIFHWADDR ioctl's */
static ssize_t address_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct net_device *ndev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	read_lock(&dev_base_lock);
	if (dev_isalive(ndev))
		ret = sysfs_format_mac(buf, ndev->dev_addr, ndev->addr_len);
	read_unlock(&dev_base_lock);
	return ret;
}
static DEVICE_ATTR_RO(address);

static ssize_t broadcast_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct net_device *ndev = to_net_dev(dev);

	if (dev_isalive(ndev))
		return sysfs_format_mac(buf, ndev->broadcast, ndev->addr_len);
	return -EINVAL;
}
static DEVICE_ATTR_RO(broadcast);

static int change_carrier(struct net_device *dev, unsigned long new_carrier)
{
	if (!netif_running(dev))
		return -EINVAL;
	return dev_change_carrier(dev, (bool)new_carrier);
}

static ssize_t carrier_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_carrier);
}

static ssize_t carrier_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	if (netif_running(netdev))
		return sprintf(buf, fmt_dec, !!netif_carrier_ok(netdev));

	return -EINVAL;
}
static DEVICE_ATTR_RW(carrier);

static ssize_t speed_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	int ret = -EINVAL;

	if (!rtnl_trylock())
		return restart_syscall();

	if (netif_running(netdev)) {
		struct ethtool_link_ksettings cmd;

		if (!__ethtool_get_link_ksettings(netdev, &cmd))
			ret = sprintf(buf, fmt_dec, cmd.base.speed);
	}
	rtnl_unlock();
	return ret;
}
static DEVICE_ATTR_RO(speed);

static ssize_t duplex_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	int ret = -EINVAL;

	if (!rtnl_trylock())
		return restart_syscall();

	if (netif_running(netdev)) {
		struct ethtool_link_ksettings cmd;

		if (!__ethtool_get_link_ksettings(netdev, &cmd)) {
			const char *duplex;

			switch (cmd.base.duplex) {
			case DUPLEX_HALF:
				duplex = "half";
				break;
			case DUPLEX_FULL:
				duplex = "full";
				break;
			default:
				duplex = "unknown";
				break;
			}
			ret = sprintf(buf, "%s\n", duplex);
		}
	}
	rtnl_unlock();
	return ret;
}
static DEVICE_ATTR_RO(duplex);

static ssize_t testing_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	if (netif_running(netdev))
		return sprintf(buf, fmt_dec, !!netif_testing(netdev));

	return -EINVAL;
}
static DEVICE_ATTR_RO(testing);

static ssize_t dormant_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	if (netif_running(netdev))
		return sprintf(buf, fmt_dec, !!netif_dormant(netdev));

	return -EINVAL;
}
static DEVICE_ATTR_RO(dormant);

static const char *const operstates[] = {
	"unknown",
	"notpresent", /* currently unused */
	"down",
	"lowerlayerdown",
	"testing",
	"dormant",
	"up"
};

static ssize_t operstate_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	const struct net_device *netdev = to_net_dev(dev);
	unsigned char operstate;

	read_lock(&dev_base_lock);
	operstate = netdev->operstate;
	if (!netif_running(netdev))
		operstate = IF_OPER_DOWN;
	read_unlock(&dev_base_lock);

	if (operstate >= ARRAY_SIZE(operstates))
		return -EINVAL; /* should not happen */

	return sprintf(buf, "%s\n", operstates[operstate]);
}
static DEVICE_ATTR_RO(operstate);
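
/*
 * Example read (interface name illustrative); the possible values are
 * exactly the strings in operstates[] above:
 *
 *	$ cat /sys/class/net/eth0/operstate
 *	up
 */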

static ssize_t carrier_changes_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	return sprintf(buf, fmt_dec,
		       atomic_read(&netdev->carrier_up_count) +
		       atomic_read(&netdev->carrier_down_count));
}
static DEVICE_ATTR_RO(carrier_changes);

static ssize_t carrier_up_count_show(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	return sprintf(buf, fmt_dec, atomic_read(&netdev->carrier_up_count));
}
static DEVICE_ATTR_RO(carrier_up_count);

static ssize_t carrier_down_count_show(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	return sprintf(buf, fmt_dec, atomic_read(&netdev->carrier_down_count));
}
static DEVICE_ATTR_RO(carrier_down_count);

/* read-write attributes */

static int change_mtu(struct net_device *dev, unsigned long new_mtu)
{
	return dev_set_mtu(dev, (int)new_mtu);
}

static ssize_t mtu_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_mtu);
}
NETDEVICE_SHOW_RW(mtu, fmt_dec);
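
/*
 * Example (interface name and value illustrative): changing the MTU through
 * sysfs lands in dev_set_mtu() under RTNL, the same helper the SIOCSIFMTU
 * ioctl path uses:
 *
 *	# echo 9000 > /sys/class/net/eth0/mtu
 */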

static int change_flags(struct net_device *dev, unsigned long new_flags)
{
	return dev_change_flags(dev, (unsigned int)new_flags, NULL);
}

static ssize_t flags_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_flags);
}
NETDEVICE_SHOW_RW(flags, fmt_hex);

static ssize_t tx_queue_len_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t len)
{
	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	return netdev_store(dev, attr, buf, len, dev_change_tx_queue_len);
}
NETDEVICE_SHOW_RW(tx_queue_len, fmt_dec);

static int change_gro_flush_timeout(struct net_device *dev, unsigned long val)
{
	WRITE_ONCE(dev->gro_flush_timeout, val);
	return 0;
}

static ssize_t gro_flush_timeout_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t len)
{
	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	return netdev_store(dev, attr, buf, len, change_gro_flush_timeout);
}
NETDEVICE_SHOW_RW(gro_flush_timeout, fmt_ulong);

static int change_napi_defer_hard_irqs(struct net_device *dev, unsigned long val)
{
	WRITE_ONCE(dev->napi_defer_hard_irqs, val);
	return 0;
}

static ssize_t napi_defer_hard_irqs_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t len)
{
	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	return netdev_store(dev, attr, buf, len, change_napi_defer_hard_irqs);
}
NETDEVICE_SHOW_RW(napi_defer_hard_irqs, fmt_dec);

static ssize_t ifalias_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t len)
{
	struct net_device *netdev = to_net_dev(dev);
	struct net *net = dev_net(netdev);
	size_t count = len;
	ssize_t ret = 0;

	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	/* ignore trailing newline */
	if (len > 0 && buf[len - 1] == '\n')
		--count;

	if (!rtnl_trylock())
		return restart_syscall();

	if (dev_isalive(netdev)) {
		ret = dev_set_alias(netdev, buf, count);
		if (ret < 0)
			goto err;
		ret = len;
		netdev_state_change(netdev);
	}
err:
	rtnl_unlock();

	return ret;
}

static ssize_t ifalias_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	const struct net_device *netdev = to_net_dev(dev);
	char tmp[IFALIASZ];
	ssize_t ret = 0;

	ret = dev_get_alias(netdev, tmp, sizeof(tmp));
	if (ret > 0)
		ret = sprintf(buf, "%s\n", tmp);
	return ret;
}
static DEVICE_ATTR_RW(ifalias);

static int change_group(struct net_device *dev, unsigned long new_group)
{
	dev_set_group(dev, (int)new_group);
	return 0;
}

static ssize_t group_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_group);
}
NETDEVICE_SHOW(group, fmt_dec);
static DEVICE_ATTR(netdev_group, 0644, group_show, group_store);
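
/*
 * Note: NETDEVICE_SHOW(group, fmt_dec) generates group_show(), and the plain
 * DEVICE_ATTR() above then binds it together with group_store() to a sysfs
 * file named "netdev_group" rather than "group".
 */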

static int change_proto_down(struct net_device *dev, unsigned long proto_down)
{
	return dev_change_proto_down(dev, (bool)proto_down);
}

static ssize_t proto_down_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_proto_down);
}
NETDEVICE_SHOW_RW(proto_down, fmt_dec);

static ssize_t phys_port_id_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	if (!rtnl_trylock())
		return restart_syscall();

	if (dev_isalive(netdev)) {
		struct netdev_phys_item_id ppid;

		ret = dev_get_phys_port_id(netdev, &ppid);
		if (!ret)
			ret = sprintf(buf, "%*phN\n", ppid.id_len, ppid.id);
	}
	rtnl_unlock();

	return ret;
}
static DEVICE_ATTR_RO(phys_port_id);

static ssize_t phys_port_name_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	if (!rtnl_trylock())
		return restart_syscall();

	if (dev_isalive(netdev)) {
		char name[IFNAMSIZ];

		ret = dev_get_phys_port_name(netdev, name, sizeof(name));
		if (!ret)
			ret = sprintf(buf, "%s\n", name);
	}
	rtnl_unlock();

	return ret;
}
static DEVICE_ATTR_RO(phys_port_name);

static ssize_t phys_switch_id_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	if (!rtnl_trylock())
		return restart_syscall();

	if (dev_isalive(netdev)) {
		struct netdev_phys_item_id ppid = { };

		ret = dev_get_port_parent_id(netdev, &ppid, false);
		if (!ret)
			ret = sprintf(buf, "%*phN\n", ppid.id_len, ppid.id);
	}
	rtnl_unlock();

	return ret;
}
static DEVICE_ATTR_RO(phys_switch_id);

static struct attribute *net_class_attrs[] __ro_after_init = {
	&dev_attr_netdev_group.attr,
	&dev_attr_type.attr,
	&dev_attr_dev_id.attr,
	&dev_attr_dev_port.attr,
	&dev_attr_iflink.attr,
	&dev_attr_ifindex.attr,
	&dev_attr_name_assign_type.attr,
	&dev_attr_addr_assign_type.attr,
	&dev_attr_addr_len.attr,
	&dev_attr_link_mode.attr,
	&dev_attr_address.attr,
	&dev_attr_broadcast.attr,
	&dev_attr_speed.attr,
	&dev_attr_duplex.attr,
	&dev_attr_dormant.attr,
	&dev_attr_testing.attr,
	&dev_attr_operstate.attr,
	&dev_attr_carrier_changes.attr,
	&dev_attr_ifalias.attr,
	&dev_attr_carrier.attr,
	&dev_attr_mtu.attr,
	&dev_attr_flags.attr,
	&dev_attr_tx_queue_len.attr,
	&dev_attr_gro_flush_timeout.attr,
	&dev_attr_napi_defer_hard_irqs.attr,
	&dev_attr_phys_port_id.attr,
	&dev_attr_phys_port_name.attr,
	&dev_attr_phys_switch_id.attr,
	&dev_attr_proto_down.attr,
	&dev_attr_carrier_up_count.attr,
	&dev_attr_carrier_down_count.attr,
	NULL,
};
ATTRIBUTE_GROUPS(net_class);
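
/*
 * Each entry in net_class_attrs[] becomes a file directly under
 * /sys/class/net/<iface>/ once the device is registered.
 */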

/* Show an attribute in the statistics group */
static ssize_t netstat_show(const struct device *d,
			    struct device_attribute *attr, char *buf,
			    unsigned long offset)
{
	struct net_device *dev = to_net_dev(d);
	ssize_t ret = -EINVAL;

	WARN_ON(offset > sizeof(struct rtnl_link_stats64) ||
		offset % sizeof(u64) != 0);

	read_lock(&dev_base_lock);
	if (dev_isalive(dev)) {
		struct rtnl_link_stats64 temp;
		const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);

		ret = sprintf(buf, fmt_u64, *(u64 *)(((u8 *)stats) + offset));
	}
	read_unlock(&dev_base_lock);
	return ret;
}

/* generate a read-only statistics attribute */
#define NETSTAT_ENTRY(name)						\
static ssize_t name##_show(struct device *d,				\
			   struct device_attribute *attr, char *buf)	\
{									\
	return netstat_show(d, attr, buf,				\
			    offsetof(struct rtnl_link_stats64, name));	\
}									\
static DEVICE_ATTR_RO(name)
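
/*
 * The offsetof() above means each counter is read as a u64 at a fixed byte
 * offset into the rtnl_link_stats64 snapshot filled in by dev_get_stats();
 * the WARN_ON() in netstat_show() catches offsets that are out of range or
 * not u64-aligned.
 */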

NETSTAT_ENTRY(rx_packets);
NETSTAT_ENTRY(tx_packets);
NETSTAT_ENTRY(rx_bytes);
NETSTAT_ENTRY(tx_bytes);
NETSTAT_ENTRY(rx_errors);
NETSTAT_ENTRY(tx_errors);
NETSTAT_ENTRY(rx_dropped);
NETSTAT_ENTRY(tx_dropped);
NETSTAT_ENTRY(multicast);
NETSTAT_ENTRY(collisions);
NETSTAT_ENTRY(rx_length_errors);
NETSTAT_ENTRY(rx_over_errors);
NETSTAT_ENTRY(rx_crc_errors);
NETSTAT_ENTRY(rx_frame_errors);
NETSTAT_ENTRY(rx_fifo_errors);
NETSTAT_ENTRY(rx_missed_errors);
NETSTAT_ENTRY(tx_aborted_errors);
NETSTAT_ENTRY(tx_carrier_errors);
NETSTAT_ENTRY(tx_fifo_errors);
NETSTAT_ENTRY(tx_heartbeat_errors);
NETSTAT_ENTRY(tx_window_errors);
NETSTAT_ENTRY(rx_compressed);
NETSTAT_ENTRY(tx_compressed);
NETSTAT_ENTRY(rx_nohandler);

static struct attribute *netstat_attrs[] __ro_after_init = {
	&dev_attr_rx_packets.attr,
	&dev_attr_tx_packets.attr,
	&dev_attr_rx_bytes.attr,
	&dev_attr_tx_bytes.attr,
	&dev_attr_rx_errors.attr,
	&dev_attr_tx_errors.attr,
	&dev_attr_rx_dropped.attr,
	&dev_attr_tx_dropped.attr,
	&dev_attr_multicast.attr,
	&dev_attr_collisions.attr,
	&dev_attr_rx_length_errors.attr,
	&dev_attr_rx_over_errors.attr,
	&dev_attr_rx_crc_errors.attr,
	&dev_attr_rx_frame_errors.attr,
	&dev_attr_rx_fifo_errors.attr,
	&dev_attr_rx_missed_errors.attr,
	&dev_attr_tx_aborted_errors.attr,
	&dev_attr_tx_carrier_errors.attr,
	&dev_attr_tx_fifo_errors.attr,
	&dev_attr_tx_heartbeat_errors.attr,
	&dev_attr_tx_window_errors.attr,
	&dev_attr_rx_compressed.attr,
	&dev_attr_tx_compressed.attr,
	&dev_attr_rx_nohandler.attr,
	NULL
};

static const struct attribute_group netstat_group = {
	.name = "statistics",
	.attrs = netstat_attrs,
};
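
/*
 * The group appears as a subdirectory with one file per counter, e.g.
 * (interface name illustrative):
 *
 *	$ cat /sys/class/net/eth0/statistics/rx_bytes
 */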

#if IS_ENABLED(CONFIG_WIRELESS_EXT) || IS_ENABLED(CONFIG_CFG80211)
static struct attribute *wireless_attrs[] = {
	NULL
};

static const struct attribute_group wireless_group = {
	.name = "wireless",
	.attrs = wireless_attrs,
};
#endif

#else /* CONFIG_SYSFS */
#define net_class_groups	NULL
#endif /* CONFIG_SYSFS */

#ifdef CONFIG_SYSFS
#define to_rx_queue_attr(_attr) \
	container_of(_attr, struct rx_queue_attribute, attr)

#define to_rx_queue(obj) container_of(obj, struct netdev_rx_queue, kobj)

static ssize_t rx_queue_attr_show(struct kobject *kobj, struct attribute *attr,
				  char *buf)
{
	const struct rx_queue_attribute *attribute = to_rx_queue_attr(attr);
	struct netdev_rx_queue *queue = to_rx_queue(kobj);

	if (!attribute->show)
		return -EIO;

	return attribute->show(queue, buf);
}

static ssize_t rx_queue_attr_store(struct kobject *kobj, struct attribute *attr,
				   const char *buf, size_t count)
{
	const struct rx_queue_attribute *attribute = to_rx_queue_attr(attr);
	struct netdev_rx_queue *queue = to_rx_queue(kobj);

	if (!attribute->store)
		return -EIO;

	return attribute->store(queue, buf, count);
}

static const struct sysfs_ops rx_queue_sysfs_ops = {
	.show = rx_queue_attr_show,
	.store = rx_queue_attr_store,
};

#ifdef CONFIG_RPS
static ssize_t show_rps_map(struct netdev_rx_queue *queue, char *buf)
{
	struct rps_map *map;
	cpumask_var_t mask;
	int i, len;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	rcu_read_lock();
	map = rcu_dereference(queue->rps_map);
	if (map)
		for (i = 0; i < map->len; i++)
			cpumask_set_cpu(map->cpus[i], mask);

	len = snprintf(buf, PAGE_SIZE, "%*pb\n", cpumask_pr_args(mask));
	rcu_read_unlock();
	free_cpumask_var(mask);

	return len < PAGE_SIZE ? len : -EINVAL;
}

static ssize_t store_rps_map(struct netdev_rx_queue *queue,
			     const char *buf, size_t len)
{
	struct rps_map *old_map, *map;
	cpumask_var_t mask;
	int err, cpu, i, hk_flags;
	static DEFINE_MUTEX(rps_map_mutex);

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
	if (err) {
		free_cpumask_var(mask);
		return err;
	}

	if (!cpumask_empty(mask)) {
		hk_flags = HK_FLAG_DOMAIN | HK_FLAG_WQ;
		cpumask_and(mask, mask, housekeeping_cpumask(hk_flags));
		if (cpumask_empty(mask)) {
			free_cpumask_var(mask);
			return -EINVAL;
		}
	}

	map = kzalloc(max_t(unsigned int,
			    RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES),
		      GFP_KERNEL);
	if (!map) {
		free_cpumask_var(mask);
		return -ENOMEM;
	}

	i = 0;
	for_each_cpu_and(cpu, mask, cpu_online_mask)
		map->cpus[i++] = cpu;

	if (i) {
		map->len = i;
	} else {
		kfree(map);
		map = NULL;
	}

	mutex_lock(&rps_map_mutex);
	old_map = rcu_dereference_protected(queue->rps_map,
					    mutex_is_locked(&rps_map_mutex));
	rcu_assign_pointer(queue->rps_map, map);

	if (map)
		static_branch_inc(&rps_needed);
	if (old_map)
		static_branch_dec(&rps_needed);

	mutex_unlock(&rps_map_mutex);

	if (old_map)
		kfree_rcu(old_map, rcu);

	free_cpumask_var(mask);
	return len;
}
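
/*
 * Example (interface and queue illustrative): steer receive processing for
 * queue 0 of eth0 onto CPUs 0-3 by writing a hex CPU bitmask:
 *
 *	# echo f > /sys/class/net/eth0/queues/rx-0/rps_cpus
 */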

static ssize_t show_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
					   char *buf)
{
	struct rps_dev_flow_table *flow_table;
	unsigned long val = 0;

	rcu_read_lock();
	flow_table = rcu_dereference(queue->rps_flow_table);
	if (flow_table)
		val = (unsigned long)flow_table->mask + 1;
	rcu_read_unlock();

	return sprintf(buf, "%lu\n", val);
}

static void rps_dev_flow_table_release(struct rcu_head *rcu)
{
	struct rps_dev_flow_table *table = container_of(rcu,
	    struct rps_dev_flow_table, rcu);
	vfree(table);
}

static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
					    const char *buf, size_t len)
{
	unsigned long mask, count;
	struct rps_dev_flow_table *table, *old_table;
	static DEFINE_SPINLOCK(rps_dev_flow_lock);
	int rc;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	rc = kstrtoul(buf, 0, &count);
	if (rc < 0)
		return rc;

	if (count) {
		mask = count - 1;
		/* mask = roundup_pow_of_two(count) - 1;
		 * without overflows...
		 */
		while ((mask | (mask >> 1)) != mask)
			mask |= (mask >> 1);
		/* On 64 bit arches, must check mask fits in table->mask (u32),
		 * and on 32bit arches, must check
		 * RPS_DEV_FLOW_TABLE_SIZE(mask + 1) doesn't overflow.
		 */
#if BITS_PER_LONG > 32
		if (mask > (unsigned long)(u32)mask)
			return -EINVAL;
#else
		if (mask > (ULONG_MAX - RPS_DEV_FLOW_TABLE_SIZE(1))
				/ sizeof(struct rps_dev_flow)) {
			/* Enforce a limit to prevent overflow */
			return -EINVAL;
		}
#endif
		table = vmalloc(RPS_DEV_FLOW_TABLE_SIZE(mask + 1));
		if (!table)
			return -ENOMEM;

		table->mask = mask;
		for (count = 0; count <= mask; count++)
			table->flows[count].cpu = RPS_NO_CPU;
	} else {
		table = NULL;
	}

	spin_lock(&rps_dev_flow_lock);
	old_table = rcu_dereference_protected(queue->rps_flow_table,
					      lockdep_is_held(&rps_dev_flow_lock));
	rcu_assign_pointer(queue->rps_flow_table, table);
	spin_unlock(&rps_dev_flow_lock);

	if (old_table)
		call_rcu(&old_table->rcu, rps_dev_flow_table_release);

	return len;
}
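
/*
 * Example (path illustrative): size this queue's RFS flow table; the code
 * above rounds the requested count up to a power of two:
 *
 *	# echo 4096 > /sys/class/net/eth0/queues/rx-0/rps_flow_cnt
 */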
| 887 | |
stephen hemminger | 667e427 | 2017-08-18 13:46:27 -0700 | [diff] [blame] | 888 | static struct rx_queue_attribute rps_cpus_attribute __ro_after_init |
Joe Perches | d644406 | 2018-03-23 15:54:38 -0700 | [diff] [blame] | 889 | = __ATTR(rps_cpus, 0644, show_rps_map, store_rps_map); |
Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 890 | |
stephen hemminger | 667e427 | 2017-08-18 13:46:27 -0700 | [diff] [blame] | 891 | static struct rx_queue_attribute rps_dev_flow_table_cnt_attribute __ro_after_init |
Joe Perches | d644406 | 2018-03-23 15:54:38 -0700 | [diff] [blame] | 892 | = __ATTR(rps_flow_cnt, 0644, |
stephen hemminger | 667e427 | 2017-08-18 13:46:27 -0700 | [diff] [blame] | 893 | show_rps_dev_flow_table_cnt, store_rps_dev_flow_table_cnt); |
Michael Dalton | a953be5 | 2014-01-16 22:23:28 -0800 | [diff] [blame] | 894 | #endif /* CONFIG_RPS */ |
Tom Herbert | fec5e65 | 2010-04-16 16:01:27 -0700 | [diff] [blame] | 895 | |
stephen hemminger | 667e427 | 2017-08-18 13:46:27 -0700 | [diff] [blame] | 896 | static struct attribute *rx_queue_default_attrs[] __ro_after_init = { |
Michael Dalton | a953be5 | 2014-01-16 22:23:28 -0800 | [diff] [blame] | 897 | #ifdef CONFIG_RPS |
Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 898 | &rps_cpus_attribute.attr, |
Tom Herbert | fec5e65 | 2010-04-16 16:01:27 -0700 | [diff] [blame] | 899 | &rps_dev_flow_table_cnt_attribute.attr, |
Michael Dalton | a953be5 | 2014-01-16 22:23:28 -0800 | [diff] [blame] | 900 | #endif |
Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 901 | NULL |
| 902 | }; |
Kimberly Brown | be0d692 | 2019-04-01 22:51:35 -0400 | [diff] [blame] | 903 | ATTRIBUTE_GROUPS(rx_queue_default); |
Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 904 | |
| 905 | static void rx_queue_release(struct kobject *kobj) |
| 906 | { |
| 907 | struct netdev_rx_queue *queue = to_rx_queue(kobj); |
Michael Dalton | a953be5 | 2014-01-16 22:23:28 -0800 | [diff] [blame] | 908 | #ifdef CONFIG_RPS |
Eric Dumazet | 6e3f7fa | 2010-10-25 03:02:02 +0000 | [diff] [blame] | 909 | struct rps_map *map; |
| 910 | struct rps_dev_flow_table *flow_table; |
Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 911 | |
Eric Dumazet | 33d480c | 2011-08-11 19:30:52 +0000 | [diff] [blame] | 912 | map = rcu_dereference_protected(queue->rps_map, 1); |
John Fastabend | 9ea1948 | 2010-11-16 06:31:39 +0000 | [diff] [blame] | 913 | if (map) { |
| 914 | RCU_INIT_POINTER(queue->rps_map, NULL); |
Lai Jiangshan | f6f8023 | 2011-03-18 12:01:31 +0800 | [diff] [blame] | 915 | kfree_rcu(map, rcu); |
John Fastabend | 9ea1948 | 2010-11-16 06:31:39 +0000 | [diff] [blame] | 916 | } |
Eric Dumazet | 6e3f7fa | 2010-10-25 03:02:02 +0000 | [diff] [blame] | 917 | |
Eric Dumazet | 33d480c | 2011-08-11 19:30:52 +0000 | [diff] [blame] | 918 | flow_table = rcu_dereference_protected(queue->rps_flow_table, 1); |
John Fastabend | 9ea1948 | 2010-11-16 06:31:39 +0000 | [diff] [blame] | 919 | if (flow_table) { |
| 920 | RCU_INIT_POINTER(queue->rps_flow_table, NULL); |
Eric Dumazet | 6e3f7fa | 2010-10-25 03:02:02 +0000 | [diff] [blame] | 921 | call_rcu(&flow_table->rcu, rps_dev_flow_table_release); |
John Fastabend | 9ea1948 | 2010-11-16 06:31:39 +0000 | [diff] [blame] | 922 | } |
Michael Dalton | a953be5 | 2014-01-16 22:23:28 -0800 | [diff] [blame] | 923 | #endif |
Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 924 | |
John Fastabend | 9ea1948 | 2010-11-16 06:31:39 +0000 | [diff] [blame] | 925 | memset(kobj, 0, sizeof(*kobj)); |
Tom Herbert | fe82224 | 2010-11-09 10:47:38 +0000 | [diff] [blame] | 926 | dev_put(queue->dev); |
Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 927 | } |
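/* Editor's note: the "1" passed as the condition to
 * rcu_dereference_protected() above unconditionally asserts exclusive
 * access -- once the kobject release runs there can be no concurrent
 * readers, so no lock needs to be held or checked.
 */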
| 928 | |
Weilong Chen | 82ef3d5 | 2014-01-16 17:24:31 +0800 | [diff] [blame] | 929 | static const void *rx_queue_namespace(struct kobject *kobj) |
| 930 | { |
| 931 | struct netdev_rx_queue *queue = to_rx_queue(kobj); |
| 932 | struct device *dev = &queue->dev->dev; |
| 933 | const void *ns = NULL; |
| 934 | |
| 935 | if (dev->class && dev->class->ns_type) |
| 936 | ns = dev->class->namespace(dev); |
| 937 | |
| 938 | return ns; |
| 939 | } |
| 940 | |
Dmitry Torokhov | b0e37c0 | 2018-07-20 21:56:52 +0000 | [diff] [blame] | 941 | static void rx_queue_get_ownership(struct kobject *kobj, |
| 942 | kuid_t *uid, kgid_t *gid) |
| 943 | { |
| 944 | const struct net *net = rx_queue_namespace(kobj); |
| 945 | |
| 946 | net_ns_get_ownership(net, uid, gid); |
| 947 | } |
| 948 | |
stephen hemminger | 667e427 | 2017-08-18 13:46:27 -0700 | [diff] [blame] | 949 | static struct kobj_type rx_queue_ktype __ro_after_init = { |
Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 950 | .sysfs_ops = &rx_queue_sysfs_ops, |
| 951 | .release = rx_queue_release, |
Kimberly Brown | be0d692 | 2019-04-01 22:51:35 -0400 | [diff] [blame] | 952 | .default_groups = rx_queue_default_groups, |
Dmitry Torokhov | b0e37c0 | 2018-07-20 21:56:52 +0000 | [diff] [blame] | 953 | .namespace = rx_queue_namespace, |
| 954 | .get_ownership = rx_queue_get_ownership, |
Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 955 | }; |
| 956 | |
WANG Cong | 6b53daf | 2014-07-23 16:09:10 -0700 | [diff] [blame] | 957 | static int rx_queue_add_kobject(struct net_device *dev, int index) |
Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 958 | { |
WANG Cong | 6b53daf | 2014-07-23 16:09:10 -0700 | [diff] [blame] | 959 | struct netdev_rx_queue *queue = dev->_rx + index; |
Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 960 | struct kobject *kobj = &queue->kobj; |
| 961 | int error = 0; |
| 962 | |
Jouni Hogander | ddd9b5e | 2019-12-17 13:46:34 +0200 | [diff] [blame] | 963 | /* A later kobject_put() will trigger the rx_queue_release() call, |
| 964 | * which decreases the dev refcount: take that reference here. |
| 965 | */ |
| 966 | dev_hold(queue->dev); |
| 967 | |
WANG Cong | 6b53daf | 2014-07-23 16:09:10 -0700 | [diff] [blame] | 968 | kobj->kset = dev->queues_kset; |
Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 969 | error = kobject_init_and_add(kobj, &rx_queue_ktype, NULL, |
stephen hemminger | 6648c65 | 2017-08-18 13:46:28 -0700 | [diff] [blame] | 970 | "rx-%u", index); |
Michael Dalton | a953be5 | 2014-01-16 22:23:28 -0800 | [diff] [blame] | 971 | if (error) |
Jouni Hogander | b8eb718 | 2019-11-20 09:08:16 +0200 | [diff] [blame] | 972 | goto err; |
Michael Dalton | a953be5 | 2014-01-16 22:23:28 -0800 | [diff] [blame] | 973 | |
WANG Cong | 6b53daf | 2014-07-23 16:09:10 -0700 | [diff] [blame] | 974 | if (dev->sysfs_rx_queue_group) { |
| 975 | error = sysfs_create_group(kobj, dev->sysfs_rx_queue_group); |
Jouni Hogander | b8eb718 | 2019-11-20 09:08:16 +0200 | [diff] [blame] | 976 | if (error) |
| 977 | goto err; |
Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 978 | } |
| 979 | |
| 980 | kobject_uevent(kobj, KOBJ_ADD); |
| 981 | |
| 982 | return error; |
Jouni Hogander | b8eb718 | 2019-11-20 09:08:16 +0200 | [diff] [blame] | 983 | |
| 984 | err: |
| 985 | kobject_put(kobj); |
| 986 | return error; |
Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 987 | } |
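/* Editor's note: once kobject_init_and_add() has been called, the error
 * path must unwind with kobject_put() rather than a plain free, so that
 * rx_queue_release() runs and balances the dev_hold() taken above.
 */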
Christian Brauner | d755407 | 2020-02-27 04:37:18 +0100 | [diff] [blame] | 988 | |
| 989 | static int rx_queue_change_owner(struct net_device *dev, int index, kuid_t kuid, |
| 990 | kgid_t kgid) |
| 991 | { |
| 992 | struct netdev_rx_queue *queue = dev->_rx + index; |
| 993 | struct kobject *kobj = &queue->kobj; |
| 994 | int error; |
| 995 | |
| 996 | error = sysfs_change_owner(kobj, kuid, kgid); |
| 997 | if (error) |
| 998 | return error; |
| 999 | |
| 1000 | if (dev->sysfs_rx_queue_group) |
| 1001 | error = sysfs_group_change_owner( |
| 1002 | kobj, dev->sysfs_rx_queue_group, kuid, kgid); |
| 1003 | |
| 1004 | return error; |
| 1005 | } |
Paul Bolle | 80dd6ea | 2014-02-09 14:07:11 +0100 | [diff] [blame] | 1006 | #endif /* CONFIG_SYSFS */ |
Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 1007 | |
Ben Hutchings | 62fe0b4 | 2010-09-27 08:24:33 +0000 | [diff] [blame] | 1008 | int |
WANG Cong | 6b53daf | 2014-07-23 16:09:10 -0700 | [diff] [blame] | 1009 | net_rx_queue_update_kobjects(struct net_device *dev, int old_num, int new_num) |
Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 1010 | { |
Michael Dalton | a953be5 | 2014-01-16 22:23:28 -0800 | [diff] [blame] | 1011 | #ifdef CONFIG_SYSFS |
Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 1012 | int i; |
| 1013 | int error = 0; |
| 1014 | |
Michael Dalton | a953be5 | 2014-01-16 22:23:28 -0800 | [diff] [blame] | 1015 | #ifndef CONFIG_RPS |
WANG Cong | 6b53daf | 2014-07-23 16:09:10 -0700 | [diff] [blame] | 1016 | if (!dev->sysfs_rx_queue_group) |
Michael Dalton | a953be5 | 2014-01-16 22:23:28 -0800 | [diff] [blame] | 1017 | return 0; |
| 1018 | #endif |
Ben Hutchings | 62fe0b4 | 2010-09-27 08:24:33 +0000 | [diff] [blame] | 1019 | for (i = old_num; i < new_num; i++) { |
WANG Cong | 6b53daf | 2014-07-23 16:09:10 -0700 | [diff] [blame] | 1020 | error = rx_queue_add_kobject(dev, i); |
Ben Hutchings | 62fe0b4 | 2010-09-27 08:24:33 +0000 | [diff] [blame] | 1021 | if (error) { |
| 1022 | new_num = old_num; |
Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 1023 | break; |
Ben Hutchings | 62fe0b4 | 2010-09-27 08:24:33 +0000 | [diff] [blame] | 1024 | } |
Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 1025 | } |
| 1026 | |
Michael Dalton | a953be5 | 2014-01-16 22:23:28 -0800 | [diff] [blame] | 1027 | while (--i >= new_num) { |
Andrey Vagin | 002d8a1 | 2016-10-24 19:09:53 -0700 | [diff] [blame] | 1028 | struct kobject *kobj = &dev->_rx[i].kobj; |
| 1029 | |
Christian Brauner | 8b8f3e6 | 2020-08-19 14:06:36 +0200 | [diff] [blame] | 1030 | if (!refcount_read(&dev_net(dev)->ns.count)) |
Andrey Vagin | 002d8a1 | 2016-10-24 19:09:53 -0700 | [diff] [blame] | 1031 | kobj->uevent_suppress = 1; |
WANG Cong | 6b53daf | 2014-07-23 16:09:10 -0700 | [diff] [blame] | 1032 | if (dev->sysfs_rx_queue_group) |
Andrey Vagin | 002d8a1 | 2016-10-24 19:09:53 -0700 | [diff] [blame] | 1033 | sysfs_remove_group(kobj, dev->sysfs_rx_queue_group); |
| 1034 | kobject_put(kobj); |
Michael Dalton | a953be5 | 2014-01-16 22:23:28 -0800 | [diff] [blame] | 1035 | } |
Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 1036 | |
| 1037 | return error; |
Tom Herbert | bf26414 | 2010-11-26 08:36:09 +0000 | [diff] [blame] | 1038 | #else |
| 1039 | return 0; |
| 1040 | #endif |
Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 1041 | } |
| 1042 | |
Christian Brauner | d755407 | 2020-02-27 04:37:18 +0100 | [diff] [blame] | 1043 | static int net_rx_queue_change_owner(struct net_device *dev, int num, |
| 1044 | kuid_t kuid, kgid_t kgid) |
| 1045 | { |
| 1046 | #ifdef CONFIG_SYSFS |
| 1047 | int error = 0; |
| 1048 | int i; |
| 1049 | |
| 1050 | #ifndef CONFIG_RPS |
| 1051 | if (!dev->sysfs_rx_queue_group) |
| 1052 | return 0; |
| 1053 | #endif |
| 1054 | for (i = 0; i < num; i++) { |
| 1055 | error = rx_queue_change_owner(dev, i, kuid, kgid); |
| 1056 | if (error) |
| 1057 | break; |
| 1058 | } |
| 1059 | |
| 1060 | return error; |
| 1061 | #else |
| 1062 | return 0; |
| 1063 | #endif |
| 1064 | } |
| 1065 | |
david decotigny | ccf5ff6 | 2011-11-16 12:15:10 +0000 | [diff] [blame] | 1066 | #ifdef CONFIG_SYSFS |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1067 | /* |
| 1068 | * netdev_queue sysfs structures and functions. |
| 1069 | */ |
| 1070 | struct netdev_queue_attribute { |
| 1071 | struct attribute attr; |
stephen hemminger | 718ad68 | 2017-08-18 13:46:24 -0700 | [diff] [blame] | 1072 | ssize_t (*show)(struct netdev_queue *queue, char *buf); |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1073 | ssize_t (*store)(struct netdev_queue *queue, |
stephen hemminger | 718ad68 | 2017-08-18 13:46:24 -0700 | [diff] [blame] | 1074 | const char *buf, size_t len); |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1075 | }; |
stephen hemminger | 6648c65 | 2017-08-18 13:46:28 -0700 | [diff] [blame] | 1076 | #define to_netdev_queue_attr(_attr) \ |
| 1077 | container_of(_attr, struct netdev_queue_attribute, attr) |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1078 | |
| 1079 | #define to_netdev_queue(obj) container_of(obj, struct netdev_queue, kobj) |
| 1080 | |
| 1081 | static ssize_t netdev_queue_attr_show(struct kobject *kobj, |
| 1082 | struct attribute *attr, char *buf) |
Ben Hutchings | 62fe0b4 | 2010-09-27 08:24:33 +0000 | [diff] [blame] | 1083 | { |
stephen hemminger | 667e427 | 2017-08-18 13:46:27 -0700 | [diff] [blame] | 1084 | const struct netdev_queue_attribute *attribute |
| 1085 | = to_netdev_queue_attr(attr); |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1086 | struct netdev_queue *queue = to_netdev_queue(kobj); |
| 1087 | |
| 1088 | if (!attribute->show) |
| 1089 | return -EIO; |
| 1090 | |
stephen hemminger | 718ad68 | 2017-08-18 13:46:24 -0700 | [diff] [blame] | 1091 | return attribute->show(queue, buf); |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1092 | } |
| 1093 | |
| 1094 | static ssize_t netdev_queue_attr_store(struct kobject *kobj, |
| 1095 | struct attribute *attr, |
| 1096 | const char *buf, size_t count) |
| 1097 | { |
stephen hemminger | 667e427 | 2017-08-18 13:46:27 -0700 | [diff] [blame] | 1098 | const struct netdev_queue_attribute *attribute |
| 1099 | = to_netdev_queue_attr(attr); |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1100 | struct netdev_queue *queue = to_netdev_queue(kobj); |
| 1101 | |
| 1102 | if (!attribute->store) |
| 1103 | return -EIO; |
| 1104 | |
stephen hemminger | 718ad68 | 2017-08-18 13:46:24 -0700 | [diff] [blame] | 1105 | return attribute->store(queue, buf, count); |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1106 | } |
| 1107 | |
| 1108 | static const struct sysfs_ops netdev_queue_sysfs_ops = { |
| 1109 | .show = netdev_queue_attr_show, |
| 1110 | .store = netdev_queue_attr_store, |
| 1111 | }; |
| 1112 | |
stephen hemminger | 2b9c758 | 2017-08-18 13:46:26 -0700 | [diff] [blame] | 1113 | static ssize_t tx_timeout_show(struct netdev_queue *queue, char *buf) |
david decotigny | ccf5ff6 | 2011-11-16 12:15:10 +0000 | [diff] [blame] | 1114 | { |
| 1115 | unsigned long trans_timeout; |
| 1116 | |
| 1117 | spin_lock_irq(&queue->_xmit_lock); |
| 1118 | trans_timeout = queue->trans_timeout; |
| 1119 | spin_unlock_irq(&queue->_xmit_lock); |
| 1120 | |
Xiongfeng Wang | 9bb5fbe | 2020-07-21 15:02:57 +0800 | [diff] [blame] | 1121 | return sprintf(buf, fmt_ulong, trans_timeout); |
david decotigny | ccf5ff6 | 2011-11-16 12:15:10 +0000 | [diff] [blame] | 1122 | } |
| 1123 | |
Thadeu Lima de Souza Cascardo | c4047f5 | 2015-09-15 18:28:00 -0300 | [diff] [blame] | 1124 | static unsigned int get_netdev_queue_index(struct netdev_queue *queue) |
John Fastabend | 822b3b2 | 2015-03-18 14:57:33 +0200 | [diff] [blame] | 1125 | { |
| 1126 | struct net_device *dev = queue->dev; |
Thadeu Lima de Souza Cascardo | c4047f5 | 2015-09-15 18:28:00 -0300 | [diff] [blame] | 1127 | unsigned int i; |
John Fastabend | 822b3b2 | 2015-03-18 14:57:33 +0200 | [diff] [blame] | 1128 | |
Thadeu Lima de Souza Cascardo | c4047f5 | 2015-09-15 18:28:00 -0300 | [diff] [blame] | 1129 | i = queue - dev->_tx; |
John Fastabend | 822b3b2 | 2015-03-18 14:57:33 +0200 | [diff] [blame] | 1130 | BUG_ON(i >= dev->num_tx_queues); |
| 1131 | |
| 1132 | return i; |
| 1133 | } |
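/* Editor's note: queue - dev->_tx is plain pointer arithmetic on the
 * _tx array, so a queue pointing at dev->_tx[3] yields index 3; the
 * BUG_ON() catches a queue that does not belong to this device.
 */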
| 1134 | |
stephen hemminger | 2b9c758 | 2017-08-18 13:46:26 -0700 | [diff] [blame] | 1135 | static ssize_t traffic_class_show(struct netdev_queue *queue, |
Alexander Duyck | 8d059b0 | 2016-10-28 11:43:49 -0400 | [diff] [blame] | 1136 | char *buf) |
| 1137 | { |
| 1138 | struct net_device *dev = queue->dev; |
Alexander Duyck | d7be977 | 2018-07-09 12:19:32 -0400 | [diff] [blame] | 1139 | int index; |
| 1140 | int tc; |
Alexander Duyck | 8d059b0 | 2016-10-28 11:43:49 -0400 | [diff] [blame] | 1141 | |
Alexander Duyck | d7be977 | 2018-07-09 12:19:32 -0400 | [diff] [blame] | 1142 | if (!netif_is_multiqueue(dev)) |
| 1143 | return -ENOENT; |
| 1144 | |
| 1145 | index = get_netdev_queue_index(queue); |
Alexander Duyck | ffcfe25 | 2018-07-09 12:19:38 -0400 | [diff] [blame] | 1146 | |
| 1147 | /* If queue belongs to subordinate dev use its TC mapping */ |
| 1148 | dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev; |
| 1149 | |
Alexander Duyck | d7be977 | 2018-07-09 12:19:32 -0400 | [diff] [blame] | 1150 | tc = netdev_txq_to_tc(dev, index); |
Alexander Duyck | 8d059b0 | 2016-10-28 11:43:49 -0400 | [diff] [blame] | 1151 | if (tc < 0) |
| 1152 | return -EINVAL; |
| 1153 | |
Alexander Duyck | ffcfe25 | 2018-07-09 12:19:38 -0400 | [diff] [blame] | 1154 | /* We can report the traffic class one of two ways: |
| 1155 | * Subordinate device traffic classes are reported with the traffic |
| 1156 | * class first, and then the subordinate class, so for example TC0 on |
| 1157 | * subordinate device 2 will be reported as "0-2". If the queue |
| 1158 | * belongs to the root device it will be reported with just the |
| 1159 | * traffic class, so just "0" for TC 0, for example. |
| 1160 | */ |
Ye Bin | 000fe26 | 2020-09-30 09:08:38 +0800 | [diff] [blame] | 1161 | return dev->num_tc < 0 ? sprintf(buf, "%d%d\n", tc, dev->num_tc) : |
| 1162 | sprintf(buf, "%d\n", tc); |
Alexander Duyck | 8d059b0 | 2016-10-28 11:43:49 -0400 | [diff] [blame] | 1163 | } |
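/* Usage sketch (illustrative, example device): reading the attribute,
 *
 *   cat /sys/class/net/eth0/queues/tx-0/traffic_class
 *
 * prints e.g. "0" on the root device. For a queue assigned to a
 * subordinate device, dev->num_tc is negative, so "%d%d" with tc = 0
 * and num_tc = -2 prints "0-2", matching the comment above.
 */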
| 1164 | |
| 1165 | #ifdef CONFIG_XPS |
stephen hemminger | 2b9c758 | 2017-08-18 13:46:26 -0700 | [diff] [blame] | 1166 | static ssize_t tx_maxrate_show(struct netdev_queue *queue, |
John Fastabend | 822b3b2 | 2015-03-18 14:57:33 +0200 | [diff] [blame] | 1167 | char *buf) |
| 1168 | { |
| 1169 | return sprintf(buf, "%lu\n", queue->tx_maxrate); |
| 1170 | } |
| 1171 | |
stephen hemminger | 2b9c758 | 2017-08-18 13:46:26 -0700 | [diff] [blame] | 1172 | static ssize_t tx_maxrate_store(struct netdev_queue *queue, |
| 1173 | const char *buf, size_t len) |
John Fastabend | 822b3b2 | 2015-03-18 14:57:33 +0200 | [diff] [blame] | 1174 | { |
| 1175 | struct net_device *dev = queue->dev; |
| 1176 | int err, index = get_netdev_queue_index(queue); |
| 1177 | u32 rate = 0; |
| 1178 | |
Tyler Hicks | 3033fce | 2018-07-20 21:56:51 +0000 | [diff] [blame] | 1179 | if (!capable(CAP_NET_ADMIN)) |
| 1180 | return -EPERM; |
| 1181 | |
John Fastabend | 822b3b2 | 2015-03-18 14:57:33 +0200 | [diff] [blame] | 1182 | err = kstrtou32(buf, 10, &rate); |
| 1183 | if (err < 0) |
| 1184 | return err; |
| 1185 | |
| 1186 | if (!rtnl_trylock()) |
| 1187 | return restart_syscall(); |
| 1188 | |
| 1189 | err = -EOPNOTSUPP; |
| 1190 | if (dev->netdev_ops->ndo_set_tx_maxrate) |
| 1191 | err = dev->netdev_ops->ndo_set_tx_maxrate(dev, index, rate); |
| 1192 | |
| 1193 | rtnl_unlock(); |
| 1194 | if (!err) { |
| 1195 | queue->tx_maxrate = rate; |
| 1196 | return len; |
| 1197 | } |
| 1198 | return err; |
| 1199 | } |
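/* Usage sketch (illustrative; the sysfs ABI documents the unit as Mbps):
 *
 *   echo 1000 > /sys/class/net/eth0/queues/tx-0/tx_maxrate  # ~1 Gbps cap
 *   echo 0 > /sys/class/net/eth0/queues/tx-0/tx_maxrate     # remove cap
 *
 * The write fails with -EOPNOTSUPP unless the driver implements
 * ndo_set_tx_maxrate.
 */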
| 1200 | |
stephen hemminger | 2b9c758 | 2017-08-18 13:46:26 -0700 | [diff] [blame] | 1201 | static struct netdev_queue_attribute queue_tx_maxrate __ro_after_init |
| 1202 | = __ATTR_RW(tx_maxrate); |
John Fastabend | 822b3b2 | 2015-03-18 14:57:33 +0200 | [diff] [blame] | 1203 | #endif |
| 1204 | |
stephen hemminger | 2b9c758 | 2017-08-18 13:46:26 -0700 | [diff] [blame] | 1205 | static struct netdev_queue_attribute queue_trans_timeout __ro_after_init |
| 1206 | = __ATTR_RO(tx_timeout); |
david decotigny | ccf5ff6 | 2011-11-16 12:15:10 +0000 | [diff] [blame] | 1207 | |
stephen hemminger | 2b9c758 | 2017-08-18 13:46:26 -0700 | [diff] [blame] | 1208 | static struct netdev_queue_attribute queue_traffic_class __ro_after_init |
| 1209 | = __ATTR_RO(traffic_class); |
Alexander Duyck | 8d059b0 | 2016-10-28 11:43:49 -0400 | [diff] [blame] | 1210 | |
Tom Herbert | 114cf58 | 2011-11-28 16:33:09 +0000 | [diff] [blame] | 1211 | #ifdef CONFIG_BQL |
| 1212 | /* |
| 1213 | * Byte queue limits sysfs structures and functions. |
| 1214 | */ |
| 1215 | static ssize_t bql_show(char *buf, unsigned int value) |
| 1216 | { |
| 1217 | return sprintf(buf, "%u\n", value); |
| 1218 | } |
| 1219 | |
| 1220 | static ssize_t bql_set(const char *buf, const size_t count, |
| 1221 | unsigned int *pvalue) |
| 1222 | { |
| 1223 | unsigned int value; |
| 1224 | int err; |
| 1225 | |
stephen hemminger | 6648c65 | 2017-08-18 13:46:28 -0700 | [diff] [blame] | 1226 | if (!strcmp(buf, "max") || !strcmp(buf, "max\n")) { |
Tom Herbert | 114cf58 | 2011-11-28 16:33:09 +0000 | [diff] [blame] | 1227 | value = DQL_MAX_LIMIT; |
stephen hemminger | 6648c65 | 2017-08-18 13:46:28 -0700 | [diff] [blame] | 1228 | } else { |
Tom Herbert | 114cf58 | 2011-11-28 16:33:09 +0000 | [diff] [blame] | 1229 | err = kstrtouint(buf, 10, &value); |
| 1230 | if (err < 0) |
| 1231 | return err; |
| 1232 | if (value > DQL_MAX_LIMIT) |
| 1233 | return -EINVAL; |
| 1234 | } |
| 1235 | |
| 1236 | *pvalue = value; |
| 1237 | |
| 1238 | return count; |
| 1239 | } |
| 1240 | |
| 1241 | static ssize_t bql_show_hold_time(struct netdev_queue *queue, |
Tom Herbert | 114cf58 | 2011-11-28 16:33:09 +0000 | [diff] [blame] | 1242 | char *buf) |
| 1243 | { |
| 1244 | struct dql *dql = &queue->dql; |
| 1245 | |
| 1246 | return sprintf(buf, "%u\n", jiffies_to_msecs(dql->slack_hold_time)); |
| 1247 | } |
| 1248 | |
| 1249 | static ssize_t bql_set_hold_time(struct netdev_queue *queue, |
Tom Herbert | 114cf58 | 2011-11-28 16:33:09 +0000 | [diff] [blame] | 1250 | const char *buf, size_t len) |
| 1251 | { |
| 1252 | struct dql *dql = &queue->dql; |
Eric Dumazet | 95c9617 | 2012-04-15 05:58:06 +0000 | [diff] [blame] | 1253 | unsigned int value; |
Tom Herbert | 114cf58 | 2011-11-28 16:33:09 +0000 | [diff] [blame] | 1254 | int err; |
| 1255 | |
| 1256 | err = kstrtouint(buf, 10, &value); |
| 1257 | if (err < 0) |
| 1258 | return err; |
| 1259 | |
| 1260 | dql->slack_hold_time = msecs_to_jiffies(value); |
| 1261 | |
| 1262 | return len; |
| 1263 | } |
| 1264 | |
stephen hemminger | 170c658 | 2017-08-18 13:46:25 -0700 | [diff] [blame] | 1265 | static struct netdev_queue_attribute bql_hold_time_attribute __ro_after_init |
Joe Perches | d644406 | 2018-03-23 15:54:38 -0700 | [diff] [blame] | 1266 | = __ATTR(hold_time, 0644, |
stephen hemminger | 170c658 | 2017-08-18 13:46:25 -0700 | [diff] [blame] | 1267 | bql_show_hold_time, bql_set_hold_time); |
Tom Herbert | 114cf58 | 2011-11-28 16:33:09 +0000 | [diff] [blame] | 1268 | |
| 1269 | static ssize_t bql_show_inflight(struct netdev_queue *queue, |
Tom Herbert | 114cf58 | 2011-11-28 16:33:09 +0000 | [diff] [blame] | 1270 | char *buf) |
| 1271 | { |
| 1272 | struct dql *dql = &queue->dql; |
| 1273 | |
| 1274 | return sprintf(buf, "%u\n", dql->num_queued - dql->num_completed); |
| 1275 | } |
| 1276 | |
stephen hemminger | 170c658 | 2017-08-18 13:46:25 -0700 | [diff] [blame] | 1277 | static struct netdev_queue_attribute bql_inflight_attribute __ro_after_init = |
Joe Perches | d644406 | 2018-03-23 15:54:38 -0700 | [diff] [blame] | 1278 | __ATTR(inflight, 0444, bql_show_inflight, NULL); |
Tom Herbert | 114cf58 | 2011-11-28 16:33:09 +0000 | [diff] [blame] | 1279 | |
| 1280 | #define BQL_ATTR(NAME, FIELD) \ |
| 1281 | static ssize_t bql_show_ ## NAME(struct netdev_queue *queue, \ |
Tom Herbert | 114cf58 | 2011-11-28 16:33:09 +0000 | [diff] [blame] | 1282 | char *buf) \ |
| 1283 | { \ |
| 1284 | return bql_show(buf, queue->dql.FIELD); \ |
| 1285 | } \ |
| 1286 | \ |
| 1287 | static ssize_t bql_set_ ## NAME(struct netdev_queue *queue, \ |
Tom Herbert | 114cf58 | 2011-11-28 16:33:09 +0000 | [diff] [blame] | 1288 | const char *buf, size_t len) \ |
| 1289 | { \ |
| 1290 | return bql_set(buf, len, &queue->dql.FIELD); \ |
| 1291 | } \ |
| 1292 | \ |
stephen hemminger | 170c658 | 2017-08-18 13:46:25 -0700 | [diff] [blame] | 1293 | static struct netdev_queue_attribute bql_ ## NAME ## _attribute __ro_after_init \ |
Joe Perches | d644406 | 2018-03-23 15:54:38 -0700 | [diff] [blame] | 1294 | = __ATTR(NAME, 0644, \ |
stephen hemminger | 170c658 | 2017-08-18 13:46:25 -0700 | [diff] [blame] | 1295 | bql_show_ ## NAME, bql_set_ ## NAME) |
Tom Herbert | 114cf58 | 2011-11-28 16:33:09 +0000 | [diff] [blame] | 1296 | |
stephen hemminger | 170c658 | 2017-08-18 13:46:25 -0700 | [diff] [blame] | 1297 | BQL_ATTR(limit, limit); |
| 1298 | BQL_ATTR(limit_max, max_limit); |
| 1299 | BQL_ATTR(limit_min, min_limit); |
Tom Herbert | 114cf58 | 2011-11-28 16:33:09 +0000 | [diff] [blame] | 1300 | |
stephen hemminger | 170c658 | 2017-08-18 13:46:25 -0700 | [diff] [blame] | 1301 | static struct attribute *dql_attrs[] __ro_after_init = { |
Tom Herbert | 114cf58 | 2011-11-28 16:33:09 +0000 | [diff] [blame] | 1302 | &bql_limit_attribute.attr, |
| 1303 | &bql_limit_max_attribute.attr, |
| 1304 | &bql_limit_min_attribute.attr, |
| 1305 | &bql_hold_time_attribute.attr, |
| 1306 | &bql_inflight_attribute.attr, |
| 1307 | NULL |
| 1308 | }; |
| 1309 | |
Arvind Yadav | 38ef00c | 2017-06-29 16:31:26 +0530 | [diff] [blame] | 1310 | static const struct attribute_group dql_group = { |
Tom Herbert | 114cf58 | 2011-11-28 16:33:09 +0000 | [diff] [blame] | 1311 | .name = "byte_queue_limits", |
| 1312 | .attrs = dql_attrs, |
| 1313 | }; |
| 1314 | #endif /* CONFIG_BQL */ |
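/* Editor's note (illustrative, example device): the group above shows up
 * as a byte_queue_limits/ subdirectory of each tx queue, e.g.
 *
 *   /sys/class/net/eth0/queues/tx-0/byte_queue_limits/limit_max
 *
 * The limit files accept a byte count or the literal "max", which
 * bql_set() maps to DQL_MAX_LIMIT.
 */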
| 1315 | |
david decotigny | ccf5ff6 | 2011-11-16 12:15:10 +0000 | [diff] [blame] | 1316 | #ifdef CONFIG_XPS |
stephen hemminger | 2b9c758 | 2017-08-18 13:46:26 -0700 | [diff] [blame] | 1317 | static ssize_t xps_cpus_show(struct netdev_queue *queue, |
| 1318 | char *buf) |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1319 | { |
Antoine Tenart | fb25038 | 2020-12-23 22:23:21 +0100 | [diff] [blame] | 1320 | int cpu, len, ret, num_tc = 1, tc = 0; |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1321 | struct net_device *dev = queue->dev; |
| 1322 | struct xps_dev_maps *dev_maps; |
| 1323 | cpumask_var_t mask; |
| 1324 | unsigned long index; |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1325 | |
Alexander Duyck | d7be977 | 2018-07-09 12:19:32 -0400 | [diff] [blame] | 1326 | if (!netif_is_multiqueue(dev)) |
| 1327 | return -ENOENT; |
| 1328 | |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1329 | index = get_netdev_queue_index(queue); |
| 1330 | |
Antoine Tenart | fb25038 | 2020-12-23 22:23:21 +0100 | [diff] [blame] | 1331 | if (!rtnl_trylock()) |
| 1332 | return restart_syscall(); |
| 1333 | |
Alexander Duyck | 184c449 | 2016-10-28 11:50:13 -0400 | [diff] [blame] | 1334 | if (dev->num_tc) { |
Alexander Duyck | ffcfe25 | 2018-07-09 12:19:38 -0400 | [diff] [blame] | 1335 | /* Do not allow XPS on subordinate device directly */ |
Alexander Duyck | 184c449 | 2016-10-28 11:50:13 -0400 | [diff] [blame] | 1336 | num_tc = dev->num_tc; |
Antoine Tenart | fb25038 | 2020-12-23 22:23:21 +0100 | [diff] [blame] | 1337 | if (num_tc < 0) { |
| 1338 | ret = -EINVAL; |
| 1339 | goto err_rtnl_unlock; |
| 1340 | } |
Alexander Duyck | ffcfe25 | 2018-07-09 12:19:38 -0400 | [diff] [blame] | 1341 | |
| 1342 | /* If queue belongs to subordinate dev use its map */ |
| 1343 | dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev; |
| 1344 | |
Alexander Duyck | 184c449 | 2016-10-28 11:50:13 -0400 | [diff] [blame] | 1345 | tc = netdev_txq_to_tc(dev, index); |
Antoine Tenart | fb25038 | 2020-12-23 22:23:21 +0100 | [diff] [blame] | 1346 | if (tc < 0) { |
| 1347 | ret = -EINVAL; |
| 1348 | goto err_rtnl_unlock; |
| 1349 | } |
Alexander Duyck | 184c449 | 2016-10-28 11:50:13 -0400 | [diff] [blame] | 1350 | } |
| 1351 | |
Antoine Tenart | fb25038 | 2020-12-23 22:23:21 +0100 | [diff] [blame] | 1352 | if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) { |
| 1353 | ret = -ENOMEM; |
| 1354 | goto err_rtnl_unlock; |
| 1355 | } |
Alexander Duyck | 664088f | 2018-05-31 15:59:46 -0400 | [diff] [blame] | 1356 | |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1357 | rcu_read_lock(); |
Amritha Nambiar | 80d1966 | 2018-06-29 21:26:41 -0700 | [diff] [blame] | 1358 | dev_maps = rcu_dereference(dev->xps_cpus_map); |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1359 | if (dev_maps) { |
Alexander Duyck | 184c449 | 2016-10-28 11:50:13 -0400 | [diff] [blame] | 1360 | for_each_possible_cpu(cpu) { |
| 1361 | int i, tci = cpu * num_tc + tc; |
| 1362 | struct xps_map *map; |
| 1363 | |
Amritha Nambiar | 80d1966 | 2018-06-29 21:26:41 -0700 | [diff] [blame] | 1364 | map = rcu_dereference(dev_maps->attr_map[tci]); |
Alexander Duyck | 184c449 | 2016-10-28 11:50:13 -0400 | [diff] [blame] | 1365 | if (!map) |
| 1366 | continue; |
| 1367 | |
| 1368 | for (i = map->len; i--;) { |
| 1369 | if (map->queues[i] == index) { |
| 1370 | cpumask_set_cpu(cpu, mask); |
| 1371 | break; |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1372 | } |
| 1373 | } |
| 1374 | } |
| 1375 | } |
| 1376 | rcu_read_unlock(); |
| 1377 | |
Antoine Tenart | fb25038 | 2020-12-23 22:23:21 +0100 | [diff] [blame] | 1378 | rtnl_unlock(); |
| 1379 | |
Tejun Heo | f090682 | 2015-02-13 14:37:42 -0800 | [diff] [blame] | 1380 | len = snprintf(buf, PAGE_SIZE, "%*pb\n", cpumask_pr_args(mask)); |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1381 | free_cpumask_var(mask); |
Tejun Heo | f090682 | 2015-02-13 14:37:42 -0800 | [diff] [blame] | 1382 | return len < PAGE_SIZE ? len : -EINVAL; |
Antoine Tenart | fb25038 | 2020-12-23 22:23:21 +0100 | [diff] [blame] | 1383 | |
| 1384 | err_rtnl_unlock: |
| 1385 | rtnl_unlock(); |
| 1386 | return ret; |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1387 | } |
| 1388 | |
stephen hemminger | 2b9c758 | 2017-08-18 13:46:26 -0700 | [diff] [blame] | 1389 | static ssize_t xps_cpus_store(struct netdev_queue *queue, |
| 1390 | const char *buf, size_t len) |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1391 | { |
| 1392 | struct net_device *dev = queue->dev; |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1393 | unsigned long index; |
Alexander Duyck | 537c00d | 2013-01-10 08:57:02 +0000 | [diff] [blame] | 1394 | cpumask_var_t mask; |
| 1395 | int err; |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1396 | |
Alexander Duyck | d7be977 | 2018-07-09 12:19:32 -0400 | [diff] [blame] | 1397 | if (!netif_is_multiqueue(dev)) |
| 1398 | return -ENOENT; |
| 1399 | |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1400 | if (!capable(CAP_NET_ADMIN)) |
| 1401 | return -EPERM; |
| 1402 | |
| 1403 | if (!alloc_cpumask_var(&mask, GFP_KERNEL)) |
| 1404 | return -ENOMEM; |
| 1405 | |
| 1406 | index = get_netdev_queue_index(queue); |
| 1407 | |
| 1408 | err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits); |
| 1409 | if (err) { |
| 1410 | free_cpumask_var(mask); |
| 1411 | return err; |
| 1412 | } |
| 1413 | |
Antoine Tenart | 1ad58225 | 2020-12-23 22:23:20 +0100 | [diff] [blame] | 1414 | if (!rtnl_trylock()) { |
| 1415 | free_cpumask_var(mask); |
| 1416 | return restart_syscall(); |
| 1417 | } |
| 1418 | |
Alexander Duyck | 537c00d | 2013-01-10 08:57:02 +0000 | [diff] [blame] | 1419 | err = netif_set_xps_queue(dev, mask, index); |
Antoine Tenart | 1ad58225 | 2020-12-23 22:23:20 +0100 | [diff] [blame] | 1420 | rtnl_unlock(); |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1421 | |
| 1422 | free_cpumask_var(mask); |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1423 | |
Alexander Duyck | 537c00d | 2013-01-10 08:57:02 +0000 | [diff] [blame] | 1424 | return err ? : len; |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1425 | } |
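/* Usage sketch (illustrative, example values): xps_cpus takes a hex CPU
 * bitmask, so
 *
 *   echo f > /sys/class/net/eth0/queues/tx-0/xps_cpus
 *
 * steers transmissions from CPUs 0-3 to tx queue 0; see
 * Documentation/networking/scaling.rst for the documented ABI.
 */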
| 1426 | |
stephen hemminger | 2b9c758 | 2017-08-18 13:46:26 -0700 | [diff] [blame] | 1427 | static struct netdev_queue_attribute xps_cpus_attribute __ro_after_init |
| 1428 | = __ATTR_RW(xps_cpus); |
Amritha Nambiar | 8af2c06 | 2018-06-29 21:27:07 -0700 | [diff] [blame] | 1429 | |
| 1430 | static ssize_t xps_rxqs_show(struct netdev_queue *queue, char *buf) |
| 1431 | { |
| 1432 | struct net_device *dev = queue->dev; |
| 1433 | struct xps_dev_maps *dev_maps; |
| 1434 | unsigned long *mask, index; |
| 1435 | int j, len, num_tc = 1, tc = 0; |
| 1436 | |
| 1437 | index = get_netdev_queue_index(queue); |
| 1438 | |
| 1439 | if (dev->num_tc) { |
| 1440 | num_tc = dev->num_tc; |
| 1441 | tc = netdev_txq_to_tc(dev, index); |
| 1442 | if (tc < 0) |
| 1443 | return -EINVAL; |
| 1444 | } |
Andy Shevchenko | 29ca1c5 | 2019-03-04 11:48:56 +0200 | [diff] [blame] | 1445 | mask = bitmap_zalloc(dev->num_rx_queues, GFP_KERNEL); |
Amritha Nambiar | 8af2c06 | 2018-06-29 21:27:07 -0700 | [diff] [blame] | 1446 | if (!mask) |
| 1447 | return -ENOMEM; |
| 1448 | |
| 1449 | rcu_read_lock(); |
| 1450 | dev_maps = rcu_dereference(dev->xps_rxqs_map); |
| 1451 | if (!dev_maps) |
| 1452 | goto out_no_maps; |
| 1453 | |
| 1454 | for (j = -1; j = netif_attrmask_next(j, NULL, dev->num_rx_queues), |
| 1455 | j < dev->num_rx_queues;) { |
| 1456 | int i, tci = j * num_tc + tc; |
| 1457 | struct xps_map *map; |
| 1458 | |
| 1459 | map = rcu_dereference(dev_maps->attr_map[tci]); |
| 1460 | if (!map) |
| 1461 | continue; |
| 1462 | |
| 1463 | for (i = map->len; i--;) { |
| 1464 | if (map->queues[i] == index) { |
| 1465 | set_bit(j, mask); |
| 1466 | break; |
| 1467 | } |
| 1468 | } |
| 1469 | } |
| 1470 | out_no_maps: |
| 1471 | rcu_read_unlock(); |
| 1472 | |
| 1473 | len = bitmap_print_to_pagebuf(false, buf, mask, dev->num_rx_queues); |
Andy Shevchenko | 29ca1c5 | 2019-03-04 11:48:56 +0200 | [diff] [blame] | 1474 | bitmap_free(mask); |
Amritha Nambiar | 8af2c06 | 2018-06-29 21:27:07 -0700 | [diff] [blame] | 1475 | |
| 1476 | return len < PAGE_SIZE ? len : -EINVAL; |
| 1477 | } |
| 1478 | |
| 1479 | static ssize_t xps_rxqs_store(struct netdev_queue *queue, const char *buf, |
| 1480 | size_t len) |
| 1481 | { |
| 1482 | struct net_device *dev = queue->dev; |
| 1483 | struct net *net = dev_net(dev); |
| 1484 | unsigned long *mask, index; |
| 1485 | int err; |
| 1486 | |
| 1487 | if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) |
| 1488 | return -EPERM; |
| 1489 | |
Andy Shevchenko | 29ca1c5 | 2019-03-04 11:48:56 +0200 | [diff] [blame] | 1490 | mask = bitmap_zalloc(dev->num_rx_queues, GFP_KERNEL); |
Amritha Nambiar | 8af2c06 | 2018-06-29 21:27:07 -0700 | [diff] [blame] | 1491 | if (!mask) |
| 1492 | return -ENOMEM; |
| 1493 | |
| 1494 | index = get_netdev_queue_index(queue); |
| 1495 | |
| 1496 | err = bitmap_parse(buf, len, mask, dev->num_rx_queues); |
| 1497 | if (err) { |
Andy Shevchenko | 29ca1c5 | 2019-03-04 11:48:56 +0200 | [diff] [blame] | 1498 | bitmap_free(mask); |
Amritha Nambiar | 8af2c06 | 2018-06-29 21:27:07 -0700 | [diff] [blame] | 1499 | return err; |
| 1500 | } |
| 1501 | |
Antoine Tenart | 2d57b4f | 2020-12-23 22:23:22 +0100 | [diff] [blame^] | 1502 | if (!rtnl_trylock()) { |
| 1503 | bitmap_free(mask); |
| 1504 | return restart_syscall(); |
| 1505 | } |
| 1506 | |
Andrei Vagin | 4d99f66 | 2018-08-08 20:07:35 -0700 | [diff] [blame] | 1507 | cpus_read_lock(); |
Amritha Nambiar | 8af2c06 | 2018-06-29 21:27:07 -0700 | [diff] [blame] | 1508 | err = __netif_set_xps_queue(dev, mask, index, true); |
Andrei Vagin | 4d99f66 | 2018-08-08 20:07:35 -0700 | [diff] [blame] | 1509 | cpus_read_unlock(); |
| 1510 | |
Antoine Tenart | 2d57b4f | 2020-12-23 22:23:22 +0100 | [diff] [blame^] | 1511 | rtnl_unlock(); |
| 1512 | |
Andy Shevchenko | 29ca1c5 | 2019-03-04 11:48:56 +0200 | [diff] [blame] | 1513 | bitmap_free(mask); |
Amritha Nambiar | 8af2c06 | 2018-06-29 21:27:07 -0700 | [diff] [blame] | 1514 | return err ? : len; |
| 1515 | } |
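/* Usage sketch (illustrative, example values): xps_rxqs takes a hex rx
 * queue bitmask, so
 *
 *   echo 1 > /sys/class/net/eth0/queues/tx-0/xps_rxqs
 *
 * asks that flows received on rx queue 0 be transmitted on tx queue 0,
 * the receive-queue flavour of XPS set up via __netif_set_xps_queue().
 */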
| 1516 | |
| 1517 | static struct netdev_queue_attribute xps_rxqs_attribute __ro_after_init |
| 1518 | = __ATTR_RW(xps_rxqs); |
david decotigny | ccf5ff6 | 2011-11-16 12:15:10 +0000 | [diff] [blame] | 1519 | #endif /* CONFIG_XPS */ |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1520 | |
stephen hemminger | 2b9c758 | 2017-08-18 13:46:26 -0700 | [diff] [blame] | 1521 | static struct attribute *netdev_queue_default_attrs[] __ro_after_init = { |
david decotigny | ccf5ff6 | 2011-11-16 12:15:10 +0000 | [diff] [blame] | 1522 | &queue_trans_timeout.attr, |
Alexander Duyck | 8d059b0 | 2016-10-28 11:43:49 -0400 | [diff] [blame] | 1523 | &queue_traffic_class.attr, |
david decotigny | ccf5ff6 | 2011-11-16 12:15:10 +0000 | [diff] [blame] | 1524 | #ifdef CONFIG_XPS |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1525 | &xps_cpus_attribute.attr, |
Amritha Nambiar | 8af2c06 | 2018-06-29 21:27:07 -0700 | [diff] [blame] | 1526 | &xps_rxqs_attribute.attr, |
John Fastabend | 822b3b2 | 2015-03-18 14:57:33 +0200 | [diff] [blame] | 1527 | &queue_tx_maxrate.attr, |
david decotigny | ccf5ff6 | 2011-11-16 12:15:10 +0000 | [diff] [blame] | 1528 | #endif |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1529 | NULL |
| 1530 | }; |
Kimberly Brown | be0d692 | 2019-04-01 22:51:35 -0400 | [diff] [blame] | 1531 | ATTRIBUTE_GROUPS(netdev_queue_default); |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1532 | |
| 1533 | static void netdev_queue_release(struct kobject *kobj) |
| 1534 | { |
| 1535 | struct netdev_queue *queue = to_netdev_queue(kobj); |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1536 | |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1537 | memset(kobj, 0, sizeof(*kobj)); |
| 1538 | dev_put(queue->dev); |
| 1539 | } |
| 1540 | |
Weilong Chen | 82ef3d5 | 2014-01-16 17:24:31 +0800 | [diff] [blame] | 1541 | static const void *netdev_queue_namespace(struct kobject *kobj) |
| 1542 | { |
| 1543 | struct netdev_queue *queue = to_netdev_queue(kobj); |
| 1544 | struct device *dev = &queue->dev->dev; |
| 1545 | const void *ns = NULL; |
| 1546 | |
| 1547 | if (dev->class && dev->class->ns_type) |
| 1548 | ns = dev->class->namespace(dev); |
| 1549 | |
| 1550 | return ns; |
| 1551 | } |
| 1552 | |
Dmitry Torokhov | b0e37c0 | 2018-07-20 21:56:52 +0000 | [diff] [blame] | 1553 | static void netdev_queue_get_ownership(struct kobject *kobj, |
| 1554 | kuid_t *uid, kgid_t *gid) |
| 1555 | { |
| 1556 | const struct net *net = netdev_queue_namespace(kobj); |
| 1557 | |
| 1558 | net_ns_get_ownership(net, uid, gid); |
| 1559 | } |
| 1560 | |
stephen hemminger | 2b9c758 | 2017-08-18 13:46:26 -0700 | [diff] [blame] | 1561 | static struct kobj_type netdev_queue_ktype __ro_after_init = { |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1562 | .sysfs_ops = &netdev_queue_sysfs_ops, |
| 1563 | .release = netdev_queue_release, |
Kimberly Brown | be0d692 | 2019-04-01 22:51:35 -0400 | [diff] [blame] | 1564 | .default_groups = netdev_queue_default_groups, |
Weilong Chen | 82ef3d5 | 2014-01-16 17:24:31 +0800 | [diff] [blame] | 1565 | .namespace = netdev_queue_namespace, |
Dmitry Torokhov | b0e37c0 | 2018-07-20 21:56:52 +0000 | [diff] [blame] | 1566 | .get_ownership = netdev_queue_get_ownership, |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1567 | }; |
| 1568 | |
WANG Cong | 6b53daf | 2014-07-23 16:09:10 -0700 | [diff] [blame] | 1569 | static int netdev_queue_add_kobject(struct net_device *dev, int index) |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1570 | { |
WANG Cong | 6b53daf | 2014-07-23 16:09:10 -0700 | [diff] [blame] | 1571 | struct netdev_queue *queue = dev->_tx + index; |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1572 | struct kobject *kobj = &queue->kobj; |
| 1573 | int error = 0; |
| 1574 | |
Jouni Hogander | e0b60903 | 2019-12-05 15:57:07 +0200 | [diff] [blame] | 1575 | /* A later kobject_put() will trigger the netdev_queue_release() call, |
| 1576 | * which decreases the dev refcount: take that reference here. |
| 1577 | */ |
| 1578 | dev_hold(queue->dev); |
| 1579 | |
WANG Cong | 6b53daf | 2014-07-23 16:09:10 -0700 | [diff] [blame] | 1580 | kobj->kset = dev->queues_kset; |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1581 | error = kobject_init_and_add(kobj, &netdev_queue_ktype, NULL, |
stephen hemminger | 6648c65 | 2017-08-18 13:46:28 -0700 | [diff] [blame] | 1582 | "tx-%u", index); |
Tom Herbert | 114cf58 | 2011-11-28 16:33:09 +0000 | [diff] [blame] | 1583 | if (error) |
Jouni Hogander | b8eb718 | 2019-11-20 09:08:16 +0200 | [diff] [blame] | 1584 | goto err; |
Tom Herbert | 114cf58 | 2011-11-28 16:33:09 +0000 | [diff] [blame] | 1585 | |
| 1586 | #ifdef CONFIG_BQL |
| 1587 | error = sysfs_create_group(kobj, &dql_group); |
Jouni Hogander | b8eb718 | 2019-11-20 09:08:16 +0200 | [diff] [blame] | 1588 | if (error) |
| 1589 | goto err; |
Tom Herbert | 114cf58 | 2011-11-28 16:33:09 +0000 | [diff] [blame] | 1590 | #endif |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1591 | |
| 1592 | kobject_uevent(kobj, KOBJ_ADD); |
Eric Dumazet | 48a322b | 2019-11-20 19:19:07 -0800 | [diff] [blame] | 1593 | return 0; |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1594 | |
Jouni Hogander | b8eb718 | 2019-11-20 09:08:16 +0200 | [diff] [blame] | 1595 | err: |
| 1596 | kobject_put(kobj); |
| 1597 | return error; |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1598 | } |
Christian Brauner | d755407 | 2020-02-27 04:37:18 +0100 | [diff] [blame] | 1599 | |
| 1600 | static int tx_queue_change_owner(struct net_device *ndev, int index, |
| 1601 | kuid_t kuid, kgid_t kgid) |
| 1602 | { |
| 1603 | struct netdev_queue *queue = ndev->_tx + index; |
| 1604 | struct kobject *kobj = &queue->kobj; |
| 1605 | int error; |
| 1606 | |
| 1607 | error = sysfs_change_owner(kobj, kuid, kgid); |
| 1608 | if (error) |
| 1609 | return error; |
| 1610 | |
| 1611 | #ifdef CONFIG_BQL |
| 1612 | error = sysfs_group_change_owner(kobj, &dql_group, kuid, kgid); |
| 1613 | #endif |
| 1614 | return error; |
| 1615 | } |
david decotigny | ccf5ff6 | 2011-11-16 12:15:10 +0000 | [diff] [blame] | 1616 | #endif /* CONFIG_SYSFS */ |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1617 | |
| 1618 | int |
WANG Cong | 6b53daf | 2014-07-23 16:09:10 -0700 | [diff] [blame] | 1619 | netdev_queue_update_kobjects(struct net_device *dev, int old_num, int new_num) |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1620 | { |
david decotigny | ccf5ff6 | 2011-11-16 12:15:10 +0000 | [diff] [blame] | 1621 | #ifdef CONFIG_SYSFS |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1622 | int i; |
| 1623 | int error = 0; |
| 1624 | |
| 1625 | for (i = old_num; i < new_num; i++) { |
WANG Cong | 6b53daf | 2014-07-23 16:09:10 -0700 | [diff] [blame] | 1626 | error = netdev_queue_add_kobject(dev, i); |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1627 | if (error) { |
| 1628 | new_num = old_num; |
| 1629 | break; |
| 1630 | } |
| 1631 | } |
| 1632 | |
Tom Herbert | 114cf58 | 2011-11-28 16:33:09 +0000 | [diff] [blame] | 1633 | while (--i >= new_num) { |
WANG Cong | 6b53daf | 2014-07-23 16:09:10 -0700 | [diff] [blame] | 1634 | struct netdev_queue *queue = dev->_tx + i; |
Tom Herbert | 114cf58 | 2011-11-28 16:33:09 +0000 | [diff] [blame] | 1635 | |
Christian Brauner | 8b8f3e6 | 2020-08-19 14:06:36 +0200 | [diff] [blame] | 1636 | if (!refcount_read(&dev_net(dev)->ns.count)) |
Andrey Vagin | 002d8a1 | 2016-10-24 19:09:53 -0700 | [diff] [blame] | 1637 | queue->kobj.uevent_suppress = 1; |
Tom Herbert | 114cf58 | 2011-11-28 16:33:09 +0000 | [diff] [blame] | 1638 | #ifdef CONFIG_BQL |
| 1639 | sysfs_remove_group(&queue->kobj, &dql_group); |
| 1640 | #endif |
| 1641 | kobject_put(&queue->kobj); |
| 1642 | } |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1643 | |
| 1644 | return error; |
Tom Herbert | bf26414 | 2010-11-26 08:36:09 +0000 | [diff] [blame] | 1645 | #else |
| 1646 | return 0; |
david decotigny | ccf5ff6 | 2011-11-16 12:15:10 +0000 | [diff] [blame] | 1647 | #endif /* CONFIG_SYSFS */ |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1648 | } |
| 1649 | |
Christian Brauner | d755407 | 2020-02-27 04:37:18 +0100 | [diff] [blame] | 1650 | static int net_tx_queue_change_owner(struct net_device *dev, int num, |
| 1651 | kuid_t kuid, kgid_t kgid) |
| 1652 | { |
| 1653 | #ifdef CONFIG_SYSFS |
| 1654 | int error = 0; |
| 1655 | int i; |
| 1656 | |
| 1657 | for (i = 0; i < num; i++) { |
| 1658 | error = tx_queue_change_owner(dev, i, kuid, kgid); |
| 1659 | if (error) |
| 1660 | break; |
| 1661 | } |
| 1662 | |
| 1663 | return error; |
| 1664 | #else |
| 1665 | return 0; |
| 1666 | #endif /* CONFIG_SYSFS */ |
| 1667 | } |
| 1668 | |
WANG Cong | 6b53daf | 2014-07-23 16:09:10 -0700 | [diff] [blame] | 1669 | static int register_queue_kobjects(struct net_device *dev) |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1670 | { |
Tom Herbert | bf26414 | 2010-11-26 08:36:09 +0000 | [diff] [blame] | 1671 | int error = 0, txq = 0, rxq = 0, real_rx = 0, real_tx = 0; |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1672 | |
david decotigny | ccf5ff6 | 2011-11-16 12:15:10 +0000 | [diff] [blame] | 1673 | #ifdef CONFIG_SYSFS |
WANG Cong | 6b53daf | 2014-07-23 16:09:10 -0700 | [diff] [blame] | 1674 | dev->queues_kset = kset_create_and_add("queues", |
stephen hemminger | 6648c65 | 2017-08-18 13:46:28 -0700 | [diff] [blame] | 1675 | NULL, &dev->dev.kobj); |
WANG Cong | 6b53daf | 2014-07-23 16:09:10 -0700 | [diff] [blame] | 1676 | if (!dev->queues_kset) |
Ben Hutchings | 62fe0b4 | 2010-09-27 08:24:33 +0000 | [diff] [blame] | 1677 | return -ENOMEM; |
WANG Cong | 6b53daf | 2014-07-23 16:09:10 -0700 | [diff] [blame] | 1678 | real_rx = dev->real_num_rx_queues; |
Tom Herbert | bf26414 | 2010-11-26 08:36:09 +0000 | [diff] [blame] | 1679 | #endif |
WANG Cong | 6b53daf | 2014-07-23 16:09:10 -0700 | [diff] [blame] | 1680 | real_tx = dev->real_num_tx_queues; |
Tom Herbert | bf26414 | 2010-11-26 08:36:09 +0000 | [diff] [blame] | 1681 | |
WANG Cong | 6b53daf | 2014-07-23 16:09:10 -0700 | [diff] [blame] | 1682 | error = net_rx_queue_update_kobjects(dev, 0, real_rx); |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1683 | if (error) |
| 1684 | goto error; |
Tom Herbert | bf26414 | 2010-11-26 08:36:09 +0000 | [diff] [blame] | 1685 | rxq = real_rx; |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1686 | |
WANG Cong | 6b53daf | 2014-07-23 16:09:10 -0700 | [diff] [blame] | 1687 | error = netdev_queue_update_kobjects(dev, 0, real_tx); |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1688 | if (error) |
| 1689 | goto error; |
Tom Herbert | bf26414 | 2010-11-26 08:36:09 +0000 | [diff] [blame] | 1690 | txq = real_tx; |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1691 | |
| 1692 | return 0; |
| 1693 | |
| 1694 | error: |
WANG Cong | 6b53daf | 2014-07-23 16:09:10 -0700 | [diff] [blame] | 1695 | netdev_queue_update_kobjects(dev, txq, 0); |
| 1696 | net_rx_queue_update_kobjects(dev, rxq, 0); |
YueHaibing | 895a5e9 | 2019-03-02 10:34:55 +0800 | [diff] [blame] | 1697 | #ifdef CONFIG_SYSFS |
| 1698 | kset_unregister(dev->queues_kset); |
| 1699 | #endif |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1700 | return error; |
Ben Hutchings | 62fe0b4 | 2010-09-27 08:24:33 +0000 | [diff] [blame] | 1701 | } |
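/* Editor's note (illustrative, example device): after successful
 * registration a device ends up with a sysfs layout like
 *
 *   /sys/class/net/eth0/queues/rx-0/ ... rx-<real_num_rx_queues - 1>/
 *   /sys/class/net/eth0/queues/tx-0/ ... tx-<real_num_tx_queues - 1>/
 *
 * while the error path above tears down whatever subset was created.
 */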
| 1702 | |
Christian Brauner | d755407 | 2020-02-27 04:37:18 +0100 | [diff] [blame] | 1703 | static int queue_change_owner(struct net_device *ndev, kuid_t kuid, kgid_t kgid) |
| 1704 | { |
| 1705 | int error = 0, real_rx = 0, real_tx = 0; |
| 1706 | |
| 1707 | #ifdef CONFIG_SYSFS |
| 1708 | if (ndev->queues_kset) { |
| 1709 | error = sysfs_change_owner(&ndev->queues_kset->kobj, kuid, kgid); |
| 1710 | if (error) |
| 1711 | return error; |
| 1712 | } |
| 1713 | real_rx = ndev->real_num_rx_queues; |
| 1714 | #endif |
| 1715 | real_tx = ndev->real_num_tx_queues; |
| 1716 | |
| 1717 | error = net_rx_queue_change_owner(ndev, real_rx, kuid, kgid); |
| 1718 | if (error) |
| 1719 | return error; |
| 1720 | |
| 1721 | error = net_tx_queue_change_owner(ndev, real_tx, kuid, kgid); |
| 1722 | if (error) |
| 1723 | return error; |
| 1724 | |
| 1725 | return 0; |
| 1726 | } |
| 1727 | |
WANG Cong | 6b53daf | 2014-07-23 16:09:10 -0700 | [diff] [blame] | 1728 | static void remove_queue_kobjects(struct net_device *dev) |
Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 1729 | { |
Tom Herbert | bf26414 | 2010-11-26 08:36:09 +0000 | [diff] [blame] | 1730 | int real_rx = 0, real_tx = 0; |
| 1731 | |
Michael Dalton | a953be5 | 2014-01-16 22:23:28 -0800 | [diff] [blame] | 1732 | #ifdef CONFIG_SYSFS |
WANG Cong | 6b53daf | 2014-07-23 16:09:10 -0700 | [diff] [blame] | 1733 | real_rx = dev->real_num_rx_queues; |
Tom Herbert | bf26414 | 2010-11-26 08:36:09 +0000 | [diff] [blame] | 1734 | #endif |
WANG Cong | 6b53daf | 2014-07-23 16:09:10 -0700 | [diff] [blame] | 1735 | real_tx = dev->real_num_tx_queues; |
Tom Herbert | bf26414 | 2010-11-26 08:36:09 +0000 | [diff] [blame] | 1736 | |
WANG Cong | 6b53daf | 2014-07-23 16:09:10 -0700 | [diff] [blame] | 1737 | net_rx_queue_update_kobjects(dev, real_rx, 0); |
| 1738 | netdev_queue_update_kobjects(dev, real_tx, 0); |
david decotigny | ccf5ff6 | 2011-11-16 12:15:10 +0000 | [diff] [blame] | 1739 | #ifdef CONFIG_SYSFS |
WANG Cong | 6b53daf | 2014-07-23 16:09:10 -0700 | [diff] [blame] | 1740 | kset_unregister(dev->queues_kset); |
Tom Herbert | bf26414 | 2010-11-26 08:36:09 +0000 | [diff] [blame] | 1741 | #endif |
Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 1742 | } |
Eric W. Biederman | 608b4b9 | 2010-05-04 17:36:45 -0700 | [diff] [blame] | 1743 | |
Eric W. Biederman | 7dc5dbc | 2013-03-25 20:07:01 -0700 | [diff] [blame] | 1744 | static bool net_current_may_mount(void) |
| 1745 | { |
| 1746 | struct net *net = current->nsproxy->net_ns; |
| 1747 | |
| 1748 | return ns_capable(net->user_ns, CAP_SYS_ADMIN); |
| 1749 | } |
| 1750 | |
Al Viro | a685e08 | 2011-06-08 21:13:01 -0400 | [diff] [blame] | 1751 | static void *net_grab_current_ns(void) |
Eric W. Biederman | 608b4b9 | 2010-05-04 17:36:45 -0700 | [diff] [blame] | 1752 | { |
Al Viro | a685e08 | 2011-06-08 21:13:01 -0400 | [diff] [blame] | 1753 | struct net *ns = current->nsproxy->net_ns; |
| 1754 | #ifdef CONFIG_NET_NS |
| 1755 | if (ns) |
Reshetova, Elena | c122e14 | 2017-06-30 13:08:08 +0300 | [diff] [blame] | 1756 | refcount_inc(&ns->passive); |
Al Viro | a685e08 | 2011-06-08 21:13:01 -0400 | [diff] [blame] | 1757 | #endif |
| 1758 | return ns; |
Eric W. Biederman | 608b4b9 | 2010-05-04 17:36:45 -0700 | [diff] [blame] | 1759 | } |
| 1760 | |
| 1761 | static const void *net_initial_ns(void) |
| 1762 | { |
| 1763 | return &init_net; |
| 1764 | } |
| 1765 | |
| 1766 | static const void *net_netlink_ns(struct sock *sk) |
| 1767 | { |
| 1768 | return sock_net(sk); |
| 1769 | } |
| 1770 | |
stephen hemminger | 737aec5 | 2017-08-18 13:46:22 -0700 | [diff] [blame] | 1771 | const struct kobj_ns_type_operations net_ns_type_operations = { |
Eric W. Biederman | 608b4b9 | 2010-05-04 17:36:45 -0700 | [diff] [blame] | 1772 | .type = KOBJ_NS_TYPE_NET, |
Eric W. Biederman | 7dc5dbc | 2013-03-25 20:07:01 -0700 | [diff] [blame] | 1773 | .current_may_mount = net_current_may_mount, |
Al Viro | a685e08 | 2011-06-08 21:13:01 -0400 | [diff] [blame] | 1774 | .grab_current_ns = net_grab_current_ns, |
Eric W. Biederman | 608b4b9 | 2010-05-04 17:36:45 -0700 | [diff] [blame] | 1775 | .netlink_ns = net_netlink_ns, |
| 1776 | .initial_ns = net_initial_ns, |
Al Viro | a685e08 | 2011-06-08 21:13:01 -0400 | [diff] [blame] | 1777 | .drop_ns = net_drop_ns, |
Eric W. Biederman | 608b4b9 | 2010-05-04 17:36:45 -0700 | [diff] [blame] | 1778 | }; |
Johannes Berg | 0460079 | 2010-08-05 17:45:15 +0200 | [diff] [blame] | 1779 | EXPORT_SYMBOL_GPL(net_ns_type_operations); |
Eric W. Biederman | 608b4b9 | 2010-05-04 17:36:45 -0700 | [diff] [blame] | 1780 | |
Kay Sievers | 7eff2e7 | 2007-08-14 15:15:12 +0200 | [diff] [blame] | 1781 | static int netdev_uevent(struct device *d, struct kobj_uevent_env *env) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1782 | { |
Greg Kroah-Hartman | 43cb76d | 2002-04-09 12:14:34 -0700 | [diff] [blame] | 1783 | struct net_device *dev = to_net_dev(d); |
Kay Sievers | 7eff2e7 | 2007-08-14 15:15:12 +0200 | [diff] [blame] | 1784 | int retval; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1785 | |
Kay Sievers | 312c004 | 2005-11-16 09:00:00 +0100 | [diff] [blame] | 1786 | /* pass interface to uevent. */ |
Kay Sievers | 7eff2e7 | 2007-08-14 15:15:12 +0200 | [diff] [blame] | 1787 | retval = add_uevent_var(env, "INTERFACE=%s", dev->name); |
Eric Rannaud | bf62456 | 2007-03-30 22:23:12 -0700 | [diff] [blame] | 1788 | if (retval) |
| 1789 | goto exit; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1790 | |
Jean Tourrilhes | ca2f37d | 2007-03-07 10:49:30 -0800 | [diff] [blame] | 1791 | /* pass ifindex to uevent. |
| 1792 | * ifindex is useful as it won't change (interface name may change) |
stephen hemminger | 6648c65 | 2017-08-18 13:46:28 -0700 | [diff] [blame] | 1793 | * and is what rtnetlink uses natively. |
| 1794 | */ |
Kay Sievers | 7eff2e7 | 2007-08-14 15:15:12 +0200 | [diff] [blame] | 1795 | retval = add_uevent_var(env, "IFINDEX=%d", dev->ifindex); |
Jean Tourrilhes | ca2f37d | 2007-03-07 10:49:30 -0800 | [diff] [blame] | 1796 | |
Eric Rannaud | bf62456 | 2007-03-30 22:23:12 -0700 | [diff] [blame] | 1797 | exit: |
Eric Rannaud | bf62456 | 2007-03-30 22:23:12 -0700 | [diff] [blame] | 1798 | return retval; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1799 | } |
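/* Editor's note (illustrative, example values): the resulting uevent
 * environment carries e.g. INTERFACE=eth0 and IFINDEX=2, which udev
 * rules commonly match on.
 */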
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1800 | |
| 1801 | /* |
YOSHIFUJI Hideaki | 4ec93ed | 2007-02-09 23:24:36 +0900 | [diff] [blame] | 1802 | * netdev_release -- destroy and free a dead device. |
Greg Kroah-Hartman | 43cb76d | 2002-04-09 12:14:34 -0700 | [diff] [blame] | 1803 | * Called when last reference to device kobject is gone. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1804 | */ |
Greg Kroah-Hartman | 43cb76d | 2002-04-09 12:14:34 -0700 | [diff] [blame] | 1805 | static void netdev_release(struct device *d) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1806 | { |
Greg Kroah-Hartman | 43cb76d | 2002-04-09 12:14:34 -0700 | [diff] [blame] | 1807 | struct net_device *dev = to_net_dev(d); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1808 | |
| 1809 | BUG_ON(dev->reg_state != NETREG_RELEASED); |
| 1810 | |
Florian Westphal | 6c55700 | 2017-10-02 23:50:05 +0200 | [diff] [blame] | 1811 | /* no need to wait for rcu grace period: |
| 1812 | * device is dead and about to be freed. |
| 1813 | */ |
| 1814 | kfree(rcu_access_pointer(dev->ifalias)); |
Eric Dumazet | 74d332c | 2013-10-30 13:10:44 -0700 | [diff] [blame] | 1815 | netdev_freemem(dev); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1816 | } |
| 1817 | |
Eric W. Biederman | 608b4b9 | 2010-05-04 17:36:45 -0700 | [diff] [blame] | 1818 | static const void *net_namespace(struct device *d) |
| 1819 | { |
Geliang Tang | 5c29482 | 2015-12-22 23:11:49 +0800 | [diff] [blame] | 1820 | struct net_device *dev = to_net_dev(d); |
| 1821 | |
Eric W. Biederman | 608b4b9 | 2010-05-04 17:36:45 -0700 | [diff] [blame] | 1822 | return dev_net(dev); |
| 1823 | } |
| 1824 | |
Dmitry Torokhov | b0e37c0 | 2018-07-20 21:56:52 +0000 | [diff] [blame] | 1825 | static void net_get_ownership(struct device *d, kuid_t *uid, kgid_t *gid) |
| 1826 | { |
| 1827 | struct net_device *dev = to_net_dev(d); |
| 1828 | const struct net *net = dev_net(dev); |
| 1829 | |
| 1830 | net_ns_get_ownership(net, uid, gid); |
| 1831 | } |
| 1832 | |
stephen hemminger | e6d473e | 2017-08-18 13:46:21 -0700 | [diff] [blame] | 1833 | static struct class net_class __ro_after_init = { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1834 | .name = "net", |
Greg Kroah-Hartman | 43cb76d | 2002-04-09 12:14:34 -0700 | [diff] [blame] | 1835 | .dev_release = netdev_release, |
Greg Kroah-Hartman | 6be8aee | 2013-07-24 15:05:33 -0700 | [diff] [blame] | 1836 | .dev_groups = net_class_groups, |
Greg Kroah-Hartman | 43cb76d | 2002-04-09 12:14:34 -0700 | [diff] [blame] | 1837 | .dev_uevent = netdev_uevent, |
Eric W. Biederman | 608b4b9 | 2010-05-04 17:36:45 -0700 | [diff] [blame] | 1838 | .ns_type = &net_ns_type_operations, |
| 1839 | .namespace = net_namespace, |
Dmitry Torokhov | b0e37c0 | 2018-07-20 21:56:52 +0000 | [diff] [blame] | 1840 | .get_ownership = net_get_ownership, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1841 | }; |
| 1842 | |
Florian Fainelli | aa836df | 2015-03-09 14:31:20 -0700 | [diff] [blame] | 1843 | #ifdef CONFIG_OF_NET |
| 1844 | static int of_dev_node_match(struct device *dev, const void *data) |
| 1845 | { |
Tobias Waldekranz | 2e186a2 | 2020-05-15 11:52:52 +0200 | [diff] [blame] | 1846 | for (; dev; dev = dev->parent) { |
| 1847 | if (dev->of_node == data) |
| 1848 | return 1; |
| 1849 | } |
Florian Fainelli | aa836df | 2015-03-09 14:31:20 -0700 | [diff] [blame] | 1850 | |
Tobias Waldekranz | 2e186a2 | 2020-05-15 11:52:52 +0200 | [diff] [blame] | 1851 | return 0; |
Florian Fainelli | aa836df | 2015-03-09 14:31:20 -0700 | [diff] [blame] | 1852 | } |
| 1853 | |
Russell King | 9861f72 | 2015-09-24 20:36:33 +0100 | [diff] [blame] | 1854 | /* |
| 1855 | * of_find_net_device_by_node - look up the net device for the device node |
| 1856 | * @np: OF device node |
| 1857 | * |
| 1858 | * Looks up the net_device structure corresponding to the device node. |
| 1859 | * If successful, returns a pointer to the net_device with the embedded |
| 1860 | * struct device refcount incremented by one, or NULL on failure. The |
| 1861 | * refcount must be dropped when done with the net_device. |
| 1862 | */ |
Florian Fainelli | aa836df | 2015-03-09 14:31:20 -0700 | [diff] [blame] | 1863 | struct net_device *of_find_net_device_by_node(struct device_node *np) |
| 1864 | { |
| 1865 | struct device *dev; |
| 1866 | |
| 1867 | dev = class_find_device(&net_class, NULL, np, of_dev_node_match); |
| 1868 | if (!dev) |
| 1869 | return NULL; |
| 1870 | |
| 1871 | return to_net_dev(dev); |
| 1872 | } |
| 1873 | EXPORT_SYMBOL(of_find_net_device_by_node); |
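/* Example use (a sketch: the caller owns the extra device reference and
 * must drop it with put_device() when done):
 *
 *	struct net_device *ndev = of_find_net_device_by_node(np);
 *
 *	if (ndev) {
 *		...
 *		put_device(&ndev->dev);
 *	}
 */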
| 1874 | #endif |
| 1875 | |
Stephen Hemminger | 9093bbb | 2007-05-19 15:39:25 -0700 | [diff] [blame] | 1876 | /* Delete sysfs entries but hold a kobject reference until all |
| 1877 | * netdev references are gone. |
| 1878 | */ |
WANG Cong | 6b53daf | 2014-07-23 16:09:10 -0700 | [diff] [blame] | 1879 | void netdev_unregister_kobject(struct net_device *ndev) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1880 | { |
stephen hemminger | 6648c65 | 2017-08-18 13:46:28 -0700 | [diff] [blame] | 1881 | struct device *dev = &ndev->dev; |
Stephen Hemminger | 9093bbb | 2007-05-19 15:39:25 -0700 | [diff] [blame] | 1882 | |
Christian Brauner | 8b8f3e6 | 2020-08-19 14:06:36 +0200 | [diff] [blame] | 1883 | if (!refcount_read(&dev_net(ndev)->ns.count)) |
Andrey Vagin | 002d8a1 | 2016-10-24 19:09:53 -0700 | [diff] [blame] | 1884 | dev_set_uevent_suppress(dev, 1); |
| 1885 | |
Stephen Hemminger | 9093bbb | 2007-05-19 15:39:25 -0700 | [diff] [blame] | 1886 | kobject_get(&dev->kobj); |
Eric W. Biederman | 3891845 | 2008-10-27 17:51:47 -0700 | [diff] [blame] | 1887 | |
WANG Cong | 6b53daf | 2014-07-23 16:09:10 -0700 | [diff] [blame] | 1888 | remove_queue_kobjects(ndev); |
Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 1889 | |
Ming Lei | 9802c8e | 2013-02-22 16:34:16 -0800 | [diff] [blame] | 1890 | pm_runtime_set_memalloc_noio(dev, false); |
| 1891 | |
Stephen Hemminger | 9093bbb | 2007-05-19 15:39:25 -0700 | [diff] [blame] | 1892 | device_del(dev); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1893 | } |
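/* The reference taken above is dropped by free_netdev() in net/core/dev.c,
 * which releases the kobject only once all netdev references are gone and
 * may thus invoke netdev_release(), satisfying its BUG_ON(). A sketch of
 * that counterpart, for illustration:
 *
 *	dev->reg_state = NETREG_RELEASED;
 *	put_device(&dev->dev);
 */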
| 1894 | |
| 1895 | /* Create sysfs entries for the network device. */ |
WANG Cong | 6b53daf | 2014-07-23 16:09:10 -0700 | [diff] [blame] | 1896 | int netdev_register_kobject(struct net_device *ndev) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1897 | { |
stephen hemminger | 6648c65 | 2017-08-18 13:46:28 -0700 | [diff] [blame] | 1898 | struct device *dev = &ndev->dev; |
WANG Cong | 6b53daf | 2014-07-23 16:09:10 -0700 | [diff] [blame] | 1899 | const struct attribute_group **groups = ndev->sysfs_groups; |
Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 1900 | int error = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1901 | |
Eric W. Biederman | a1b3f59 | 2010-05-04 17:36:49 -0700 | [diff] [blame] | 1902 | device_initialize(dev); |
Greg Kroah-Hartman | 43cb76d | 2002-04-09 12:14:34 -0700 | [diff] [blame] | 1903 | dev->class = &net_class; |
WANG Cong | 6b53daf | 2014-07-23 16:09:10 -0700 | [diff] [blame] | 1904 | dev->platform_data = ndev; |
Greg Kroah-Hartman | 43cb76d | 2002-04-09 12:14:34 -0700 | [diff] [blame] | 1905 | dev->groups = groups; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1906 | |
WANG Cong | 6b53daf | 2014-07-23 16:09:10 -0700 | [diff] [blame] | 1907 | dev_set_name(dev, "%s", ndev->name); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1908 | |
Eric W. Biederman | 8b41d18 | 2007-09-26 22:02:53 -0700 | [diff] [blame] | 1909 | #ifdef CONFIG_SYSFS |
Eric W. Biederman | 0c509a6 | 2009-10-29 14:18:21 +0000 | [diff] [blame] | 1910 | /* Allow for a device-specific group */ |
| 1911 | if (*groups) |
| 1912 | groups++; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1913 | |
Eric W. Biederman | 0c509a6 | 2009-10-29 14:18:21 +0000 | [diff] [blame] | 1914 | *groups++ = &netstat_group; |
Johannes Berg | 38c1a01 | 2012-11-16 20:46:19 +0100 | [diff] [blame] | 1915 | |
| 1916 | #if IS_ENABLED(CONFIG_WIRELESS_EXT) || IS_ENABLED(CONFIG_CFG80211) |
WANG Cong | 6b53daf | 2014-07-23 16:09:10 -0700 | [diff] [blame] | 1917 | if (ndev->ieee80211_ptr) |
Johannes Berg | 38c1a01 | 2012-11-16 20:46:19 +0100 | [diff] [blame] | 1918 | *groups++ = &wireless_group; |
| 1919 | #if IS_ENABLED(CONFIG_WIRELESS_EXT) |
WANG Cong | 6b53daf | 2014-07-23 16:09:10 -0700 | [diff] [blame] | 1920 | else if (ndev->wireless_handlers) |
Johannes Berg | 38c1a01 | 2012-11-16 20:46:19 +0100 | [diff] [blame] | 1921 | *groups++ = &wireless_group; |
| 1922 | #endif |
| 1923 | #endif |
Eric W. Biederman | 8b41d18 | 2007-09-26 22:02:53 -0700 | [diff] [blame] | 1924 | #endif /* CONFIG_SYSFS */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1925 | |
Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 1926 | error = device_add(dev); |
| 1927 | if (error) |
Wang Hai | 8ed633b | 2019-04-12 16:36:33 -0400 | [diff] [blame] | 1928 | return error; |
Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 1929 | |
WANG Cong | 6b53daf | 2014-07-23 16:09:10 -0700 | [diff] [blame] | 1930 | error = register_queue_kobjects(ndev); |
Wang Hai | 8ed633b | 2019-04-12 16:36:33 -0400 | [diff] [blame] | 1931 | if (error) { |
| 1932 | device_del(dev); |
| 1933 | return error; |
| 1934 | } |
Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 1935 | |
Ming Lei | 9802c8e | 2013-02-22 16:34:16 -0800 | [diff] [blame] | 1936 | pm_runtime_set_memalloc_noio(dev, true); |
| 1937 | |
Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 1938 | return error; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1939 | } |
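/* A driver may claim the first sysfs_groups[] slot before registration;
 * a sketch of the pattern ("my_group" is a hypothetical attribute group,
 * the in-tree model is bonding's per-device group):
 *
 *	ndev->sysfs_groups[0] = &my_group;
 *	err = register_netdev(ndev);
 */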
| 1940 | |
Christian Brauner | e6dee9f | 2020-02-27 04:37:17 +0100 | [diff] [blame] | 1941 | /* Change the owner of sysfs entries when moving a network device across |
| 1942 | * network namespaces owned by different user namespaces. |
| 1943 | */ |
| 1944 | int netdev_change_owner(struct net_device *ndev, const struct net *net_old, |
| 1945 | const struct net *net_new) |
| 1946 | { |
| 1947 | struct device *dev = &ndev->dev; |
| 1948 | kuid_t old_uid, new_uid; |
| 1949 | kgid_t old_gid, new_gid; |
| 1950 | int error; |
| 1951 | |
| 1952 | net_ns_get_ownership(net_old, &old_uid, &old_gid); |
| 1953 | net_ns_get_ownership(net_new, &new_uid, &new_gid); |
| 1954 | |
| 1955 | /* The network namespace was changed but the owning user namespace is |
| 1956 | * identical, so there's no need to change the owner of the sysfs entries. |
| 1957 | */ |
| 1958 | if (uid_eq(old_uid, new_uid) && gid_eq(old_gid, new_gid)) |
| 1959 | return 0; |
| 1960 | |
| 1961 | error = device_change_owner(dev, new_uid, new_gid); |
| 1962 | if (error) |
| 1963 | return error; |
| 1964 | |
Christian Brauner | d755407 | 2020-02-27 04:37:18 +0100 | [diff] [blame] | 1965 | error = queue_change_owner(ndev, new_uid, new_gid); |
| 1966 | if (error) |
| 1967 | return error; |
| 1968 | |
Christian Brauner | e6dee9f | 2020-02-27 04:37:17 +0100 | [diff] [blame] | 1969 | return 0; |
| 1970 | } |
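/* Called from dev_change_net_namespace() in net/core/dev.c, i.e. for
 * operations such as "ip link set eth0 netns <pid>". A sketch of the
 * call site:
 *
 *	err = netdev_change_owner(dev, net_old, net);
 */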
| 1971 | |
stephen hemminger | b793dc5 | 2017-08-18 13:46:20 -0700 | [diff] [blame] | 1972 | int netdev_class_create_file_ns(const struct class_attribute *class_attr, |
Tejun Heo | 58292cbe | 2013-09-11 22:29:04 -0400 | [diff] [blame] | 1973 | const void *ns) |
Jay Vosburgh | b8a9787 | 2008-06-13 18:12:04 -0700 | [diff] [blame] | 1974 | { |
Tejun Heo | 58292cbe | 2013-09-11 22:29:04 -0400 | [diff] [blame] | 1975 | return class_create_file_ns(&net_class, class_attr, ns); |
Jay Vosburgh | b8a9787 | 2008-06-13 18:12:04 -0700 | [diff] [blame] | 1976 | } |
Tejun Heo | 58292cbe | 2013-09-11 22:29:04 -0400 | [diff] [blame] | 1977 | EXPORT_SYMBOL(netdev_class_create_file_ns); |
Jay Vosburgh | b8a9787 | 2008-06-13 18:12:04 -0700 | [diff] [blame] | 1978 | |
stephen hemminger | b793dc5 | 2017-08-18 13:46:20 -0700 | [diff] [blame] | 1979 | void netdev_class_remove_file_ns(const struct class_attribute *class_attr, |
Tejun Heo | 58292cbe | 2013-09-11 22:29:04 -0400 | [diff] [blame] | 1980 | const void *ns) |
Jay Vosburgh | b8a9787 | 2008-06-13 18:12:04 -0700 | [diff] [blame] | 1981 | { |
Tejun Heo | 58292cbe | 2013-09-11 22:29:04 -0400 | [diff] [blame] | 1982 | class_remove_file_ns(&net_class, class_attr, ns); |
Jay Vosburgh | b8a9787 | 2008-06-13 18:12:04 -0700 | [diff] [blame] | 1983 | } |
Tejun Heo | 58292cbe | 2013-09-11 22:29:04 -0400 | [diff] [blame] | 1984 | EXPORT_SYMBOL(netdev_class_remove_file_ns); |
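/* Example use (a sketch; "my_masters" is a hypothetical attribute, and the
 * in-tree user of this pair is bonding's per-netns
 * /sys/class/net/bonding_masters file):
 *
 *	static CLASS_ATTR_RW(my_masters);
 *	...
 *	err = netdev_class_create_file_ns(&class_attr_my_masters, net);
 *	...
 *	netdev_class_remove_file_ns(&class_attr_my_masters, net);
 */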
Jay Vosburgh | b8a9787 | 2008-06-13 18:12:04 -0700 | [diff] [blame] | 1985 | |
Daniel Borkmann | a48d4bb | 2014-01-06 01:20:11 +0100 | [diff] [blame] | 1986 | int __init netdev_kobject_init(void) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1987 | { |
Eric W. Biederman | 608b4b9 | 2010-05-04 17:36:45 -0700 | [diff] [blame] | 1988 | kobj_ns_type_register(&net_ns_type_operations); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1989 | return class_register(&net_class); |
| 1990 | } |