// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net-sysfs.c - network device class and attributes
 *
 * Copyright (c) 2003 Stephen Hemminger <shemminger@osdl.org>
 */

#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/nsproxy.h>
#include <net/sock.h>
#include <net/net_namespace.h>
#include <linux/rtnetlink.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <linux/jiffies.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/cpu.h>

#include "net-sysfs.h"

#ifdef CONFIG_SYSFS
static const char fmt_hex[] = "%#x\n";
static const char fmt_dec[] = "%d\n";
static const char fmt_ulong[] = "%lu\n";
static const char fmt_u64[] = "%llu\n";

static inline int dev_isalive(const struct net_device *dev)
{
	return dev->reg_state <= NETREG_REGISTERED;
}
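
/*
 * dev_isalive() above is true for devices that are NETREG_UNINITIALIZED or
 * NETREG_REGISTERED; once unregistration starts, the attributes below stop
 * returning values and report -EINVAL instead.
 */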

/* use same locking rules as GIF* ioctl's */
static ssize_t netdev_show(const struct device *dev,
			   struct device_attribute *attr, char *buf,
			   ssize_t (*format)(const struct net_device *, char *))
{
	struct net_device *ndev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	read_lock(&dev_base_lock);
	if (dev_isalive(ndev))
		ret = (*format)(ndev, buf);
	read_unlock(&dev_base_lock);

	return ret;
}

/* generate a show function for simple field */
#define NETDEVICE_SHOW(field, format_string)				\
static ssize_t format_##field(const struct net_device *dev, char *buf)	\
{									\
	return sprintf(buf, format_string, dev->field);			\
}									\
static ssize_t field##_show(struct device *dev,				\
			    struct device_attribute *attr, char *buf)	\
{									\
	return netdev_show(dev, attr, buf, format_##field);		\
}									\

#define NETDEVICE_SHOW_RO(field, format_string)				\
NETDEVICE_SHOW(field, format_string);					\
static DEVICE_ATTR_RO(field)

#define NETDEVICE_SHOW_RW(field, format_string)				\
NETDEVICE_SHOW(field, format_string);					\
static DEVICE_ATTR_RW(field)
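
/*
 * Illustrative expansion (sketch only, not compiled here):
 * NETDEVICE_SHOW_RO(ifindex, fmt_dec) generates roughly
 *
 *	static ssize_t format_ifindex(const struct net_device *dev, char *buf)
 *	{
 *		return sprintf(buf, fmt_dec, dev->ifindex);
 *	}
 *	static ssize_t ifindex_show(struct device *dev,
 *				    struct device_attribute *attr, char *buf)
 *	{
 *		return netdev_show(dev, attr, buf, format_ifindex);
 *	}
 *	static DEVICE_ATTR_RO(ifindex);
 *
 * so every simple netdev field gets a show routine that takes dev_base_lock
 * and checks dev_isalive() before formatting the value.
 */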

/* use same locking and permission rules as SIF* ioctl's */
static ssize_t netdev_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t len,
			    int (*set)(struct net_device *, unsigned long))
{
	struct net_device *netdev = to_net_dev(dev);
	struct net *net = dev_net(netdev);
	unsigned long new;
	int ret = -EINVAL;

	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	ret = kstrtoul(buf, 0, &new);
	if (ret)
		goto err;

	if (!rtnl_trylock())
		return restart_syscall();

	if (dev_isalive(netdev)) {
		ret = (*set)(netdev, new);
		if (ret == 0)
			ret = len;
	}
	rtnl_unlock();
 err:
	return ret;
}
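
/*
 * Sketch of how a writable attribute plugs into netdev_store(); "foo" is a
 * hypothetical field used purely for illustration, not a real attribute:
 *
 *	static int change_foo(struct net_device *dev, unsigned long new)
 *	{
 *		dev->foo = new;
 *		return 0;
 *	}
 *
 *	static ssize_t foo_store(struct device *dev,
 *				 struct device_attribute *attr,
 *				 const char *buf, size_t len)
 *	{
 *		return netdev_store(dev, attr, buf, len, change_foo);
 *	}
 *	NETDEVICE_SHOW_RW(foo, fmt_dec);
 *
 * netdev_store() already does the CAP_NET_ADMIN check, kstrtoul() parsing
 * and RTNL locking, so the per-attribute setter only validates and applies
 * the new value.
 */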

NETDEVICE_SHOW_RO(dev_id, fmt_hex);
NETDEVICE_SHOW_RO(dev_port, fmt_dec);
NETDEVICE_SHOW_RO(addr_assign_type, fmt_dec);
NETDEVICE_SHOW_RO(addr_len, fmt_dec);
NETDEVICE_SHOW_RO(ifindex, fmt_dec);
NETDEVICE_SHOW_RO(type, fmt_dec);
NETDEVICE_SHOW_RO(link_mode, fmt_dec);

static ssize_t iflink_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct net_device *ndev = to_net_dev(dev);

	return sprintf(buf, fmt_dec, dev_get_iflink(ndev));
}
static DEVICE_ATTR_RO(iflink);

static ssize_t format_name_assign_type(const struct net_device *dev, char *buf)
{
	return sprintf(buf, fmt_dec, dev->name_assign_type);
}

static ssize_t name_assign_type_show(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct net_device *ndev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	if (ndev->name_assign_type != NET_NAME_UNKNOWN)
		ret = netdev_show(dev, attr, buf, format_name_assign_type);

	return ret;
}
static DEVICE_ATTR_RO(name_assign_type);

/* use same locking rules as GIFHWADDR ioctl's */
static ssize_t address_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct net_device *ndev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	read_lock(&dev_base_lock);
	if (dev_isalive(ndev))
		ret = sysfs_format_mac(buf, ndev->dev_addr, ndev->addr_len);
	read_unlock(&dev_base_lock);
	return ret;
}
static DEVICE_ATTR_RO(address);

static ssize_t broadcast_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct net_device *ndev = to_net_dev(dev);

	if (dev_isalive(ndev))
		return sysfs_format_mac(buf, ndev->broadcast, ndev->addr_len);
	return -EINVAL;
}
static DEVICE_ATTR_RO(broadcast);

static int change_carrier(struct net_device *dev, unsigned long new_carrier)
{
	if (!netif_running(dev))
		return -EINVAL;
	return dev_change_carrier(dev, (bool)new_carrier);
}

static ssize_t carrier_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_carrier);
}

static ssize_t carrier_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	if (netif_running(netdev))
		return sprintf(buf, fmt_dec, !!netif_carrier_ok(netdev));

	return -EINVAL;
}
static DEVICE_ATTR_RW(carrier);

static ssize_t speed_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	int ret = -EINVAL;

	if (!rtnl_trylock())
		return restart_syscall();

	if (netif_running(netdev)) {
		struct ethtool_link_ksettings cmd;

		if (!__ethtool_get_link_ksettings(netdev, &cmd))
			ret = sprintf(buf, fmt_dec, cmd.base.speed);
	}
	rtnl_unlock();
	return ret;
}
static DEVICE_ATTR_RO(speed);

static ssize_t duplex_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	int ret = -EINVAL;

	if (!rtnl_trylock())
		return restart_syscall();

	if (netif_running(netdev)) {
		struct ethtool_link_ksettings cmd;

		if (!__ethtool_get_link_ksettings(netdev, &cmd)) {
			const char *duplex;

			switch (cmd.base.duplex) {
			case DUPLEX_HALF:
				duplex = "half";
				break;
			case DUPLEX_FULL:
				duplex = "full";
				break;
			default:
				duplex = "unknown";
				break;
			}
			ret = sprintf(buf, "%s\n", duplex);
		}
	}
	rtnl_unlock();
	return ret;
}
static DEVICE_ATTR_RO(duplex);

static ssize_t dormant_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	if (netif_running(netdev))
		return sprintf(buf, fmt_dec, !!netif_dormant(netdev));

	return -EINVAL;
}
static DEVICE_ATTR_RO(dormant);

static const char *const operstates[] = {
	"unknown",
	"notpresent", /* currently unused */
	"down",
	"lowerlayerdown",
	"testing", /* currently unused */
	"dormant",
	"up"
};
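
/*
 * These strings are what /sys/class/net/<iface>/operstate reports; the array
 * is indexed by the IF_OPER_* values from uapi/linux/if.h, which follow the
 * RFC 2863 operational states.
 */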

static ssize_t operstate_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	const struct net_device *netdev = to_net_dev(dev);
	unsigned char operstate;

	read_lock(&dev_base_lock);
	operstate = netdev->operstate;
	if (!netif_running(netdev))
		operstate = IF_OPER_DOWN;
	read_unlock(&dev_base_lock);

	if (operstate >= ARRAY_SIZE(operstates))
		return -EINVAL; /* should not happen */

	return sprintf(buf, "%s\n", operstates[operstate]);
}
static DEVICE_ATTR_RO(operstate);

static ssize_t carrier_changes_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	return sprintf(buf, fmt_dec,
		       atomic_read(&netdev->carrier_up_count) +
		       atomic_read(&netdev->carrier_down_count));
}
static DEVICE_ATTR_RO(carrier_changes);

static ssize_t carrier_up_count_show(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	return sprintf(buf, fmt_dec, atomic_read(&netdev->carrier_up_count));
}
static DEVICE_ATTR_RO(carrier_up_count);

static ssize_t carrier_down_count_show(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	return sprintf(buf, fmt_dec, atomic_read(&netdev->carrier_down_count));
}
static DEVICE_ATTR_RO(carrier_down_count);

/* read-write attributes */

static int change_mtu(struct net_device *dev, unsigned long new_mtu)
{
	return dev_set_mtu(dev, (int)new_mtu);
}

static ssize_t mtu_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_mtu);
}
NETDEVICE_SHOW_RW(mtu, fmt_dec);
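
/*
 * Example: "echo 1400 > /sys/class/net/eth0/mtu" lands in change_mtu() via
 * netdev_store(), i.e. the write is parsed with kstrtoul(), gated on
 * CAP_NET_ADMIN in the device's network namespace and applied under RTNL
 * through dev_set_mtu(). (eth0 is only an illustrative interface name.)
 */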

static int change_flags(struct net_device *dev, unsigned long new_flags)
{
	return dev_change_flags(dev, (unsigned int)new_flags, NULL);
}

static ssize_t flags_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_flags);
}
NETDEVICE_SHOW_RW(flags, fmt_hex);

static ssize_t tx_queue_len_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t len)
{
	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	return netdev_store(dev, attr, buf, len, dev_change_tx_queue_len);
}
NETDEVICE_SHOW_RW(tx_queue_len, fmt_dec);

static int change_gro_flush_timeout(struct net_device *dev, unsigned long val)
{
	dev->gro_flush_timeout = val;
	return 0;
}

static ssize_t gro_flush_timeout_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t len)
{
	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	return netdev_store(dev, attr, buf, len, change_gro_flush_timeout);
}
NETDEVICE_SHOW_RW(gro_flush_timeout, fmt_ulong);

static ssize_t ifalias_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t len)
{
	struct net_device *netdev = to_net_dev(dev);
	struct net *net = dev_net(netdev);
	size_t count = len;
	ssize_t ret = 0;

	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	/* ignore trailing newline */
	if (len > 0 && buf[len - 1] == '\n')
		--count;

	if (!rtnl_trylock())
		return restart_syscall();

	if (dev_isalive(netdev)) {
		ret = dev_set_alias(netdev, buf, count);
		if (ret < 0)
			goto err;
		ret = len;
		netdev_state_change(netdev);
	}
err:
	rtnl_unlock();

	return ret;
}

static ssize_t ifalias_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	const struct net_device *netdev = to_net_dev(dev);
	char tmp[IFALIASZ];
	ssize_t ret = 0;

	ret = dev_get_alias(netdev, tmp, sizeof(tmp));
	if (ret > 0)
		ret = sprintf(buf, "%s\n", tmp);
	return ret;
}
static DEVICE_ATTR_RW(ifalias);

static int change_group(struct net_device *dev, unsigned long new_group)
{
	dev_set_group(dev, (int)new_group);
	return 0;
}

static ssize_t group_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_group);
}
NETDEVICE_SHOW(group, fmt_dec);
static DEVICE_ATTR(netdev_group, 0644, group_show, group_store);

static int change_proto_down(struct net_device *dev, unsigned long proto_down)
{
	return dev_change_proto_down(dev, (bool)proto_down);
}

static ssize_t proto_down_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_proto_down);
}
NETDEVICE_SHOW_RW(proto_down, fmt_dec);

static ssize_t phys_port_id_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	if (!rtnl_trylock())
		return restart_syscall();

	if (dev_isalive(netdev)) {
		struct netdev_phys_item_id ppid;

		ret = dev_get_phys_port_id(netdev, &ppid);
		if (!ret)
			ret = sprintf(buf, "%*phN\n", ppid.id_len, ppid.id);
	}
	rtnl_unlock();

	return ret;
}
static DEVICE_ATTR_RO(phys_port_id);

static ssize_t phys_port_name_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	if (!rtnl_trylock())
		return restart_syscall();

	if (dev_isalive(netdev)) {
		char name[IFNAMSIZ];

		ret = dev_get_phys_port_name(netdev, name, sizeof(name));
		if (!ret)
			ret = sprintf(buf, "%s\n", name);
	}
	rtnl_unlock();

	return ret;
}
static DEVICE_ATTR_RO(phys_port_name);

static ssize_t phys_switch_id_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	if (!rtnl_trylock())
		return restart_syscall();

	if (dev_isalive(netdev)) {
		struct netdev_phys_item_id ppid = { };

		ret = dev_get_port_parent_id(netdev, &ppid, false);
		if (!ret)
			ret = sprintf(buf, "%*phN\n", ppid.id_len, ppid.id);
	}
	rtnl_unlock();

	return ret;
}
static DEVICE_ATTR_RO(phys_switch_id);

static struct attribute *net_class_attrs[] __ro_after_init = {
	&dev_attr_netdev_group.attr,
	&dev_attr_type.attr,
	&dev_attr_dev_id.attr,
	&dev_attr_dev_port.attr,
	&dev_attr_iflink.attr,
	&dev_attr_ifindex.attr,
	&dev_attr_name_assign_type.attr,
	&dev_attr_addr_assign_type.attr,
	&dev_attr_addr_len.attr,
	&dev_attr_link_mode.attr,
	&dev_attr_address.attr,
	&dev_attr_broadcast.attr,
	&dev_attr_speed.attr,
	&dev_attr_duplex.attr,
	&dev_attr_dormant.attr,
	&dev_attr_operstate.attr,
	&dev_attr_carrier_changes.attr,
	&dev_attr_ifalias.attr,
	&dev_attr_carrier.attr,
	&dev_attr_mtu.attr,
	&dev_attr_flags.attr,
	&dev_attr_tx_queue_len.attr,
	&dev_attr_gro_flush_timeout.attr,
	&dev_attr_phys_port_id.attr,
	&dev_attr_phys_port_name.attr,
	&dev_attr_phys_switch_id.attr,
	&dev_attr_proto_down.attr,
	&dev_attr_carrier_up_count.attr,
	&dev_attr_carrier_down_count.attr,
	NULL,
};
ATTRIBUTE_GROUPS(net_class);

/* Show a given attribute in the statistics group */
static ssize_t netstat_show(const struct device *d,
			    struct device_attribute *attr, char *buf,
			    unsigned long offset)
{
	struct net_device *dev = to_net_dev(d);
	ssize_t ret = -EINVAL;

	WARN_ON(offset > sizeof(struct rtnl_link_stats64) ||
		offset % sizeof(u64) != 0);

	read_lock(&dev_base_lock);
	if (dev_isalive(dev)) {
		struct rtnl_link_stats64 temp;
		const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);

		ret = sprintf(buf, fmt_u64, *(u64 *)(((u8 *)stats) + offset));
	}
	read_unlock(&dev_base_lock);
	return ret;
}

/* generate a read-only statistics attribute */
#define NETSTAT_ENTRY(name)						\
static ssize_t name##_show(struct device *d,				\
			   struct device_attribute *attr, char *buf)	\
{									\
	return netstat_show(d, attr, buf,				\
			    offsetof(struct rtnl_link_stats64, name));	\
}									\
static DEVICE_ATTR_RO(name)
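
/*
 * For example, NETSTAT_ENTRY(rx_packets) below generates rx_packets_show(),
 * which passes offsetof(struct rtnl_link_stats64, rx_packets) to
 * netstat_show() and thus prints the 64-bit counter from dev_get_stats()
 * as /sys/class/net/<iface>/statistics/rx_packets.
 */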

NETSTAT_ENTRY(rx_packets);
NETSTAT_ENTRY(tx_packets);
NETSTAT_ENTRY(rx_bytes);
NETSTAT_ENTRY(tx_bytes);
NETSTAT_ENTRY(rx_errors);
NETSTAT_ENTRY(tx_errors);
NETSTAT_ENTRY(rx_dropped);
NETSTAT_ENTRY(tx_dropped);
NETSTAT_ENTRY(multicast);
NETSTAT_ENTRY(collisions);
NETSTAT_ENTRY(rx_length_errors);
NETSTAT_ENTRY(rx_over_errors);
NETSTAT_ENTRY(rx_crc_errors);
NETSTAT_ENTRY(rx_frame_errors);
NETSTAT_ENTRY(rx_fifo_errors);
NETSTAT_ENTRY(rx_missed_errors);
NETSTAT_ENTRY(tx_aborted_errors);
NETSTAT_ENTRY(tx_carrier_errors);
NETSTAT_ENTRY(tx_fifo_errors);
NETSTAT_ENTRY(tx_heartbeat_errors);
NETSTAT_ENTRY(tx_window_errors);
NETSTAT_ENTRY(rx_compressed);
NETSTAT_ENTRY(tx_compressed);
NETSTAT_ENTRY(rx_nohandler);

static struct attribute *netstat_attrs[] __ro_after_init = {
	&dev_attr_rx_packets.attr,
	&dev_attr_tx_packets.attr,
	&dev_attr_rx_bytes.attr,
	&dev_attr_tx_bytes.attr,
	&dev_attr_rx_errors.attr,
	&dev_attr_tx_errors.attr,
	&dev_attr_rx_dropped.attr,
	&dev_attr_tx_dropped.attr,
	&dev_attr_multicast.attr,
	&dev_attr_collisions.attr,
	&dev_attr_rx_length_errors.attr,
	&dev_attr_rx_over_errors.attr,
	&dev_attr_rx_crc_errors.attr,
	&dev_attr_rx_frame_errors.attr,
	&dev_attr_rx_fifo_errors.attr,
	&dev_attr_rx_missed_errors.attr,
	&dev_attr_tx_aborted_errors.attr,
	&dev_attr_tx_carrier_errors.attr,
	&dev_attr_tx_fifo_errors.attr,
	&dev_attr_tx_heartbeat_errors.attr,
	&dev_attr_tx_window_errors.attr,
	&dev_attr_rx_compressed.attr,
	&dev_attr_tx_compressed.attr,
	&dev_attr_rx_nohandler.attr,
	NULL
};

static const struct attribute_group netstat_group = {
	.name = "statistics",
	.attrs = netstat_attrs,
};

#if IS_ENABLED(CONFIG_WIRELESS_EXT) || IS_ENABLED(CONFIG_CFG80211)
static struct attribute *wireless_attrs[] = {
	NULL
};

static const struct attribute_group wireless_group = {
	.name = "wireless",
	.attrs = wireless_attrs,
};
#endif

#else /* CONFIG_SYSFS */
#define net_class_groups	NULL
#endif /* CONFIG_SYSFS */

#ifdef CONFIG_SYSFS
#define to_rx_queue_attr(_attr) \
	container_of(_attr, struct rx_queue_attribute, attr)

#define to_rx_queue(obj) container_of(obj, struct netdev_rx_queue, kobj)

static ssize_t rx_queue_attr_show(struct kobject *kobj, struct attribute *attr,
				  char *buf)
{
	const struct rx_queue_attribute *attribute = to_rx_queue_attr(attr);
	struct netdev_rx_queue *queue = to_rx_queue(kobj);

	if (!attribute->show)
		return -EIO;

	return attribute->show(queue, buf);
}

static ssize_t rx_queue_attr_store(struct kobject *kobj, struct attribute *attr,
				   const char *buf, size_t count)
{
	const struct rx_queue_attribute *attribute = to_rx_queue_attr(attr);
	struct netdev_rx_queue *queue = to_rx_queue(kobj);

	if (!attribute->store)
		return -EIO;

	return attribute->store(queue, buf, count);
}

static const struct sysfs_ops rx_queue_sysfs_ops = {
	.show = rx_queue_attr_show,
	.store = rx_queue_attr_store,
};

#ifdef CONFIG_RPS
static ssize_t show_rps_map(struct netdev_rx_queue *queue, char *buf)
{
	struct rps_map *map;
	cpumask_var_t mask;
	int i, len;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	rcu_read_lock();
	map = rcu_dereference(queue->rps_map);
	if (map)
		for (i = 0; i < map->len; i++)
			cpumask_set_cpu(map->cpus[i], mask);

	len = snprintf(buf, PAGE_SIZE, "%*pb\n", cpumask_pr_args(mask));
	rcu_read_unlock();
	free_cpumask_var(mask);

	return len < PAGE_SIZE ? len : -EINVAL;
}
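
/*
 * In show_rps_map() above, the "%*pb" conversion prints the cpumask as a hex
 * bitmap, so reading /sys/class/net/<iface>/queues/rx-<n>/rps_cpus yields
 * something like "f" when the map covers CPUs 0-3 (the value is only an
 * example; the exact width depends on the number of possible CPUs).
 */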

static ssize_t store_rps_map(struct netdev_rx_queue *queue,
			     const char *buf, size_t len)
{
	struct rps_map *old_map, *map;
	cpumask_var_t mask;
	int err, cpu, i;
	static DEFINE_MUTEX(rps_map_mutex);

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
	if (err) {
		free_cpumask_var(mask);
		return err;
	}

	map = kzalloc(max_t(unsigned int,
			    RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES),
		      GFP_KERNEL);
	if (!map) {
		free_cpumask_var(mask);
		return -ENOMEM;
	}

	i = 0;
	for_each_cpu_and(cpu, mask, cpu_online_mask)
		map->cpus[i++] = cpu;

	if (i) {
		map->len = i;
	} else {
		kfree(map);
		map = NULL;
	}

	mutex_lock(&rps_map_mutex);
	old_map = rcu_dereference_protected(queue->rps_map,
					    mutex_is_locked(&rps_map_mutex));
	rcu_assign_pointer(queue->rps_map, map);

	if (map)
		static_branch_inc(&rps_needed);
	if (old_map)
		static_branch_dec(&rps_needed);

	mutex_unlock(&rps_map_mutex);

	if (old_map)
		kfree_rcu(old_map, rcu);

	free_cpumask_var(mask);
	return len;
}
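
/*
 * Usage sketch for store_rps_map() above (values are illustrative only):
 * "echo f > /sys/class/net/eth0/queues/rx-0/rps_cpus" is parsed by
 * bitmap_parse(), builds an rps_map holding the online CPUs among 0-3,
 * publishes it with rcu_assign_pointer() and bumps the rps_needed static
 * branch; writing "0" drops the map again.
 */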

static ssize_t show_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
					   char *buf)
{
	struct rps_dev_flow_table *flow_table;
	unsigned long val = 0;

	rcu_read_lock();
	flow_table = rcu_dereference(queue->rps_flow_table);
	if (flow_table)
		val = (unsigned long)flow_table->mask + 1;
	rcu_read_unlock();

	return sprintf(buf, "%lu\n", val);
}

static void rps_dev_flow_table_release(struct rcu_head *rcu)
{
	struct rps_dev_flow_table *table = container_of(rcu,
	    struct rps_dev_flow_table, rcu);
	vfree(table);
}

static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
					    const char *buf, size_t len)
{
	unsigned long mask, count;
	struct rps_dev_flow_table *table, *old_table;
	static DEFINE_SPINLOCK(rps_dev_flow_lock);
	int rc;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	rc = kstrtoul(buf, 0, &count);
	if (rc < 0)
		return rc;

	if (count) {
		mask = count - 1;
		/* mask = roundup_pow_of_two(count) - 1;
		 * without overflows...
		 */
		while ((mask | (mask >> 1)) != mask)
			mask |= (mask >> 1);
		/* On 64 bit arches, must check mask fits in table->mask (u32),
		 * and on 32bit arches, must check
		 * RPS_DEV_FLOW_TABLE_SIZE(mask + 1) doesn't overflow.
		 */
#if BITS_PER_LONG > 32
		if (mask > (unsigned long)(u32)mask)
			return -EINVAL;
#else
		if (mask > (ULONG_MAX - RPS_DEV_FLOW_TABLE_SIZE(1))
				/ sizeof(struct rps_dev_flow)) {
			/* Enforce a limit to prevent overflow */
			return -EINVAL;
		}
#endif
		table = vmalloc(RPS_DEV_FLOW_TABLE_SIZE(mask + 1));
		if (!table)
			return -ENOMEM;

		table->mask = mask;
		for (count = 0; count <= mask; count++)
			table->flows[count].cpu = RPS_NO_CPU;
	} else {
		table = NULL;
	}

	spin_lock(&rps_dev_flow_lock);
	old_table = rcu_dereference_protected(queue->rps_flow_table,
					      lockdep_is_held(&rps_dev_flow_lock));
	rcu_assign_pointer(queue->rps_flow_table, table);
	spin_unlock(&rps_dev_flow_lock);

	if (old_table)
		call_rcu(&old_table->rcu, rps_dev_flow_table_release);

	return len;
}
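
/*
 * Worked example for store_rps_dev_flow_table_cnt() above: writing 100 to
 * rps_flow_cnt gives count = 100, the rounding loop produces mask = 127, and
 * a 128-entry rps_dev_flow_table is allocated with every flow initialised to
 * RPS_NO_CPU.
 */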

static struct rx_queue_attribute rps_cpus_attribute __ro_after_init
	= __ATTR(rps_cpus, 0644, show_rps_map, store_rps_map);

static struct rx_queue_attribute rps_dev_flow_table_cnt_attribute __ro_after_init
	= __ATTR(rps_flow_cnt, 0644,
		 show_rps_dev_flow_table_cnt, store_rps_dev_flow_table_cnt);
#endif /* CONFIG_RPS */

static struct attribute *rx_queue_default_attrs[] __ro_after_init = {
#ifdef CONFIG_RPS
	&rps_cpus_attribute.attr,
	&rps_dev_flow_table_cnt_attribute.attr,
#endif
	NULL
};
ATTRIBUTE_GROUPS(rx_queue_default);

static void rx_queue_release(struct kobject *kobj)
{
	struct netdev_rx_queue *queue = to_rx_queue(kobj);
#ifdef CONFIG_RPS
	struct rps_map *map;
	struct rps_dev_flow_table *flow_table;

	map = rcu_dereference_protected(queue->rps_map, 1);
	if (map) {
		RCU_INIT_POINTER(queue->rps_map, NULL);
		kfree_rcu(map, rcu);
	}

	flow_table = rcu_dereference_protected(queue->rps_flow_table, 1);
	if (flow_table) {
		RCU_INIT_POINTER(queue->rps_flow_table, NULL);
		call_rcu(&flow_table->rcu, rps_dev_flow_table_release);
	}
#endif

	memset(kobj, 0, sizeof(*kobj));
	dev_put(queue->dev);
}

static const void *rx_queue_namespace(struct kobject *kobj)
{
	struct netdev_rx_queue *queue = to_rx_queue(kobj);
	struct device *dev = &queue->dev->dev;
	const void *ns = NULL;

	if (dev->class && dev->class->ns_type)
		ns = dev->class->namespace(dev);

	return ns;
}

static void rx_queue_get_ownership(struct kobject *kobj,
				   kuid_t *uid, kgid_t *gid)
{
	const struct net *net = rx_queue_namespace(kobj);

	net_ns_get_ownership(net, uid, gid);
}

static struct kobj_type rx_queue_ktype __ro_after_init = {
	.sysfs_ops = &rx_queue_sysfs_ops,
	.release = rx_queue_release,
	.default_groups = rx_queue_default_groups,
	.namespace = rx_queue_namespace,
	.get_ownership = rx_queue_get_ownership,
};

static int rx_queue_add_kobject(struct net_device *dev, int index)
{
	struct netdev_rx_queue *queue = dev->_rx + index;
	struct kobject *kobj = &queue->kobj;
	int error = 0;

	kobj->kset = dev->queues_kset;
	error = kobject_init_and_add(kobj, &rx_queue_ktype, NULL,
				     "rx-%u", index);
	if (error)
		goto err;

	dev_hold(queue->dev);

	if (dev->sysfs_rx_queue_group) {
		error = sysfs_create_group(kobj, dev->sysfs_rx_queue_group);
		if (error)
			goto err;
	}

	kobject_uevent(kobj, KOBJ_ADD);

	return error;

| 940 | err: |
| 941 | kobject_put(kobj); |
| 942 | return error; |
Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 943 | } |
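/* Each receive queue thus appears as an "rx-<index>" kobject inside the
 * device's "queues" kset, i.e. roughly /sys/class/net/<dev>/queues/rx-0/,
 * carrying the default attributes above plus any driver-supplied
 * sysfs_rx_queue_group.
 */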
Paul Bolle | 80dd6ea | 2014-02-09 14:07:11 +0100 | [diff] [blame] | 944 | #endif /* CONFIG_SYSFS */ |
Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 945 | |
Ben Hutchings | 62fe0b4 | 2010-09-27 08:24:33 +0000 | [diff] [blame] | 946 | int |
WANG Cong | 6b53daf | 2014-07-23 16:09:10 -0700 | [diff] [blame] | 947 | net_rx_queue_update_kobjects(struct net_device *dev, int old_num, int new_num) |
Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 948 | { |
Michael Dalton | a953be5 | 2014-01-16 22:23:28 -0800 | [diff] [blame] | 949 | #ifdef CONFIG_SYSFS |
Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 950 | int i; |
| 951 | int error = 0; |
| 952 | |
Michael Dalton | a953be5 | 2014-01-16 22:23:28 -0800 | [diff] [blame] | 953 | #ifndef CONFIG_RPS |
WANG Cong | 6b53daf | 2014-07-23 16:09:10 -0700 | [diff] [blame] | 954 | if (!dev->sysfs_rx_queue_group) |
Michael Dalton | a953be5 | 2014-01-16 22:23:28 -0800 | [diff] [blame] | 955 | return 0; |
| 956 | #endif |
Ben Hutchings | 62fe0b4 | 2010-09-27 08:24:33 +0000 | [diff] [blame] | 957 | for (i = old_num; i < new_num; i++) { |
WANG Cong | 6b53daf | 2014-07-23 16:09:10 -0700 | [diff] [blame] | 958 | error = rx_queue_add_kobject(dev, i); |
Ben Hutchings | 62fe0b4 | 2010-09-27 08:24:33 +0000 | [diff] [blame] | 959 | if (error) { |
| 960 | new_num = old_num; |
Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 961 | break; |
Ben Hutchings | 62fe0b4 | 2010-09-27 08:24:33 +0000 | [diff] [blame] | 962 | } |
Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 963 | } |
| 964 | |
Michael Dalton | a953be5 | 2014-01-16 22:23:28 -0800 | [diff] [blame] | 965 | while (--i >= new_num) { |
Andrey Vagin | 002d8a1 | 2016-10-24 19:09:53 -0700 | [diff] [blame] | 966 | struct kobject *kobj = &dev->_rx[i].kobj; |
| 967 | |
Kirill Tkhai | 273c28b | 2018-01-12 18:28:31 +0300 | [diff] [blame] | 968 | if (!refcount_read(&dev_net(dev)->count)) |
Andrey Vagin | 002d8a1 | 2016-10-24 19:09:53 -0700 | [diff] [blame] | 969 | kobj->uevent_suppress = 1; |
WANG Cong | 6b53daf | 2014-07-23 16:09:10 -0700 | [diff] [blame] | 970 | if (dev->sysfs_rx_queue_group) |
Andrey Vagin | 002d8a1 | 2016-10-24 19:09:53 -0700 | [diff] [blame] | 971 | sysfs_remove_group(kobj, dev->sysfs_rx_queue_group); |
| 972 | kobject_put(kobj); |
Michael Dalton | a953be5 | 2014-01-16 22:23:28 -0800 | [diff] [blame] | 973 | } |
Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 974 | |
| 975 | return error; |
Tom Herbert | bf26414 | 2010-11-26 08:36:09 +0000 | [diff] [blame] | 976 | #else |
| 977 | return 0; |
| 978 | #endif |
Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 979 | } |
| 980 | |
david decotigny | ccf5ff6 | 2011-11-16 12:15:10 +0000 | [diff] [blame] | 981 | #ifdef CONFIG_SYSFS |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 982 | /* |
| 983 | * netdev_queue sysfs structures and functions. |
| 984 | */ |
| 985 | struct netdev_queue_attribute { |
| 986 | struct attribute attr; |
stephen hemminger | 718ad68 | 2017-08-18 13:46:24 -0700 | [diff] [blame] | 987 | ssize_t (*show)(struct netdev_queue *queue, char *buf); |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 988 | ssize_t (*store)(struct netdev_queue *queue, |
stephen hemminger | 718ad68 | 2017-08-18 13:46:24 -0700 | [diff] [blame] | 989 | const char *buf, size_t len); |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 990 | }; |
stephen hemminger | 6648c65 | 2017-08-18 13:46:28 -0700 | [diff] [blame] | 991 | #define to_netdev_queue_attr(_attr) \ |
| 992 | container_of(_attr, struct netdev_queue_attribute, attr) |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 993 | |
| 994 | #define to_netdev_queue(obj) container_of(obj, struct netdev_queue, kobj) |
| 995 | |
| 996 | static ssize_t netdev_queue_attr_show(struct kobject *kobj, |
| 997 | struct attribute *attr, char *buf) |
Ben Hutchings | 62fe0b4 | 2010-09-27 08:24:33 +0000 | [diff] [blame] | 998 | { |
stephen hemminger | 667e427 | 2017-08-18 13:46:27 -0700 | [diff] [blame] | 999 | const struct netdev_queue_attribute *attribute |
| 1000 | = to_netdev_queue_attr(attr); |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1001 | struct netdev_queue *queue = to_netdev_queue(kobj); |
| 1002 | |
| 1003 | if (!attribute->show) |
| 1004 | return -EIO; |
| 1005 | |
stephen hemminger | 718ad68 | 2017-08-18 13:46:24 -0700 | [diff] [blame] | 1006 | return attribute->show(queue, buf); |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1007 | } |
| 1008 | |
| 1009 | static ssize_t netdev_queue_attr_store(struct kobject *kobj, |
| 1010 | struct attribute *attr, |
| 1011 | const char *buf, size_t count) |
| 1012 | { |
stephen hemminger | 667e427 | 2017-08-18 13:46:27 -0700 | [diff] [blame] | 1013 | const struct netdev_queue_attribute *attribute |
| 1014 | = to_netdev_queue_attr(attr); |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1015 | struct netdev_queue *queue = to_netdev_queue(kobj); |
| 1016 | |
| 1017 | if (!attribute->store) |
| 1018 | return -EIO; |
| 1019 | |
stephen hemminger | 718ad68 | 2017-08-18 13:46:24 -0700 | [diff] [blame] | 1020 | return attribute->store(queue, buf, count); |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1021 | } |
| 1022 | |
| 1023 | static const struct sysfs_ops netdev_queue_sysfs_ops = { |
| 1024 | .show = netdev_queue_attr_show, |
| 1025 | .store = netdev_queue_attr_store, |
| 1026 | }; |
| 1027 | |
stephen hemminger | 2b9c758 | 2017-08-18 13:46:26 -0700 | [diff] [blame] | 1028 | static ssize_t tx_timeout_show(struct netdev_queue *queue, char *buf) |
david decotigny | ccf5ff6 | 2011-11-16 12:15:10 +0000 | [diff] [blame] | 1029 | { |
| 1030 | unsigned long trans_timeout; |
| 1031 | |
| 1032 | spin_lock_irq(&queue->_xmit_lock); |
| 1033 | trans_timeout = queue->trans_timeout; |
| 1034 | spin_unlock_irq(&queue->_xmit_lock); |
| 1035 | |
| 1036 | return sprintf(buf, "%lu", trans_timeout); |
| 1037 | } |
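/* tx_timeout is a plain counter of detected transmit timeouts on this queue;
 * note that, as written above, the value is emitted without a trailing
 * newline. Illustrative read (placeholder device):
 *
 *   cat /sys/class/net/eth0/queues/tx-0/tx_timeout
 */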
| 1038 | |
Thadeu Lima de Souza Cascardo | c4047f5 | 2015-09-15 18:28:00 -0300 | [diff] [blame] | 1039 | static unsigned int get_netdev_queue_index(struct netdev_queue *queue) |
John Fastabend | 822b3b2 | 2015-03-18 14:57:33 +0200 | [diff] [blame] | 1040 | { |
| 1041 | struct net_device *dev = queue->dev; |
Thadeu Lima de Souza Cascardo | c4047f5 | 2015-09-15 18:28:00 -0300 | [diff] [blame] | 1042 | unsigned int i; |
John Fastabend | 822b3b2 | 2015-03-18 14:57:33 +0200 | [diff] [blame] | 1043 | |
Thadeu Lima de Souza Cascardo | c4047f5 | 2015-09-15 18:28:00 -0300 | [diff] [blame] | 1044 | i = queue - dev->_tx; |
John Fastabend | 822b3b2 | 2015-03-18 14:57:33 +0200 | [diff] [blame] | 1045 | BUG_ON(i >= dev->num_tx_queues); |
| 1046 | |
| 1047 | return i; |
| 1048 | } |
| 1049 | |
stephen hemminger | 2b9c758 | 2017-08-18 13:46:26 -0700 | [diff] [blame] | 1050 | static ssize_t traffic_class_show(struct netdev_queue *queue, |
Alexander Duyck | 8d059b0 | 2016-10-28 11:43:49 -0400 | [diff] [blame] | 1051 | char *buf) |
| 1052 | { |
| 1053 | struct net_device *dev = queue->dev; |
Alexander Duyck | d7be977 | 2018-07-09 12:19:32 -0400 | [diff] [blame] | 1054 | int index; |
| 1055 | int tc; |
Alexander Duyck | 8d059b0 | 2016-10-28 11:43:49 -0400 | [diff] [blame] | 1056 | |
Alexander Duyck | d7be977 | 2018-07-09 12:19:32 -0400 | [diff] [blame] | 1057 | if (!netif_is_multiqueue(dev)) |
| 1058 | return -ENOENT; |
| 1059 | |
| 1060 | index = get_netdev_queue_index(queue); |
Alexander Duyck | ffcfe25 | 2018-07-09 12:19:38 -0400 | [diff] [blame] | 1061 | |
| 1062 | /* If queue belongs to subordinate dev use its TC mapping */ |
| 1063 | dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev; |
| 1064 | |
Alexander Duyck | d7be977 | 2018-07-09 12:19:32 -0400 | [diff] [blame] | 1065 | tc = netdev_txq_to_tc(dev, index); |
Alexander Duyck | 8d059b0 | 2016-10-28 11:43:49 -0400 | [diff] [blame] | 1066 | if (tc < 0) |
| 1067 | return -EINVAL; |
| 1068 | |
Alexander Duyck | ffcfe25 | 2018-07-09 12:19:38 -0400 | [diff] [blame] | 1069 | /* We can report the traffic class in one of two ways: |
| 1070 | * a queue that belongs to a subordinate device is reported with the |
| 1071 | * traffic class first and then the subordinate index, so for example |
| 1072 | * TC0 on subordinate device 2 is reported as "0-2". If the queue |
| 1073 | * belongs to the root device, only the traffic class is reported, |
| 1074 | * e.g. just "0" for TC 0. |
| 1075 | */ |
| 1076 | return dev->num_tc < 0 ? sprintf(buf, "%u%d\n", tc, dev->num_tc) : |
| 1077 | sprintf(buf, "%u\n", tc); |
Alexander Duyck | 8d059b0 | 2016-10-28 11:43:49 -0400 | [diff] [blame] | 1078 | } |
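/* Illustrative reads (placeholder device), matching the comment above:
 *
 *   cat /sys/class/net/eth0/queues/tx-0/traffic_class
 *   0      <- queue mapped to TC 0 on the root device
 *   0-2    <- queue mapped to TC 0 on subordinate device 2
 */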
| 1079 | |
| 1080 | #ifdef CONFIG_XPS |
stephen hemminger | 2b9c758 | 2017-08-18 13:46:26 -0700 | [diff] [blame] | 1081 | static ssize_t tx_maxrate_show(struct netdev_queue *queue, |
John Fastabend | 822b3b2 | 2015-03-18 14:57:33 +0200 | [diff] [blame] | 1082 | char *buf) |
| 1083 | { |
| 1084 | return sprintf(buf, "%lu\n", queue->tx_maxrate); |
| 1085 | } |
| 1086 | |
stephen hemminger | 2b9c758 | 2017-08-18 13:46:26 -0700 | [diff] [blame] | 1087 | static ssize_t tx_maxrate_store(struct netdev_queue *queue, |
| 1088 | const char *buf, size_t len) |
John Fastabend | 822b3b2 | 2015-03-18 14:57:33 +0200 | [diff] [blame] | 1089 | { |
| 1090 | struct net_device *dev = queue->dev; |
| 1091 | int err, index = get_netdev_queue_index(queue); |
| 1092 | u32 rate = 0; |
| 1093 | |
Tyler Hicks | 3033fce | 2018-07-20 21:56:51 +0000 | [diff] [blame] | 1094 | if (!capable(CAP_NET_ADMIN)) |
| 1095 | return -EPERM; |
| 1096 | |
John Fastabend | 822b3b2 | 2015-03-18 14:57:33 +0200 | [diff] [blame] | 1097 | err = kstrtou32(buf, 10, &rate); |
| 1098 | if (err < 0) |
| 1099 | return err; |
| 1100 | |
| 1101 | if (!rtnl_trylock()) |
| 1102 | return restart_syscall(); |
| 1103 | |
| 1104 | err = -EOPNOTSUPP; |
| 1105 | if (dev->netdev_ops->ndo_set_tx_maxrate) |
| 1106 | err = dev->netdev_ops->ndo_set_tx_maxrate(dev, index, rate); |
| 1107 | |
| 1108 | rtnl_unlock(); |
| 1109 | if (!err) { |
| 1110 | queue->tx_maxrate = rate; |
| 1111 | return len; |
| 1112 | } |
| 1113 | return err; |
| 1114 | } |
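/* tx_maxrate is only writable with CAP_NET_ADMIN and only takes effect if the
 * driver implements ndo_set_tx_maxrate() (otherwise the write fails with
 * -EOPNOTSUPP). The value is conventionally a rate in Mb/s, with 0 usually
 * meaning "no limit", e.g. (placeholder device):
 *
 *   echo 100 > /sys/class/net/eth0/queues/tx-0/tx_maxrate
 */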
| 1115 | |
stephen hemminger | 2b9c758 | 2017-08-18 13:46:26 -0700 | [diff] [blame] | 1116 | static struct netdev_queue_attribute queue_tx_maxrate __ro_after_init |
| 1117 | = __ATTR_RW(tx_maxrate); |
John Fastabend | 822b3b2 | 2015-03-18 14:57:33 +0200 | [diff] [blame] | 1118 | #endif |
| 1119 | |
stephen hemminger | 2b9c758 | 2017-08-18 13:46:26 -0700 | [diff] [blame] | 1120 | static struct netdev_queue_attribute queue_trans_timeout __ro_after_init |
| 1121 | = __ATTR_RO(tx_timeout); |
david decotigny | ccf5ff6 | 2011-11-16 12:15:10 +0000 | [diff] [blame] | 1122 | |
stephen hemminger | 2b9c758 | 2017-08-18 13:46:26 -0700 | [diff] [blame] | 1123 | static struct netdev_queue_attribute queue_traffic_class __ro_after_init |
| 1124 | = __ATTR_RO(traffic_class); |
Alexander Duyck | 8d059b0 | 2016-10-28 11:43:49 -0400 | [diff] [blame] | 1125 | |
Tom Herbert | 114cf58 | 2011-11-28 16:33:09 +0000 | [diff] [blame] | 1126 | #ifdef CONFIG_BQL |
| 1127 | /* |
| 1128 | * Byte queue limits sysfs structures and functions. |
| 1129 | */ |
| 1130 | static ssize_t bql_show(char *buf, unsigned int value) |
| 1131 | { |
| 1132 | return sprintf(buf, "%u\n", value); |
| 1133 | } |
| 1134 | |
| 1135 | static ssize_t bql_set(const char *buf, const size_t count, |
| 1136 | unsigned int *pvalue) |
| 1137 | { |
| 1138 | unsigned int value; |
| 1139 | int err; |
| 1140 | |
stephen hemminger | 6648c65 | 2017-08-18 13:46:28 -0700 | [diff] [blame] | 1141 | if (!strcmp(buf, "max") || !strcmp(buf, "max\n")) { |
Tom Herbert | 114cf58 | 2011-11-28 16:33:09 +0000 | [diff] [blame] | 1142 | value = DQL_MAX_LIMIT; |
stephen hemminger | 6648c65 | 2017-08-18 13:46:28 -0700 | [diff] [blame] | 1143 | } else { |
Tom Herbert | 114cf58 | 2011-11-28 16:33:09 +0000 | [diff] [blame] | 1144 | err = kstrtouint(buf, 10, &value); |
| 1145 | if (err < 0) |
| 1146 | return err; |
| 1147 | if (value > DQL_MAX_LIMIT) |
| 1148 | return -EINVAL; |
| 1149 | } |
| 1150 | |
| 1151 | *pvalue = value; |
| 1152 | |
| 1153 | return count; |
| 1154 | } |
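/* bql_set() accepts either a byte count or the literal string "max", which
 * maps to DQL_MAX_LIMIT, e.g. (placeholder paths):
 *
 *   echo max    > /sys/class/net/eth0/queues/tx-0/byte_queue_limits/limit_max
 *   echo 262144 > /sys/class/net/eth0/queues/tx-0/byte_queue_limits/limit_max
 */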
| 1155 | |
| 1156 | static ssize_t bql_show_hold_time(struct netdev_queue *queue, |
Tom Herbert | 114cf58 | 2011-11-28 16:33:09 +0000 | [diff] [blame] | 1157 | char *buf) |
| 1158 | { |
| 1159 | struct dql *dql = &queue->dql; |
| 1160 | |
| 1161 | return sprintf(buf, "%u\n", jiffies_to_msecs(dql->slack_hold_time)); |
| 1162 | } |
| 1163 | |
| 1164 | static ssize_t bql_set_hold_time(struct netdev_queue *queue, |
Tom Herbert | 114cf58 | 2011-11-28 16:33:09 +0000 | [diff] [blame] | 1165 | const char *buf, size_t len) |
| 1166 | { |
| 1167 | struct dql *dql = &queue->dql; |
Eric Dumazet | 95c9617 | 2012-04-15 05:58:06 +0000 | [diff] [blame] | 1168 | unsigned int value; |
Tom Herbert | 114cf58 | 2011-11-28 16:33:09 +0000 | [diff] [blame] | 1169 | int err; |
| 1170 | |
| 1171 | err = kstrtouint(buf, 10, &value); |
| 1172 | if (err < 0) |
| 1173 | return err; |
| 1174 | |
| 1175 | dql->slack_hold_time = msecs_to_jiffies(value); |
| 1176 | |
| 1177 | return len; |
| 1178 | } |
| 1179 | |
stephen hemminger | 170c658 | 2017-08-18 13:46:25 -0700 | [diff] [blame] | 1180 | static struct netdev_queue_attribute bql_hold_time_attribute __ro_after_init |
Joe Perches | d644406 | 2018-03-23 15:54:38 -0700 | [diff] [blame] | 1181 | = __ATTR(hold_time, 0644, |
stephen hemminger | 170c658 | 2017-08-18 13:46:25 -0700 | [diff] [blame] | 1182 | bql_show_hold_time, bql_set_hold_time); |
Tom Herbert | 114cf58 | 2011-11-28 16:33:09 +0000 | [diff] [blame] | 1183 | |
| 1184 | static ssize_t bql_show_inflight(struct netdev_queue *queue, |
Tom Herbert | 114cf58 | 2011-11-28 16:33:09 +0000 | [diff] [blame] | 1185 | char *buf) |
| 1186 | { |
| 1187 | struct dql *dql = &queue->dql; |
| 1188 | |
| 1189 | return sprintf(buf, "%u\n", dql->num_queued - dql->num_completed); |
| 1190 | } |
| 1191 | |
stephen hemminger | 170c658 | 2017-08-18 13:46:25 -0700 | [diff] [blame] | 1192 | static struct netdev_queue_attribute bql_inflight_attribute __ro_after_init = |
Joe Perches | d644406 | 2018-03-23 15:54:38 -0700 | [diff] [blame] | 1193 | __ATTR(inflight, 0444, bql_show_inflight, NULL); |
Tom Herbert | 114cf58 | 2011-11-28 16:33:09 +0000 | [diff] [blame] | 1194 | |
| 1195 | #define BQL_ATTR(NAME, FIELD) \ |
| 1196 | static ssize_t bql_show_ ## NAME(struct netdev_queue *queue, \ |
Tom Herbert | 114cf58 | 2011-11-28 16:33:09 +0000 | [diff] [blame] | 1197 | char *buf) \ |
| 1198 | { \ |
| 1199 | return bql_show(buf, queue->dql.FIELD); \ |
| 1200 | } \ |
| 1201 | \ |
| 1202 | static ssize_t bql_set_ ## NAME(struct netdev_queue *queue, \ |
Tom Herbert | 114cf58 | 2011-11-28 16:33:09 +0000 | [diff] [blame] | 1203 | const char *buf, size_t len) \ |
| 1204 | { \ |
| 1205 | return bql_set(buf, len, &queue->dql.FIELD); \ |
| 1206 | } \ |
| 1207 | \ |
stephen hemminger | 170c658 | 2017-08-18 13:46:25 -0700 | [diff] [blame] | 1208 | static struct netdev_queue_attribute bql_ ## NAME ## _attribute __ro_after_init \ |
Joe Perches | d644406 | 2018-03-23 15:54:38 -0700 | [diff] [blame] | 1209 | = __ATTR(NAME, 0644, \ |
stephen hemminger | 170c658 | 2017-08-18 13:46:25 -0700 | [diff] [blame] | 1210 | bql_show_ ## NAME, bql_set_ ## NAME) |
Tom Herbert | 114cf58 | 2011-11-28 16:33:09 +0000 | [diff] [blame] | 1211 | |
stephen hemminger | 170c658 | 2017-08-18 13:46:25 -0700 | [diff] [blame] | 1212 | BQL_ATTR(limit, limit); |
| 1213 | BQL_ATTR(limit_max, max_limit); |
| 1214 | BQL_ATTR(limit_min, min_limit); |
Tom Herbert | 114cf58 | 2011-11-28 16:33:09 +0000 | [diff] [blame] | 1215 | |
stephen hemminger | 170c658 | 2017-08-18 13:46:25 -0700 | [diff] [blame] | 1216 | static struct attribute *dql_attrs[] __ro_after_init = { |
Tom Herbert | 114cf58 | 2011-11-28 16:33:09 +0000 | [diff] [blame] | 1217 | &bql_limit_attribute.attr, |
| 1218 | &bql_limit_max_attribute.attr, |
| 1219 | &bql_limit_min_attribute.attr, |
| 1220 | &bql_hold_time_attribute.attr, |
| 1221 | &bql_inflight_attribute.attr, |
| 1222 | NULL |
| 1223 | }; |
| 1224 | |
Arvind Yadav | 38ef00c | 2017-06-29 16:31:26 +0530 | [diff] [blame] | 1225 | static const struct attribute_group dql_group = { |
Tom Herbert | 114cf58 | 2011-11-28 16:33:09 +0000 | [diff] [blame] | 1226 | .name = "byte_queue_limits", |
| 1227 | .attrs = dql_attrs, |
| 1228 | }; |
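/* The group above materialises as a "byte_queue_limits" subdirectory of each
 * tx-<n> queue directory, so the BQL state of a queue can be inspected with,
 * for example (placeholder device):
 *
 *   grep . /sys/class/net/eth0/queues/tx-0/byte_queue_limits/*
 */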
| 1229 | #endif /* CONFIG_BQL */ |
| 1230 | |
david decotigny | ccf5ff6 | 2011-11-16 12:15:10 +0000 | [diff] [blame] | 1231 | #ifdef CONFIG_XPS |
stephen hemminger | 2b9c758 | 2017-08-18 13:46:26 -0700 | [diff] [blame] | 1232 | static ssize_t xps_cpus_show(struct netdev_queue *queue, |
| 1233 | char *buf) |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1234 | { |
| 1235 | struct net_device *dev = queue->dev; |
Alexander Duyck | 184c449 | 2016-10-28 11:50:13 -0400 | [diff] [blame] | 1236 | int cpu, len, num_tc = 1, tc = 0; |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1237 | struct xps_dev_maps *dev_maps; |
| 1238 | cpumask_var_t mask; |
| 1239 | unsigned long index; |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1240 | |
Alexander Duyck | d7be977 | 2018-07-09 12:19:32 -0400 | [diff] [blame] | 1241 | if (!netif_is_multiqueue(dev)) |
| 1242 | return -ENOENT; |
| 1243 | |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1244 | index = get_netdev_queue_index(queue); |
| 1245 | |
Alexander Duyck | 184c449 | 2016-10-28 11:50:13 -0400 | [diff] [blame] | 1246 | if (dev->num_tc) { |
Alexander Duyck | ffcfe25 | 2018-07-09 12:19:38 -0400 | [diff] [blame] | 1247 | /* Do not allow XPS on subordinate device directly */ |
Alexander Duyck | 184c449 | 2016-10-28 11:50:13 -0400 | [diff] [blame] | 1248 | num_tc = dev->num_tc; |
Alexander Duyck | ffcfe25 | 2018-07-09 12:19:38 -0400 | [diff] [blame] | 1249 | if (num_tc < 0) |
| 1250 | return -EINVAL; |
| 1251 | |
| 1252 | /* If queue belongs to subordinate dev use its map */ |
| 1253 | dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev; |
| 1254 | |
Alexander Duyck | 184c449 | 2016-10-28 11:50:13 -0400 | [diff] [blame] | 1255 | tc = netdev_txq_to_tc(dev, index); |
| 1256 | if (tc < 0) |
| 1257 | return -EINVAL; |
| 1258 | } |
| 1259 | |
Alexander Duyck | 664088f | 2018-05-31 15:59:46 -0400 | [diff] [blame] | 1260 | if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) |
| 1261 | return -ENOMEM; |
| 1262 | |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1263 | rcu_read_lock(); |
Amritha Nambiar | 80d1966 | 2018-06-29 21:26:41 -0700 | [diff] [blame] | 1264 | dev_maps = rcu_dereference(dev->xps_cpus_map); |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1265 | if (dev_maps) { |
Alexander Duyck | 184c449 | 2016-10-28 11:50:13 -0400 | [diff] [blame] | 1266 | for_each_possible_cpu(cpu) { |
| 1267 | int i, tci = cpu * num_tc + tc; |
| 1268 | struct xps_map *map; |
| 1269 | |
Amritha Nambiar | 80d1966 | 2018-06-29 21:26:41 -0700 | [diff] [blame] | 1270 | map = rcu_dereference(dev_maps->attr_map[tci]); |
Alexander Duyck | 184c449 | 2016-10-28 11:50:13 -0400 | [diff] [blame] | 1271 | if (!map) |
| 1272 | continue; |
| 1273 | |
| 1274 | for (i = map->len; i--;) { |
| 1275 | if (map->queues[i] == index) { |
| 1276 | cpumask_set_cpu(cpu, mask); |
| 1277 | break; |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1278 | } |
| 1279 | } |
| 1280 | } |
| 1281 | } |
| 1282 | rcu_read_unlock(); |
| 1283 | |
Tejun Heo | f090682 | 2015-02-13 14:37:42 -0800 | [diff] [blame] | 1284 | len = snprintf(buf, PAGE_SIZE, "%*pb\n", cpumask_pr_args(mask)); |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1285 | free_cpumask_var(mask); |
Tejun Heo | f090682 | 2015-02-13 14:37:42 -0800 | [diff] [blame] | 1286 | return len < PAGE_SIZE ? len : -EINVAL; |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1287 | } |
| 1288 | |
stephen hemminger | 2b9c758 | 2017-08-18 13:46:26 -0700 | [diff] [blame] | 1289 | static ssize_t xps_cpus_store(struct netdev_queue *queue, |
| 1290 | const char *buf, size_t len) |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1291 | { |
| 1292 | struct net_device *dev = queue->dev; |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1293 | unsigned long index; |
Alexander Duyck | 537c00d | 2013-01-10 08:57:02 +0000 | [diff] [blame] | 1294 | cpumask_var_t mask; |
| 1295 | int err; |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1296 | |
Alexander Duyck | d7be977 | 2018-07-09 12:19:32 -0400 | [diff] [blame] | 1297 | if (!netif_is_multiqueue(dev)) |
| 1298 | return -ENOENT; |
| 1299 | |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1300 | if (!capable(CAP_NET_ADMIN)) |
| 1301 | return -EPERM; |
| 1302 | |
| 1303 | if (!alloc_cpumask_var(&mask, GFP_KERNEL)) |
| 1304 | return -ENOMEM; |
| 1305 | |
| 1306 | index = get_netdev_queue_index(queue); |
| 1307 | |
| 1308 | err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits); |
| 1309 | if (err) { |
| 1310 | free_cpumask_var(mask); |
| 1311 | return err; |
| 1312 | } |
| 1313 | |
Alexander Duyck | 537c00d | 2013-01-10 08:57:02 +0000 | [diff] [blame] | 1314 | err = netif_set_xps_queue(dev, mask, index); |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1315 | |
| 1316 | free_cpumask_var(mask); |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1317 | |
Alexander Duyck | 537c00d | 2013-01-10 08:57:02 +0000 | [diff] [blame] | 1318 | return err ? : len; |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1319 | } |
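/* xps_cpus reads back, and (for CAP_NET_ADMIN on multiqueue devices) accepts,
 * a hexadecimal bitmask of the CPUs allowed to select this tx queue, e.g.
 * (placeholder device):
 *
 *   echo e > /sys/class/net/eth0/queues/tx-0/xps_cpus   # CPUs 1-3
 *   cat /sys/class/net/eth0/queues/tx-0/xps_cpus
 */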
| 1320 | |
stephen hemminger | 2b9c758 | 2017-08-18 13:46:26 -0700 | [diff] [blame] | 1321 | static struct netdev_queue_attribute xps_cpus_attribute __ro_after_init |
| 1322 | = __ATTR_RW(xps_cpus); |
Amritha Nambiar | 8af2c06 | 2018-06-29 21:27:07 -0700 | [diff] [blame] | 1323 | |
| 1324 | static ssize_t xps_rxqs_show(struct netdev_queue *queue, char *buf) |
| 1325 | { |
| 1326 | struct net_device *dev = queue->dev; |
| 1327 | struct xps_dev_maps *dev_maps; |
| 1328 | unsigned long *mask, index; |
| 1329 | int j, len, num_tc = 1, tc = 0; |
| 1330 | |
| 1331 | index = get_netdev_queue_index(queue); |
| 1332 | |
| 1333 | if (dev->num_tc) { |
| 1334 | num_tc = dev->num_tc; |
| 1335 | tc = netdev_txq_to_tc(dev, index); |
| 1336 | if (tc < 0) |
| 1337 | return -EINVAL; |
| 1338 | } |
Andy Shevchenko | 29ca1c5 | 2019-03-04 11:48:56 +0200 | [diff] [blame] | 1339 | mask = bitmap_zalloc(dev->num_rx_queues, GFP_KERNEL); |
Amritha Nambiar | 8af2c06 | 2018-06-29 21:27:07 -0700 | [diff] [blame] | 1340 | if (!mask) |
| 1341 | return -ENOMEM; |
| 1342 | |
| 1343 | rcu_read_lock(); |
| 1344 | dev_maps = rcu_dereference(dev->xps_rxqs_map); |
| 1345 | if (!dev_maps) |
| 1346 | goto out_no_maps; |
| 1347 | |
| 1348 | for (j = -1; j = netif_attrmask_next(j, NULL, dev->num_rx_queues), |
| 1349 | j < dev->num_rx_queues;) { |
| 1350 | int i, tci = j * num_tc + tc; |
| 1351 | struct xps_map *map; |
| 1352 | |
| 1353 | map = rcu_dereference(dev_maps->attr_map[tci]); |
| 1354 | if (!map) |
| 1355 | continue; |
| 1356 | |
| 1357 | for (i = map->len; i--;) { |
| 1358 | if (map->queues[i] == index) { |
| 1359 | set_bit(j, mask); |
| 1360 | break; |
| 1361 | } |
| 1362 | } |
| 1363 | } |
| 1364 | out_no_maps: |
| 1365 | rcu_read_unlock(); |
| 1366 | |
| 1367 | len = bitmap_print_to_pagebuf(false, buf, mask, dev->num_rx_queues); |
Andy Shevchenko | 29ca1c5 | 2019-03-04 11:48:56 +0200 | [diff] [blame] | 1368 | bitmap_free(mask); |
Amritha Nambiar | 8af2c06 | 2018-06-29 21:27:07 -0700 | [diff] [blame] | 1369 | |
| 1370 | return len < PAGE_SIZE ? len : -EINVAL; |
| 1371 | } |
| 1372 | |
| 1373 | static ssize_t xps_rxqs_store(struct netdev_queue *queue, const char *buf, |
| 1374 | size_t len) |
| 1375 | { |
| 1376 | struct net_device *dev = queue->dev; |
| 1377 | struct net *net = dev_net(dev); |
| 1378 | unsigned long *mask, index; |
| 1379 | int err; |
| 1380 | |
| 1381 | if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) |
| 1382 | return -EPERM; |
| 1383 | |
Andy Shevchenko | 29ca1c5 | 2019-03-04 11:48:56 +0200 | [diff] [blame] | 1384 | mask = bitmap_zalloc(dev->num_rx_queues, GFP_KERNEL); |
Amritha Nambiar | 8af2c06 | 2018-06-29 21:27:07 -0700 | [diff] [blame] | 1385 | if (!mask) |
| 1386 | return -ENOMEM; |
| 1387 | |
| 1388 | index = get_netdev_queue_index(queue); |
| 1389 | |
| 1390 | err = bitmap_parse(buf, len, mask, dev->num_rx_queues); |
| 1391 | if (err) { |
Andy Shevchenko | 29ca1c5 | 2019-03-04 11:48:56 +0200 | [diff] [blame] | 1392 | bitmap_free(mask); |
Amritha Nambiar | 8af2c06 | 2018-06-29 21:27:07 -0700 | [diff] [blame] | 1393 | return err; |
| 1394 | } |
| 1395 | |
Andrei Vagin | 4d99f66 | 2018-08-08 20:07:35 -0700 | [diff] [blame] | 1396 | cpus_read_lock(); |
Amritha Nambiar | 8af2c06 | 2018-06-29 21:27:07 -0700 | [diff] [blame] | 1397 | err = __netif_set_xps_queue(dev, mask, index, true); |
Andrei Vagin | 4d99f66 | 2018-08-08 20:07:35 -0700 | [diff] [blame] | 1398 | cpus_read_unlock(); |
| 1399 | |
Andy Shevchenko | 29ca1c5 | 2019-03-04 11:48:56 +0200 | [diff] [blame] | 1400 | bitmap_free(mask); |
Amritha Nambiar | 8af2c06 | 2018-06-29 21:27:07 -0700 | [diff] [blame] | 1401 | return err ? : len; |
| 1402 | } |
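/* xps_rxqs is the receive-queue flavour of XPS: the mask names rx queue
 * indices rather than CPUs, so flows associated with those rx queues are
 * transmitted on this tx queue, e.g. (placeholder device):
 *
 *   echo 3 > /sys/class/net/eth0/queues/tx-0/xps_rxqs   # rx-0 and rx-1
 */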
| 1403 | |
| 1404 | static struct netdev_queue_attribute xps_rxqs_attribute __ro_after_init |
| 1405 | = __ATTR_RW(xps_rxqs); |
david decotigny | ccf5ff6 | 2011-11-16 12:15:10 +0000 | [diff] [blame] | 1406 | #endif /* CONFIG_XPS */ |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1407 | |
stephen hemminger | 2b9c758 | 2017-08-18 13:46:26 -0700 | [diff] [blame] | 1408 | static struct attribute *netdev_queue_default_attrs[] __ro_after_init = { |
david decotigny | ccf5ff6 | 2011-11-16 12:15:10 +0000 | [diff] [blame] | 1409 | &queue_trans_timeout.attr, |
Alexander Duyck | 8d059b0 | 2016-10-28 11:43:49 -0400 | [diff] [blame] | 1410 | &queue_traffic_class.attr, |
david decotigny | ccf5ff6 | 2011-11-16 12:15:10 +0000 | [diff] [blame] | 1411 | #ifdef CONFIG_XPS |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1412 | &xps_cpus_attribute.attr, |
Amritha Nambiar | 8af2c06 | 2018-06-29 21:27:07 -0700 | [diff] [blame] | 1413 | &xps_rxqs_attribute.attr, |
John Fastabend | 822b3b2 | 2015-03-18 14:57:33 +0200 | [diff] [blame] | 1414 | &queue_tx_maxrate.attr, |
david decotigny | ccf5ff6 | 2011-11-16 12:15:10 +0000 | [diff] [blame] | 1415 | #endif |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1416 | NULL |
| 1417 | }; |
Kimberly Brown | be0d692 | 2019-04-01 22:51:35 -0400 | [diff] [blame] | 1418 | ATTRIBUTE_GROUPS(netdev_queue_default); |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1419 | |
| 1420 | static void netdev_queue_release(struct kobject *kobj) |
| 1421 | { |
| 1422 | struct netdev_queue *queue = to_netdev_queue(kobj); |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1423 | |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1424 | memset(kobj, 0, sizeof(*kobj)); |
| 1425 | dev_put(queue->dev); |
| 1426 | } |
| 1427 | |
Weilong Chen | 82ef3d5 | 2014-01-16 17:24:31 +0800 | [diff] [blame] | 1428 | static const void *netdev_queue_namespace(struct kobject *kobj) |
| 1429 | { |
| 1430 | struct netdev_queue *queue = to_netdev_queue(kobj); |
| 1431 | struct device *dev = &queue->dev->dev; |
| 1432 | const void *ns = NULL; |
| 1433 | |
| 1434 | if (dev->class && dev->class->ns_type) |
| 1435 | ns = dev->class->namespace(dev); |
| 1436 | |
| 1437 | return ns; |
| 1438 | } |
| 1439 | |
Dmitry Torokhov | b0e37c0 | 2018-07-20 21:56:52 +0000 | [diff] [blame] | 1440 | static void netdev_queue_get_ownership(struct kobject *kobj, |
| 1441 | kuid_t *uid, kgid_t *gid) |
| 1442 | { |
| 1443 | const struct net *net = netdev_queue_namespace(kobj); |
| 1444 | |
| 1445 | net_ns_get_ownership(net, uid, gid); |
| 1446 | } |
| 1447 | |
stephen hemminger | 2b9c758 | 2017-08-18 13:46:26 -0700 | [diff] [blame] | 1448 | static struct kobj_type netdev_queue_ktype __ro_after_init = { |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1449 | .sysfs_ops = &netdev_queue_sysfs_ops, |
| 1450 | .release = netdev_queue_release, |
Kimberly Brown | be0d692 | 2019-04-01 22:51:35 -0400 | [diff] [blame] | 1451 | .default_groups = netdev_queue_default_groups, |
Weilong Chen | 82ef3d5 | 2014-01-16 17:24:31 +0800 | [diff] [blame] | 1452 | .namespace = netdev_queue_namespace, |
Dmitry Torokhov | b0e37c0 | 2018-07-20 21:56:52 +0000 | [diff] [blame] | 1453 | .get_ownership = netdev_queue_get_ownership, |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1454 | }; |
| 1455 | |
WANG Cong | 6b53daf | 2014-07-23 16:09:10 -0700 | [diff] [blame] | 1456 | static int netdev_queue_add_kobject(struct net_device *dev, int index) |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1457 | { |
WANG Cong | 6b53daf | 2014-07-23 16:09:10 -0700 | [diff] [blame] | 1458 | struct netdev_queue *queue = dev->_tx + index; |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1459 | struct kobject *kobj = &queue->kobj; |
| 1460 | int error = 0; |
| 1461 | |
WANG Cong | 6b53daf | 2014-07-23 16:09:10 -0700 | [diff] [blame] | 1462 | kobj->kset = dev->queues_kset; |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1463 | error = kobject_init_and_add(kobj, &netdev_queue_ktype, NULL, |
stephen hemminger | 6648c65 | 2017-08-18 13:46:28 -0700 | [diff] [blame] | 1464 | "tx-%u", index); |
Tom Herbert | 114cf58 | 2011-11-28 16:33:09 +0000 | [diff] [blame] | 1465 | if (error) |
Jouni Hogander | b8eb718 | 2019-11-20 09:08:16 +0200 | [diff] [blame] | 1466 | goto err; |
Tom Herbert | 114cf58 | 2011-11-28 16:33:09 +0000 | [diff] [blame] | 1467 | |
YueHaibing | a3e23f7 | 2019-03-19 10:16:53 +0800 | [diff] [blame] | 1468 | dev_hold(queue->dev); |
| 1469 | |
Tom Herbert | 114cf58 | 2011-11-28 16:33:09 +0000 | [diff] [blame] | 1470 | #ifdef CONFIG_BQL |
| 1471 | error = sysfs_create_group(kobj, &dql_group); |
Jouni Hogander | b8eb718 | 2019-11-20 09:08:16 +0200 | [diff] [blame] | 1472 | if (error) |
| 1473 | goto err; |
Tom Herbert | 114cf58 | 2011-11-28 16:33:09 +0000 | [diff] [blame] | 1474 | #endif |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1475 | |
| 1476 | kobject_uevent(kobj, KOBJ_ADD); |
Eric Dumazet | 48a322b | 2019-11-20 19:19:07 -0800 | [diff] [blame^] | 1477 | return 0; |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1478 | |
Jouni Hogander | b8eb718 | 2019-11-20 09:08:16 +0200 | [diff] [blame] | 1479 | err: |
| 1480 | kobject_put(kobj); |
| 1481 | return error; |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1482 | } |
david decotigny | ccf5ff6 | 2011-11-16 12:15:10 +0000 | [diff] [blame] | 1483 | #endif /* CONFIG_SYSFS */ |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1484 | |
| 1485 | int |
WANG Cong | 6b53daf | 2014-07-23 16:09:10 -0700 | [diff] [blame] | 1486 | netdev_queue_update_kobjects(struct net_device *dev, int old_num, int new_num) |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1487 | { |
david decotigny | ccf5ff6 | 2011-11-16 12:15:10 +0000 | [diff] [blame] | 1488 | #ifdef CONFIG_SYSFS |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1489 | int i; |
| 1490 | int error = 0; |
| 1491 | |
| 1492 | for (i = old_num; i < new_num; i++) { |
WANG Cong | 6b53daf | 2014-07-23 16:09:10 -0700 | [diff] [blame] | 1493 | error = netdev_queue_add_kobject(dev, i); |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1494 | if (error) { |
| 1495 | new_num = old_num; |
| 1496 | break; |
| 1497 | } |
| 1498 | } |
| 1499 | |
Tom Herbert | 114cf58 | 2011-11-28 16:33:09 +0000 | [diff] [blame] | 1500 | while (--i >= new_num) { |
WANG Cong | 6b53daf | 2014-07-23 16:09:10 -0700 | [diff] [blame] | 1501 | struct netdev_queue *queue = dev->_tx + i; |
Tom Herbert | 114cf58 | 2011-11-28 16:33:09 +0000 | [diff] [blame] | 1502 | |
Kirill Tkhai | 273c28b | 2018-01-12 18:28:31 +0300 | [diff] [blame] | 1503 | if (!refcount_read(&dev_net(dev)->count)) |
Andrey Vagin | 002d8a1 | 2016-10-24 19:09:53 -0700 | [diff] [blame] | 1504 | queue->kobj.uevent_suppress = 1; |
Tom Herbert | 114cf58 | 2011-11-28 16:33:09 +0000 | [diff] [blame] | 1505 | #ifdef CONFIG_BQL |
| 1506 | sysfs_remove_group(&queue->kobj, &dql_group); |
| 1507 | #endif |
| 1508 | kobject_put(&queue->kobj); |
| 1509 | } |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1510 | |
| 1511 | return error; |
Tom Herbert | bf26414 | 2010-11-26 08:36:09 +0000 | [diff] [blame] | 1512 | #else |
| 1513 | return 0; |
david decotigny | ccf5ff6 | 2011-11-16 12:15:10 +0000 | [diff] [blame] | 1514 | #endif /* CONFIG_SYSFS */ |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1515 | } |
| 1516 | |
WANG Cong | 6b53daf | 2014-07-23 16:09:10 -0700 | [diff] [blame] | 1517 | static int register_queue_kobjects(struct net_device *dev) |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1518 | { |
Tom Herbert | bf26414 | 2010-11-26 08:36:09 +0000 | [diff] [blame] | 1519 | int error = 0, txq = 0, rxq = 0, real_rx = 0, real_tx = 0; |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1520 | |
david decotigny | ccf5ff6 | 2011-11-16 12:15:10 +0000 | [diff] [blame] | 1521 | #ifdef CONFIG_SYSFS |
WANG Cong | 6b53daf | 2014-07-23 16:09:10 -0700 | [diff] [blame] | 1522 | dev->queues_kset = kset_create_and_add("queues", |
stephen hemminger | 6648c65 | 2017-08-18 13:46:28 -0700 | [diff] [blame] | 1523 | NULL, &dev->dev.kobj); |
WANG Cong | 6b53daf | 2014-07-23 16:09:10 -0700 | [diff] [blame] | 1524 | if (!dev->queues_kset) |
Ben Hutchings | 62fe0b4 | 2010-09-27 08:24:33 +0000 | [diff] [blame] | 1525 | return -ENOMEM; |
WANG Cong | 6b53daf | 2014-07-23 16:09:10 -0700 | [diff] [blame] | 1526 | real_rx = dev->real_num_rx_queues; |
Tom Herbert | bf26414 | 2010-11-26 08:36:09 +0000 | [diff] [blame] | 1527 | #endif |
WANG Cong | 6b53daf | 2014-07-23 16:09:10 -0700 | [diff] [blame] | 1528 | real_tx = dev->real_num_tx_queues; |
Tom Herbert | bf26414 | 2010-11-26 08:36:09 +0000 | [diff] [blame] | 1529 | |
WANG Cong | 6b53daf | 2014-07-23 16:09:10 -0700 | [diff] [blame] | 1530 | error = net_rx_queue_update_kobjects(dev, 0, real_rx); |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1531 | if (error) |
| 1532 | goto error; |
Tom Herbert | bf26414 | 2010-11-26 08:36:09 +0000 | [diff] [blame] | 1533 | rxq = real_rx; |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1534 | |
WANG Cong | 6b53daf | 2014-07-23 16:09:10 -0700 | [diff] [blame] | 1535 | error = netdev_queue_update_kobjects(dev, 0, real_tx); |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1536 | if (error) |
| 1537 | goto error; |
Tom Herbert | bf26414 | 2010-11-26 08:36:09 +0000 | [diff] [blame] | 1538 | txq = real_tx; |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1539 | |
| 1540 | return 0; |
| 1541 | |
| 1542 | error: |
WANG Cong | 6b53daf | 2014-07-23 16:09:10 -0700 | [diff] [blame] | 1543 | netdev_queue_update_kobjects(dev, txq, 0); |
| 1544 | net_rx_queue_update_kobjects(dev, rxq, 0); |
YueHaibing | 895a5e9 | 2019-03-02 10:34:55 +0800 | [diff] [blame] | 1545 | #ifdef CONFIG_SYSFS |
| 1546 | kset_unregister(dev->queues_kset); |
| 1547 | #endif |
Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1548 | return error; |
Ben Hutchings | 62fe0b4 | 2010-09-27 08:24:33 +0000 | [diff] [blame] | 1549 | } |
| 1550 | |
WANG Cong | 6b53daf | 2014-07-23 16:09:10 -0700 | [diff] [blame] | 1551 | static void remove_queue_kobjects(struct net_device *dev) |
Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 1552 | { |
Tom Herbert | bf26414 | 2010-11-26 08:36:09 +0000 | [diff] [blame] | 1553 | int real_rx = 0, real_tx = 0; |
| 1554 | |
Michael Dalton | a953be5 | 2014-01-16 22:23:28 -0800 | [diff] [blame] | 1555 | #ifdef CONFIG_SYSFS |
WANG Cong | 6b53daf | 2014-07-23 16:09:10 -0700 | [diff] [blame] | 1556 | real_rx = dev->real_num_rx_queues; |
Tom Herbert | bf26414 | 2010-11-26 08:36:09 +0000 | [diff] [blame] | 1557 | #endif |
WANG Cong | 6b53daf | 2014-07-23 16:09:10 -0700 | [diff] [blame] | 1558 | real_tx = dev->real_num_tx_queues; |
Tom Herbert | bf26414 | 2010-11-26 08:36:09 +0000 | [diff] [blame] | 1559 | |
WANG Cong | 6b53daf | 2014-07-23 16:09:10 -0700 | [diff] [blame] | 1560 | net_rx_queue_update_kobjects(dev, real_rx, 0); |
| 1561 | netdev_queue_update_kobjects(dev, real_tx, 0); |
david decotigny | ccf5ff6 | 2011-11-16 12:15:10 +0000 | [diff] [blame] | 1562 | #ifdef CONFIG_SYSFS |
WANG Cong | 6b53daf | 2014-07-23 16:09:10 -0700 | [diff] [blame] | 1563 | kset_unregister(dev->queues_kset); |
Tom Herbert | bf26414 | 2010-11-26 08:36:09 +0000 | [diff] [blame] | 1564 | #endif |
Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 1565 | } |
Eric W. Biederman | 608b4b9 | 2010-05-04 17:36:45 -0700 | [diff] [blame] | 1566 | |
Eric W. Biederman | 7dc5dbc | 2013-03-25 20:07:01 -0700 | [diff] [blame] | 1567 | static bool net_current_may_mount(void) |
| 1568 | { |
| 1569 | struct net *net = current->nsproxy->net_ns; |
| 1570 | |
| 1571 | return ns_capable(net->user_ns, CAP_SYS_ADMIN); |
| 1572 | } |
| 1573 | |
Al Viro | a685e08 | 2011-06-08 21:13:01 -0400 | [diff] [blame] | 1574 | static void *net_grab_current_ns(void) |
Eric W. Biederman | 608b4b9 | 2010-05-04 17:36:45 -0700 | [diff] [blame] | 1575 | { |
Al Viro | a685e08 | 2011-06-08 21:13:01 -0400 | [diff] [blame] | 1576 | struct net *ns = current->nsproxy->net_ns; |
| 1577 | #ifdef CONFIG_NET_NS |
| 1578 | if (ns) |
Reshetova, Elena | c122e14 | 2017-06-30 13:08:08 +0300 | [diff] [blame] | 1579 | refcount_inc(&ns->passive); |
Al Viro | a685e08 | 2011-06-08 21:13:01 -0400 | [diff] [blame] | 1580 | #endif |
| 1581 | return ns; |
Eric W. Biederman | 608b4b9 | 2010-05-04 17:36:45 -0700 | [diff] [blame] | 1582 | } |
| 1583 | |
| 1584 | static const void *net_initial_ns(void) |
| 1585 | { |
| 1586 | return &init_net; |
| 1587 | } |
| 1588 | |
| 1589 | static const void *net_netlink_ns(struct sock *sk) |
| 1590 | { |
| 1591 | return sock_net(sk); |
| 1592 | } |
| 1593 | |
stephen hemminger | 737aec5 | 2017-08-18 13:46:22 -0700 | [diff] [blame] | 1594 | const struct kobj_ns_type_operations net_ns_type_operations = { |
Eric W. Biederman | 608b4b9 | 2010-05-04 17:36:45 -0700 | [diff] [blame] | 1595 | .type = KOBJ_NS_TYPE_NET, |
Eric W. Biederman | 7dc5dbc | 2013-03-25 20:07:01 -0700 | [diff] [blame] | 1596 | .current_may_mount = net_current_may_mount, |
Al Viro | a685e08 | 2011-06-08 21:13:01 -0400 | [diff] [blame] | 1597 | .grab_current_ns = net_grab_current_ns, |
Eric W. Biederman | 608b4b9 | 2010-05-04 17:36:45 -0700 | [diff] [blame] | 1598 | .netlink_ns = net_netlink_ns, |
| 1599 | .initial_ns = net_initial_ns, |
Al Viro | a685e08 | 2011-06-08 21:13:01 -0400 | [diff] [blame] | 1600 | .drop_ns = net_drop_ns, |
Eric W. Biederman | 608b4b9 | 2010-05-04 17:36:45 -0700 | [diff] [blame] | 1601 | }; |
Johannes Berg | 0460079 | 2010-08-05 17:45:15 +0200 | [diff] [blame] | 1602 | EXPORT_SYMBOL_GPL(net_ns_type_operations); |
Eric W. Biederman | 608b4b9 | 2010-05-04 17:36:45 -0700 | [diff] [blame] | 1603 | |
Kay Sievers | 7eff2e7 | 2007-08-14 15:15:12 +0200 | [diff] [blame] | 1604 | static int netdev_uevent(struct device *d, struct kobj_uevent_env *env) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1605 | { |
Greg Kroah-Hartman | 43cb76d | 2002-04-09 12:14:34 -0700 | [diff] [blame] | 1606 | struct net_device *dev = to_net_dev(d); |
Kay Sievers | 7eff2e7 | 2007-08-14 15:15:12 +0200 | [diff] [blame] | 1607 | int retval; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1608 | |
Kay Sievers | 312c004 | 2005-11-16 09:00:00 +0100 | [diff] [blame] | 1609 | /* pass interface to uevent. */ |
Kay Sievers | 7eff2e7 | 2007-08-14 15:15:12 +0200 | [diff] [blame] | 1610 | retval = add_uevent_var(env, "INTERFACE=%s", dev->name); |
Eric Rannaud | bf62456 | 2007-03-30 22:23:12 -0700 | [diff] [blame] | 1611 | if (retval) |
| 1612 | goto exit; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1613 | |
Jean Tourrilhes | ca2f37d | 2007-03-07 10:49:30 -0800 | [diff] [blame] | 1614 | /* pass ifindex to uevent. |
| 1615 | * ifindex is useful as it won't change (interface name may change) |
stephen hemminger | 6648c65 | 2017-08-18 13:46:28 -0700 | [diff] [blame] | 1616 | * and is what RtNetlink uses natively. |
| 1617 | */ |
Kay Sievers | 7eff2e7 | 2007-08-14 15:15:12 +0200 | [diff] [blame] | 1618 | retval = add_uevent_var(env, "IFINDEX=%d", dev->ifindex); |
Jean Tourrilhes | ca2f37d | 2007-03-07 10:49:30 -0800 | [diff] [blame] | 1619 | |
Eric Rannaud | bf62456 | 2007-03-30 22:23:12 -0700 | [diff] [blame] | 1620 | exit: |
Eric Rannaud | bf62456 | 2007-03-30 22:23:12 -0700 | [diff] [blame] | 1621 | return retval; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1622 | } |
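/* A netdev uevent therefore carries at least INTERFACE=<name> and
 * IFINDEX=<ifindex> in its environment, which is what udev rules typically
 * match on.
 */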
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1623 | |
| 1624 | /* |
YOSHIFUJI Hideaki | 4ec93ed | 2007-02-09 23:24:36 +0900 | [diff] [blame] | 1625 | * netdev_release -- destroy and free a dead device. |
Greg Kroah-Hartman | 43cb76d | 2002-04-09 12:14:34 -0700 | [diff] [blame] | 1626 | * Called when last reference to device kobject is gone. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1627 | */ |
Greg Kroah-Hartman | 43cb76d | 2002-04-09 12:14:34 -0700 | [diff] [blame] | 1628 | static void netdev_release(struct device *d) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1629 | { |
Greg Kroah-Hartman | 43cb76d | 2002-04-09 12:14:34 -0700 | [diff] [blame] | 1630 | struct net_device *dev = to_net_dev(d); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1631 | |
| 1632 | BUG_ON(dev->reg_state != NETREG_RELEASED); |
| 1633 | |
Florian Westphal | 6c55700 | 2017-10-02 23:50:05 +0200 | [diff] [blame] | 1634 | /* no need to wait for rcu grace period: |
| 1635 | * device is dead and about to be freed. |
| 1636 | */ |
| 1637 | kfree(rcu_access_pointer(dev->ifalias)); |
Eric Dumazet | 74d332c | 2013-10-30 13:10:44 -0700 | [diff] [blame] | 1638 | netdev_freemem(dev); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1639 | } |
| 1640 | |
Eric W. Biederman | 608b4b9 | 2010-05-04 17:36:45 -0700 | [diff] [blame] | 1641 | static const void *net_namespace(struct device *d) |
| 1642 | { |
Geliang Tang | 5c29482 | 2015-12-22 23:11:49 +0800 | [diff] [blame] | 1643 | struct net_device *dev = to_net_dev(d); |
| 1644 | |
Eric W. Biederman | 608b4b9 | 2010-05-04 17:36:45 -0700 | [diff] [blame] | 1645 | return dev_net(dev); |
| 1646 | } |
| 1647 | |
Dmitry Torokhov | b0e37c0 | 2018-07-20 21:56:52 +0000 | [diff] [blame] | 1648 | static void net_get_ownership(struct device *d, kuid_t *uid, kgid_t *gid) |
| 1649 | { |
| 1650 | struct net_device *dev = to_net_dev(d); |
| 1651 | const struct net *net = dev_net(dev); |
| 1652 | |
| 1653 | net_ns_get_ownership(net, uid, gid); |
| 1654 | } |
| 1655 | |
stephen hemminger | e6d473e | 2017-08-18 13:46:21 -0700 | [diff] [blame] | 1656 | static struct class net_class __ro_after_init = { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1657 | .name = "net", |
Greg Kroah-Hartman | 43cb76d | 2002-04-09 12:14:34 -0700 | [diff] [blame] | 1658 | .dev_release = netdev_release, |
Greg Kroah-Hartman | 6be8aee | 2013-07-24 15:05:33 -0700 | [diff] [blame] | 1659 | .dev_groups = net_class_groups, |
Greg Kroah-Hartman | 43cb76d | 2002-04-09 12:14:34 -0700 | [diff] [blame] | 1660 | .dev_uevent = netdev_uevent, |
Eric W. Biederman | 608b4b9 | 2010-05-04 17:36:45 -0700 | [diff] [blame] | 1661 | .ns_type = &net_ns_type_operations, |
| 1662 | .namespace = net_namespace, |
Dmitry Torokhov | b0e37c0 | 2018-07-20 21:56:52 +0000 | [diff] [blame] | 1663 | .get_ownership = net_get_ownership, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1664 | }; |
| 1665 | |
Florian Fainelli | aa836df | 2015-03-09 14:31:20 -0700 | [diff] [blame] | 1666 | #ifdef CONFIG_OF_NET |
| 1667 | static int of_dev_node_match(struct device *dev, const void *data) |
| 1668 | { |
| 1669 | int ret = 0; |
| 1670 | |
| 1671 | if (dev->parent) |
| 1672 | ret = dev->parent->of_node == data; |
| 1673 | |
| 1674 | return ret == 0 ? dev->of_node == data : ret; |
| 1675 | } |
| 1676 | |
Russell King | 9861f72 | 2015-09-24 20:36:33 +0100 | [diff] [blame] | 1677 | /* |
| 1678 | * of_find_net_device_by_node - lookup the net device for the device node |
| 1679 | * @np: OF device node |
| 1680 | * |
| 1681 | * Looks up the net_device structure corresponding to the device node. |
| 1682 | * If successful, returns a pointer to the net_device with the embedded |
| 1683 | * struct device refcount incremented by one, or NULL on failure. The |
| 1684 | * refcount must be dropped when done with the net_device. |
| 1685 | */ |
Florian Fainelli | aa836df | 2015-03-09 14:31:20 -0700 | [diff] [blame] | 1686 | struct net_device *of_find_net_device_by_node(struct device_node *np) |
| 1687 | { |
| 1688 | struct device *dev; |
| 1689 | |
| 1690 | dev = class_find_device(&net_class, NULL, np, of_dev_node_match); |
| 1691 | if (!dev) |
| 1692 | return NULL; |
| 1693 | |
| 1694 | return to_net_dev(dev); |
| 1695 | } |
| 1696 | EXPORT_SYMBOL(of_find_net_device_by_node); |
| 1697 | #endif |
| 1698 | |
Stephen Hemminger | 9093bbb | 2007-05-19 15:39:25 -0700 | [diff] [blame] | 1699 | /* Delete sysfs entries but hold kobject reference until after all |
| 1700 | * netdev references are gone. |
| 1701 | */ |
WANG Cong | 6b53daf | 2014-07-23 16:09:10 -0700 | [diff] [blame] | 1702 | void netdev_unregister_kobject(struct net_device *ndev) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1703 | { |
stephen hemminger | 6648c65 | 2017-08-18 13:46:28 -0700 | [diff] [blame] | 1704 | struct device *dev = &ndev->dev; |
Stephen Hemminger | 9093bbb | 2007-05-19 15:39:25 -0700 | [diff] [blame] | 1705 | |
Kirill Tkhai | 273c28b | 2018-01-12 18:28:31 +0300 | [diff] [blame] | 1706 | if (!refcount_read(&dev_net(ndev)->count)) |
Andrey Vagin | 002d8a1 | 2016-10-24 19:09:53 -0700 | [diff] [blame] | 1707 | dev_set_uevent_suppress(dev, 1); |
| 1708 | |
Stephen Hemminger | 9093bbb | 2007-05-19 15:39:25 -0700 | [diff] [blame] | 1709 | kobject_get(&dev->kobj); |
Eric W. Biederman | 3891845 | 2008-10-27 17:51:47 -0700 | [diff] [blame] | 1710 | |
WANG Cong | 6b53daf | 2014-07-23 16:09:10 -0700 | [diff] [blame] | 1711 | remove_queue_kobjects(ndev); |
Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 1712 | |
Ming Lei | 9802c8e | 2013-02-22 16:34:16 -0800 | [diff] [blame] | 1713 | pm_runtime_set_memalloc_noio(dev, false); |
| 1714 | |
Stephen Hemminger | 9093bbb | 2007-05-19 15:39:25 -0700 | [diff] [blame] | 1715 | device_del(dev); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1716 | } |
| 1717 | |
| 1718 | /* Create sysfs entries for network device. */ |
WANG Cong | 6b53daf | 2014-07-23 16:09:10 -0700 | [diff] [blame] | 1719 | int netdev_register_kobject(struct net_device *ndev) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1720 | { |
stephen hemminger | 6648c65 | 2017-08-18 13:46:28 -0700 | [diff] [blame] | 1721 | struct device *dev = &ndev->dev; |
WANG Cong | 6b53daf | 2014-07-23 16:09:10 -0700 | [diff] [blame] | 1722 | const struct attribute_group **groups = ndev->sysfs_groups; |
Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 1723 | int error = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1724 | |
Eric W. Biederman | a1b3f59 | 2010-05-04 17:36:49 -0700 | [diff] [blame] | 1725 | device_initialize(dev); |
Greg Kroah-Hartman | 43cb76d | 2002-04-09 12:14:34 -0700 | [diff] [blame] | 1726 | dev->class = &net_class; |
WANG Cong | 6b53daf | 2014-07-23 16:09:10 -0700 | [diff] [blame] | 1727 | dev->platform_data = ndev; |
Greg Kroah-Hartman | 43cb76d | 2002-04-09 12:14:34 -0700 | [diff] [blame] | 1728 | dev->groups = groups; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1729 | |
WANG Cong | 6b53daf | 2014-07-23 16:09:10 -0700 | [diff] [blame] | 1730 | dev_set_name(dev, "%s", ndev->name); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1731 | |
Eric W. Biederman | 8b41d18 | 2007-09-26 22:02:53 -0700 | [diff] [blame] | 1732 | #ifdef CONFIG_SYSFS |
Eric W. Biederman | 0c509a6 | 2009-10-29 14:18:21 +0000 | [diff] [blame] | 1733 | /* Allow for a device specific group */ |
| 1734 | if (*groups) |
| 1735 | groups++; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1736 | |
Eric W. Biederman | 0c509a6 | 2009-10-29 14:18:21 +0000 | [diff] [blame] | 1737 | *groups++ = &netstat_group; |
Johannes Berg | 38c1a01 | 2012-11-16 20:46:19 +0100 | [diff] [blame] | 1738 | |
| 1739 | #if IS_ENABLED(CONFIG_WIRELESS_EXT) || IS_ENABLED(CONFIG_CFG80211) |
WANG Cong | 6b53daf | 2014-07-23 16:09:10 -0700 | [diff] [blame] | 1740 | if (ndev->ieee80211_ptr) |
Johannes Berg | 38c1a01 | 2012-11-16 20:46:19 +0100 | [diff] [blame] | 1741 | *groups++ = &wireless_group; |
| 1742 | #if IS_ENABLED(CONFIG_WIRELESS_EXT) |
WANG Cong | 6b53daf | 2014-07-23 16:09:10 -0700 | [diff] [blame] | 1743 | else if (ndev->wireless_handlers) |
Johannes Berg | 38c1a01 | 2012-11-16 20:46:19 +0100 | [diff] [blame] | 1744 | *groups++ = &wireless_group; |
| 1745 | #endif |
| 1746 | #endif |
Eric W. Biederman | 8b41d18 | 2007-09-26 22:02:53 -0700 | [diff] [blame] | 1747 | #endif /* CONFIG_SYSFS */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1748 | |
Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 1749 | error = device_add(dev); |
| 1750 | if (error) |
Wang Hai | 8ed633b | 2019-04-12 16:36:33 -0400 | [diff] [blame] | 1751 | return error; |
Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 1752 | |
WANG Cong | 6b53daf | 2014-07-23 16:09:10 -0700 | [diff] [blame] | 1753 | error = register_queue_kobjects(ndev); |
Wang Hai | 8ed633b | 2019-04-12 16:36:33 -0400 | [diff] [blame] | 1754 | if (error) { |
| 1755 | device_del(dev); |
| 1756 | return error; |
| 1757 | } |
Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 1758 | |
Ming Lei | 9802c8e | 2013-02-22 16:34:16 -0800 | [diff] [blame] | 1759 | pm_runtime_set_memalloc_noio(dev, true); |
| 1760 | |
Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 1761 | return error; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1762 | } |
| 1763 | |
stephen hemminger | b793dc5 | 2017-08-18 13:46:20 -0700 | [diff] [blame] | 1764 | int netdev_class_create_file_ns(const struct class_attribute *class_attr, |
Tejun Heo | 58292cbe | 2013-09-11 22:29:04 -0400 | [diff] [blame] | 1765 | const void *ns) |
Jay Vosburgh | b8a9787 | 2008-06-13 18:12:04 -0700 | [diff] [blame] | 1766 | { |
Tejun Heo | 58292cbe | 2013-09-11 22:29:04 -0400 | [diff] [blame] | 1767 | return class_create_file_ns(&net_class, class_attr, ns); |
Jay Vosburgh | b8a9787 | 2008-06-13 18:12:04 -0700 | [diff] [blame] | 1768 | } |
Tejun Heo | 58292cbe | 2013-09-11 22:29:04 -0400 | [diff] [blame] | 1769 | EXPORT_SYMBOL(netdev_class_create_file_ns); |
Jay Vosburgh | b8a9787 | 2008-06-13 18:12:04 -0700 | [diff] [blame] | 1770 | |
stephen hemminger | b793dc5 | 2017-08-18 13:46:20 -0700 | [diff] [blame] | 1771 | void netdev_class_remove_file_ns(const struct class_attribute *class_attr, |
Tejun Heo | 58292cbe | 2013-09-11 22:29:04 -0400 | [diff] [blame] | 1772 | const void *ns) |
Jay Vosburgh | b8a9787 | 2008-06-13 18:12:04 -0700 | [diff] [blame] | 1773 | { |
Tejun Heo | 58292cbe | 2013-09-11 22:29:04 -0400 | [diff] [blame] | 1774 | class_remove_file_ns(&net_class, class_attr, ns); |
Jay Vosburgh | b8a9787 | 2008-06-13 18:12:04 -0700 | [diff] [blame] | 1775 | } |
Tejun Heo | 58292cbe | 2013-09-11 22:29:04 -0400 | [diff] [blame] | 1776 | EXPORT_SYMBOL(netdev_class_remove_file_ns); |
Jay Vosburgh | b8a9787 | 2008-06-13 18:12:04 -0700 | [diff] [blame] | 1777 | |
Daniel Borkmann | a48d4bb | 2014-01-06 01:20:11 +0100 | [diff] [blame] | 1778 | int __init netdev_kobject_init(void) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1779 | { |
Eric W. Biederman | 608b4b9 | 2010-05-04 17:36:45 -0700 | [diff] [blame] | 1780 | kobj_ns_type_register(&net_ns_type_operations); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1781 | return class_register(&net_class); |
| 1782 | } |