// SPDX-License-Identifier: GPL-2.0-or-later
/* A network driver using virtio.
 *
 * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
 */
//#define DEBUG
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_net.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/scatterlist.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/average.h>
#include <linux/filter.h>
#include <linux/kernel.h>
#include <net/route.h>
#include <net/xdp.h>
#include <net/net_failover.h>

static int napi_weight = NAPI_POLL_WEIGHT;
module_param(napi_weight, int, 0444);

static bool csum = true, gso = true, napi_tx = true;
module_param(csum, bool, 0444);
module_param(gso, bool, 0444);
module_param(napi_tx, bool, 0644);

/* FIXME: MTU in config. */
#define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
#define GOOD_COPY_LEN	128

#define VIRTNET_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)

/* Amount of XDP headroom to prepend to packets for use by xdp_adjust_head */
#define VIRTIO_XDP_HEADROOM 256

/* Separating two types of XDP xmit */
#define VIRTIO_XDP_TX		BIT(0)
#define VIRTIO_XDP_REDIR	BIT(1)

#define VIRTIO_XDP_FLAG	BIT(0)

/* RX packet size EWMA. The average packet size is used to determine the packet
 * buffer size when refilling RX rings. As the entire RX ring may be refilled
 * at once, the weight is chosen so that the EWMA will be insensitive to short-
 * term, transient changes in packet size.
 */
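/* Concretely (semantics assumed from the DECLARE_EWMA() parameters below:
 * precision 0, weight reciprocal 64), each sample update amounts to
 *
 *	avg = (avg * 63 + sample) / 64;
 *
 * so one outlier packet only moves the average by ~1/64 of the difference.
 */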
DECLARE_EWMA(pkt_len, 0, 64)

#define VIRTNET_DRIVER_VERSION "1.0.0"

static const unsigned long guest_offloads[] = {
	VIRTIO_NET_F_GUEST_TSO4,
	VIRTIO_NET_F_GUEST_TSO6,
	VIRTIO_NET_F_GUEST_ECN,
	VIRTIO_NET_F_GUEST_UFO,
	VIRTIO_NET_F_GUEST_CSUM
};

struct virtnet_stat_desc {
	char desc[ETH_GSTRING_LEN];
	size_t offset;
};

struct virtnet_sq_stats {
	struct u64_stats_sync syncp;
	u64 packets;
	u64 bytes;
	u64 xdp_tx;
	u64 xdp_tx_drops;
	u64 kicks;
};

struct virtnet_rq_stats {
	struct u64_stats_sync syncp;
	u64 packets;
	u64 bytes;
	u64 drops;
	u64 xdp_packets;
	u64 xdp_tx;
	u64 xdp_redirects;
	u64 xdp_drops;
	u64 kicks;
};

#define VIRTNET_SQ_STAT(m)	offsetof(struct virtnet_sq_stats, m)
#define VIRTNET_RQ_STAT(m)	offsetof(struct virtnet_rq_stats, m)

static const struct virtnet_stat_desc virtnet_sq_stats_desc[] = {
	{ "packets",		VIRTNET_SQ_STAT(packets) },
	{ "bytes",		VIRTNET_SQ_STAT(bytes) },
	{ "xdp_tx",		VIRTNET_SQ_STAT(xdp_tx) },
	{ "xdp_tx_drops",	VIRTNET_SQ_STAT(xdp_tx_drops) },
	{ "kicks",		VIRTNET_SQ_STAT(kicks) },
};

static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = {
	{ "packets",		VIRTNET_RQ_STAT(packets) },
	{ "bytes",		VIRTNET_RQ_STAT(bytes) },
	{ "drops",		VIRTNET_RQ_STAT(drops) },
	{ "xdp_packets",	VIRTNET_RQ_STAT(xdp_packets) },
	{ "xdp_tx",		VIRTNET_RQ_STAT(xdp_tx) },
	{ "xdp_redirects",	VIRTNET_RQ_STAT(xdp_redirects) },
	{ "xdp_drops",		VIRTNET_RQ_STAT(xdp_drops) },
	{ "kicks",		VIRTNET_RQ_STAT(kicks) },
};

#define VIRTNET_SQ_STATS_LEN	ARRAY_SIZE(virtnet_sq_stats_desc)
#define VIRTNET_RQ_STATS_LEN	ARRAY_SIZE(virtnet_rq_stats_desc)
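
/* Sketch of how these tables are meant to be consumed (not the driver's
 * actual ethtool code): the name/offset pairs allow generic harvesting,
 * e.g.
 *
 *	for (i = 0; i < VIRTNET_SQ_STATS_LEN; i++)
 *		data[i] = *(u64 *)((u8 *)&sq->stats +
 *				   virtnet_sq_stats_desc[i].offset);
 */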

/* Internal representation of a send virtqueue */
struct send_queue {
	/* Virtqueue associated with this send_queue */
	struct virtqueue *vq;

	/* TX: fragments + linear part + virtio header */
	struct scatterlist sg[MAX_SKB_FRAGS + 2];

	/* Name of the send queue: output.$index */
	char name[40];

	struct virtnet_sq_stats stats;

	struct napi_struct napi;
};

/* Internal representation of a receive virtqueue */
struct receive_queue {
	/* Virtqueue associated with this receive_queue */
	struct virtqueue *vq;

	struct napi_struct napi;

	struct bpf_prog __rcu *xdp_prog;

	struct virtnet_rq_stats stats;

	/* Chain pages by the private ptr. */
	struct page *pages;

	/* Average packet length for mergeable receive buffers. */
	struct ewma_pkt_len mrg_avg_pkt_len;

	/* Page frag for packet buffer allocation. */
	struct page_frag alloc_frag;

	/* RX: fragments + linear part + virtio header */
	struct scatterlist sg[MAX_SKB_FRAGS + 2];

	/* Min single buffer size for mergeable buffers case. */
	unsigned int min_buf_len;

	/* Name of this receive queue: input.$index */
	char name[40];

	struct xdp_rxq_info xdp_rxq;
};

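/* Note: the control virtqueue references these buffers through
 * scatterlists, which need memory with valid physical addresses; keeping
 * them in a separately allocated struct (vi->ctrl) rather than on a
 * (possibly vmapped) stack satisfies that.
 */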
/* Control VQ buffers: protected by the rtnl lock */
struct control_buf {
	struct virtio_net_ctrl_hdr hdr;
	virtio_net_ctrl_ack status;
	struct virtio_net_ctrl_mq mq;
	u8 promisc;
	u8 allmulti;
	__virtio16 vid;
	__virtio64 offloads;
};

struct virtnet_info {
	struct virtio_device *vdev;
	struct virtqueue *cvq;
	struct net_device *dev;
	struct send_queue *sq;
	struct receive_queue *rq;
	unsigned int status;

	/* Max # of queue pairs supported by the device */
	u16 max_queue_pairs;

	/* # of queue pairs currently used by the driver */
	u16 curr_queue_pairs;

	/* # of XDP queue pairs currently used by the driver */
	u16 xdp_queue_pairs;

	/* I like... big packets and I cannot lie! */
	bool big_packets;

	/* Host will merge rx buffers for big packets (shake it! shake it!) */
	bool mergeable_rx_bufs;

	/* Has control virtqueue */
	bool has_cvq;

	/* Host can handle any s/g split between our header and packet data */
	bool any_header_sg;

	/* Packet virtio header size */
	u8 hdr_len;

	/* Work struct for refilling if we run low on memory. */
	struct delayed_work refill;

	/* Work struct for config space updates */
	struct work_struct config_work;

	/* Is the affinity hint set for virtqueues? */
	bool affinity_hint_set;

	/* CPU hotplug instances for online & dead */
	struct hlist_node node;
	struct hlist_node node_dead;

	struct control_buf *ctrl;

	/* Ethtool settings */
	u8 duplex;
	u32 speed;

	unsigned long guest_offloads;
	unsigned long guest_offloads_capable;

	/* failover when STANDBY feature enabled */
	struct failover *failover;
};

struct padded_vnet_hdr {
	struct virtio_net_hdr_mrg_rxbuf hdr;
	/*
	 * hdr is in a separate sg buffer, and the data sg buffer shares the
	 * same page with this header sg. This padding makes the next sg
	 * 16-byte aligned after the header.
	 */
	char padding[4];
};

static bool is_xdp_frame(void *ptr)
{
	return (unsigned long)ptr & VIRTIO_XDP_FLAG;
}

static void *xdp_to_ptr(struct xdp_frame *ptr)
{
	return (void *)((unsigned long)ptr | VIRTIO_XDP_FLAG);
}

static struct xdp_frame *ptr_to_xdp(void *ptr)
{
	return (struct xdp_frame *)((unsigned long)ptr & ~VIRTIO_XDP_FLAG);
}
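
/* Illustration: the tag relies on xdp_frame and sk_buff pointers being at
 * least 2-byte aligned, so bit 0 is free. An xdp_frame at address X is
 * stored in the tx ring as X | 1; is_xdp_frame() tests that bit and
 * ptr_to_xdp() masks it off again, while plain skb pointers keep bit 0
 * clear and are used unchanged.
 */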

/* Converting between virtqueue no. and kernel tx/rx queue no.
 * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq
 */
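/* For example, with two queue pairs the vqs are rx0=0, tx0=1, rx1=2, tx1=3
 * and cvq=4, so vq2txq() maps vq index 3 to tx queue 1 and rxq2vq() maps rx
 * queue 1 back to vq index 2.
 */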
static int vq2txq(struct virtqueue *vq)
{
	return (vq->index - 1) / 2;
}

static int txq2vq(int txq)
{
	return txq * 2 + 1;
}

static int vq2rxq(struct virtqueue *vq)
{
	return vq->index / 2;
}

static int rxq2vq(int rxq)
{
	return rxq * 2;
}

static inline struct virtio_net_hdr_mrg_rxbuf *skb_vnet_hdr(struct sk_buff *skb)
{
	return (struct virtio_net_hdr_mrg_rxbuf *)skb->cb;
}

/*
 * private is used to chain pages for big packets; put the whole
 * most recently used list at the front for reuse.
 */
static void give_pages(struct receive_queue *rq, struct page *page)
{
	struct page *end;

	/* Find end of list, sew whole thing into vi->rq.pages. */
	for (end = page; end->private; end = (struct page *)end->private);
	end->private = (unsigned long)rq->pages;
	rq->pages = page;
}

static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
{
	struct page *p = rq->pages;

	if (p) {
		rq->pages = (struct page *)p->private;
		/* clear private here, it is used to chain pages */
		p->private = 0;
	} else
		p = alloc_page(gfp_mask);
	return p;
}

static void virtqueue_napi_schedule(struct napi_struct *napi,
				    struct virtqueue *vq)
{
	if (napi_schedule_prep(napi)) {
		virtqueue_disable_cb(vq);
		__napi_schedule(napi);
	}
}

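/* Re-arm the virtqueue callback before completing NAPI; if buffers arrived
 * in that window, virtqueue_poll() on the prepared opaque value detects
 * them and NAPI is rescheduled instead of risking a missed interrupt.
 */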
static void virtqueue_napi_complete(struct napi_struct *napi,
				    struct virtqueue *vq, int processed)
{
	int opaque;

	opaque = virtqueue_enable_cb_prepare(vq);
	if (napi_complete_done(napi, processed)) {
		if (unlikely(virtqueue_poll(vq, opaque)))
			virtqueue_napi_schedule(napi, vq);
	} else {
		virtqueue_disable_cb(vq);
	}
}

static void skb_xmit_done(struct virtqueue *vq)
{
	struct virtnet_info *vi = vq->vdev->priv;
	struct napi_struct *napi = &vi->sq[vq2txq(vq)].napi;

	/* Suppress further interrupts. */
	virtqueue_disable_cb(vq);

	if (napi->weight)
		virtqueue_napi_schedule(napi, vq);
	else
		/* We were probably waiting for more output buffers. */
		netif_wake_subqueue(vi->dev, vq2txq(vq));
}

#define MRG_CTX_HEADER_SHIFT 22
static void *mergeable_len_to_ctx(unsigned int truesize,
				  unsigned int headroom)
{
	return (void *)(unsigned long)((headroom << MRG_CTX_HEADER_SHIFT) | truesize);
}

static unsigned int mergeable_ctx_to_headroom(void *mrg_ctx)
{
	return (unsigned long)mrg_ctx >> MRG_CTX_HEADER_SHIFT;
}

static unsigned int mergeable_ctx_to_truesize(void *mrg_ctx)
{
	return (unsigned long)mrg_ctx & ((1 << MRG_CTX_HEADER_SHIFT) - 1);
}
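
/* Illustration: the context packs headroom and truesize into one
 * pointer-sized word, e.g. mergeable_len_to_ctx(1536, 256) encodes to
 * (256 << 22) | 1536, from which mergeable_ctx_to_headroom() recovers 256
 * and mergeable_ctx_to_truesize() recovers 1536. Truesize must therefore
 * fit in the low 22 bits (below 4 MiB).
 */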

/* Called from bottom half context */
static struct sk_buff *page_to_skb(struct virtnet_info *vi,
				   struct receive_queue *rq,
				   struct page *page, unsigned int offset,
				   unsigned int len, unsigned int truesize,
				   bool hdr_valid, unsigned int metasize)
{
	struct sk_buff *skb;
	struct virtio_net_hdr_mrg_rxbuf *hdr;
	unsigned int copy, hdr_len, hdr_padded_len;
	char *p;

	p = page_address(page) + offset;

	/* copy small packet so we can reuse these pages for small data */
	skb = napi_alloc_skb(&rq->napi, GOOD_COPY_LEN);
	if (unlikely(!skb))
		return NULL;

	hdr = skb_vnet_hdr(skb);

	hdr_len = vi->hdr_len;
	if (vi->mergeable_rx_bufs)
		hdr_padded_len = sizeof(*hdr);
	else
		hdr_padded_len = sizeof(struct padded_vnet_hdr);

	/* hdr_valid means no XDP, so we can copy the vnet header */
	if (hdr_valid)
		memcpy(hdr, p, hdr_len);

	len -= hdr_len;
	offset += hdr_padded_len;
	p += hdr_padded_len;

	copy = len;
	if (copy > skb_tailroom(skb))
		copy = skb_tailroom(skb);
	skb_put_data(skb, p, copy);

	if (metasize) {
		__skb_pull(skb, metasize);
		skb_metadata_set(skb, metasize);
	}

	len -= copy;
	offset += copy;

	if (vi->mergeable_rx_bufs) {
		if (len)
			skb_add_rx_frag(skb, 0, page, offset, len, truesize);
		else
			put_page(page);
		return skb;
	}

	/*
	 * Verify that we can indeed put this data into a skb.
	 * This is here to handle cases when the device erroneously
	 * tries to receive more than is possible. This is usually
	 * the case of a broken device.
	 */
	if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) {
		net_dbg_ratelimited("%s: too much data\n", skb->dev->name);
		dev_kfree_skb(skb);
		return NULL;
	}
	BUG_ON(offset >= PAGE_SIZE);
	while (len) {
		unsigned int frag_size = min((unsigned)PAGE_SIZE - offset, len);
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset,
				frag_size, truesize);
		len -= frag_size;
		page = (struct page *)page->private;
		offset = 0;
	}

	if (page)
		give_pages(rq, page);

	return skb;
}

static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
				  struct send_queue *sq,
				  struct xdp_frame *xdpf)
{
	struct virtio_net_hdr_mrg_rxbuf *hdr;
	int err;

	if (unlikely(xdpf->headroom < vi->hdr_len))
		return -EOVERFLOW;

	/* Make room for virtqueue hdr (also change xdpf->headroom?) */
	xdpf->data -= vi->hdr_len;
	/* Zero header and leave csum up to XDP layers */
	hdr = xdpf->data;
	memset(hdr, 0, vi->hdr_len);
	xdpf->len += vi->hdr_len;

	sg_init_one(sq->sg, xdpf->data, xdpf->len);

	err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdp_to_ptr(xdpf),
				   GFP_ATOMIC);
	if (unlikely(err))
		return -ENOSPC; /* Caller handle free/refcnt */

	return 0;
}

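/* The last xdp_queue_pairs send queues are reserved for XDP, indexed by
 * the current CPU, so concurrent senders land on different queues
 * (assuming enough XDP queues were allocated at attach time, one per
 * possible CPU).
 */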
static struct send_queue *virtnet_xdp_sq(struct virtnet_info *vi)
{
	unsigned int qp;

	qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + smp_processor_id();
	return &vi->sq[qp];
}

static int virtnet_xdp_xmit(struct net_device *dev,
			    int n, struct xdp_frame **frames, u32 flags)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct receive_queue *rq = vi->rq;
	struct bpf_prog *xdp_prog;
	struct send_queue *sq;
	unsigned int len;
	int packets = 0;
	int bytes = 0;
	int drops = 0;
	int kicks = 0;
	int ret, err;
	void *ptr;
	int i;

	/* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this
	 * indicates XDP resources have been successfully allocated.
	 */
	xdp_prog = rcu_access_pointer(rq->xdp_prog);
	if (!xdp_prog)
		return -ENXIO;

	sq = virtnet_xdp_sq(vi);

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) {
		ret = -EINVAL;
		drops = n;
		goto out;
	}

	/* Free up any pending old buffers before queueing new ones. */
	while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
		if (likely(is_xdp_frame(ptr))) {
			struct xdp_frame *frame = ptr_to_xdp(ptr);

			bytes += frame->len;
			xdp_return_frame(frame);
		} else {
			struct sk_buff *skb = ptr;

			bytes += skb->len;
			napi_consume_skb(skb, false);
		}
		packets++;
	}

	for (i = 0; i < n; i++) {
		struct xdp_frame *xdpf = frames[i];

		err = __virtnet_xdp_xmit_one(vi, sq, xdpf);
		if (err) {
			xdp_return_frame_rx_napi(xdpf);
			drops++;
		}
	}
	ret = n - drops;

	if (flags & XDP_XMIT_FLUSH) {
		if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq))
			kicks = 1;
	}
out:
	u64_stats_update_begin(&sq->stats.syncp);
	sq->stats.bytes += bytes;
	sq->stats.packets += packets;
	sq->stats.xdp_tx += n;
	sq->stats.xdp_tx_drops += drops;
	sq->stats.kicks += kicks;
	u64_stats_update_end(&sq->stats.syncp);

	return ret;
}

static unsigned int virtnet_get_headroom(struct virtnet_info *vi)
{
	return vi->xdp_queue_pairs ? VIRTIO_XDP_HEADROOM : 0;
}

/* We copy the packet for XDP in the following cases:
 *
 * 1) Packet is scattered across multiple rx buffers.
 * 2) Headroom space is insufficient.
 *
 * This is inefficient, but it's a temporary condition that we hit right
 * after XDP is enabled and until the queue is refilled with large buffers
 * with sufficient headroom - so it should affect at most queue size packets.
 * Afterwards, the conditions to enable XDP should preclude the underlying
 * device from sending packets across multiple buffers (num_buf > 1), and we
 * make sure buffers have enough headroom.
 */
static struct page *xdp_linearize_page(struct receive_queue *rq,
				       u16 *num_buf,
				       struct page *p,
				       int offset,
				       int page_off,
				       unsigned int *len)
{
	struct page *page = alloc_page(GFP_ATOMIC);

	if (!page)
		return NULL;

	memcpy(page_address(page) + page_off, page_address(p) + offset, *len);
	page_off += *len;

	while (--*num_buf) {
		int tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
		unsigned int buflen;
		void *buf;
		int off;

		buf = virtqueue_get_buf(rq->vq, &buflen);
		if (unlikely(!buf))
			goto err_buf;

		p = virt_to_head_page(buf);
		off = buf - page_address(p);

		/* guard against a misconfigured or uncooperative backend that
		 * is sending packets larger than the MTU.
		 */
		if ((page_off + buflen + tailroom) > PAGE_SIZE) {
			put_page(p);
			goto err_buf;
		}

		memcpy(page_address(page) + page_off,
		       page_address(p) + off, buflen);
		page_off += buflen;
		put_page(p);
	}

	/* Headroom does not contribute to packet length */
	*len = page_off - VIRTIO_XDP_HEADROOM;
	return page;
err_buf:
	__free_pages(page, 0);
	return NULL;
}

static struct sk_buff *receive_small(struct net_device *dev,
				     struct virtnet_info *vi,
				     struct receive_queue *rq,
				     void *buf, void *ctx,
				     unsigned int len,
				     unsigned int *xdp_xmit,
				     struct virtnet_rq_stats *stats)
{
	struct sk_buff *skb;
	struct bpf_prog *xdp_prog;
	unsigned int xdp_headroom = (unsigned long)ctx;
	unsigned int header_offset = VIRTNET_RX_PAD + xdp_headroom;
	unsigned int headroom = vi->hdr_len + header_offset;
	unsigned int buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
			      SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	struct page *page = virt_to_head_page(buf);
	unsigned int delta = 0;
	struct page *xdp_page;
	int err;
	unsigned int metasize = 0;

	len -= vi->hdr_len;
	stats->bytes += len;

	rcu_read_lock();
	xdp_prog = rcu_dereference(rq->xdp_prog);
	if (xdp_prog) {
		struct virtio_net_hdr_mrg_rxbuf *hdr = buf + header_offset;
		struct xdp_frame *xdpf;
		struct xdp_buff xdp;
		void *orig_data;
		u32 act;

		if (unlikely(hdr->hdr.gso_type))
			goto err_xdp;

		if (unlikely(xdp_headroom < virtnet_get_headroom(vi))) {
			int offset = buf - page_address(page) + header_offset;
			unsigned int tlen = len + vi->hdr_len;
			u16 num_buf = 1;

			xdp_headroom = virtnet_get_headroom(vi);
			header_offset = VIRTNET_RX_PAD + xdp_headroom;
			headroom = vi->hdr_len + header_offset;
			buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
				 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
			xdp_page = xdp_linearize_page(rq, &num_buf, page,
						      offset, header_offset,
						      &tlen);
			if (!xdp_page)
				goto err_xdp;

			buf = page_address(xdp_page);
			put_page(page);
			page = xdp_page;
		}

		xdp.data_hard_start = buf + VIRTNET_RX_PAD + vi->hdr_len;
		xdp.data = xdp.data_hard_start + xdp_headroom;
		xdp.data_end = xdp.data + len;
		xdp.data_meta = xdp.data;
		xdp.rxq = &rq->xdp_rxq;
		orig_data = xdp.data;
		act = bpf_prog_run_xdp(xdp_prog, &xdp);
		stats->xdp_packets++;

		switch (act) {
		case XDP_PASS:
			/* Recalculate length in case bpf program changed it */
			delta = orig_data - xdp.data;
			len = xdp.data_end - xdp.data;
			metasize = xdp.data - xdp.data_meta;
			break;
		case XDP_TX:
			stats->xdp_tx++;
			xdpf = convert_to_xdp_frame(&xdp);
			if (unlikely(!xdpf))
				goto err_xdp;
			err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
			if (unlikely(err < 0)) {
				trace_xdp_exception(vi->dev, xdp_prog, act);
				goto err_xdp;
			}
			*xdp_xmit |= VIRTIO_XDP_TX;
			rcu_read_unlock();
			goto xdp_xmit;
		case XDP_REDIRECT:
			stats->xdp_redirects++;
			err = xdp_do_redirect(dev, &xdp, xdp_prog);
			if (err)
				goto err_xdp;
			*xdp_xmit |= VIRTIO_XDP_REDIR;
			rcu_read_unlock();
			goto xdp_xmit;
		default:
			bpf_warn_invalid_xdp_action(act);
			/* fall through */
		case XDP_ABORTED:
			trace_xdp_exception(vi->dev, xdp_prog, act);
		case XDP_DROP:
			goto err_xdp;
		}
	}
	rcu_read_unlock();

	skb = build_skb(buf, buflen);
	if (!skb) {
		put_page(page);
		goto err;
	}
	skb_reserve(skb, headroom - delta);
	skb_put(skb, len);
	if (!xdp_prog) {
		buf += header_offset;
		memcpy(skb_vnet_hdr(skb), buf, vi->hdr_len);
	} /* keep zeroed vnet hdr since XDP is loaded */

	if (metasize)
		skb_metadata_set(skb, metasize);

err:
	return skb;

err_xdp:
	rcu_read_unlock();
	stats->xdp_drops++;
	stats->drops++;
	put_page(page);
xdp_xmit:
	return NULL;
}

static struct sk_buff *receive_big(struct net_device *dev,
				   struct virtnet_info *vi,
				   struct receive_queue *rq,
				   void *buf,
				   unsigned int len,
				   struct virtnet_rq_stats *stats)
{
	struct page *page = buf;
	struct sk_buff *skb =
		page_to_skb(vi, rq, page, 0, len, PAGE_SIZE, true, 0);

	stats->bytes += len - vi->hdr_len;
	if (unlikely(!skb))
		goto err;

	return skb;

err:
	stats->drops++;
	give_pages(rq, page);
	return NULL;
}

static struct sk_buff *receive_mergeable(struct net_device *dev,
					 struct virtnet_info *vi,
					 struct receive_queue *rq,
					 void *buf,
					 void *ctx,
					 unsigned int len,
					 unsigned int *xdp_xmit,
					 struct virtnet_rq_stats *stats)
{
	struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
	u16 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
	struct page *page = virt_to_head_page(buf);
	int offset = buf - page_address(page);
	struct sk_buff *head_skb, *curr_skb;
	struct bpf_prog *xdp_prog;
	unsigned int truesize;
	unsigned int headroom = mergeable_ctx_to_headroom(ctx);
	int err;
	unsigned int metasize = 0;

	head_skb = NULL;
	stats->bytes += len - vi->hdr_len;

	rcu_read_lock();
	xdp_prog = rcu_dereference(rq->xdp_prog);
	if (xdp_prog) {
		struct xdp_frame *xdpf;
		struct page *xdp_page;
		struct xdp_buff xdp;
		void *data;
		u32 act;

		/* Transient failure which in theory could occur if
		 * in-flight packets from before XDP was enabled reach
		 * the receive path after XDP is loaded.
		 */
		if (unlikely(hdr->hdr.gso_type))
			goto err_xdp;

		/* This happens when the rx buffer size is underestimated
		 * or the headroom is not enough because the buffer was
		 * refilled before XDP was set. This should only happen for
		 * the first several packets, so we don't care much about
		 * its performance.
		 */
		if (unlikely(num_buf > 1 ||
			     headroom < virtnet_get_headroom(vi))) {
			/* linearize data for XDP */
			xdp_page = xdp_linearize_page(rq, &num_buf,
						      page, offset,
						      VIRTIO_XDP_HEADROOM,
						      &len);
			if (!xdp_page)
				goto err_xdp;
			offset = VIRTIO_XDP_HEADROOM;
		} else {
			xdp_page = page;
		}

		/* Allow consuming headroom but reserve enough space to push
		 * the descriptor on if we get an XDP_TX return code.
		 */
		data = page_address(xdp_page) + offset;
		xdp.data_hard_start = data - VIRTIO_XDP_HEADROOM + vi->hdr_len;
		xdp.data = data + vi->hdr_len;
		xdp.data_end = xdp.data + (len - vi->hdr_len);
		xdp.data_meta = xdp.data;
		xdp.rxq = &rq->xdp_rxq;

		act = bpf_prog_run_xdp(xdp_prog, &xdp);
		stats->xdp_packets++;

		switch (act) {
		case XDP_PASS:
			metasize = xdp.data - xdp.data_meta;

			/* Recalculate offset to account for any header
			 * adjustments and subtract metasize, so that
			 * page_to_skb() copies the metadata as well. Note
			 * the other cases do not build an skb and never use
			 * offset.
			 */
Yuya Kusakabe | 503d539 | 2020-02-25 12:32:12 +0900 | [diff] [blame] | 866 | offset = xdp.data - page_address(xdp_page) - |
| 867 | vi->hdr_len - metasize; |
| 868 | |
| 869 | /* recalculate len if xdp.data, xdp.data_end or |
| 870 | * xdp.data_meta were adjusted |
| 871 | */ |
| 872 | len = xdp.data_end - xdp.data + vi->hdr_len + metasize; |
Jason Wang | 1830f89 | 2016-12-23 22:37:27 +0800 | [diff] [blame] | 873 | /* We can only create skb based on xdp_page. */ |
| 874 | if (unlikely(xdp_page != page)) { |
| 875 | rcu_read_unlock(); |
| 876 | put_page(page); |
Yuya Kusakabe | 503d539 | 2020-02-25 12:32:12 +0900 | [diff] [blame] | 877 | head_skb = page_to_skb(vi, rq, xdp_page, offset, |
| 878 | len, PAGE_SIZE, false, |
| 879 | metasize); |
Jason Wang | 1830f89 | 2016-12-23 22:37:27 +0800 | [diff] [blame] | 880 | return head_skb; |
| 881 | } |
John Fastabend | 56434a0 | 2016-12-15 12:14:13 -0800 | [diff] [blame] | 882 | break; |
| 883 | case XDP_TX: |
Jason Wang | d46eeea | 2018-07-31 17:43:39 +0800 | [diff] [blame] | 884 | stats->xdp_tx++; |
Jesper Dangaard Brouer | 44fa2db | 2018-04-17 16:46:37 +0200 | [diff] [blame] | 885 | xdpf = convert_to_xdp_frame(&xdp); |
| 886 | if (unlikely(!xdpf)) |
| 887 | goto err_xdp; |
Jason Wang | ca9e83b | 2018-07-31 17:43:38 +0800 | [diff] [blame] | 888 | err = virtnet_xdp_xmit(dev, 1, &xdpf, 0); |
| 889 | if (unlikely(err < 0)) { |
John Fastabend | 0354e4d | 2017-02-02 19:15:01 -0800 | [diff] [blame] | 890 | trace_xdp_exception(vi->dev, xdp_prog, act); |
Jesper Dangaard Brouer | 11b7d897 | 2018-02-20 14:32:15 +0100 | [diff] [blame] | 891 | if (unlikely(xdp_page != page)) |
| 892 | put_page(xdp_page); |
| 893 | goto err_xdp; |
| 894 | } |
Jesper Dangaard Brouer | 2471c75 | 2018-06-26 17:39:58 +0200 | [diff] [blame] | 895 | *xdp_xmit |= VIRTIO_XDP_TX; |
John Fastabend | 72979a6 | 2016-12-15 12:14:36 -0800 | [diff] [blame] | 896 | if (unlikely(xdp_page != page)) |
Jason Wang | 5d458a1 | 2018-05-22 11:44:29 +0800 | [diff] [blame] | 897 | put_page(page); |
John Fastabend | 56434a0 | 2016-12-15 12:14:13 -0800 | [diff] [blame] | 898 | rcu_read_unlock(); |
| 899 | goto xdp_xmit; |
Jason Wang | 3cc81a9 | 2018-03-02 17:29:14 +0800 | [diff] [blame] | 900 | case XDP_REDIRECT: |
Jason Wang | d46eeea | 2018-07-31 17:43:39 +0800 | [diff] [blame] | 901 | stats->xdp_redirects++; |
Jason Wang | 3cc81a9 | 2018-03-02 17:29:14 +0800 | [diff] [blame] | 902 | err = xdp_do_redirect(dev, &xdp, xdp_prog); |
| 903 | if (err) { |
| 904 | if (unlikely(xdp_page != page)) |
| 905 | put_page(xdp_page); |
| 906 | goto err_xdp; |
| 907 | } |
Jesper Dangaard Brouer | 2471c75 | 2018-06-26 17:39:58 +0200 | [diff] [blame] | 908 | *xdp_xmit |= VIRTIO_XDP_REDIR; |
Jason Wang | 3cc81a9 | 2018-03-02 17:29:14 +0800 | [diff] [blame] | 909 | if (unlikely(xdp_page != page)) |
Jason Wang | 6890418 | 2018-05-22 11:44:28 +0800 | [diff] [blame] | 910 | put_page(page); |
Jason Wang | 3cc81a9 | 2018-03-02 17:29:14 +0800 | [diff] [blame] | 911 | rcu_read_unlock(); |
| 912 | goto xdp_xmit; |
John Fastabend | 56434a0 | 2016-12-15 12:14:13 -0800 | [diff] [blame] | 913 | default: |
John Fastabend | 0354e4d | 2017-02-02 19:15:01 -0800 | [diff] [blame] | 914 | bpf_warn_invalid_xdp_action(act); |
Gustavo A. R. Silva | b633d44 | 2018-08-04 21:42:05 -0500 | [diff] [blame] | 915 | /* fall through */ |
John Fastabend | 0354e4d | 2017-02-02 19:15:01 -0800 | [diff] [blame] | 916 | case XDP_ABORTED: |
| 917 | trace_xdp_exception(vi->dev, xdp_prog, act); |
Gustavo A. R. Silva | b633d44 | 2018-08-04 21:42:05 -0500 | [diff] [blame] | 918 | /* fall through */ |
John Fastabend | 0354e4d | 2017-02-02 19:15:01 -0800 | [diff] [blame] | 919 | case XDP_DROP: |
John Fastabend | 72979a6 | 2016-12-15 12:14:36 -0800 | [diff] [blame] | 920 | if (unlikely(xdp_page != page)) |
| 921 | __free_pages(xdp_page, 0); |
John Fastabend | f600b69 | 2016-12-15 12:13:24 -0800 | [diff] [blame] | 922 | goto err_xdp; |
John Fastabend | 56434a0 | 2016-12-15 12:14:13 -0800 | [diff] [blame] | 923 | } |
John Fastabend | f600b69 | 2016-12-15 12:13:24 -0800 | [diff] [blame] | 924 | } |
| 925 | rcu_read_unlock(); |
| 926 | |
Jason Wang | 28b39bc | 2017-07-19 16:54:46 +0800 | [diff] [blame] | 927 | truesize = mergeable_ctx_to_truesize(ctx); |
| 928 | if (unlikely(len > truesize)) { |
Dan Carpenter | 56da5fd | 2017-04-06 12:04:47 +0300 | [diff] [blame] | 929 | pr_debug("%s: rx error: len %u exceeds truesize %lu\n", |
Michael S. Tsirkin | 680557c | 2017-03-06 21:29:47 +0200 | [diff] [blame] | 930 | dev->name, len, (unsigned long)truesize);
| 931 | dev->stats.rx_length_errors++; |
| 932 | goto err_skb; |
| 933 | } |
Jason Wang | 28b39bc | 2017-07-19 16:54:46 +0800 | [diff] [blame] | 934 | |
Yuya Kusakabe | 503d539 | 2020-02-25 12:32:12 +0900 | [diff] [blame] | 935 | head_skb = page_to_skb(vi, rq, page, offset, len, truesize, !xdp_prog, |
| 936 | metasize); |
John Fastabend | f600b69 | 2016-12-15 12:13:24 -0800 | [diff] [blame] | 937 | curr_skb = head_skb; |
Shirley Ma | 9ab86bb | 2010-01-29 03:20:04 +0000 | [diff] [blame] | 938 | |
Michael S. Tsirkin | 8fc3b9e | 2013-11-28 13:30:55 +0200 | [diff] [blame] | 939 | if (unlikely(!curr_skb)) |
| 940 | goto err_skb; |
Shirley Ma | 9ab86bb | 2010-01-29 03:20:04 +0000 | [diff] [blame] | 941 | while (--num_buf) { |
Michael S. Tsirkin | 8fc3b9e | 2013-11-28 13:30:55 +0200 | [diff] [blame] | 942 | int num_skb_frags; |
| 943 | |
Michael S. Tsirkin | 680557c | 2017-03-06 21:29:47 +0200 | [diff] [blame] | 944 | buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx); |
Yunjian Wang | 03e9f8a | 2017-12-04 14:02:19 +0800 | [diff] [blame] | 945 | if (unlikely(!buf)) { |
Michael S. Tsirkin | 8fc3b9e | 2013-11-28 13:30:55 +0200 | [diff] [blame] | 946 | pr_debug("%s: rx error: %d buffers out of %d missing\n", |
Michael S. Tsirkin | fdd819b | 2014-10-07 16:39:48 +0200 | [diff] [blame] | 947 | dev->name, num_buf, |
Michael S. Tsirkin | 012873d | 2014-10-24 16:55:57 +0300 | [diff] [blame] | 948 | virtio16_to_cpu(vi->vdev, |
| 949 | hdr->num_buffers)); |
Michael S. Tsirkin | 8fc3b9e | 2013-11-28 13:30:55 +0200 | [diff] [blame] | 950 | dev->stats.rx_length_errors++; |
| 951 | goto err_buf; |
Shirley Ma | 9ab86bb | 2010-01-29 03:20:04 +0000 | [diff] [blame] | 952 | } |
Michael S. Tsirkin | 8fc3b9e | 2013-11-28 13:30:55 +0200 | [diff] [blame] | 953 | |
Jason Wang | d46eeea | 2018-07-31 17:43:39 +0800 | [diff] [blame] | 954 | stats->bytes += len; |
Michael S. Tsirkin | 8fc3b9e | 2013-11-28 13:30:55 +0200 | [diff] [blame] | 955 | page = virt_to_head_page(buf); |
Jason Wang | 28b39bc | 2017-07-19 16:54:46 +0800 | [diff] [blame] | 956 | |
| 957 | truesize = mergeable_ctx_to_truesize(ctx); |
| 958 | if (unlikely(len > truesize)) { |
Dan Carpenter | 56da5fd | 2017-04-06 12:04:47 +0300 | [diff] [blame] | 959 | pr_debug("%s: rx error: len %u exceeds truesize %lu\n", |
Michael S. Tsirkin | 680557c | 2017-03-06 21:29:47 +0200 | [diff] [blame] | 960 | dev->name, len, (unsigned long)truesize);
| 961 | dev->stats.rx_length_errors++; |
| 962 | goto err_skb; |
| 963 | } |
Michael S. Tsirkin | 8fc3b9e | 2013-11-28 13:30:55 +0200 | [diff] [blame] | 964 | |
| 965 | num_skb_frags = skb_shinfo(curr_skb)->nr_frags; |
Michael Dalton | 2613af0 | 2013-10-28 15:44:18 -0700 | [diff] [blame] | 966 | if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) { |
| 967 | struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC); |
Michael S. Tsirkin | 8fc3b9e | 2013-11-28 13:30:55 +0200 | [diff] [blame] | 968 | |
| 969 | if (unlikely(!nskb)) |
| 970 | goto err_skb; |
Michael Dalton | 2613af0 | 2013-10-28 15:44:18 -0700 | [diff] [blame] | 971 | if (curr_skb == head_skb) |
| 972 | skb_shinfo(curr_skb)->frag_list = nskb; |
| 973 | else |
| 974 | curr_skb->next = nskb; |
| 975 | curr_skb = nskb; |
| 976 | head_skb->truesize += nskb->truesize; |
| 977 | num_skb_frags = 0; |
| 978 | } |
| 979 | if (curr_skb != head_skb) { |
| 980 | head_skb->data_len += len; |
| 981 | head_skb->len += len; |
Michael Dalton | fb51879 | 2014-01-16 22:23:26 -0800 | [diff] [blame] | 982 | head_skb->truesize += truesize; |
Michael Dalton | 2613af0 | 2013-10-28 15:44:18 -0700 | [diff] [blame] | 983 | } |
Michael S. Tsirkin | 8fc3b9e | 2013-11-28 13:30:55 +0200 | [diff] [blame] | 984 | offset = buf - page_address(page); |
Jason Wang | ba27524 | 2013-11-01 14:07:48 +0800 | [diff] [blame] | 985 | if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) { |
| 986 | put_page(page); |
| 987 | skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1, |
Michael Dalton | fb51879 | 2014-01-16 22:23:26 -0800 | [diff] [blame] | 988 | len, truesize); |
Jason Wang | ba27524 | 2013-11-01 14:07:48 +0800 | [diff] [blame] | 989 | } else { |
| 990 | skb_add_rx_frag(curr_skb, num_skb_frags, page, |
Michael Dalton | fb51879 | 2014-01-16 22:23:26 -0800 | [diff] [blame] | 991 | offset, len, truesize); |
Jason Wang | ba27524 | 2013-11-01 14:07:48 +0800 | [diff] [blame] | 992 | } |
Michael S. Tsirkin | 8fc3b9e | 2013-11-28 13:30:55 +0200 | [diff] [blame] | 993 | } |
| 994 | |
Johannes Berg | 5377d758 | 2015-08-19 09:48:40 +0200 | [diff] [blame] | 995 | ewma_pkt_len_add(&rq->mrg_avg_pkt_len, head_skb->len); |
Michael S. Tsirkin | 8fc3b9e | 2013-11-28 13:30:55 +0200 | [diff] [blame] | 996 | return head_skb; |
| 997 | |
John Fastabend | f600b69 | 2016-12-15 12:13:24 -0800 | [diff] [blame] | 998 | err_xdp: |
| 999 | rcu_read_unlock(); |
Jason Wang | d46eeea | 2018-07-31 17:43:39 +0800 | [diff] [blame] | 1000 | stats->xdp_drops++; |
Michael S. Tsirkin | 8fc3b9e | 2013-11-28 13:30:55 +0200 | [diff] [blame] | 1001 | err_skb: |
| 1002 | put_page(page); |
Jason Wang | 850e088 | 2018-05-22 11:44:30 +0800 | [diff] [blame] | 1003 | while (num_buf-- > 1) { |
Michael S. Tsirkin | 680557c | 2017-03-06 21:29:47 +0200 | [diff] [blame] | 1004 | buf = virtqueue_get_buf(rq->vq, &len); |
| 1005 | if (unlikely(!buf)) { |
Michael S. Tsirkin | 8fc3b9e | 2013-11-28 13:30:55 +0200 | [diff] [blame] | 1006 | pr_debug("%s: rx error: %d buffers missing\n", |
| 1007 | dev->name, num_buf); |
| 1008 | dev->stats.rx_length_errors++; |
| 1009 | break; |
| 1010 | } |
Jason Wang | d46eeea | 2018-07-31 17:43:39 +0800 | [diff] [blame] | 1011 | stats->bytes += len; |
Michael S. Tsirkin | 680557c | 2017-03-06 21:29:47 +0200 | [diff] [blame] | 1012 | page = virt_to_head_page(buf); |
Michael S. Tsirkin | 8fc3b9e | 2013-11-28 13:30:55 +0200 | [diff] [blame] | 1013 | put_page(page); |
Shirley Ma | 9ab86bb | 2010-01-29 03:20:04 +0000 | [diff] [blame] | 1014 | } |
Michael S. Tsirkin | 8fc3b9e | 2013-11-28 13:30:55 +0200 | [diff] [blame] | 1015 | err_buf: |
Jason Wang | d46eeea | 2018-07-31 17:43:39 +0800 | [diff] [blame] | 1016 | stats->drops++; |
Michael S. Tsirkin | 8fc3b9e | 2013-11-28 13:30:55 +0200 | [diff] [blame] | 1017 | dev_kfree_skb(head_skb); |
John Fastabend | 56434a0 | 2016-12-15 12:14:13 -0800 | [diff] [blame] | 1018 | xdp_xmit: |
Michael S. Tsirkin | 8fc3b9e | 2013-11-28 13:30:55 +0200 | [diff] [blame] | 1019 | return NULL; |
Shirley Ma | 9ab86bb | 2010-01-29 03:20:04 +0000 | [diff] [blame] | 1020 | } |
| 1021 | |
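/* Entry point for one completed receive buffer: check the minimum
 * length, dispatch to the mergeable/big/small handler to build an skb,
 * then derive checksum and GSO state from the virtio-net header and
 * hand the skb to GRO.
 */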
Toshiaki Makita | 7d9d60f | 2018-07-23 23:36:04 +0900 | [diff] [blame] | 1022 | static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq, |
| 1023 | void *buf, unsigned int len, void **ctx, |
Toshiaki Makita | a0929a4 | 2018-07-23 23:36:05 +0900 | [diff] [blame] | 1024 | unsigned int *xdp_xmit, |
Jason Wang | d46eeea | 2018-07-31 17:43:39 +0800 | [diff] [blame] | 1025 | struct virtnet_rq_stats *stats) |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1026 | { |
Jason Wang | e9d7417 | 2012-12-07 07:04:55 +0000 | [diff] [blame] | 1027 | struct net_device *dev = vi->dev; |
Shirley Ma | 9ab86bb | 2010-01-29 03:20:04 +0000 | [diff] [blame] | 1028 | struct sk_buff *skb; |
Michael S. Tsirkin | 012873d | 2014-10-24 16:55:57 +0300 | [diff] [blame] | 1029 | struct virtio_net_hdr_mrg_rxbuf *hdr; |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1030 | |
Michael S. Tsirkin | bcff316 | 2014-10-24 00:22:11 +0300 | [diff] [blame] | 1031 | if (unlikely(len < vi->hdr_len + ETH_HLEN)) { |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1032 | pr_debug("%s: short packet %i\n", dev->name, len); |
| 1033 | dev->stats.rx_length_errors++; |
Michael Dalton | ab7db91 | 2014-01-16 22:23:27 -0800 | [diff] [blame] | 1034 | if (vi->mergeable_rx_bufs) { |
Michael S. Tsirkin | 680557c | 2017-03-06 21:29:47 +0200 | [diff] [blame] | 1035 | put_page(virt_to_head_page(buf)); |
Michael Dalton | ab7db91 | 2014-01-16 22:23:27 -0800 | [diff] [blame] | 1036 | } else if (vi->big_packets) { |
Michael Dalton | 98bfd23 | 2013-12-05 13:14:05 -0800 | [diff] [blame] | 1037 | give_pages(rq, buf); |
Michael Dalton | ab7db91 | 2014-01-16 22:23:27 -0800 | [diff] [blame] | 1038 | } else { |
Jason Wang | f6b1020 | 2017-02-21 16:46:28 +0800 | [diff] [blame] | 1039 | put_page(virt_to_head_page(buf)); |
Michael Dalton | ab7db91 | 2014-01-16 22:23:27 -0800 | [diff] [blame] | 1040 | } |
Toshiaki Makita | 7d9d60f | 2018-07-23 23:36:04 +0900 | [diff] [blame] | 1041 | return; |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1042 | } |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1043 | |
Michael S. Tsirkin | f121159 | 2013-11-28 13:30:59 +0200 | [diff] [blame] | 1044 | if (vi->mergeable_rx_bufs) |
Toshiaki Makita | 7d9d60f | 2018-07-23 23:36:04 +0900 | [diff] [blame] | 1045 | skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit, |
Toshiaki Makita | a0929a4 | 2018-07-23 23:36:05 +0900 | [diff] [blame] | 1046 | stats); |
Michael S. Tsirkin | f121159 | 2013-11-28 13:30:59 +0200 | [diff] [blame] | 1047 | else if (vi->big_packets) |
Toshiaki Makita | a0929a4 | 2018-07-23 23:36:05 +0900 | [diff] [blame] | 1048 | skb = receive_big(dev, vi, rq, buf, len, stats); |
Michael S. Tsirkin | f121159 | 2013-11-28 13:30:59 +0200 | [diff] [blame] | 1049 | else |
Toshiaki Makita | a0929a4 | 2018-07-23 23:36:05 +0900 | [diff] [blame] | 1050 | skb = receive_small(dev, vi, rq, buf, ctx, len, xdp_xmit, stats); |
Michael S. Tsirkin | f121159 | 2013-11-28 13:30:59 +0200 | [diff] [blame] | 1051 | |
| 1052 | if (unlikely(!skb)) |
Toshiaki Makita | 7d9d60f | 2018-07-23 23:36:04 +0900 | [diff] [blame] | 1053 | return; |
Mark McLoughlin | 3f2c31d | 2008-11-16 22:41:34 -0800 | [diff] [blame] | 1054 | |
Shirley Ma | 9ab86bb | 2010-01-29 03:20:04 +0000 | [diff] [blame] | 1055 | hdr = skb_vnet_hdr(skb); |
stephen hemminger | 3fa2a1d | 2011-06-15 06:36:29 +0000 | [diff] [blame] | 1056 | |
Mike Rapoport | e858fae | 2016-06-08 16:09:21 +0300 | [diff] [blame] | 1057 | if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID) |
Jason Wang | 10a8d94 | 2011-06-10 00:56:17 +0000 | [diff] [blame] | 1058 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1059 | |
Mike Rapoport | e858fae | 2016-06-08 16:09:21 +0300 | [diff] [blame] | 1060 | if (virtio_net_hdr_to_skb(skb, &hdr->hdr, |
| 1061 | virtio_is_little_endian(vi->vdev))) { |
| 1062 | net_warn_ratelimited("%s: bad gso: type: %u, size: %u\n", |
| 1063 | dev->name, hdr->hdr.gso_type, |
| 1064 | hdr->hdr.gso_size); |
| 1065 | goto frame_err; |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1066 | } |
| 1067 | |
Willem de Bruijn | 133bbb1 | 2019-01-17 20:08:53 -0500 | [diff] [blame] | 1068 | skb_record_rx_queue(skb, vq2rxq(rq->vq)); |
Mike Rapoport | d1dc06d | 2016-06-14 08:29:38 +0300 | [diff] [blame] | 1069 | skb->protocol = eth_type_trans(skb, dev); |
| 1070 | pr_debug("Receiving skb proto 0x%04x len %i type %i\n", |
| 1071 | ntohs(skb->protocol), skb->len, skb->pkt_type); |
| 1072 | |
Eric Dumazet | 0fbd050 | 2015-07-31 18:25:17 +0200 | [diff] [blame] | 1073 | napi_gro_receive(&rq->napi, skb); |
Toshiaki Makita | 7d9d60f | 2018-07-23 23:36:04 +0900 | [diff] [blame] | 1074 | return; |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1075 | |
| 1076 | frame_err: |
| 1077 | dev->stats.rx_frame_errors++; |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1078 | dev_kfree_skb(skb); |
| 1079 | } |
| 1080 | |
Jason Wang | 192f68c | 2017-07-19 16:54:47 +0800 | [diff] [blame] | 1081 | /* Unlike mergeable buffers, all buffers are allocated with the
| 1082 | * same size, except for the headroom. For this reason we do
| 1083 | * not need to use mergeable_len_to_ctx here - it is enough
| 1084 | * to store the headroom as the context, ignoring the truesize.
| 1085 | */
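/* Rough layout of a small receive buffer; only the header + packet
 * area is posted to the device, the rest is reserved for building
 * the skb later:
 *
 *  | VIRTNET_RX_PAD | xdp_headroom | hdr_len | GOOD_PACKET_LEN | shinfo |
 *                                  ^-- rq->sg starts here
 */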
Michael S. Tsirkin | 946fa56 | 2014-10-24 00:12:10 +0300 | [diff] [blame] | 1086 | static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq, |
| 1087 | gfp_t gfp) |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1088 | { |
Jason Wang | f6b1020 | 2017-02-21 16:46:28 +0800 | [diff] [blame] | 1089 | struct page_frag *alloc_frag = &rq->alloc_frag; |
| 1090 | char *buf; |
John Fastabend | 2de2f7f | 2017-02-02 19:16:29 -0800 | [diff] [blame] | 1091 | unsigned int xdp_headroom = virtnet_get_headroom(vi); |
Jason Wang | 192f68c | 2017-07-19 16:54:47 +0800 | [diff] [blame] | 1092 | void *ctx = (void *)(unsigned long)xdp_headroom; |
Jason Wang | f6b1020 | 2017-02-21 16:46:28 +0800 | [diff] [blame] | 1093 | int len = vi->hdr_len + VIRTNET_RX_PAD + GOOD_PACKET_LEN + xdp_headroom; |
Shirley Ma | 9ab86bb | 2010-01-29 03:20:04 +0000 | [diff] [blame] | 1094 | int err; |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1095 | |
Jason Wang | f6b1020 | 2017-02-21 16:46:28 +0800 | [diff] [blame] | 1096 | len = SKB_DATA_ALIGN(len) + |
| 1097 | SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); |
| 1098 | if (unlikely(!skb_page_frag_refill(len, alloc_frag, gfp))) |
Shirley Ma | 9ab86bb | 2010-01-29 03:20:04 +0000 | [diff] [blame] | 1099 | return -ENOMEM; |
Mark McLoughlin | 3f2c31d | 2008-11-16 22:41:34 -0800 | [diff] [blame] | 1100 | |
Jason Wang | f6b1020 | 2017-02-21 16:46:28 +0800 | [diff] [blame] | 1101 | buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset; |
| 1102 | get_page(alloc_frag->page); |
| 1103 | alloc_frag->offset += len; |
| 1104 | sg_init_one(rq->sg, buf + VIRTNET_RX_PAD + xdp_headroom, |
| 1105 | vi->hdr_len + GOOD_PACKET_LEN); |
Jason Wang | 192f68c | 2017-07-19 16:54:47 +0800 | [diff] [blame] | 1106 | err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp); |
Shirley Ma | 9ab86bb | 2010-01-29 03:20:04 +0000 | [diff] [blame] | 1107 | if (err < 0) |
Jason Wang | f6b1020 | 2017-02-21 16:46:28 +0800 | [diff] [blame] | 1108 | put_page(virt_to_head_page(buf)); |
Shirley Ma | 9ab86bb | 2010-01-29 03:20:04 +0000 | [diff] [blame] | 1109 | return err; |
| 1110 | } |
| 1111 | |
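/* Post a "big packet" buffer: a chain of pages mapped through
 * MAX_SKB_FRAGS + 2 scatterlist entries, with sg[0] covering the
 * virtio-net header, sg[1] the rest of the first page past the padded
 * header, and every other entry one full page.
 */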
Michael S. Tsirkin | 012873d | 2014-10-24 16:55:57 +0300 | [diff] [blame] | 1112 | static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq, |
| 1113 | gfp_t gfp) |
Shirley Ma | 9ab86bb | 2010-01-29 03:20:04 +0000 | [diff] [blame] | 1114 | { |
Shirley Ma | 9ab86bb | 2010-01-29 03:20:04 +0000 | [diff] [blame] | 1115 | struct page *first, *list = NULL; |
| 1116 | char *p; |
| 1117 | int i, err, offset; |
| 1118 | |
Rusty Russell | a583544 | 2014-09-11 10:17:36 +0930 | [diff] [blame] | 1119 | sg_init_table(rq->sg, MAX_SKB_FRAGS + 2); |
| 1120 | |
Jason Wang | e9d7417 | 2012-12-07 07:04:55 +0000 | [diff] [blame] | 1121 | /* page in rq->sg[MAX_SKB_FRAGS + 1] is list tail */ |
Shirley Ma | 9ab86bb | 2010-01-29 03:20:04 +0000 | [diff] [blame] | 1122 | for (i = MAX_SKB_FRAGS + 1; i > 1; --i) { |
Jason Wang | e9d7417 | 2012-12-07 07:04:55 +0000 | [diff] [blame] | 1123 | first = get_a_page(rq, gfp); |
Shirley Ma | 9ab86bb | 2010-01-29 03:20:04 +0000 | [diff] [blame] | 1124 | if (!first) { |
| 1125 | if (list) |
Jason Wang | e9d7417 | 2012-12-07 07:04:55 +0000 | [diff] [blame] | 1126 | give_pages(rq, list); |
Shirley Ma | 9ab86bb | 2010-01-29 03:20:04 +0000 | [diff] [blame] | 1127 | return -ENOMEM; |
Rusty Russell | 3161e45 | 2009-08-26 12:22:32 -0700 | [diff] [blame] | 1128 | } |
Jason Wang | e9d7417 | 2012-12-07 07:04:55 +0000 | [diff] [blame] | 1129 | sg_set_buf(&rq->sg[i], page_address(first), PAGE_SIZE); |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1130 | |
Shirley Ma | 9ab86bb | 2010-01-29 03:20:04 +0000 | [diff] [blame] | 1131 | /* chain new page in list head to match sg */ |
| 1132 | first->private = (unsigned long)list; |
| 1133 | list = first; |
| 1134 | } |
Mark McLoughlin | 3f2c31d | 2008-11-16 22:41:34 -0800 | [diff] [blame] | 1135 | |
Jason Wang | e9d7417 | 2012-12-07 07:04:55 +0000 | [diff] [blame] | 1136 | first = get_a_page(rq, gfp); |
Shirley Ma | 9ab86bb | 2010-01-29 03:20:04 +0000 | [diff] [blame] | 1137 | if (!first) { |
Jason Wang | e9d7417 | 2012-12-07 07:04:55 +0000 | [diff] [blame] | 1138 | give_pages(rq, list); |
Shirley Ma | 9ab86bb | 2010-01-29 03:20:04 +0000 | [diff] [blame] | 1139 | return -ENOMEM; |
| 1140 | } |
| 1141 | p = page_address(first); |
Herbert Xu | 97402b9 | 2008-04-18 11:24:27 +0800 | [diff] [blame] | 1142 | |
Jason Wang | e9d7417 | 2012-12-07 07:04:55 +0000 | [diff] [blame] | 1143 | /* rq->sg[0], rq->sg[1] share the same page */ |
Michael S. Tsirkin | 012873d | 2014-10-24 16:55:57 +0300 | [diff] [blame] | 1144 | /* a separate rq->sg[0] for the header - required in case !any_header_sg */
| 1145 | sg_set_buf(&rq->sg[0], p, vi->hdr_len); |
Herbert Xu | 97402b9 | 2008-04-18 11:24:27 +0800 | [diff] [blame] | 1146 | |
Jason Wang | e9d7417 | 2012-12-07 07:04:55 +0000 | [diff] [blame] | 1147 | /* rq->sg[1] for data packet, from offset */ |
Shirley Ma | 9ab86bb | 2010-01-29 03:20:04 +0000 | [diff] [blame] | 1148 | offset = sizeof(struct padded_vnet_hdr); |
Jason Wang | e9d7417 | 2012-12-07 07:04:55 +0000 | [diff] [blame] | 1149 | sg_set_buf(&rq->sg[1], p + offset, PAGE_SIZE - offset); |
Herbert Xu | 97402b9 | 2008-04-18 11:24:27 +0800 | [diff] [blame] | 1150 | |
Shirley Ma | 9ab86bb | 2010-01-29 03:20:04 +0000 | [diff] [blame] | 1151 | /* chain first in list head */ |
| 1152 | first->private = (unsigned long)list; |
Rusty Russell | 9dc7b9e | 2013-03-20 15:44:28 +1030 | [diff] [blame] | 1153 | err = virtqueue_add_inbuf(rq->vq, rq->sg, MAX_SKB_FRAGS + 2, |
| 1154 | first, gfp); |
Shirley Ma | 9ab86bb | 2010-01-29 03:20:04 +0000 | [diff] [blame] | 1155 | if (err < 0) |
Jason Wang | e9d7417 | 2012-12-07 07:04:55 +0000 | [diff] [blame] | 1156 | give_pages(rq, first); |
Herbert Xu | 97402b9 | 2008-04-18 11:24:27 +0800 | [diff] [blame] | 1157 | |
Shirley Ma | 9ab86bb | 2010-01-29 03:20:04 +0000 | [diff] [blame] | 1158 | return err; |
| 1159 | } |
Herbert Xu | 97402b9 | 2008-04-18 11:24:27 +0800 | [diff] [blame] | 1160 | |
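/* Choose the length of the next mergeable receive buffer. When XDP
 * head/tailroom is reserved (room != 0) simply use what is left of a
 * page; otherwise take hdr_len plus the EWMA of recent packet sizes,
 * clamped to [rq->min_buf_len, PAGE_SIZE - hdr_len], rounded up to a
 * cache line.
 */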
Michael S. Tsirkin | d85b758f7 | 2017-03-09 02:21:21 +0200 | [diff] [blame] | 1161 | static unsigned int get_mergeable_buf_len(struct receive_queue *rq, |
Jason Wang | 3cc81a9 | 2018-03-02 17:29:14 +0800 | [diff] [blame] | 1162 | struct ewma_pkt_len *avg_pkt_len, |
| 1163 | unsigned int room) |
Shirley Ma | 9ab86bb | 2010-01-29 03:20:04 +0000 | [diff] [blame] | 1164 | { |
Michael Dalton | ab7db91 | 2014-01-16 22:23:27 -0800 | [diff] [blame] | 1165 | const size_t hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf); |
Michael Dalton | fbf28d7 | 2014-01-16 22:23:30 -0800 | [diff] [blame] | 1166 | unsigned int len; |
| 1167 | |
Jason Wang | 3cc81a9 | 2018-03-02 17:29:14 +0800 | [diff] [blame] | 1168 | if (room) |
| 1169 | return PAGE_SIZE - room; |
| 1170 | |
| 1171 | len = hdr_len + clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len), |
Michael S. Tsirkin | f0c3192 | 2017-06-02 17:54:33 +0300 | [diff] [blame] | 1172 | rq->min_buf_len, PAGE_SIZE - hdr_len); |
Jason Wang | 3cc81a9 | 2018-03-02 17:29:14 +0800 | [diff] [blame] | 1173 | |
Michael S. Tsirkin | e377fcc | 2017-03-06 22:21:35 +0200 | [diff] [blame] | 1174 | return ALIGN(len, L1_CACHE_BYTES); |
Michael Dalton | fbf28d7 | 2014-01-16 22:23:30 -0800 | [diff] [blame] | 1175 | } |
| 1176 | |
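/* Post one mergeable receive buffer carved from the page-frag
 * allocator, leaving any XDP headroom as a hole at the front; the
 * buffer's truesize and headroom are packed into the opaque ctx that
 * comes back with the buffer on completion.
 */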
John Fastabend | 2de2f7f | 2017-02-02 19:16:29 -0800 | [diff] [blame] | 1177 | static int add_recvbuf_mergeable(struct virtnet_info *vi, |
| 1178 | struct receive_queue *rq, gfp_t gfp) |
Michael Dalton | fbf28d7 | 2014-01-16 22:23:30 -0800 | [diff] [blame] | 1179 | { |
Michael Dalton | fb51879 | 2014-01-16 22:23:26 -0800 | [diff] [blame] | 1180 | struct page_frag *alloc_frag = &rq->alloc_frag; |
John Fastabend | 2de2f7f | 2017-02-02 19:16:29 -0800 | [diff] [blame] | 1181 | unsigned int headroom = virtnet_get_headroom(vi); |
Jason Wang | 3cc81a9 | 2018-03-02 17:29:14 +0800 | [diff] [blame] | 1182 | unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0; |
| 1183 | unsigned int room = SKB_DATA_ALIGN(headroom + tailroom); |
Michael Dalton | fb51879 | 2014-01-16 22:23:26 -0800 | [diff] [blame] | 1184 | char *buf; |
Michael S. Tsirkin | 680557c | 2017-03-06 21:29:47 +0200 | [diff] [blame] | 1185 | void *ctx; |
Shirley Ma | 9ab86bb | 2010-01-29 03:20:04 +0000 | [diff] [blame] | 1186 | int err; |
Michael Dalton | fb51879 | 2014-01-16 22:23:26 -0800 | [diff] [blame] | 1187 | unsigned int len, hole; |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1188 | |
Jason Wang | 3cc81a9 | 2018-03-02 17:29:14 +0800 | [diff] [blame] | 1189 | /* Extra tailroom is needed to satisfy XDP's assumptions. This
| 1190 | * means rx frag coalescing won't work, but since we have
| 1191 | * disabled GSO for XDP, it won't be a big issue.
| 1192 | */
| 1193 | len = get_mergeable_buf_len(rq, &rq->mrg_avg_pkt_len, room); |
| 1194 | if (unlikely(!skb_page_frag_refill(len + room, alloc_frag, gfp))) |
Shirley Ma | 9ab86bb | 2010-01-29 03:20:04 +0000 | [diff] [blame] | 1195 | return -ENOMEM; |
Michael Dalton | ab7db91 | 2014-01-16 22:23:27 -0800 | [diff] [blame] | 1196 | |
Michael Dalton | fb51879 | 2014-01-16 22:23:26 -0800 | [diff] [blame] | 1197 | buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset; |
John Fastabend | 2de2f7f | 2017-02-02 19:16:29 -0800 | [diff] [blame] | 1198 | buf += headroom; /* advance address leaving hole at front of pkt */ |
Michael Dalton | fb51879 | 2014-01-16 22:23:26 -0800 | [diff] [blame] | 1199 | get_page(alloc_frag->page); |
Jason Wang | 3cc81a9 | 2018-03-02 17:29:14 +0800 | [diff] [blame] | 1200 | alloc_frag->offset += len + room; |
Michael Dalton | fb51879 | 2014-01-16 22:23:26 -0800 | [diff] [blame] | 1201 | hole = alloc_frag->size - alloc_frag->offset; |
Jason Wang | 3cc81a9 | 2018-03-02 17:29:14 +0800 | [diff] [blame] | 1202 | if (hole < len + room) { |
Michael Dalton | ab7db91 | 2014-01-16 22:23:27 -0800 | [diff] [blame] | 1203 | /* To avoid internal fragmentation, if there is very likely not |
| 1204 | * enough space for another buffer, add the remaining space to |
Michael S. Tsirkin | 1daa879 | 2017-07-31 21:49:49 +0300 | [diff] [blame] | 1205 | * the current buffer. |
Michael Dalton | ab7db91 | 2014-01-16 22:23:27 -0800 | [diff] [blame] | 1206 | */ |
Michael Dalton | fb51879 | 2014-01-16 22:23:26 -0800 | [diff] [blame] | 1207 | len += hole; |
| 1208 | alloc_frag->offset += hole; |
| 1209 | } |
Shirley Ma | 9ab86bb | 2010-01-29 03:20:04 +0000 | [diff] [blame] | 1210 | |
Michael Dalton | fb51879 | 2014-01-16 22:23:26 -0800 | [diff] [blame] | 1211 | sg_init_one(rq->sg, buf, len); |
David S. Miller | 29fda25 | 2017-08-01 10:07:50 -0700 | [diff] [blame] | 1212 | ctx = mergeable_len_to_ctx(len, headroom); |
Michael S. Tsirkin | 680557c | 2017-03-06 21:29:47 +0200 | [diff] [blame] | 1213 | err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp); |
Shirley Ma | 9ab86bb | 2010-01-29 03:20:04 +0000 | [diff] [blame] | 1214 | if (err < 0) |
Michael Dalton | 2613af0 | 2013-10-28 15:44:18 -0700 | [diff] [blame] | 1215 | put_page(virt_to_head_page(buf)); |
Shirley Ma | 9ab86bb | 2010-01-29 03:20:04 +0000 | [diff] [blame] | 1216 | |
| 1217 | return err; |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1218 | } |
| 1219 | |
Rusty Russell | b2baed6 | 2011-12-29 00:42:38 +0000 | [diff] [blame] | 1220 | /* |
| 1221 | * Returns false if we couldn't fill entirely (OOM). |
| 1222 | * |
| 1223 | * Normally run in the receive path, but can also be run from ndo_open |
| 1224 | * before we're receiving packets, or from refill_work which is |
| 1225 | * careful to disable receiving (using napi_disable). |
| 1226 | */ |
Michael S. Tsirkin | 946fa56 | 2014-10-24 00:12:10 +0300 | [diff] [blame] | 1227 | static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq, |
| 1228 | gfp_t gfp) |
Mark McLoughlin | 3f2c31d | 2008-11-16 22:41:34 -0800 | [diff] [blame] | 1229 | { |
Mark McLoughlin | 3f2c31d | 2008-11-16 22:41:34 -0800 | [diff] [blame] | 1230 | int err; |
Michael S. Tsirkin | 1788f495 | 2010-07-02 16:32:55 +0000 | [diff] [blame] | 1231 | bool oom; |
Mark McLoughlin | 3f2c31d | 2008-11-16 22:41:34 -0800 | [diff] [blame] | 1232 | |
Amit Shah | 0aea51c | 2009-08-26 14:58:28 +0530 | [diff] [blame] | 1233 | do { |
Shirley Ma | 9ab86bb | 2010-01-29 03:20:04 +0000 | [diff] [blame] | 1234 | if (vi->mergeable_rx_bufs) |
John Fastabend | 2de2f7f | 2017-02-02 19:16:29 -0800 | [diff] [blame] | 1235 | err = add_recvbuf_mergeable(vi, rq, gfp); |
Shirley Ma | 9ab86bb | 2010-01-29 03:20:04 +0000 | [diff] [blame] | 1236 | else if (vi->big_packets) |
Michael S. Tsirkin | 012873d | 2014-10-24 16:55:57 +0300 | [diff] [blame] | 1237 | err = add_recvbuf_big(vi, rq, gfp); |
Shirley Ma | 9ab86bb | 2010-01-29 03:20:04 +0000 | [diff] [blame] | 1238 | else |
Michael S. Tsirkin | 946fa56 | 2014-10-24 00:12:10 +0300 | [diff] [blame] | 1239 | err = add_recvbuf_small(vi, rq, gfp); |
Mark McLoughlin | 3f2c31d | 2008-11-16 22:41:34 -0800 | [diff] [blame] | 1240 | |
Michael S. Tsirkin | 1788f495 | 2010-07-02 16:32:55 +0000 | [diff] [blame] | 1241 | oom = err == -ENOMEM; |
Rusty Russell | 9ed4cb0 | 2012-10-16 23:56:14 +1030 | [diff] [blame] | 1242 | if (err) |
Mark McLoughlin | 3f2c31d | 2008-11-16 22:41:34 -0800 | [diff] [blame] | 1243 | break; |
Linus Torvalds | b7dfde9 | 2012-12-20 08:37:04 -0800 | [diff] [blame] | 1244 | } while (rq->vq->num_free); |
Toshiaki Makita | 461f03d | 2018-07-23 23:36:09 +0900 | [diff] [blame] | 1245 | if (virtqueue_kick_prepare(rq->vq) && virtqueue_notify(rq->vq)) { |
| 1246 | u64_stats_update_begin(&rq->stats.syncp); |
Jason Wang | d46eeea | 2018-07-31 17:43:39 +0800 | [diff] [blame] | 1247 | rq->stats.kicks++; |
Toshiaki Makita | 461f03d | 2018-07-23 23:36:09 +0900 | [diff] [blame] | 1248 | u64_stats_update_end(&rq->stats.syncp); |
| 1249 | } |
| 1250 | |
Rusty Russell | 3161e45 | 2009-08-26 12:22:32 -0700 | [diff] [blame] | 1251 | return !oom; |
Mark McLoughlin | 3f2c31d | 2008-11-16 22:41:34 -0800 | [diff] [blame] | 1252 | } |
| 1253 | |
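/* Receive virtqueue interrupt callback: map the vq back to its receive
 * queue and schedule NAPI.
 */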
Rusty Russell | 18445c4 | 2008-02-04 23:49:57 -0500 | [diff] [blame] | 1254 | static void skb_recv_done(struct virtqueue *rvq) |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1255 | { |
| 1256 | struct virtnet_info *vi = rvq->vdev->priv; |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1257 | struct receive_queue *rq = &vi->rq[vq2rxq(rvq)]; |
Jason Wang | e9d7417 | 2012-12-07 07:04:55 +0000 | [diff] [blame] | 1258 | |
Willem de Bruijn | e4e8452 | 2017-04-24 13:49:26 -0400 | [diff] [blame] | 1259 | virtqueue_napi_schedule(&rq->napi, rvq); |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1260 | } |
| 1261 | |
Willem de Bruijn | e4e8452 | 2017-04-24 13:49:26 -0400 | [diff] [blame] | 1262 | static void virtnet_napi_enable(struct virtqueue *vq, struct napi_struct *napi) |
Bruce Rogers | 3e9d08e | 2011-02-10 11:03:31 -0800 | [diff] [blame] | 1263 | { |
Willem de Bruijn | e4e8452 | 2017-04-24 13:49:26 -0400 | [diff] [blame] | 1264 | napi_enable(napi); |
Bruce Rogers | 3e9d08e | 2011-02-10 11:03:31 -0800 | [diff] [blame] | 1265 | |
| 1266 | /* If all buffers were filled by the other side before we enabled napi,
Willem de Bruijn | e4e8452 | 2017-04-24 13:49:26 -0400 | [diff] [blame] | 1267 | * we won't get another interrupt, so process any outstanding packets now.
| 1268 | * Call local_bh_enable afterwards to trigger softIRQ processing.
| 1269 | */
| 1270 | local_bh_disable(); |
| 1271 | virtqueue_napi_schedule(napi, vq); |
| 1272 | local_bh_enable(); |
Bruce Rogers | 3e9d08e | 2011-02-10 11:03:31 -0800 | [diff] [blame] | 1273 | } |
| 1274 | |
Willem de Bruijn | b92f1e6 | 2017-04-24 13:49:27 -0400 | [diff] [blame] | 1275 | static void virtnet_napi_tx_enable(struct virtnet_info *vi, |
| 1276 | struct virtqueue *vq, |
| 1277 | struct napi_struct *napi) |
| 1278 | { |
| 1279 | if (!napi->weight) |
| 1280 | return; |
| 1281 | |
| 1282 | /* Tx napi touches cachelines on the cpu handling tx interrupts. Only |
| 1283 | * enable the feature if this is likely affine with the transmit path. |
| 1284 | */ |
| 1285 | if (!vi->affinity_hint_set) { |
| 1286 | napi->weight = 0; |
| 1287 | return; |
| 1288 | } |
| 1289 | |
| 1290 | return virtnet_napi_enable(vq, napi); |
| 1291 | } |
| 1292 | |
Willem de Bruijn | 78a57b4 | 2017-04-25 15:59:17 -0400 | [diff] [blame] | 1293 | static void virtnet_napi_tx_disable(struct napi_struct *napi) |
| 1294 | { |
| 1295 | if (napi->weight) |
| 1296 | napi_disable(napi); |
| 1297 | } |
| 1298 | |
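/* Deferred refill after an out-of-memory failure: retry each receive
 * ring with its NAPI disabled and re-arm the work if a ring is still
 * empty afterwards.
 */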
Rusty Russell | 3161e45 | 2009-08-26 12:22:32 -0700 | [diff] [blame] | 1299 | static void refill_work(struct work_struct *work) |
| 1300 | { |
Jason Wang | e9d7417 | 2012-12-07 07:04:55 +0000 | [diff] [blame] | 1301 | struct virtnet_info *vi = |
| 1302 | container_of(work, struct virtnet_info, refill.work); |
Rusty Russell | 3161e45 | 2009-08-26 12:22:32 -0700 | [diff] [blame] | 1303 | bool still_empty; |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1304 | int i; |
Rusty Russell | 3161e45 | 2009-08-26 12:22:32 -0700 | [diff] [blame] | 1305 | |
Sasha Levin | 55257d7 | 2013-04-29 12:00:08 +0930 | [diff] [blame] | 1306 | for (i = 0; i < vi->curr_queue_pairs; i++) { |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1307 | struct receive_queue *rq = &vi->rq[i]; |
Rusty Russell | 3161e45 | 2009-08-26 12:22:32 -0700 | [diff] [blame] | 1308 | |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1309 | napi_disable(&rq->napi); |
Michael S. Tsirkin | 946fa56 | 2014-10-24 00:12:10 +0300 | [diff] [blame] | 1310 | still_empty = !try_fill_recv(vi, rq, GFP_KERNEL); |
Willem de Bruijn | e4e8452 | 2017-04-24 13:49:26 -0400 | [diff] [blame] | 1311 | virtnet_napi_enable(rq->vq, &rq->napi); |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1312 | |
| 1313 | /* In theory, this can happen: if we don't get any buffers in,
| 1314 | * we will *never* try to fill again.
| 1315 | */
| 1316 | if (still_empty) |
| 1317 | schedule_delayed_work(&vi->refill, HZ/2); |
| 1318 | } |
Rusty Russell | 3161e45 | 2009-08-26 12:22:32 -0700 | [diff] [blame] | 1319 | } |
| 1320 | |
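/* Poll up to @budget completed buffers off the receive ring, refill
 * once roughly half the descriptors are free (deferring to refill_work
 * on OOM), and fold the on-stack stats into the per-queue counters.
 */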
Jesper Dangaard Brouer | 2471c75 | 2018-06-26 17:39:58 +0200 | [diff] [blame] | 1321 | static int virtnet_receive(struct receive_queue *rq, int budget, |
| 1322 | unsigned int *xdp_xmit) |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1323 | { |
Jason Wang | e9d7417 | 2012-12-07 07:04:55 +0000 | [diff] [blame] | 1324 | struct virtnet_info *vi = rq->vq->vdev->priv; |
Jason Wang | d46eeea | 2018-07-31 17:43:39 +0800 | [diff] [blame] | 1325 | struct virtnet_rq_stats stats = {}; |
Toshiaki Makita | a0929a4 | 2018-07-23 23:36:05 +0900 | [diff] [blame] | 1326 | unsigned int len; |
Shirley Ma | 9ab86bb | 2010-01-29 03:20:04 +0000 | [diff] [blame] | 1327 | void *buf; |
Toshiaki Makita | a0929a4 | 2018-07-23 23:36:05 +0900 | [diff] [blame] | 1328 | int i; |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1329 | |
Jason Wang | 192f68c | 2017-07-19 16:54:47 +0800 | [diff] [blame] | 1330 | if (!vi->big_packets || vi->mergeable_rx_bufs) { |
Michael S. Tsirkin | 680557c | 2017-03-06 21:29:47 +0200 | [diff] [blame] | 1331 | void *ctx; |
| 1332 | |
Jason Wang | d46eeea | 2018-07-31 17:43:39 +0800 | [diff] [blame] | 1333 | while (stats.packets < budget && |
Michael S. Tsirkin | 680557c | 2017-03-06 21:29:47 +0200 | [diff] [blame] | 1334 | (buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx))) { |
Toshiaki Makita | a0929a4 | 2018-07-23 23:36:05 +0900 | [diff] [blame] | 1335 | receive_buf(vi, rq, buf, len, ctx, xdp_xmit, &stats); |
Jason Wang | d46eeea | 2018-07-31 17:43:39 +0800 | [diff] [blame] | 1336 | stats.packets++; |
Michael S. Tsirkin | 680557c | 2017-03-06 21:29:47 +0200 | [diff] [blame] | 1337 | } |
| 1338 | } else { |
Jason Wang | d46eeea | 2018-07-31 17:43:39 +0800 | [diff] [blame] | 1339 | while (stats.packets < budget && |
Michael S. Tsirkin | 680557c | 2017-03-06 21:29:47 +0200 | [diff] [blame] | 1340 | (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) { |
Toshiaki Makita | a0929a4 | 2018-07-23 23:36:05 +0900 | [diff] [blame] | 1341 | receive_buf(vi, rq, buf, len, NULL, xdp_xmit, &stats); |
Jason Wang | d46eeea | 2018-07-31 17:43:39 +0800 | [diff] [blame] | 1342 | stats.packets++; |
Michael S. Tsirkin | 680557c | 2017-03-06 21:29:47 +0200 | [diff] [blame] | 1343 | } |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1344 | } |
| 1345 | |
? jiang | 718be6b | 2019-08-20 02:51:23 +0000 | [diff] [blame] | 1346 | if (rq->vq->num_free > min((unsigned int)budget, virtqueue_get_vring_size(rq->vq)) / 2) { |
Michael S. Tsirkin | 946fa56 | 2014-10-24 00:12:10 +0300 | [diff] [blame] | 1347 | if (!try_fill_recv(vi, rq, GFP_ATOMIC)) |
Tejun Heo | 3b07e9c | 2012-08-20 14:51:24 -0700 | [diff] [blame] | 1348 | schedule_delayed_work(&vi->refill, 0); |
Rusty Russell | 3161e45 | 2009-08-26 12:22:32 -0700 | [diff] [blame] | 1349 | } |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1350 | |
Toshiaki Makita | d7dfc5c | 2018-01-17 15:38:25 +0900 | [diff] [blame] | 1351 | u64_stats_update_begin(&rq->stats.syncp); |
Toshiaki Makita | a0929a4 | 2018-07-23 23:36:05 +0900 | [diff] [blame] | 1352 | for (i = 0; i < VIRTNET_RQ_STATS_LEN; i++) { |
| 1353 | size_t offset = virtnet_rq_stats_desc[i].offset; |
| 1354 | u64 *item; |
| 1355 | |
Jason Wang | d46eeea | 2018-07-31 17:43:39 +0800 | [diff] [blame] | 1356 | item = (u64 *)((u8 *)&rq->stats + offset); |
| 1357 | *item += *(u64 *)((u8 *)&stats + offset); |
Toshiaki Makita | a0929a4 | 2018-07-23 23:36:05 +0900 | [diff] [blame] | 1358 | } |
Toshiaki Makita | d7dfc5c | 2018-01-17 15:38:25 +0900 | [diff] [blame] | 1359 | u64_stats_update_end(&rq->stats.syncp); |
Jason Wang | 61845d2 | 2017-02-17 11:33:09 +0800 | [diff] [blame] | 1360 | |
Jason Wang | d46eeea | 2018-07-31 17:43:39 +0800 | [diff] [blame] | 1361 | return stats.packets; |
Jason Wang | 2ffa759 | 2014-07-23 16:33:54 +0800 | [diff] [blame] | 1362 | } |
| 1363 | |
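/* Reclaim completed transmit buffers (either skbs or XDP frames,
 * distinguished by is_xdp_frame()) and credit the queue's byte and
 * packet counters, skipping the stats update when nothing was
 * reclaimed.
 */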
Michael S. Tsirkin | df133f3 | 2019-01-17 23:20:07 -0500 | [diff] [blame] | 1364 | static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi) |
Willem de Bruijn | ea7735d | 2017-04-24 13:49:28 -0400 | [diff] [blame] | 1365 | { |
Willem de Bruijn | ea7735d | 2017-04-24 13:49:28 -0400 | [diff] [blame] | 1366 | unsigned int len; |
Willem de Bruijn | ea7735d | 2017-04-24 13:49:28 -0400 | [diff] [blame] | 1367 | unsigned int packets = 0; |
| 1368 | unsigned int bytes = 0; |
Toshiaki Makita | 5050471 | 2019-01-29 09:45:59 +0900 | [diff] [blame] | 1369 | void *ptr; |
Willem de Bruijn | ea7735d | 2017-04-24 13:49:28 -0400 | [diff] [blame] | 1370 | |
Toshiaki Makita | 5050471 | 2019-01-29 09:45:59 +0900 | [diff] [blame] | 1371 | while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) { |
| 1372 | if (likely(!is_xdp_frame(ptr))) { |
| 1373 | struct sk_buff *skb = ptr; |
Willem de Bruijn | ea7735d | 2017-04-24 13:49:28 -0400 | [diff] [blame] | 1374 | |
Toshiaki Makita | 5050471 | 2019-01-29 09:45:59 +0900 | [diff] [blame] | 1375 | pr_debug("Sent skb %p\n", skb); |
| 1376 | |
| 1377 | bytes += skb->len; |
| 1378 | napi_consume_skb(skb, in_napi); |
| 1379 | } else { |
| 1380 | struct xdp_frame *frame = ptr_to_xdp(ptr); |
| 1381 | |
| 1382 | bytes += frame->len; |
| 1383 | xdp_return_frame(frame); |
| 1384 | } |
Willem de Bruijn | ea7735d | 2017-04-24 13:49:28 -0400 | [diff] [blame] | 1385 | packets++; |
Willem de Bruijn | ea7735d | 2017-04-24 13:49:28 -0400 | [diff] [blame] | 1386 | } |
| 1387 | |
| 1388 | /* Avoid overhead when no packets have been processed; this
| 1389 | * happens when called speculatively from start_xmit.
| 1390 | */
| 1391 | if (!packets) |
| 1392 | return; |
| 1393 | |
Toshiaki Makita | d7dfc5c | 2018-01-17 15:38:25 +0900 | [diff] [blame] | 1394 | u64_stats_update_begin(&sq->stats.syncp); |
| 1395 | sq->stats.bytes += bytes; |
| 1396 | sq->stats.packets += packets; |
| 1397 | u64_stats_update_end(&sq->stats.syncp); |
Willem de Bruijn | ea7735d | 2017-04-24 13:49:28 -0400 | [diff] [blame] | 1398 | } |
| 1399 | |
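/* Return true if send queue @q is one of the queues reserved for XDP
 * transmission; those carry raw XDP buffers rather than skbs and must
 * not be cleaned by the skb paths.
 */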
Toshiaki Makita | 534da5e | 2019-01-29 09:45:54 +0900 | [diff] [blame] | 1400 | static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q) |
| 1401 | { |
| 1402 | if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs)) |
| 1403 | return false; |
| 1404 | else if (q < vi->curr_queue_pairs) |
| 1405 | return true; |
| 1406 | else |
| 1407 | return false; |
| 1408 | } |
| 1409 | |
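/* Opportunistically reclaim tx completions for the send queue paired
 * with this receive queue while polling rx, waking the netdev tx queue
 * once enough descriptors are free.
 */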
Willem de Bruijn | 7b0411e | 2017-04-24 13:49:29 -0400 | [diff] [blame] | 1410 | static void virtnet_poll_cleantx(struct receive_queue *rq) |
| 1411 | { |
| 1412 | struct virtnet_info *vi = rq->vq->vdev->priv; |
| 1413 | unsigned int index = vq2rxq(rq->vq); |
| 1414 | struct send_queue *sq = &vi->sq[index]; |
| 1415 | struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index); |
| 1416 | |
Toshiaki Makita | 534da5e | 2019-01-29 09:45:54 +0900 | [diff] [blame] | 1417 | if (!sq->napi.weight || is_xdp_raw_buffer_queue(vi, index)) |
Willem de Bruijn | 7b0411e | 2017-04-24 13:49:29 -0400 | [diff] [blame] | 1418 | return; |
| 1419 | |
| 1420 | if (__netif_tx_trylock(txq)) { |
Michael S. Tsirkin | df133f3 | 2019-01-17 23:20:07 -0500 | [diff] [blame] | 1421 | free_old_xmit_skbs(sq, true); |
Willem de Bruijn | 7b0411e | 2017-04-24 13:49:29 -0400 | [diff] [blame] | 1422 | __netif_tx_unlock(txq); |
| 1423 | } |
| 1424 | |
| 1425 | if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) |
| 1426 | netif_tx_wake_queue(txq); |
| 1427 | } |
| 1428 | |
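/* Receive NAPI handler: clean the paired tx ring, process up to
 * @budget packets, and flush any XDP transmissions or redirects queued
 * up by the packet handlers.
 */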
Jason Wang | 2ffa759 | 2014-07-23 16:33:54 +0800 | [diff] [blame] | 1429 | static int virtnet_poll(struct napi_struct *napi, int budget) |
| 1430 | { |
| 1431 | struct receive_queue *rq = |
| 1432 | container_of(napi, struct receive_queue, napi); |
Jason Wang | 9267c43 | 2018-04-13 14:58:25 +0800 | [diff] [blame] | 1433 | struct virtnet_info *vi = rq->vq->vdev->priv; |
| 1434 | struct send_queue *sq; |
Toshiaki Makita | 2a43565 | 2018-07-23 23:36:07 +0900 | [diff] [blame] | 1435 | unsigned int received; |
Jesper Dangaard Brouer | 2471c75 | 2018-06-26 17:39:58 +0200 | [diff] [blame] | 1436 | unsigned int xdp_xmit = 0; |
Jason Wang | 2ffa759 | 2014-07-23 16:33:54 +0800 | [diff] [blame] | 1437 | |
Willem de Bruijn | 7b0411e | 2017-04-24 13:49:29 -0400 | [diff] [blame] | 1438 | virtnet_poll_cleantx(rq); |
| 1439 | |
Jason Wang | 186b3c9 | 2017-09-19 17:42:43 +0800 | [diff] [blame] | 1440 | received = virtnet_receive(rq, budget, &xdp_xmit); |
Jason Wang | 2ffa759 | 2014-07-23 16:33:54 +0800 | [diff] [blame] | 1441 | |
Rusty Russell | 8329d98 | 2007-11-19 11:20:43 -0500 | [diff] [blame] | 1442 | /* Out of packets? */ |
Willem de Bruijn | e4e8452 | 2017-04-24 13:49:26 -0400 | [diff] [blame] | 1443 | if (received < budget) |
| 1444 | virtqueue_napi_complete(napi, rq->vq, received); |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1445 | |
Jesper Dangaard Brouer | 2471c75 | 2018-06-26 17:39:58 +0200 | [diff] [blame] | 1446 | if (xdp_xmit & VIRTIO_XDP_REDIR) |
Toke Høiland-Jørgensen | 1d23388 | 2020-01-16 16:14:45 +0100 | [diff] [blame] | 1447 | xdp_do_flush(); |
Jesper Dangaard Brouer | 2471c75 | 2018-06-26 17:39:58 +0200 | [diff] [blame] | 1448 | |
| 1449 | if (xdp_xmit & VIRTIO_XDP_TX) { |
Toshiaki Makita | 2a43565 | 2018-07-23 23:36:07 +0900 | [diff] [blame] | 1450 | sq = virtnet_xdp_sq(vi); |
Toshiaki Makita | 461f03d | 2018-07-23 23:36:09 +0900 | [diff] [blame] | 1451 | if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) { |
| 1452 | u64_stats_update_begin(&sq->stats.syncp); |
| 1453 | sq->stats.kicks++; |
| 1454 | u64_stats_update_end(&sq->stats.syncp); |
| 1455 | } |
Jason Wang | 9267c43 | 2018-04-13 14:58:25 +0800 | [diff] [blame] | 1456 | } |
Jason Wang | 186b3c9 | 2017-09-19 17:42:43 +0800 | [diff] [blame] | 1457 | |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1458 | return received; |
| 1459 | } |
| 1460 | |
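/* ndo_open: prime the receive rings (falling back to the refill worker
 * on OOM), register per-queue XDP rxq info, and enable rx and tx NAPI.
 */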
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1461 | static int virtnet_open(struct net_device *dev) |
| 1462 | { |
| 1463 | struct virtnet_info *vi = netdev_priv(dev); |
Jesper Dangaard Brouer | 754b8a2 | 2018-01-03 11:26:04 +0100 | [diff] [blame] | 1464 | int i, err; |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1465 | |
Jason Wang | e416662 | 2013-05-21 20:03:58 +0000 | [diff] [blame] | 1466 | for (i = 0; i < vi->max_queue_pairs; i++) { |
| 1467 | if (i < vi->curr_queue_pairs) |
| 1468 | /* Make sure we have some buffers: if oom use wq. */ |
Michael S. Tsirkin | 946fa56 | 2014-10-24 00:12:10 +0300 | [diff] [blame] | 1469 | if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL)) |
Jason Wang | e416662 | 2013-05-21 20:03:58 +0000 | [diff] [blame] | 1470 | schedule_delayed_work(&vi->refill, 0); |
Jesper Dangaard Brouer | 754b8a2 | 2018-01-03 11:26:04 +0100 | [diff] [blame] | 1471 | |
| 1472 | err = xdp_rxq_info_reg(&vi->rq[i].xdp_rxq, dev, i); |
| 1473 | if (err < 0) |
| 1474 | return err; |
| 1475 | |
Jesper Dangaard Brouer | 8d5d885 | 2018-04-17 16:46:12 +0200 | [diff] [blame] | 1476 | err = xdp_rxq_info_reg_mem_model(&vi->rq[i].xdp_rxq, |
| 1477 | MEM_TYPE_PAGE_SHARED, NULL); |
| 1478 | if (err < 0) { |
| 1479 | xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq); |
| 1480 | return err; |
| 1481 | } |
| 1482 | |
Willem de Bruijn | e4e8452 | 2017-04-24 13:49:26 -0400 | [diff] [blame] | 1483 | virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); |
Willem de Bruijn | b92f1e6 | 2017-04-24 13:49:27 -0400 | [diff] [blame] | 1484 | virtnet_napi_tx_enable(vi, vi->sq[i].vq, &vi->sq[i].napi); |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1485 | } |
| 1486 | |
| 1487 | return 0; |
| 1488 | } |
| 1489 | |
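/* Transmit NAPI handler: reclaim completions under the tx queue lock
 * and wake the queue once at least 2 + MAX_SKB_FRAGS descriptors are
 * free; XDP queues are completed immediately without cleaning.
 */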
Willem de Bruijn | b92f1e6 | 2017-04-24 13:49:27 -0400 | [diff] [blame] | 1490 | static int virtnet_poll_tx(struct napi_struct *napi, int budget) |
| 1491 | { |
| 1492 | struct send_queue *sq = container_of(napi, struct send_queue, napi); |
| 1493 | struct virtnet_info *vi = sq->vq->vdev->priv; |
Toshiaki Makita | 534da5e | 2019-01-29 09:45:54 +0900 | [diff] [blame] | 1494 | unsigned int index = vq2txq(sq->vq); |
| 1495 | struct netdev_queue *txq; |
Willem de Bruijn | b92f1e6 | 2017-04-24 13:49:27 -0400 | [diff] [blame] | 1496 | |
Toshiaki Makita | 534da5e | 2019-01-29 09:45:54 +0900 | [diff] [blame] | 1497 | if (unlikely(is_xdp_raw_buffer_queue(vi, index))) { |
| 1498 | /* We don't need to enable cb for XDP */ |
| 1499 | napi_complete_done(napi, 0); |
| 1500 | return 0; |
| 1501 | } |
| 1502 | |
| 1503 | txq = netdev_get_tx_queue(vi->dev, index); |
Willem de Bruijn | b92f1e6 | 2017-04-24 13:49:27 -0400 | [diff] [blame] | 1504 | __netif_tx_lock(txq, raw_smp_processor_id()); |
Michael S. Tsirkin | df133f3 | 2019-01-17 23:20:07 -0500 | [diff] [blame] | 1505 | free_old_xmit_skbs(sq, true); |
Willem de Bruijn | b92f1e6 | 2017-04-24 13:49:27 -0400 | [diff] [blame] | 1506 | __netif_tx_unlock(txq); |
| 1507 | |
| 1508 | virtqueue_napi_complete(napi, sq->vq, 0); |
| 1509 | |
| 1510 | if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) |
| 1511 | netif_tx_wake_queue(txq); |
| 1512 | |
| 1513 | return 0; |
| 1514 | } |
| 1515 | |
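/* Map an skb onto the send virtqueue. When the device accepts any
 * header layout and the skb has enough headroom, the virtio-net header
 * is pushed in front of the data so one sg list covers both; otherwise
 * the header occupies a separate sg entry.
 */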
Jason Wang | e9d7417 | 2012-12-07 07:04:55 +0000 | [diff] [blame] | 1516 | static int xmit_skb(struct send_queue *sq, struct sk_buff *skb) |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1517 | { |
Michael S. Tsirkin | 012873d | 2014-10-24 16:55:57 +0300 | [diff] [blame] | 1518 | struct virtio_net_hdr_mrg_rxbuf *hdr; |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1519 | const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest; |
Jason Wang | e9d7417 | 2012-12-07 07:04:55 +0000 | [diff] [blame] | 1520 | struct virtnet_info *vi = sq->vq->vdev->priv; |
Jason A. Donenfeld | e2fcad5 | 2017-06-04 04:16:26 +0200 | [diff] [blame] | 1521 | int num_sg; |
Michael S. Tsirkin | 012873d | 2014-10-24 16:55:57 +0300 | [diff] [blame] | 1522 | unsigned hdr_len = vi->hdr_len; |
Michael S. Tsirkin | e7428e9 | 2013-07-25 10:20:23 +0930 | [diff] [blame] | 1523 | bool can_push; |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1524 | |
Johannes Berg | e174961 | 2008-10-27 15:59:26 -0700 | [diff] [blame] | 1525 | pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest); |
Michael S. Tsirkin | e7428e9 | 2013-07-25 10:20:23 +0930 | [diff] [blame] | 1526 | |
| 1527 | can_push = vi->any_header_sg && |
| 1528 | !((unsigned long)skb->data & (__alignof__(*hdr) - 1)) && |
| 1529 | !skb_header_cloned(skb) && skb_headroom(skb) >= hdr_len; |
| 1530 | /* Even if we can, don't push here yet as this would skew |
| 1531 | * csum_start offset below. */ |
| 1532 | if (can_push) |
Michael S. Tsirkin | 012873d | 2014-10-24 16:55:57 +0300 | [diff] [blame] | 1533 | hdr = (struct virtio_net_hdr_mrg_rxbuf *)(skb->data - hdr_len); |
Michael S. Tsirkin | e7428e9 | 2013-07-25 10:20:23 +0930 | [diff] [blame] | 1534 | else |
| 1535 | hdr = skb_vnet_hdr(skb); |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1536 | |
Mike Rapoport | e858fae | 2016-06-08 16:09:21 +0300 | [diff] [blame] | 1537 | if (virtio_net_hdr_from_skb(skb, &hdr->hdr, |
Willem de Bruijn | fd3a886 | 2018-06-06 11:23:01 -0400 | [diff] [blame] | 1538 | virtio_is_little_endian(vi->vdev), false, |
| 1539 | 0)) |
Mike Rapoport | e858fae | 2016-06-08 16:09:21 +0300 | [diff] [blame] | 1540 | BUG(); |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1541 | |
Mark McLoughlin | 3f2c31d | 2008-11-16 22:41:34 -0800 | [diff] [blame] | 1542 | if (vi->mergeable_rx_bufs) |
Michael S. Tsirkin | 012873d | 2014-10-24 16:55:57 +0300 | [diff] [blame] | 1543 | hdr->num_buffers = 0; |
Mark McLoughlin | 3f2c31d | 2008-11-16 22:41:34 -0800 | [diff] [blame] | 1544 | |
Jason Wang | 547c890 | 2015-08-27 14:53:06 +0800 | [diff] [blame] | 1545 | sg_init_table(sq->sg, skb_shinfo(skb)->nr_frags + (can_push ? 1 : 2)); |
Michael S. Tsirkin | e7428e9 | 2013-07-25 10:20:23 +0930 | [diff] [blame] | 1546 | if (can_push) { |
| 1547 | __skb_push(skb, hdr_len); |
| 1548 | num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len); |
Jason A. Donenfeld | e2fcad5 | 2017-06-04 04:16:26 +0200 | [diff] [blame] | 1549 | if (unlikely(num_sg < 0)) |
| 1550 | return num_sg; |
Michael S. Tsirkin | e7428e9 | 2013-07-25 10:20:23 +0930 | [diff] [blame] | 1551 | /* Pull header back to avoid skew in tx bytes calculations. */ |
| 1552 | __skb_pull(skb, hdr_len); |
| 1553 | } else { |
| 1554 | sg_set_buf(sq->sg, hdr, hdr_len); |
Jason A. Donenfeld | e2fcad5 | 2017-06-04 04:16:26 +0200 | [diff] [blame] | 1555 | num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len); |
| 1556 | if (unlikely(num_sg < 0)) |
| 1557 | return num_sg; |
| 1558 | num_sg++; |
Michael S. Tsirkin | e7428e9 | 2013-07-25 10:20:23 +0930 | [diff] [blame] | 1559 | } |
Rusty Russell | 9dc7b9e | 2013-03-20 15:44:28 +1030 | [diff] [blame] | 1560 | return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC); |
Rusty Russell | 11a3a15 | 2008-05-26 17:48:13 +1000 | [diff] [blame] | 1561 | } |
| 1562 | |
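/* ndo_start_xmit: reclaim old completions, queue the skb, and kick the
 * device unless more packets are pending; the queue is stopped early,
 * once fewer than 2 + MAX_SKB_FRAGS descriptors remain, rather than
 * ever returning NETDEV_TX_BUSY.
 */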
Stephen Hemminger | 424efe9 | 2009-08-31 19:50:51 +0000 | [diff] [blame] | 1563 | static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev) |
Rusty Russell | 99ffc69 | 2008-05-02 21:50:46 -0500 | [diff] [blame] | 1564 | { |
| 1565 | struct virtnet_info *vi = netdev_priv(dev); |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1566 | int qnum = skb_get_queue_mapping(skb); |
| 1567 | struct send_queue *sq = &vi->sq[qnum]; |
Rusty Russell | 9ed4cb0 | 2012-10-16 23:56:14 +1030 | [diff] [blame] | 1568 | int err; |
Michael S. Tsirkin | 4b7fd2e6 | 2014-10-15 16:23:28 +0300 | [diff] [blame] | 1569 | struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum); |
Florian Westphal | 6b16f9e | 2019-04-01 16:42:14 +0200 | [diff] [blame] | 1570 | bool kick = !netdev_xmit_more(); |
Willem de Bruijn | b92f1e6 | 2017-04-24 13:49:27 -0400 | [diff] [blame] | 1571 | bool use_napi = sq->napi.weight; |
Rusty Russell | 2cb9c6b | 2008-02-04 23:50:07 -0500 | [diff] [blame] | 1572 | |
Rusty Russell | 2cb9c6b | 2008-02-04 23:50:07 -0500 | [diff] [blame] | 1573 | /* Free up any pending old buffers before queueing new ones. */ |
Michael S. Tsirkin | df133f3 | 2019-01-17 23:20:07 -0500 | [diff] [blame] | 1574 | free_old_xmit_skbs(sq, false); |
Rusty Russell | 2cb9c6b | 2008-02-04 23:50:07 -0500 | [diff] [blame] | 1575 | |
Willem de Bruijn | bdb12e0 | 2017-04-24 13:49:30 -0400 | [diff] [blame] | 1576 | if (use_napi && kick) |
| 1577 | virtqueue_enable_cb_delayed(sq->vq); |
| 1578 | |
Jacob Keller | 074c358 | 2014-06-25 02:37:13 +0000 | [diff] [blame] | 1579 | /* timestamp packet in software */ |
| 1580 | skb_tx_timestamp(skb); |
| 1581 | |
Michael S. Tsirkin | 03f191b | 2009-10-28 04:03:38 -0700 | [diff] [blame] | 1582 | /* Try to transmit */ |
Linus Torvalds | b7dfde9 | 2012-12-20 08:37:04 -0800 | [diff] [blame] | 1583 | err = xmit_skb(sq, skb); |
Rusty Russell | 48925e3 | 2009-09-24 09:59:20 -0600 | [diff] [blame] | 1584 | |
Rusty Russell | 9ed4cb0 | 2012-10-16 23:56:14 +1030 | [diff] [blame] | 1585 | /* This should not happen! */ |
Jason Wang | 681daee2 | 2014-03-26 13:03:00 +0800 | [diff] [blame] | 1586 | if (unlikely(err)) { |
Rusty Russell | 9ed4cb0 | 2012-10-16 23:56:14 +1030 | [diff] [blame] | 1587 | dev->stats.tx_fifo_errors++; |
| 1588 | if (net_ratelimit()) |
| 1589 | dev_warn(&dev->dev, |
Yuval Shaia | 7934b48 | 2019-04-03 12:10:13 +0300 | [diff] [blame] | 1590 | "Unexpected TXQ (%d) queue failure: %d\n", |
| 1591 | qnum, err); |
Rusty Russell | 58eba97d | 2010-07-02 16:34:01 +0000 | [diff] [blame] | 1592 | dev->stats.tx_dropped++; |
Eric W. Biederman | 85e9452 | 2014-03-15 18:43:33 -0700 | [diff] [blame] | 1593 | dev_kfree_skb_any(skb); |
Rusty Russell | 58eba97d | 2010-07-02 16:34:01 +0000 | [diff] [blame] | 1594 | return NETDEV_TX_OK; |
Rusty Russell | 99ffc69 | 2008-05-02 21:50:46 -0500 | [diff] [blame] | 1595 | } |
Michael S. Tsirkin | 03f191b | 2009-10-28 04:03:38 -0700 | [diff] [blame] | 1596 | |
Rusty Russell | 48925e3 | 2009-09-24 09:59:20 -0600 | [diff] [blame] | 1597 | /* Don't wait around for transmitted skbs to be freed. */
Willem de Bruijn | b92f1e6 | 2017-04-24 13:49:27 -0400 | [diff] [blame] | 1598 | if (!use_napi) { |
| 1599 | skb_orphan(skb); |
Florian Westphal | 895b5c9 | 2019-09-29 20:54:03 +0200 | [diff] [blame] | 1600 | nf_reset_ct(skb); |
Willem de Bruijn | b92f1e6 | 2017-04-24 13:49:27 -0400 | [diff] [blame] | 1601 | } |
Rusty Russell | 99ffc69 | 2008-05-02 21:50:46 -0500 | [diff] [blame] | 1602 | |
Michael S. Tsirkin | 60302ff | 2015-04-02 13:05:47 +0200 | [diff] [blame] | 1603 | /* If running out of space, stop queue to avoid getting packets that we |
| 1604 | * are then unable to transmit. |
| 1605 | * An alternative would be to force queuing layer to requeue the skb by |
| 1606 | * returning NETDEV_TX_BUSY. However, NETDEV_TX_BUSY should not be |
| 1607 | * returned in a normal path of operation: it means that driver is not |
| 1608 | * maintaining the TX queue stop/start state properly, and causes |
| 1609 | * the stack to do a non-trivial amount of useless work. |
| 1610 | * Since most packets only take 1 or 2 ring slots, stopping the queue |
| 1611 | * early means 16 slots are typically wasted. |
stephen hemminger | d631b94 | 2015-03-24 16:22:07 -0700 | [diff] [blame] | 1612 | */ |
Linus Torvalds | b7dfde9 | 2012-12-20 08:37:04 -0800 | [diff] [blame] | 1613 | if (sq->vq->num_free < 2+MAX_SKB_FRAGS) { |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1614 | netif_stop_subqueue(dev, qnum); |
Willem de Bruijn | b92f1e6 | 2017-04-24 13:49:27 -0400 | [diff] [blame] | 1615 | if (!use_napi && |
| 1616 | unlikely(!virtqueue_enable_cb_delayed(sq->vq))) { |
Rusty Russell | 48925e3 | 2009-09-24 09:59:20 -0600 | [diff] [blame] | 1617 | /* More just got used, free them then recheck. */ |
Michael S. Tsirkin | df133f3 | 2019-01-17 23:20:07 -0500 | [diff] [blame] | 1618 | free_old_xmit_skbs(sq, false); |
Linus Torvalds | b7dfde9 | 2012-12-20 08:37:04 -0800 | [diff] [blame] | 1619 | if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) { |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1620 | netif_start_subqueue(dev, qnum); |
Jason Wang | e9d7417 | 2012-12-07 07:04:55 +0000 | [diff] [blame] | 1621 | virtqueue_disable_cb(sq->vq); |
Rusty Russell | 48925e3 | 2009-09-24 09:59:20 -0600 | [diff] [blame] | 1622 | } |
| 1623 | } |
Rusty Russell | 99ffc69 | 2008-05-02 21:50:46 -0500 | [diff] [blame] | 1624 | } |
Rusty Russell | 48925e3 | 2009-09-24 09:59:20 -0600 | [diff] [blame] | 1625 | |
Toshiaki Makita | 461f03d | 2018-07-23 23:36:09 +0900 | [diff] [blame] | 1626 | if (kick || netif_xmit_stopped(txq)) { |
| 1627 | if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) { |
| 1628 | u64_stats_update_begin(&sq->stats.syncp); |
| 1629 | sq->stats.kicks++; |
| 1630 | u64_stats_update_end(&sq->stats.syncp); |
| 1631 | } |
| 1632 | } |
David S. Miller | 0b725a2 | 2014-08-25 15:51:53 -0700 | [diff] [blame] | 1633 | |
Rusty Russell | 48925e3 | 2009-09-24 09:59:20 -0600 | [diff] [blame] | 1634 | return NETDEV_TX_OK; |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1635 | } |
| 1636 | |
Amos Kong | 40cbfc3 | 2013-01-21 01:17:21 +0000 | [diff] [blame] | 1637 | /* |
| 1638 | * Send command via the control virtqueue and check status. Commands |
| 1639 | * supported by the hypervisor, as indicated by feature bits, should |
stephen hemminger | 788a8b6 | 2013-12-09 16:18:45 -0800 | [diff] [blame] | 1640 | * never fail unless improperly formatted. |
Amos Kong | 40cbfc3 | 2013-01-21 01:17:21 +0000 | [diff] [blame] | 1641 | */ |
| 1642 | static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd, |
stephen hemminger | d24bae3 | 2013-12-09 16:17:40 -0800 | [diff] [blame] | 1643 | struct scatterlist *out) |
Amos Kong | 40cbfc3 | 2013-01-21 01:17:21 +0000 | [diff] [blame] | 1644 | { |
Rusty Russell | f7bc959 | 2013-03-20 15:44:28 +1030 | [diff] [blame] | 1645 | struct scatterlist *sgs[4], hdr, stat; |
stephen hemminger | d24bae3 | 2013-12-09 16:17:40 -0800 | [diff] [blame] | 1646 | unsigned out_num = 0, tmp; |
Amos Kong | 40cbfc3 | 2013-01-21 01:17:21 +0000 | [diff] [blame] | 1647 | |
| 1648 | /* Caller should know better */ |
Rusty Russell | f7bc959 | 2013-03-20 15:44:28 +1030 | [diff] [blame] | 1649 | BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)); |
Amos Kong | 40cbfc3 | 2013-01-21 01:17:21 +0000 | [diff] [blame] | 1650 | |
Michael S. Tsirkin | 12e5716 | 2018-04-19 08:30:48 +0300 | [diff] [blame] | 1651 | vi->ctrl->status = ~0; |
| 1652 | vi->ctrl->hdr.class = class; |
| 1653 | vi->ctrl->hdr.cmd = cmd; |
Rusty Russell | f7bc959 | 2013-03-20 15:44:28 +1030 | [diff] [blame] | 1654 | /* Add header */ |
Michael S. Tsirkin | 12e5716 | 2018-04-19 08:30:48 +0300 | [diff] [blame] | 1655 | sg_init_one(&hdr, &vi->ctrl->hdr, sizeof(vi->ctrl->hdr)); |
Rusty Russell | f7bc959 | 2013-03-20 15:44:28 +1030 | [diff] [blame] | 1656 | sgs[out_num++] = &hdr; |
Amos Kong | 40cbfc3 | 2013-01-21 01:17:21 +0000 | [diff] [blame] | 1657 | |
Rusty Russell | f7bc959 | 2013-03-20 15:44:28 +1030 | [diff] [blame] | 1658 | if (out) |
| 1659 | sgs[out_num++] = out; |
Amos Kong | 40cbfc3 | 2013-01-21 01:17:21 +0000 | [diff] [blame] | 1660 | |
Rusty Russell | f7bc959 | 2013-03-20 15:44:28 +1030 | [diff] [blame] | 1661 | /* Add return status. */ |
Michael S. Tsirkin | 12e5716 | 2018-04-19 08:30:48 +0300 | [diff] [blame] | 1662 | sg_init_one(&stat, &vi->ctrl->status, sizeof(vi->ctrl->status)); |
stephen hemminger | d24bae3 | 2013-12-09 16:17:40 -0800 | [diff] [blame] | 1663 | sgs[out_num] = &stat; |
Amos Kong | 40cbfc3 | 2013-01-21 01:17:21 +0000 | [diff] [blame] | 1664 | |
stephen hemminger | d24bae3 | 2013-12-09 16:17:40 -0800 | [diff] [blame] | 1665 | BUG_ON(out_num + 1 > ARRAY_SIZE(sgs)); |
Rusty Russell | a7c5814 | 2014-03-13 11:23:39 +1030 | [diff] [blame] | 1666 | virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC); |
Amos Kong | 40cbfc3 | 2013-01-21 01:17:21 +0000 | [diff] [blame] | 1667 | |
Heinz Graalfs | 6797590 | 2013-10-29 09:40:02 +1030 | [diff] [blame] | 1668 | if (unlikely(!virtqueue_kick(vi->cvq))) |
Michael S. Tsirkin | 12e5716 | 2018-04-19 08:30:48 +0300 | [diff] [blame] | 1669 | return vi->ctrl->status == VIRTIO_NET_OK; |
Amos Kong | 40cbfc3 | 2013-01-21 01:17:21 +0000 | [diff] [blame] | 1670 | |
| 1671 | /* Spin for a response; the kick causes an ioport write that traps |
| 1672 | * into the hypervisor, so the request should be handled immediately. |
| 1673 | */ |
Heinz Graalfs | 047b9b9 | 2013-10-29 09:40:47 +1030 | [diff] [blame] | 1674 | while (!virtqueue_get_buf(vi->cvq, &tmp) && |
| 1675 | !virtqueue_is_broken(vi->cvq)) |
Amos Kong | 40cbfc3 | 2013-01-21 01:17:21 +0000 | [diff] [blame] | 1676 | cpu_relax(); |
| 1677 | |
Michael S. Tsirkin | 12e5716 | 2018-04-19 08:30:48 +0300 | [diff] [blame] | 1678 | return vi->ctrl->status == VIRTIO_NET_OK; |
Amos Kong | 40cbfc3 | 2013-01-21 01:17:21 +0000 | [diff] [blame] | 1679 | } |
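/* Illustrative sketch, not driver code: a control command is built from up
 * to four scatterlist entries -- the class/cmd header (driver to device),
 * an optional command-specific "out" buffer, and a one-byte status the
 * device writes back.  A typical caller wraps a single value and checks
 * the boolean result:
 *
 *	struct scatterlist sg;
 *
 *	sg_init_one(&sg, &vi->ctrl->promisc, sizeof(vi->ctrl->promisc));
 *	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
 *				  VIRTIO_NET_CTRL_RX_PROMISC, &sg))
 *		dev_warn(&dev->dev, "command failed\n");
 *
 * The buffers live in vi->ctrl rather than on the stack so they remain
 * valid for DMA while the device processes the request.
 */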
| 1680 | |
Alex Williamson | 9c46f6d | 2009-02-04 16:36:34 -0800 | [diff] [blame] | 1681 | static int virtnet_set_mac_address(struct net_device *dev, void *p) |
| 1682 | { |
| 1683 | struct virtnet_info *vi = netdev_priv(dev); |
| 1684 | struct virtio_device *vdev = vi->vdev; |
Jiri Pirko | f2f2c8b | 2012-06-29 05:10:06 +0000 | [diff] [blame] | 1685 | int ret; |
Andy Lutomirski | e37e2ff | 2016-12-05 18:10:58 -0800 | [diff] [blame] | 1686 | struct sockaddr *addr; |
Amos Kong | 7e58d5a | 2013-01-21 01:17:23 +0000 | [diff] [blame] | 1687 | struct scatterlist sg; |
Alex Williamson | 9c46f6d | 2009-02-04 16:36:34 -0800 | [diff] [blame] | 1688 | |
Sridhar Samudrala | ba5e442 | 2018-05-24 09:55:17 -0700 | [diff] [blame] | 1689 | if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY)) |
| 1690 | return -EOPNOTSUPP; |
| 1691 | |
Shyam Saini | 801822d | 2016-12-24 00:44:58 +0530 | [diff] [blame] | 1692 | addr = kmemdup(p, sizeof(*addr), GFP_KERNEL); |
Andy Lutomirski | e37e2ff | 2016-12-05 18:10:58 -0800 | [diff] [blame] | 1693 | if (!addr) |
| 1694 | return -ENOMEM; |
Andy Lutomirski | e37e2ff | 2016-12-05 18:10:58 -0800 | [diff] [blame] | 1695 | |
| 1696 | ret = eth_prepare_mac_addr_change(dev, addr); |
Jiri Pirko | f2f2c8b | 2012-06-29 05:10:06 +0000 | [diff] [blame] | 1697 | if (ret) |
Andy Lutomirski | e37e2ff | 2016-12-05 18:10:58 -0800 | [diff] [blame] | 1698 | goto out; |
Alex Williamson | 9c46f6d | 2009-02-04 16:36:34 -0800 | [diff] [blame] | 1699 | |
Amos Kong | 7e58d5a | 2013-01-21 01:17:23 +0000 | [diff] [blame] | 1700 | if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) { |
| 1701 | sg_init_one(&sg, addr->sa_data, dev->addr_len); |
| 1702 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC, |
stephen hemminger | d24bae3 | 2013-12-09 16:17:40 -0800 | [diff] [blame] | 1703 | VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) { |
Amos Kong | 7e58d5a | 2013-01-21 01:17:23 +0000 | [diff] [blame] | 1704 | dev_warn(&vdev->dev, |
| 1705 | "Failed to set mac address by vq command.\n"); |
Andy Lutomirski | e37e2ff | 2016-12-05 18:10:58 -0800 | [diff] [blame] | 1706 | ret = -EINVAL; |
| 1707 | goto out; |
Amos Kong | 7e58d5a | 2013-01-21 01:17:23 +0000 | [diff] [blame] | 1708 | } |
Michael S. Tsirkin | 7e93a02 | 2014-11-26 15:58:28 +0200 | [diff] [blame] | 1709 | } else if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC) && |
| 1710 | !virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) { |
Rusty Russell | 855e0c5 | 2013-10-14 18:11:51 +1030 | [diff] [blame] | 1711 | unsigned int i; |
| 1712 | |
| 1713 | /* Naturally, this has an atomicity problem. */ |
| 1714 | for (i = 0; i < dev->addr_len; i++) |
| 1715 | virtio_cwrite8(vdev, |
| 1716 | offsetof(struct virtio_net_config, mac) + |
| 1717 | i, addr->sa_data[i]); |
Amos Kong | 7e58d5a | 2013-01-21 01:17:23 +0000 | [diff] [blame] | 1718 | } |
| 1719 | |
| 1720 | eth_commit_mac_addr_change(dev, p); |
Andy Lutomirski | e37e2ff | 2016-12-05 18:10:58 -0800 | [diff] [blame] | 1721 | ret = 0; |
Alex Williamson | 9c46f6d | 2009-02-04 16:36:34 -0800 | [diff] [blame] | 1722 | |
Andy Lutomirski | e37e2ff | 2016-12-05 18:10:58 -0800 | [diff] [blame] | 1723 | out: |
| 1724 | kfree(addr); |
| 1725 | return ret; |
Alex Williamson | 9c46f6d | 2009-02-04 16:36:34 -0800 | [diff] [blame] | 1726 | } |
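/* Summary of the two update paths above: with VIRTIO_NET_F_CTRL_MAC_ADDR
 * the whole address is pushed through the control virtqueue in a single
 * command; on legacy devices (VIRTIO_NET_F_MAC without VIRTIO_F_VERSION_1)
 * it is instead written byte-by-byte into config space, which is why that
 * branch is annotated as having an atomicity problem.
 */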
| 1727 | |
stephen hemminger | bc1f447 | 2017-01-06 19:12:52 -0800 | [diff] [blame] | 1728 | static void virtnet_stats(struct net_device *dev, |
| 1729 | struct rtnl_link_stats64 *tot) |
stephen hemminger | 3fa2a1d | 2011-06-15 06:36:29 +0000 | [diff] [blame] | 1730 | { |
| 1731 | struct virtnet_info *vi = netdev_priv(dev); |
stephen hemminger | 3fa2a1d | 2011-06-15 06:36:29 +0000 | [diff] [blame] | 1732 | unsigned int start; |
Toshiaki Makita | d7dfc5c | 2018-01-17 15:38:25 +0900 | [diff] [blame] | 1733 | int i; |
stephen hemminger | 3fa2a1d | 2011-06-15 06:36:29 +0000 | [diff] [blame] | 1734 | |
Toshiaki Makita | d7dfc5c | 2018-01-17 15:38:25 +0900 | [diff] [blame] | 1735 | for (i = 0; i < vi->max_queue_pairs; i++) { |
Toshiaki Makita | 2c4a2f7 | 2018-07-23 23:36:06 +0900 | [diff] [blame] | 1736 | u64 tpackets, tbytes, rpackets, rbytes, rdrops; |
Toshiaki Makita | d7dfc5c | 2018-01-17 15:38:25 +0900 | [diff] [blame] | 1737 | struct receive_queue *rq = &vi->rq[i]; |
| 1738 | struct send_queue *sq = &vi->sq[i]; |
stephen hemminger | 3fa2a1d | 2011-06-15 06:36:29 +0000 | [diff] [blame] | 1739 | |
| 1740 | do { |
Toshiaki Makita | d7dfc5c | 2018-01-17 15:38:25 +0900 | [diff] [blame] | 1741 | start = u64_stats_fetch_begin_irq(&sq->stats.syncp); |
| 1742 | tpackets = sq->stats.packets; |
| 1743 | tbytes = sq->stats.bytes; |
| 1744 | } while (u64_stats_fetch_retry_irq(&sq->stats.syncp, start)); |
Eric Dumazet | 83a2705 | 2012-06-05 22:35:24 +0000 | [diff] [blame] | 1745 | |
| 1746 | do { |
Toshiaki Makita | d7dfc5c | 2018-01-17 15:38:25 +0900 | [diff] [blame] | 1747 | start = u64_stats_fetch_begin_irq(&rq->stats.syncp); |
Jason Wang | d46eeea | 2018-07-31 17:43:39 +0800 | [diff] [blame] | 1748 | rpackets = rq->stats.packets; |
| 1749 | rbytes = rq->stats.bytes; |
| 1750 | rdrops = rq->stats.drops; |
Toshiaki Makita | d7dfc5c | 2018-01-17 15:38:25 +0900 | [diff] [blame] | 1751 | } while (u64_stats_fetch_retry_irq(&rq->stats.syncp, start)); |
stephen hemminger | 3fa2a1d | 2011-06-15 06:36:29 +0000 | [diff] [blame] | 1752 | |
| 1753 | tot->rx_packets += rpackets; |
| 1754 | tot->tx_packets += tpackets; |
| 1755 | tot->rx_bytes += rbytes; |
| 1756 | tot->tx_bytes += tbytes; |
Toshiaki Makita | 2c4a2f7 | 2018-07-23 23:36:06 +0900 | [diff] [blame] | 1757 | tot->rx_dropped += rdrops; |
stephen hemminger | 3fa2a1d | 2011-06-15 06:36:29 +0000 | [diff] [blame] | 1758 | } |
| 1759 | |
| 1760 | tot->tx_dropped = dev->stats.tx_dropped; |
Rick Jones | 021ac8d | 2011-11-21 09:28:17 +0000 | [diff] [blame] | 1761 | tot->tx_fifo_errors = dev->stats.tx_fifo_errors; |
stephen hemminger | 3fa2a1d | 2011-06-15 06:36:29 +0000 | [diff] [blame] | 1762 | tot->rx_length_errors = dev->stats.rx_length_errors; |
| 1763 | tot->rx_frame_errors = dev->stats.rx_frame_errors; |
stephen hemminger | 3fa2a1d | 2011-06-15 06:36:29 +0000 | [diff] [blame] | 1764 | } |
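/* Reader side of the per-queue stats, sketched: u64 counters cannot be
 * read atomically on 32-bit SMP, so each queue's counters sit behind a
 * u64_stats seqcount and readers retry until they observe a stable
 * snapshot:
 *
 *	do {
 *		start = u64_stats_fetch_begin_irq(&q->stats.syncp);
 *		pkts = q->stats.packets;
 *	} while (u64_stats_fetch_retry_irq(&q->stats.syncp, start));
 *
 * On 64-bit kernels the begin/retry pair is essentially free.
 */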
| 1765 | |
Jason Wang | 586d17c | 2012-04-11 20:43:52 +0000 | [diff] [blame] | 1766 | static void virtnet_ack_link_announce(struct virtnet_info *vi) |
| 1767 | { |
| 1768 | rtnl_lock(); |
| 1769 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE, |
stephen hemminger | d24bae3 | 2013-12-09 16:17:40 -0800 | [diff] [blame] | 1770 | VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL)) |
Jason Wang | 586d17c | 2012-04-11 20:43:52 +0000 | [diff] [blame] | 1771 | dev_warn(&vi->dev->dev, "Failed to ack link announce.\n"); |
| 1772 | rtnl_unlock(); |
| 1773 | } |
| 1774 | |
John Fastabend | 47315329 | 2017-02-02 19:14:32 -0800 | [diff] [blame] | 1775 | static int _virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs) |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1776 | { |
| 1777 | struct scatterlist sg; |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1778 | struct net_device *dev = vi->dev; |
| 1779 | |
| 1780 | if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ)) |
| 1781 | return 0; |
| 1782 | |
Michael S. Tsirkin | 12e5716 | 2018-04-19 08:30:48 +0300 | [diff] [blame] | 1783 | vi->ctrl->mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs); |
| 1784 | sg_init_one(&sg, &vi->ctrl->mq, sizeof(vi->ctrl->mq)); |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1785 | |
| 1786 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ, |
stephen hemminger | d24bae3 | 2013-12-09 16:17:40 -0800 | [diff] [blame] | 1787 | VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) { |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1788 | dev_warn(&dev->dev, "Failed to set the number of queue pairs to %d\n", |
| 1789 | queue_pairs); |
| 1790 | return -EINVAL; |
Sasha Levin | 55257d7 | 2013-04-29 12:00:08 +0930 | [diff] [blame] | 1791 | } else { |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1792 | vi->curr_queue_pairs = queue_pairs; |
Jason Wang | 35ed159 | 2013-10-15 11:18:59 +0800 | [diff] [blame] | 1793 | /* virtnet_open() will refill when the device is brought up. */ |
| 1794 | if (dev->flags & IFF_UP) |
| 1795 | schedule_delayed_work(&vi->refill, 0); |
Sasha Levin | 55257d7 | 2013-04-29 12:00:08 +0930 | [diff] [blame] | 1796 | } |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1797 | |
| 1798 | return 0; |
| 1799 | } |
| 1800 | |
John Fastabend | 47315329 | 2017-02-02 19:14:32 -0800 | [diff] [blame] | 1801 | static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs) |
| 1802 | { |
| 1803 | int err; |
| 1804 | |
| 1805 | rtnl_lock(); |
| 1806 | err = _virtnet_set_queues(vi, queue_pairs); |
| 1807 | rtnl_unlock(); |
| 1808 | return err; |
| 1809 | } |
| 1810 | |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1811 | static int virtnet_close(struct net_device *dev) |
| 1812 | { |
| 1813 | struct virtnet_info *vi = netdev_priv(dev); |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1814 | int i; |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1815 | |
Rusty Russell | b2baed6 | 2011-12-29 00:42:38 +0000 | [diff] [blame] | 1816 | /* Make sure refill_work doesn't re-enable napi! */ |
| 1817 | cancel_delayed_work_sync(&vi->refill); |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1818 | |
Willem de Bruijn | b92f1e6 | 2017-04-24 13:49:27 -0400 | [diff] [blame] | 1819 | for (i = 0; i < vi->max_queue_pairs; i++) { |
Jesper Dangaard Brouer | 754b8a2 | 2018-01-03 11:26:04 +0100 | [diff] [blame] | 1820 | xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq); |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1821 | napi_disable(&vi->rq[i].napi); |
Willem de Bruijn | 78a57b4 | 2017-04-25 15:59:17 -0400 | [diff] [blame] | 1822 | virtnet_napi_tx_disable(&vi->sq[i].napi); |
Willem de Bruijn | b92f1e6 | 2017-04-24 13:49:27 -0400 | [diff] [blame] | 1823 | } |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1824 | |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1825 | return 0; |
| 1826 | } |
| 1827 | |
Alex Williamson | 2af7698 | 2009-02-04 09:02:40 +0000 | [diff] [blame] | 1828 | static void virtnet_set_rx_mode(struct net_device *dev) |
| 1829 | { |
| 1830 | struct virtnet_info *vi = netdev_priv(dev); |
Alex Williamson | f565a7c | 2009-02-04 09:02:45 +0000 | [diff] [blame] | 1831 | struct scatterlist sg[2]; |
Alex Williamson | f565a7c | 2009-02-04 09:02:45 +0000 | [diff] [blame] | 1832 | struct virtio_net_ctrl_mac *mac_data; |
Jiri Pirko | ccffad25 | 2009-05-22 23:22:17 +0000 | [diff] [blame] | 1833 | struct netdev_hw_addr *ha; |
Jiri Pirko | 32e7bfc | 2010-01-25 13:36:10 -0800 | [diff] [blame] | 1834 | int uc_count; |
Jiri Pirko | 4cd24ea | 2010-02-08 04:30:35 +0000 | [diff] [blame] | 1835 | int mc_count; |
Alex Williamson | f565a7c | 2009-02-04 09:02:45 +0000 | [diff] [blame] | 1836 | void *buf; |
| 1837 | int i; |
Alex Williamson | 2af7698 | 2009-02-04 09:02:40 +0000 | [diff] [blame] | 1838 | |
stephen hemminger | 788a8b6 | 2013-12-09 16:18:45 -0800 | [diff] [blame] | 1839 | /* We can't dynamically set ndo_set_rx_mode, so return gracefully */ |
Alex Williamson | 2af7698 | 2009-02-04 09:02:40 +0000 | [diff] [blame] | 1840 | if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX)) |
| 1841 | return; |
| 1842 | |
Michael S. Tsirkin | 12e5716 | 2018-04-19 08:30:48 +0300 | [diff] [blame] | 1843 | vi->ctrl->promisc = ((dev->flags & IFF_PROMISC) != 0); |
| 1844 | vi->ctrl->allmulti = ((dev->flags & IFF_ALLMULTI) != 0); |
Alex Williamson | 2af7698 | 2009-02-04 09:02:40 +0000 | [diff] [blame] | 1845 | |
Michael S. Tsirkin | 12e5716 | 2018-04-19 08:30:48 +0300 | [diff] [blame] | 1846 | sg_init_one(sg, &vi->ctrl->promisc, sizeof(vi->ctrl->promisc)); |
Alex Williamson | 2af7698 | 2009-02-04 09:02:40 +0000 | [diff] [blame] | 1847 | |
| 1848 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, |
stephen hemminger | d24bae3 | 2013-12-09 16:17:40 -0800 | [diff] [blame] | 1849 | VIRTIO_NET_CTRL_RX_PROMISC, sg)) |
Alex Williamson | 2af7698 | 2009-02-04 09:02:40 +0000 | [diff] [blame] | 1850 | dev_warn(&dev->dev, "Failed to %sable promisc mode.\n", |
Michael S. Tsirkin | 12e5716 | 2018-04-19 08:30:48 +0300 | [diff] [blame] | 1851 | vi->ctrl->promisc ? "en" : "dis"); |
Alex Williamson | 2af7698 | 2009-02-04 09:02:40 +0000 | [diff] [blame] | 1852 | |
Michael S. Tsirkin | 12e5716 | 2018-04-19 08:30:48 +0300 | [diff] [blame] | 1853 | sg_init_one(sg, &vi->ctrl->allmulti, sizeof(vi->ctrl->allmulti)); |
Alex Williamson | 2af7698 | 2009-02-04 09:02:40 +0000 | [diff] [blame] | 1854 | |
| 1855 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, |
stephen hemminger | d24bae3 | 2013-12-09 16:17:40 -0800 | [diff] [blame] | 1856 | VIRTIO_NET_CTRL_RX_ALLMULTI, sg)) |
Alex Williamson | 2af7698 | 2009-02-04 09:02:40 +0000 | [diff] [blame] | 1857 | dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n", |
Michael S. Tsirkin | 12e5716 | 2018-04-19 08:30:48 +0300 | [diff] [blame] | 1858 | vi->ctrl->allmulti ? "en" : "dis"); |
Alex Williamson | f565a7c | 2009-02-04 09:02:45 +0000 | [diff] [blame] | 1859 | |
Jiri Pirko | 32e7bfc | 2010-01-25 13:36:10 -0800 | [diff] [blame] | 1860 | uc_count = netdev_uc_count(dev); |
Jiri Pirko | 4cd24ea | 2010-02-08 04:30:35 +0000 | [diff] [blame] | 1861 | mc_count = netdev_mc_count(dev); |
Alex Williamson | f565a7c | 2009-02-04 09:02:45 +0000 | [diff] [blame] | 1862 | /* MAC filter - use one buffer for both lists */ |
Jiri Pirko | 4cd24ea | 2010-02-08 04:30:35 +0000 | [diff] [blame] | 1863 | buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) + |
| 1864 | (2 * sizeof(mac_data->entries)), GFP_ATOMIC); |
| 1865 | mac_data = buf; |
Joe Perches | e68ed8f | 2013-02-03 17:28:15 +0000 | [diff] [blame] | 1866 | if (!buf) |
Alex Williamson | f565a7c | 2009-02-04 09:02:45 +0000 | [diff] [blame] | 1867 | return; |
Alex Williamson | f565a7c | 2009-02-04 09:02:45 +0000 | [diff] [blame] | 1868 | |
Alex Williamson | 23e258e | 2009-05-01 17:27:56 +0000 | [diff] [blame] | 1869 | sg_init_table(sg, 2); |
| 1870 | |
Alex Williamson | f565a7c | 2009-02-04 09:02:45 +0000 | [diff] [blame] | 1871 | /* Store the unicast list and count in the front of the buffer */ |
Michael S. Tsirkin | fdd819b | 2014-10-07 16:39:48 +0200 | [diff] [blame] | 1872 | mac_data->entries = cpu_to_virtio32(vi->vdev, uc_count); |
Jiri Pirko | ccffad25 | 2009-05-22 23:22:17 +0000 | [diff] [blame] | 1873 | i = 0; |
Jiri Pirko | 32e7bfc | 2010-01-25 13:36:10 -0800 | [diff] [blame] | 1874 | netdev_for_each_uc_addr(ha, dev) |
Jiri Pirko | ccffad25 | 2009-05-22 23:22:17 +0000 | [diff] [blame] | 1875 | memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN); |
Alex Williamson | f565a7c | 2009-02-04 09:02:45 +0000 | [diff] [blame] | 1876 | |
| 1877 | sg_set_buf(&sg[0], mac_data, |
Jiri Pirko | 32e7bfc | 2010-01-25 13:36:10 -0800 | [diff] [blame] | 1878 | sizeof(mac_data->entries) + (uc_count * ETH_ALEN)); |
Alex Williamson | f565a7c | 2009-02-04 09:02:45 +0000 | [diff] [blame] | 1879 | |
| 1880 | /* multicast list and count fill the end */ |
Jiri Pirko | 32e7bfc | 2010-01-25 13:36:10 -0800 | [diff] [blame] | 1881 | mac_data = (void *)&mac_data->macs[uc_count][0]; |
Alex Williamson | f565a7c | 2009-02-04 09:02:45 +0000 | [diff] [blame] | 1882 | |
Michael S. Tsirkin | fdd819b | 2014-10-07 16:39:48 +0200 | [diff] [blame] | 1883 | mac_data->entries = cpu_to_virtio32(vi->vdev, mc_count); |
Jiri Pirko | 567ec87 | 2010-02-23 23:17:07 +0000 | [diff] [blame] | 1884 | i = 0; |
Jiri Pirko | 22bedad3 | 2010-04-01 21:22:57 +0000 | [diff] [blame] | 1885 | netdev_for_each_mc_addr(ha, dev) |
| 1886 | memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN); |
Alex Williamson | f565a7c | 2009-02-04 09:02:45 +0000 | [diff] [blame] | 1887 | |
| 1888 | sg_set_buf(&sg[1], mac_data, |
Jiri Pirko | 4cd24ea | 2010-02-08 04:30:35 +0000 | [diff] [blame] | 1889 | sizeof(mac_data->entries) + (mc_count * ETH_ALEN)); |
Alex Williamson | f565a7c | 2009-02-04 09:02:45 +0000 | [diff] [blame] | 1890 | |
| 1891 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC, |
stephen hemminger | d24bae3 | 2013-12-09 16:17:40 -0800 | [diff] [blame] | 1892 | VIRTIO_NET_CTRL_MAC_TABLE_SET, sg)) |
Thomas Huth | 99e872a | 2013-11-29 10:02:19 +0100 | [diff] [blame] | 1893 | dev_warn(&dev->dev, "Failed to set MAC filter table.\n"); |
Alex Williamson | f565a7c | 2009-02-04 09:02:45 +0000 | [diff] [blame] | 1894 | |
| 1895 | kfree(buf); |
Alex Williamson | 2af7698 | 2009-02-04 09:02:40 +0000 | [diff] [blame] | 1896 | } |
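/* Layout of the filter buffer above, for reference: a single allocation
 * carries two virtio_net_ctrl_mac tables back to back,
 * { entries, macs[uc_count] } followed by { entries, macs[mc_count] },
 * handed to the device as a two-entry scatterlist (sg[0] unicast,
 * sg[1] multicast).
 */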
| 1897 | |
Patrick McHardy | 80d5c36 | 2013-04-19 02:04:28 +0000 | [diff] [blame] | 1898 | static int virtnet_vlan_rx_add_vid(struct net_device *dev, |
| 1899 | __be16 proto, u16 vid) |
Alex Williamson | 0bde9569 | 2009-02-04 09:02:50 +0000 | [diff] [blame] | 1900 | { |
| 1901 | struct virtnet_info *vi = netdev_priv(dev); |
| 1902 | struct scatterlist sg; |
| 1903 | |
Michael S. Tsirkin | d7fad4c | 2018-04-19 08:30:49 +0300 | [diff] [blame] | 1904 | vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid); |
Michael S. Tsirkin | 12e5716 | 2018-04-19 08:30:48 +0300 | [diff] [blame] | 1905 | sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid)); |
Alex Williamson | 0bde9569 | 2009-02-04 09:02:50 +0000 | [diff] [blame] | 1906 | |
| 1907 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, |
stephen hemminger | d24bae3 | 2013-12-09 16:17:40 -0800 | [diff] [blame] | 1908 | VIRTIO_NET_CTRL_VLAN_ADD, &sg)) |
Alex Williamson | 0bde9569 | 2009-02-04 09:02:50 +0000 | [diff] [blame] | 1909 | dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid); |
Jiri Pirko | 8e58613 | 2011-12-08 19:52:37 -0500 | [diff] [blame] | 1910 | return 0; |
Alex Williamson | 0bde9569 | 2009-02-04 09:02:50 +0000 | [diff] [blame] | 1911 | } |
| 1912 | |
Patrick McHardy | 80d5c36 | 2013-04-19 02:04:28 +0000 | [diff] [blame] | 1913 | static int virtnet_vlan_rx_kill_vid(struct net_device *dev, |
| 1914 | __be16 proto, u16 vid) |
Alex Williamson | 0bde9569 | 2009-02-04 09:02:50 +0000 | [diff] [blame] | 1915 | { |
| 1916 | struct virtnet_info *vi = netdev_priv(dev); |
| 1917 | struct scatterlist sg; |
| 1918 | |
Michael S. Tsirkin | d7fad4c | 2018-04-19 08:30:49 +0300 | [diff] [blame] | 1919 | vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid); |
Michael S. Tsirkin | 12e5716 | 2018-04-19 08:30:48 +0300 | [diff] [blame] | 1920 | sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid)); |
Alex Williamson | 0bde9569 | 2009-02-04 09:02:50 +0000 | [diff] [blame] | 1921 | |
| 1922 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, |
stephen hemminger | d24bae3 | 2013-12-09 16:17:40 -0800 | [diff] [blame] | 1923 | VIRTIO_NET_CTRL_VLAN_DEL, &sg)) |
Alex Williamson | 0bde9569 | 2009-02-04 09:02:50 +0000 | [diff] [blame] | 1924 | dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid); |
Jiri Pirko | 8e58613 | 2011-12-08 19:52:37 -0500 | [diff] [blame] | 1925 | return 0; |
Alex Williamson | 0bde9569 | 2009-02-04 09:02:50 +0000 | [diff] [blame] | 1926 | } |
| 1927 | |
Peter Xu | 310974f | 2019-03-18 14:56:06 +0800 | [diff] [blame] | 1928 | static void virtnet_clean_affinity(struct virtnet_info *vi) |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1929 | { |
| 1930 | int i; |
Wanlong Gao | 8898c21 | 2013-01-24 23:51:30 +0000 | [diff] [blame] | 1931 | |
| 1932 | if (vi->affinity_hint_set) { |
| 1933 | for (i = 0; i < vi->max_queue_pairs; i++) { |
Caleb Raitto | 19e226e | 2018-08-09 18:18:28 -0700 | [diff] [blame] | 1934 | virtqueue_set_affinity(vi->rq[i].vq, NULL); |
| 1935 | virtqueue_set_affinity(vi->sq[i].vq, NULL); |
Wanlong Gao | 8898c21 | 2013-01-24 23:51:30 +0000 | [diff] [blame] | 1936 | } |
| 1937 | |
| 1938 | vi->affinity_hint_set = false; |
| 1939 | } |
Wanlong Gao | 8898c21 | 2013-01-24 23:51:30 +0000 | [diff] [blame] | 1940 | } |
| 1941 | |
| 1942 | static void virtnet_set_affinity(struct virtnet_info *vi) |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1943 | { |
Caleb Raitto | 2ca653d | 2018-08-09 17:28:40 -0700 | [diff] [blame] | 1944 | cpumask_var_t mask; |
| 1945 | int stragglers; |
| 1946 | int group_size; |
| 1947 | int i, j, cpu; |
| 1948 | int num_cpu; |
| 1949 | int stride; |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1950 | |
Caleb Raitto | 2ca653d | 2018-08-09 17:28:40 -0700 | [diff] [blame] | 1951 | if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) { |
Peter Xu | 310974f | 2019-03-18 14:56:06 +0800 | [diff] [blame] | 1952 | virtnet_clean_affinity(vi); |
Wanlong Gao | 8898c21 | 2013-01-24 23:51:30 +0000 | [diff] [blame] | 1953 | return; |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1954 | } |
| 1955 | |
Caleb Raitto | 2ca653d | 2018-08-09 17:28:40 -0700 | [diff] [blame] | 1956 | num_cpu = num_online_cpus(); |
| 1957 | stride = max_t(int, num_cpu / vi->curr_queue_pairs, 1); |
| 1958 | stragglers = num_cpu >= vi->curr_queue_pairs ? |
| 1959 | num_cpu % vi->curr_queue_pairs : |
| 1960 | 0; |
| 1961 | cpu = cpumask_next(-1, cpu_online_mask); |
Andrei Vagin | 4d99f66 | 2018-08-08 20:07:35 -0700 | [diff] [blame] | 1962 | |
Caleb Raitto | 2ca653d | 2018-08-09 17:28:40 -0700 | [diff] [blame] | 1963 | for (i = 0; i < vi->curr_queue_pairs; i++) { |
| 1964 | group_size = stride + (i < stragglers ? 1 : 0); |
| 1965 | |
| 1966 | for (j = 0; j < group_size; j++) { |
| 1967 | cpumask_set_cpu(cpu, mask); |
| 1968 | cpu = cpumask_next_wrap(cpu, cpu_online_mask, |
| 1969 | nr_cpu_ids, false); |
| 1970 | } |
| 1971 | virtqueue_set_affinity(vi->rq[i].vq, mask); |
| 1972 | virtqueue_set_affinity(vi->sq[i].vq, mask); |
| 1973 | __netif_set_xps_queue(vi->dev, cpumask_bits(mask), i, false); |
| 1974 | cpumask_clear(mask); |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1975 | } |
| 1976 | |
Wanlong Gao | 8898c21 | 2013-01-24 23:51:30 +0000 | [diff] [blame] | 1977 | vi->affinity_hint_set = true; |
Caleb Raitto | 2ca653d | 2018-08-09 17:28:40 -0700 | [diff] [blame] | 1978 | free_cpumask_var(mask); |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1979 | } |
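/* Worked example of the spreading above (illustrative numbers): with 8
 * online CPUs and 3 queue pairs, stride = 8 / 3 = 2 and stragglers =
 * 8 % 3 = 2, so the first two queue pairs get groups of stride + 1 = 3
 * CPUs and the last gets 2:
 *
 *	qp0 -> {0,1,2}, qp1 -> {3,4,5}, qp2 -> {6,7}
 *
 * Each group mask is applied both as the vq interrupt affinity hint and
 * as the XPS map for the matching TX queue.
 */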
| 1980 | |
Sebastian Andrzej Siewior | 8017c27 | 2016-08-12 19:49:43 +0200 | [diff] [blame] | 1981 | static int virtnet_cpu_online(unsigned int cpu, struct hlist_node *node) |
Wanlong Gao | 8de4b2f | 2013-01-24 23:51:31 +0000 | [diff] [blame] | 1982 | { |
Sebastian Andrzej Siewior | 8017c27 | 2016-08-12 19:49:43 +0200 | [diff] [blame] | 1983 | struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info, |
| 1984 | node); |
| 1985 | virtnet_set_affinity(vi); |
| 1986 | return 0; |
| 1987 | } |
Wanlong Gao | 8de4b2f | 2013-01-24 23:51:31 +0000 | [diff] [blame] | 1988 | |
Sebastian Andrzej Siewior | 8017c27 | 2016-08-12 19:49:43 +0200 | [diff] [blame] | 1989 | static int virtnet_cpu_dead(unsigned int cpu, struct hlist_node *node) |
| 1990 | { |
| 1991 | struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info, |
| 1992 | node_dead); |
| 1993 | virtnet_set_affinity(vi); |
| 1994 | return 0; |
| 1995 | } |
Jason Wang | 3ab098d | 2013-10-15 11:18:58 +0800 | [diff] [blame] | 1996 | |
Sebastian Andrzej Siewior | 8017c27 | 2016-08-12 19:49:43 +0200 | [diff] [blame] | 1997 | static int virtnet_cpu_down_prep(unsigned int cpu, struct hlist_node *node) |
| 1998 | { |
| 1999 | struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info, |
| 2000 | node); |
| 2001 | |
Peter Xu | 310974f | 2019-03-18 14:56:06 +0800 | [diff] [blame] | 2002 | virtnet_clean_affinity(vi); |
Sebastian Andrzej Siewior | 8017c27 | 2016-08-12 19:49:43 +0200 | [diff] [blame] | 2003 | return 0; |
| 2004 | } |
| 2005 | |
| 2006 | static enum cpuhp_state virtionet_online; |
| 2007 | |
| 2008 | static int virtnet_cpu_notif_add(struct virtnet_info *vi) |
| 2009 | { |
| 2010 | int ret; |
| 2011 | |
| 2012 | ret = cpuhp_state_add_instance_nocalls(virtionet_online, &vi->node); |
| 2013 | if (ret) |
| 2014 | return ret; |
| 2015 | ret = cpuhp_state_add_instance_nocalls(CPUHP_VIRT_NET_DEAD, |
| 2016 | &vi->node_dead); |
| 2017 | if (!ret) |
| 2018 | return ret; |
| 2019 | cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node); |
| 2020 | return ret; |
| 2021 | } |
| 2022 | |
| 2023 | static void virtnet_cpu_notif_remove(struct virtnet_info *vi) |
| 2024 | { |
| 2025 | cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node); |
| 2026 | cpuhp_state_remove_instance_nocalls(CPUHP_VIRT_NET_DEAD, |
| 2027 | &vi->node_dead); |
Herbert Xu | a9ea3fc | 2008-04-18 11:21:42 +0800 | [diff] [blame] | 2028 | } |
| 2029 | |
Rick Jones | 8f9f466 | 2011-10-19 08:10:59 +0000 | [diff] [blame] | 2030 | static void virtnet_get_ringparam(struct net_device *dev, |
| 2031 | struct ethtool_ringparam *ring) |
| 2032 | { |
| 2033 | struct virtnet_info *vi = netdev_priv(dev); |
| 2034 | |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2035 | ring->rx_max_pending = virtqueue_get_vring_size(vi->rq[0].vq); |
| 2036 | ring->tx_max_pending = virtqueue_get_vring_size(vi->sq[0].vq); |
Rick Jones | 8f9f466 | 2011-10-19 08:10:59 +0000 | [diff] [blame] | 2037 | ring->rx_pending = ring->rx_max_pending; |
| 2038 | ring->tx_pending = ring->tx_max_pending; |
Rick Jones | 8f9f466 | 2011-10-19 08:10:59 +0000 | [diff] [blame] | 2039 | } |
| 2040 | |
| 2042 | static void virtnet_get_drvinfo(struct net_device *dev, |
| 2043 | struct ethtool_drvinfo *info) |
| 2044 | { |
| 2045 | struct virtnet_info *vi = netdev_priv(dev); |
| 2046 | struct virtio_device *vdev = vi->vdev; |
| 2047 | |
| 2048 | strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); |
| 2049 | strlcpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version)); |
| 2050 | strlcpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info)); |
| 2052 | } |
| 2053 | |
Jason Wang | d73bcd2 | 2012-12-07 07:04:57 +0000 | [diff] [blame] | 2054 | /* TODO: Eliminate OOO packets during switching */ |
| 2055 | static int virtnet_set_channels(struct net_device *dev, |
| 2056 | struct ethtool_channels *channels) |
| 2057 | { |
| 2058 | struct virtnet_info *vi = netdev_priv(dev); |
| 2059 | u16 queue_pairs = channels->combined_count; |
| 2060 | int err; |
| 2061 | |
| 2062 | /* We don't support separate rx/tx channels. |
| 2063 | * We don't allow setting 'other' channels. |
| 2064 | */ |
| 2065 | if (channels->rx_count || channels->tx_count || channels->other_count) |
| 2066 | return -EINVAL; |
| 2067 | |
Amos Kong | c18e9cd | 2014-04-18 13:45:41 +0800 | [diff] [blame] | 2068 | if (queue_pairs > vi->max_queue_pairs || queue_pairs == 0) |
Jason Wang | d73bcd2 | 2012-12-07 07:04:57 +0000 | [diff] [blame] | 2069 | return -EINVAL; |
| 2070 | |
John Fastabend | f600b69 | 2016-12-15 12:13:24 -0800 | [diff] [blame] | 2071 | /* For now we don't support modifying channels while XDP is loaded. |
| 2072 | * Also, when XDP is loaded all RX queues have XDP programs, so we only |
| 2073 | * need to check a single RX queue. |
| 2074 | */ |
| 2075 | if (vi->rq[0].xdp_prog) |
| 2076 | return -EINVAL; |
| 2077 | |
Wanlong Gao | 47be247 | 2013-01-24 23:51:29 +0000 | [diff] [blame] | 2078 | get_online_cpus(); |
John Fastabend | 47315329 | 2017-02-02 19:14:32 -0800 | [diff] [blame] | 2079 | err = _virtnet_set_queues(vi, queue_pairs); |
Jason Wang | d73bcd2 | 2012-12-07 07:04:57 +0000 | [diff] [blame] | 2080 | if (!err) { |
| 2081 | netif_set_real_num_tx_queues(dev, queue_pairs); |
| 2082 | netif_set_real_num_rx_queues(dev, queue_pairs); |
| 2083 | |
Wanlong Gao | 8898c21 | 2013-01-24 23:51:30 +0000 | [diff] [blame] | 2084 | virtnet_set_affinity(vi); |
Jason Wang | d73bcd2 | 2012-12-07 07:04:57 +0000 | [diff] [blame] | 2085 | } |
Wanlong Gao | 47be247 | 2013-01-24 23:51:29 +0000 | [diff] [blame] | 2086 | put_online_cpus(); |
Jason Wang | d73bcd2 | 2012-12-07 07:04:57 +0000 | [diff] [blame] | 2087 | |
| 2088 | return err; |
| 2089 | } |
| 2090 | |
Toshiaki Makita | d7dfc5c | 2018-01-17 15:38:25 +0900 | [diff] [blame] | 2091 | static void virtnet_get_strings(struct net_device *dev, u32 stringset, u8 *data) |
| 2092 | { |
| 2093 | struct virtnet_info *vi = netdev_priv(dev); |
| 2094 | char *p = (char *)data; |
| 2095 | unsigned int i, j; |
| 2096 | |
| 2097 | switch (stringset) { |
| 2098 | case ETH_SS_STATS: |
| 2099 | for (i = 0; i < vi->curr_queue_pairs; i++) { |
| 2100 | for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++) { |
| 2101 | snprintf(p, ETH_GSTRING_LEN, "rx_queue_%u_%s", |
| 2102 | i, virtnet_rq_stats_desc[j].desc); |
| 2103 | p += ETH_GSTRING_LEN; |
| 2104 | } |
| 2105 | } |
| 2106 | |
| 2107 | for (i = 0; i < vi->curr_queue_pairs; i++) { |
| 2108 | for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++) { |
| 2109 | snprintf(p, ETH_GSTRING_LEN, "tx_queue_%u_%s", |
| 2110 | i, virtnet_sq_stats_desc[j].desc); |
| 2111 | p += ETH_GSTRING_LEN; |
| 2112 | } |
| 2113 | } |
| 2114 | break; |
| 2115 | } |
| 2116 | } |
| 2117 | |
| 2118 | static int virtnet_get_sset_count(struct net_device *dev, int sset) |
| 2119 | { |
| 2120 | struct virtnet_info *vi = netdev_priv(dev); |
| 2121 | |
| 2122 | switch (sset) { |
| 2123 | case ETH_SS_STATS: |
| 2124 | return vi->curr_queue_pairs * (VIRTNET_RQ_STATS_LEN + |
| 2125 | VIRTNET_SQ_STATS_LEN); |
| 2126 | default: |
| 2127 | return -EOPNOTSUPP; |
| 2128 | } |
| 2129 | } |
| 2130 | |
| 2131 | static void virtnet_get_ethtool_stats(struct net_device *dev, |
| 2132 | struct ethtool_stats *stats, u64 *data) |
| 2133 | { |
| 2134 | struct virtnet_info *vi = netdev_priv(dev); |
| 2135 | unsigned int idx = 0, start, i, j; |
| 2136 | const u8 *stats_base; |
| 2137 | size_t offset; |
| 2138 | |
| 2139 | for (i = 0; i < vi->curr_queue_pairs; i++) { |
| 2140 | struct receive_queue *rq = &vi->rq[i]; |
| 2141 | |
Jason Wang | d46eeea | 2018-07-31 17:43:39 +0800 | [diff] [blame] | 2142 | stats_base = (u8 *)&rq->stats; |
Toshiaki Makita | d7dfc5c | 2018-01-17 15:38:25 +0900 | [diff] [blame] | 2143 | do { |
| 2144 | start = u64_stats_fetch_begin_irq(&rq->stats.syncp); |
| 2145 | for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++) { |
| 2146 | offset = virtnet_rq_stats_desc[j].offset; |
| 2147 | data[idx + j] = *(u64 *)(stats_base + offset); |
| 2148 | } |
| 2149 | } while (u64_stats_fetch_retry_irq(&rq->stats.syncp, start)); |
| 2150 | idx += VIRTNET_RQ_STATS_LEN; |
| 2151 | } |
| 2152 | |
| 2153 | for (i = 0; i < vi->curr_queue_pairs; i++) { |
| 2154 | struct send_queue *sq = &vi->sq[i]; |
| 2155 | |
| 2156 | stats_base = (u8 *)&sq->stats; |
| 2157 | do { |
| 2158 | start = u64_stats_fetch_begin_irq(&sq->stats.syncp); |
| 2159 | for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++) { |
| 2160 | offset = virtnet_sq_stats_desc[j].offset; |
| 2161 | data[idx + j] = *(u64 *)(stats_base + offset); |
| 2162 | } |
| 2163 | } while (u64_stats_fetch_retry_irq(&sq->stats.syncp, start)); |
| 2164 | idx += VIRTNET_SQ_STATS_LEN; |
| 2165 | } |
| 2166 | } |
| 2167 | |
Jason Wang | d73bcd2 | 2012-12-07 07:04:57 +0000 | [diff] [blame] | 2168 | static void virtnet_get_channels(struct net_device *dev, |
| 2169 | struct ethtool_channels *channels) |
| 2170 | { |
| 2171 | struct virtnet_info *vi = netdev_priv(dev); |
| 2172 | |
| 2173 | channels->combined_count = vi->curr_queue_pairs; |
| 2174 | channels->max_combined = vi->max_queue_pairs; |
| 2175 | channels->max_other = 0; |
| 2176 | channels->rx_count = 0; |
| 2177 | channels->tx_count = 0; |
| 2178 | channels->other_count = 0; |
| 2179 | } |
| 2180 | |
Philippe Reynes | ebb6b4b | 2017-03-21 23:24:24 +0100 | [diff] [blame] | 2181 | static int virtnet_set_link_ksettings(struct net_device *dev, |
| 2182 | const struct ethtool_link_ksettings *cmd) |
Nikolay Aleksandrov | 16032be | 2016-02-03 04:04:37 +0100 | [diff] [blame] | 2183 | { |
| 2184 | struct virtnet_info *vi = netdev_priv(dev); |
Nikolay Aleksandrov | 16032be | 2016-02-03 04:04:37 +0100 | [diff] [blame] | 2185 | |
Cris Forno | 9aedc6e2 | 2020-02-28 14:12:05 -0600 | [diff] [blame] | 2186 | return ethtool_virtdev_set_link_ksettings(dev, cmd, |
| 2187 | &vi->speed, &vi->duplex); |
Nikolay Aleksandrov | 16032be | 2016-02-03 04:04:37 +0100 | [diff] [blame] | 2188 | } |
| 2189 | |
Philippe Reynes | ebb6b4b | 2017-03-21 23:24:24 +0100 | [diff] [blame] | 2190 | static int virtnet_get_link_ksettings(struct net_device *dev, |
| 2191 | struct ethtool_link_ksettings *cmd) |
Nikolay Aleksandrov | 16032be | 2016-02-03 04:04:37 +0100 | [diff] [blame] | 2192 | { |
| 2193 | struct virtnet_info *vi = netdev_priv(dev); |
| 2194 | |
Philippe Reynes | ebb6b4b | 2017-03-21 23:24:24 +0100 | [diff] [blame] | 2195 | cmd->base.speed = vi->speed; |
| 2196 | cmd->base.duplex = vi->duplex; |
| 2197 | cmd->base.port = PORT_OTHER; |
Nikolay Aleksandrov | 16032be | 2016-02-03 04:04:37 +0100 | [diff] [blame] | 2198 | |
| 2199 | return 0; |
| 2200 | } |
| 2201 | |
Jason Wang | 0c465be | 2018-10-09 10:06:26 +0800 | [diff] [blame] | 2202 | static int virtnet_set_coalesce(struct net_device *dev, |
| 2203 | struct ethtool_coalesce *ec) |
| 2204 | { |
Jason Wang | 0c465be | 2018-10-09 10:06:26 +0800 | [diff] [blame] | 2205 | struct virtnet_info *vi = netdev_priv(dev); |
| 2206 | int i, napi_weight; |
| 2207 | |
Jakub Kicinski | a51e520 | 2020-03-04 21:15:42 -0800 | [diff] [blame] | 2208 | if (ec->tx_max_coalesced_frames > 1 || |
| 2209 | ec->rx_max_coalesced_frames != 1) |
Jason Wang | 0c465be | 2018-10-09 10:06:26 +0800 | [diff] [blame] | 2210 | return -EINVAL; |
| 2211 | |
Jason Wang | 0c465be | 2018-10-09 10:06:26 +0800 | [diff] [blame] | 2212 | napi_weight = ec->tx_max_coalesced_frames ? NAPI_POLL_WEIGHT : 0; |
Jason Wang | 0c465be | 2018-10-09 10:06:26 +0800 | [diff] [blame] | 2213 | if (napi_weight ^ vi->sq[0].napi.weight) { |
| 2214 | if (dev->flags & IFF_UP) |
| 2215 | return -EBUSY; |
| 2216 | for (i = 0; i < vi->max_queue_pairs; i++) |
| 2217 | vi->sq[i].napi.weight = napi_weight; |
| 2218 | } |
| 2219 | |
| 2220 | return 0; |
| 2221 | } |
| 2222 | |
| 2223 | static int virtnet_get_coalesce(struct net_device *dev, |
| 2224 | struct ethtool_coalesce *ec) |
| 2225 | { |
| 2226 | struct ethtool_coalesce ec_default = { |
| 2227 | .cmd = ETHTOOL_GCOALESCE, |
| 2228 | .rx_max_coalesced_frames = 1, |
| 2229 | }; |
| 2230 | struct virtnet_info *vi = netdev_priv(dev); |
| 2231 | |
| 2232 | memcpy(ec, &ec_default, sizeof(ec_default)); |
| 2233 | |
| 2234 | if (vi->sq[0].napi.weight) |
| 2235 | ec->tx_max_coalesced_frames = 1; |
| 2236 | |
| 2237 | return 0; |
| 2238 | } |
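/* Usage note (illustrative): there is no real coalescing hardware here;
 * tx-frames is overloaded as an on/off switch for TX NAPI.  With the
 * interface down, something like "ethtool -C eth0 tx-frames 0" clears the
 * per-queue NAPI weight (disabling TX NAPI) and "tx-frames 1" restores
 * it, while rx-frames is fixed at 1.
 */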
| 2239 | |
Nikolay Aleksandrov | 16032be | 2016-02-03 04:04:37 +0100 | [diff] [blame] | 2240 | static void virtnet_init_settings(struct net_device *dev) |
| 2241 | { |
| 2242 | struct virtnet_info *vi = netdev_priv(dev); |
| 2243 | |
| 2244 | vi->speed = SPEED_UNKNOWN; |
| 2245 | vi->duplex = DUPLEX_UNKNOWN; |
| 2246 | } |
| 2247 | |
Jason Baron | faa9b39 | 2018-01-05 17:44:54 -0500 | [diff] [blame] | 2248 | static void virtnet_update_settings(struct virtnet_info *vi) |
| 2249 | { |
| 2250 | u32 speed; |
| 2251 | u8 duplex; |
| 2252 | |
| 2253 | if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_SPEED_DUPLEX)) |
| 2254 | return; |
| 2255 | |
| 2256 | speed = virtio_cread32(vi->vdev, offsetof(struct virtio_net_config, |
| 2257 | speed)); |
| 2258 | if (ethtool_validate_speed(speed)) |
| 2259 | vi->speed = speed; |
| 2260 | duplex = virtio_cread8(vi->vdev, offsetof(struct virtio_net_config, |
| 2261 | duplex)); |
| 2262 | if (ethtool_validate_duplex(duplex)) |
| 2263 | vi->duplex = duplex; |
| 2264 | } |
| 2265 | |
Stephen Hemminger | 0fc0b73 | 2009-09-02 01:03:33 -0700 | [diff] [blame] | 2266 | static const struct ethtool_ops virtnet_ethtool_ops = { |
Jakub Kicinski | a51e520 | 2020-03-04 21:15:42 -0800 | [diff] [blame] | 2267 | .supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES, |
Rick Jones | 6684604 | 2011-11-14 14:17:08 +0000 | [diff] [blame] | 2268 | .get_drvinfo = virtnet_get_drvinfo, |
Mark McLoughlin | 9f4d26d | 2009-01-19 17:09:49 -0800 | [diff] [blame] | 2269 | .get_link = ethtool_op_get_link, |
Rick Jones | 8f9f466 | 2011-10-19 08:10:59 +0000 | [diff] [blame] | 2270 | .get_ringparam = virtnet_get_ringparam, |
Toshiaki Makita | d7dfc5c | 2018-01-17 15:38:25 +0900 | [diff] [blame] | 2271 | .get_strings = virtnet_get_strings, |
| 2272 | .get_sset_count = virtnet_get_sset_count, |
| 2273 | .get_ethtool_stats = virtnet_get_ethtool_stats, |
Jason Wang | d73bcd2 | 2012-12-07 07:04:57 +0000 | [diff] [blame] | 2274 | .set_channels = virtnet_set_channels, |
| 2275 | .get_channels = virtnet_get_channels, |
Jacob Keller | 074c358 | 2014-06-25 02:37:13 +0000 | [diff] [blame] | 2276 | .get_ts_info = ethtool_op_get_ts_info, |
Philippe Reynes | ebb6b4b | 2017-03-21 23:24:24 +0100 | [diff] [blame] | 2277 | .get_link_ksettings = virtnet_get_link_ksettings, |
| 2278 | .set_link_ksettings = virtnet_set_link_ksettings, |
Jason Wang | 0c465be | 2018-10-09 10:06:26 +0800 | [diff] [blame] | 2279 | .set_coalesce = virtnet_set_coalesce, |
| 2280 | .get_coalesce = virtnet_get_coalesce, |
Herbert Xu | a9ea3fc | 2008-04-18 11:21:42 +0800 | [diff] [blame] | 2281 | }; |
| 2282 | |
John Fastabend | 9fe7bfc | 2017-02-02 19:16:01 -0800 | [diff] [blame] | 2283 | static void virtnet_freeze_down(struct virtio_device *vdev) |
| 2284 | { |
| 2285 | struct virtnet_info *vi = vdev->priv; |
| 2286 | int i; |
| 2287 | |
| 2288 | /* Make sure no work handler is accessing the device */ |
| 2289 | flush_work(&vi->config_work); |
| 2290 | |
Ake Koomsin | 05c998b | 2018-10-17 19:44:12 +0900 | [diff] [blame] | 2291 | netif_tx_lock_bh(vi->dev); |
John Fastabend | 9fe7bfc | 2017-02-02 19:16:01 -0800 | [diff] [blame] | 2292 | netif_device_detach(vi->dev); |
Ake Koomsin | 05c998b | 2018-10-17 19:44:12 +0900 | [diff] [blame] | 2293 | netif_tx_unlock_bh(vi->dev); |
John Fastabend | 9fe7bfc | 2017-02-02 19:16:01 -0800 | [diff] [blame] | 2294 | cancel_delayed_work_sync(&vi->refill); |
| 2295 | |
| 2296 | if (netif_running(vi->dev)) { |
Willem de Bruijn | b92f1e6 | 2017-04-24 13:49:27 -0400 | [diff] [blame] | 2297 | for (i = 0; i < vi->max_queue_pairs; i++) { |
John Fastabend | 9fe7bfc | 2017-02-02 19:16:01 -0800 | [diff] [blame] | 2298 | napi_disable(&vi->rq[i].napi); |
Willem de Bruijn | 78a57b4 | 2017-04-25 15:59:17 -0400 | [diff] [blame] | 2299 | virtnet_napi_tx_disable(&vi->sq[i].napi); |
Willem de Bruijn | b92f1e6 | 2017-04-24 13:49:27 -0400 | [diff] [blame] | 2300 | } |
John Fastabend | 9fe7bfc | 2017-02-02 19:16:01 -0800 | [diff] [blame] | 2301 | } |
| 2302 | } |
| 2303 | |
| 2304 | static int init_vqs(struct virtnet_info *vi); |
| 2305 | |
| 2306 | static int virtnet_restore_up(struct virtio_device *vdev) |
| 2307 | { |
| 2308 | struct virtnet_info *vi = vdev->priv; |
| 2309 | int err, i; |
| 2310 | |
| 2311 | err = init_vqs(vi); |
| 2312 | if (err) |
| 2313 | return err; |
| 2314 | |
| 2315 | virtio_device_ready(vdev); |
| 2316 | |
| 2317 | if (netif_running(vi->dev)) { |
| 2318 | for (i = 0; i < vi->curr_queue_pairs; i++) |
| 2319 | if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL)) |
| 2320 | schedule_delayed_work(&vi->refill, 0); |
| 2321 | |
Willem de Bruijn | b92f1e6 | 2017-04-24 13:49:27 -0400 | [diff] [blame] | 2322 | for (i = 0; i < vi->max_queue_pairs; i++) { |
Willem de Bruijn | e4e8452 | 2017-04-24 13:49:26 -0400 | [diff] [blame] | 2323 | virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); |
Willem de Bruijn | b92f1e6 | 2017-04-24 13:49:27 -0400 | [diff] [blame] | 2324 | virtnet_napi_tx_enable(vi, vi->sq[i].vq, |
| 2325 | &vi->sq[i].napi); |
| 2326 | } |
John Fastabend | 9fe7bfc | 2017-02-02 19:16:01 -0800 | [diff] [blame] | 2327 | } |
| 2328 | |
Ake Koomsin | 05c998b | 2018-10-17 19:44:12 +0900 | [diff] [blame] | 2329 | netif_tx_lock_bh(vi->dev); |
John Fastabend | 9fe7bfc | 2017-02-02 19:16:01 -0800 | [diff] [blame] | 2330 | netif_device_attach(vi->dev); |
Ake Koomsin | 05c998b | 2018-10-17 19:44:12 +0900 | [diff] [blame] | 2331 | netif_tx_unlock_bh(vi->dev); |
John Fastabend | 9fe7bfc | 2017-02-02 19:16:01 -0800 | [diff] [blame] | 2332 | return err; |
| 2333 | } |
| 2334 | |
Jason Wang | 3f93522 | 2017-07-19 16:54:49 +0800 | [diff] [blame] | 2335 | static int virtnet_set_guest_offloads(struct virtnet_info *vi, u64 offloads) |
| 2336 | { |
| 2337 | struct scatterlist sg; |
Michael S. Tsirkin | 12e5716 | 2018-04-19 08:30:48 +0300 | [diff] [blame] | 2338 | vi->ctrl->offloads = cpu_to_virtio64(vi->vdev, offloads); |
Jason Wang | 3f93522 | 2017-07-19 16:54:49 +0800 | [diff] [blame] | 2339 | |
Michael S. Tsirkin | 12e5716 | 2018-04-19 08:30:48 +0300 | [diff] [blame] | 2340 | sg_init_one(&sg, &vi->ctrl->offloads, sizeof(vi->ctrl->offloads)); |
Jason Wang | 3f93522 | 2017-07-19 16:54:49 +0800 | [diff] [blame] | 2341 | |
| 2342 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_GUEST_OFFLOADS, |
| 2343 | VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET, &sg)) { |
Yuval Shaia | 7934b48 | 2019-04-03 12:10:13 +0300 | [diff] [blame] | 2344 | dev_warn(&vi->dev->dev, "Failed to set guest offloads.\n"); |
Jason Wang | 3f93522 | 2017-07-19 16:54:49 +0800 | [diff] [blame] | 2345 | return -EINVAL; |
| 2346 | } |
| 2347 | |
| 2348 | return 0; |
| 2349 | } |
| 2350 | |
| 2351 | static int virtnet_clear_guest_offloads(struct virtnet_info *vi) |
| 2352 | { |
| 2353 | u64 offloads = 0; |
| 2354 | |
| 2355 | if (!vi->guest_offloads) |
| 2356 | return 0; |
| 2357 | |
Jason Wang | 3f93522 | 2017-07-19 16:54:49 +0800 | [diff] [blame] | 2358 | return virtnet_set_guest_offloads(vi, offloads); |
| 2359 | } |
| 2360 | |
| 2361 | static int virtnet_restore_guest_offloads(struct virtnet_info *vi) |
| 2362 | { |
| 2363 | u64 offloads = vi->guest_offloads; |
| 2364 | |
| 2365 | if (!vi->guest_offloads) |
| 2366 | return 0; |
Jason Wang | 3f93522 | 2017-07-19 16:54:49 +0800 | [diff] [blame] | 2367 | |
| 2368 | return virtnet_set_guest_offloads(vi, offloads); |
| 2369 | } |
| 2370 | |
Jakub Kicinski | 9861ce0 | 2017-04-30 21:46:48 -0700 | [diff] [blame] | 2371 | static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog, |
| 2372 | struct netlink_ext_ack *extack) |
John Fastabend | f600b69 | 2016-12-15 12:13:24 -0800 | [diff] [blame] | 2373 | { |
| 2374 | unsigned long max_sz = PAGE_SIZE - sizeof(struct padded_vnet_hdr); |
| 2375 | struct virtnet_info *vi = netdev_priv(dev); |
| 2376 | struct bpf_prog *old_prog; |
Jason Wang | 017b29c | 2017-02-20 11:50:20 +0800 | [diff] [blame] | 2377 | u16 xdp_qp = 0, curr_qp; |
John Fastabend | 672aafd | 2016-12-15 12:13:49 -0800 | [diff] [blame] | 2378 | int i, err; |
John Fastabend | f600b69 | 2016-12-15 12:13:24 -0800 | [diff] [blame] | 2379 | |
Jason Wang | 3f93522 | 2017-07-19 16:54:49 +0800 | [diff] [blame] | 2380 | if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) |
| 2381 | && (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) || |
| 2382 | virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) || |
| 2383 | virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) || |
Jason Wang | 18ba58e | 2018-11-22 14:36:31 +0800 | [diff] [blame] | 2384 | virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) || |
| 2385 | virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM))) { |
| 2386 | NL_SET_ERR_MSG_MOD(extack, "Can't set XDP while host is implementing LRO/CSUM; disable LRO/CSUM first"); |
John Fastabend | f600b69 | 2016-12-15 12:13:24 -0800 | [diff] [blame] | 2387 | return -EOPNOTSUPP; |
| 2388 | } |
| 2389 | |
| 2390 | if (vi->mergeable_rx_bufs && !vi->any_header_sg) { |
Daniel Borkmann | 4d463c4 | 2017-05-03 00:39:17 +0200 | [diff] [blame] | 2391 | NL_SET_ERR_MSG_MOD(extack, "XDP expects header/data in single page, any_header_sg required"); |
John Fastabend | f600b69 | 2016-12-15 12:13:24 -0800 | [diff] [blame] | 2392 | return -EINVAL; |
| 2393 | } |
| 2394 | |
| 2395 | if (dev->mtu > max_sz) { |
Daniel Borkmann | 4d463c4 | 2017-05-03 00:39:17 +0200 | [diff] [blame] | 2396 | NL_SET_ERR_MSG_MOD(extack, "MTU too large to enable XDP"); |
John Fastabend | f600b69 | 2016-12-15 12:13:24 -0800 | [diff] [blame] | 2397 | netdev_warn(dev, "XDP requires MTU less than %lu\n", max_sz); |
| 2398 | return -EINVAL; |
| 2399 | } |
| 2400 | |
John Fastabend | 672aafd | 2016-12-15 12:13:49 -0800 | [diff] [blame] | 2401 | curr_qp = vi->curr_queue_pairs - vi->xdp_queue_pairs; |
| 2402 | if (prog) |
| 2403 | xdp_qp = nr_cpu_ids; |
| 2404 | |
| 2405 | /* XDP requires extra queues for XDP_TX */ |
| 2406 | if (curr_qp + xdp_qp > vi->max_queue_pairs) { |
Daniel Borkmann | 4d463c4 | 2017-05-03 00:39:17 +0200 | [diff] [blame] | 2407 | NL_SET_ERR_MSG_MOD(extack, "Too few free TX rings available"); |
John Fastabend | 672aafd | 2016-12-15 12:13:49 -0800 | [diff] [blame] | 2408 | netdev_warn(dev, "request %i queues but max is %i\n", |
| 2409 | curr_qp + xdp_qp, vi->max_queue_pairs); |
| 2410 | return -ENOMEM; |
| 2411 | } |
| 2412 | |
Toshiaki Makita | 03aa6d3 | 2019-01-29 09:45:57 +0900 | [diff] [blame] | 2413 | old_prog = rtnl_dereference(vi->rq[0].xdp_prog); |
| 2414 | if (!prog && !old_prog) |
| 2415 | return 0; |
| 2416 | |
Andrii Nakryiko | 85192db | 2019-11-17 09:28:03 -0800 | [diff] [blame] | 2417 | if (prog) |
| 2418 | bpf_prog_add(prog, vi->max_queue_pairs - 1); |
John Fastabend | 2de2f7f | 2017-02-02 19:16:29 -0800 | [diff] [blame] | 2419 | |
Jason Wang | 4941d47 | 2017-07-19 16:54:48 +0800 | [diff] [blame] | 2420 | /* Make sure NAPI is not using any XDP TX queues for RX. */ |
Toshiaki Makita | 534da5e | 2019-01-29 09:45:54 +0900 | [diff] [blame] | 2421 | if (netif_running(dev)) { |
| 2422 | for (i = 0; i < vi->max_queue_pairs; i++) { |
Jason Wang | 4e09ff5 | 2018-02-28 18:20:04 +0800 | [diff] [blame] | 2423 | napi_disable(&vi->rq[i].napi); |
Toshiaki Makita | 534da5e | 2019-01-29 09:45:54 +0900 | [diff] [blame] | 2424 | virtnet_napi_tx_disable(&vi->sq[i].napi); |
| 2425 | } |
| 2426 | } |
John Fastabend | f600b69 | 2016-12-15 12:13:24 -0800 | [diff] [blame] | 2427 | |
Toshiaki Makita | 03aa6d3 | 2019-01-29 09:45:57 +0900 | [diff] [blame] | 2428 | if (!prog) { |
| 2429 | for (i = 0; i < vi->max_queue_pairs; i++) { |
| 2430 | rcu_assign_pointer(vi->rq[i].xdp_prog, prog); |
| 2431 | if (i == 0) |
| 2432 | virtnet_restore_guest_offloads(vi); |
| 2433 | } |
| 2434 | synchronize_net(); |
| 2435 | } |
| 2436 | |
Jason Wang | 4941d47 | 2017-07-19 16:54:48 +0800 | [diff] [blame] | 2437 | err = _virtnet_set_queues(vi, curr_qp + xdp_qp); |
| 2438 | if (err) |
| 2439 | goto err; |
Toshiaki Makita | 188313c | 2019-01-29 09:45:55 +0900 | [diff] [blame] | 2440 | netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp); |
Jason Wang | 4941d47 | 2017-07-19 16:54:48 +0800 | [diff] [blame] | 2441 | vi->xdp_queue_pairs = xdp_qp; |
John Fastabend | 672aafd | 2016-12-15 12:13:49 -0800 | [diff] [blame] | 2442 | |
Toshiaki Makita | 03aa6d3 | 2019-01-29 09:45:57 +0900 | [diff] [blame] | 2443 | if (prog) { |
| 2444 | for (i = 0; i < vi->max_queue_pairs; i++) { |
| 2445 | rcu_assign_pointer(vi->rq[i].xdp_prog, prog); |
| 2446 | if (i == 0 && !old_prog) |
Jason Wang | 3f93522 | 2017-07-19 16:54:49 +0800 | [diff] [blame] | 2447 | virtnet_clear_guest_offloads(vi); |
Jason Wang | 3f93522 | 2017-07-19 16:54:49 +0800 | [diff] [blame] | 2448 | } |
Toshiaki Makita | 03aa6d3 | 2019-01-29 09:45:57 +0900 | [diff] [blame] | 2449 | } |
| 2450 | |
| 2451 | for (i = 0; i < vi->max_queue_pairs; i++) { |
John Fastabend | f600b69 | 2016-12-15 12:13:24 -0800 | [diff] [blame] | 2452 | if (old_prog) |
| 2453 | bpf_prog_put(old_prog); |
Toshiaki Makita | 534da5e | 2019-01-29 09:45:54 +0900 | [diff] [blame] | 2454 | if (netif_running(dev)) { |
Jason Wang | 4e09ff5 | 2018-02-28 18:20:04 +0800 | [diff] [blame] | 2455 | virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); |
Toshiaki Makita | 534da5e | 2019-01-29 09:45:54 +0900 | [diff] [blame] | 2456 | virtnet_napi_tx_enable(vi, vi->sq[i].vq, |
| 2457 | &vi->sq[i].napi); |
| 2458 | } |
John Fastabend | f600b69 | 2016-12-15 12:13:24 -0800 | [diff] [blame] | 2459 | } |
| 2460 | |
| 2461 | return 0; |
John Fastabend | 2de2f7f | 2017-02-02 19:16:29 -0800 | [diff] [blame] | 2462 | |
Jason Wang | 4941d47 | 2017-07-19 16:54:48 +0800 | [diff] [blame] | 2463 | err: |
Toshiaki Makita | 03aa6d3 | 2019-01-29 09:45:57 +0900 | [diff] [blame] | 2464 | if (!prog) { |
| 2465 | virtnet_clear_guest_offloads(vi); |
| 2466 | for (i = 0; i < vi->max_queue_pairs; i++) |
| 2467 | rcu_assign_pointer(vi->rq[i].xdp_prog, old_prog); |
| 2468 | } |
| 2469 | |
Toshiaki Makita | 8be4d9a | 2019-01-29 09:45:53 +0900 | [diff] [blame] | 2470 | if (netif_running(dev)) { |
Toshiaki Makita | 534da5e | 2019-01-29 09:45:54 +0900 | [diff] [blame] | 2471 | for (i = 0; i < vi->max_queue_pairs; i++) { |
Toshiaki Makita | 8be4d9a | 2019-01-29 09:45:53 +0900 | [diff] [blame] | 2472 | virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); |
Toshiaki Makita | 534da5e | 2019-01-29 09:45:54 +0900 | [diff] [blame] | 2473 | virtnet_napi_tx_enable(vi, vi->sq[i].vq, |
| 2474 | &vi->sq[i].napi); |
| 2475 | } |
Toshiaki Makita | 8be4d9a | 2019-01-29 09:45:53 +0900 | [diff] [blame] | 2476 | } |
John Fastabend | 2de2f7f | 2017-02-02 19:16:29 -0800 | [diff] [blame] | 2477 | if (prog) |
| 2478 | bpf_prog_sub(prog, vi->max_queue_pairs - 1); |
| 2479 | return err; |
John Fastabend | f600b69 | 2016-12-15 12:13:24 -0800 | [diff] [blame] | 2480 | } |
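/* Queue accounting example (illustrative numbers): attaching a program
 * reserves one extra TX queue per possible CPU so XDP_TX never contends
 * with the stack.  On a guest with nr_cpu_ids == 4 and 4 queue pairs in
 * use, curr_qp + xdp_qp == 8, so the device must expose at least 8 queue
 * pairs or the setup fails with -ENOMEM.
 */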
| 2481 | |
Martin KaFai Lau | 5b0e662 | 2017-06-15 17:29:12 -0700 | [diff] [blame] | 2482 | static u32 virtnet_xdp_query(struct net_device *dev) |
John Fastabend | f600b69 | 2016-12-15 12:13:24 -0800 | [diff] [blame] | 2483 | { |
| 2484 | struct virtnet_info *vi = netdev_priv(dev); |
Martin KaFai Lau | 5b0e662 | 2017-06-15 17:29:12 -0700 | [diff] [blame] | 2485 | const struct bpf_prog *xdp_prog; |
John Fastabend | f600b69 | 2016-12-15 12:13:24 -0800 | [diff] [blame] | 2486 | int i; |
| 2487 | |
| 2488 | for (i = 0; i < vi->max_queue_pairs; i++) { |
Martin KaFai Lau | 5b0e662 | 2017-06-15 17:29:12 -0700 | [diff] [blame] | 2489 | xdp_prog = rtnl_dereference(vi->rq[i].xdp_prog); |
| 2490 | if (xdp_prog) |
| 2491 | return xdp_prog->aux->id; |
John Fastabend | f600b69 | 2016-12-15 12:13:24 -0800 | [diff] [blame] | 2492 | } |
Martin KaFai Lau | 5b0e662 | 2017-06-15 17:29:12 -0700 | [diff] [blame] | 2493 | return 0; |
John Fastabend | f600b69 | 2016-12-15 12:13:24 -0800 | [diff] [blame] | 2494 | } |
| 2495 | |
Jakub Kicinski | f4e6352 | 2017-11-03 13:56:16 -0700 | [diff] [blame] | 2496 | static int virtnet_xdp(struct net_device *dev, struct netdev_bpf *xdp) |
John Fastabend | f600b69 | 2016-12-15 12:13:24 -0800 | [diff] [blame] | 2497 | { |
| 2498 | switch (xdp->command) { |
| 2499 | case XDP_SETUP_PROG: |
Jakub Kicinski | 9861ce0 | 2017-04-30 21:46:48 -0700 | [diff] [blame] | 2500 | return virtnet_xdp_set(dev, xdp->prog, xdp->extack); |
John Fastabend | f600b69 | 2016-12-15 12:13:24 -0800 | [diff] [blame] | 2501 | case XDP_QUERY_PROG: |
Martin KaFai Lau | 5b0e662 | 2017-06-15 17:29:12 -0700 | [diff] [blame] | 2502 | xdp->prog_id = virtnet_xdp_query(dev); |
John Fastabend | f600b69 | 2016-12-15 12:13:24 -0800 | [diff] [blame] | 2503 | return 0; |
| 2504 | default: |
| 2505 | return -EINVAL; |
| 2506 | } |
| 2507 | } |
| 2508 | |
Sridhar Samudrala | ba5e442 | 2018-05-24 09:55:17 -0700 | [diff] [blame] | 2509 | static int virtnet_get_phys_port_name(struct net_device *dev, char *buf, |
| 2510 | size_t len) |
| 2511 | { |
| 2512 | struct virtnet_info *vi = netdev_priv(dev); |
| 2513 | int ret; |
| 2514 | |
| 2515 | if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY)) |
| 2516 | return -EOPNOTSUPP; |
| 2517 | |
| 2518 | ret = snprintf(buf, len, "sby"); |
| 2519 | if (ret >= len) |
| 2520 | return -EOPNOTSUPP; |
| 2521 | |
| 2522 | return 0; |
| 2523 | } |
| 2524 | |
Willem de Bruijn | a02e896 | 2018-12-20 17:14:54 -0500 | [diff] [blame] | 2525 | static int virtnet_set_features(struct net_device *dev, |
| 2526 | netdev_features_t features) |
| 2527 | { |
| 2528 | struct virtnet_info *vi = netdev_priv(dev); |
| 2529 | u64 offloads; |
| 2530 | int err; |
| 2531 | |
| 2532 | if ((dev->features ^ features) & NETIF_F_LRO) { |
| 2533 | if (vi->xdp_queue_pairs) |
| 2534 | return -EBUSY; |
| 2535 | |
| 2536 | if (features & NETIF_F_LRO) |
| 2537 | offloads = vi->guest_offloads_capable; |
| 2538 | else |
| 2539 | offloads = 0; |
| 2540 | |
| 2541 | err = virtnet_set_guest_offloads(vi, offloads); |
| 2542 | if (err) |
| 2543 | return err; |
| 2544 | vi->guest_offloads = offloads; |
| 2545 | } |
| 2546 | |
| 2547 | return 0; |
| 2548 | } |
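/* Usage note (illustrative): "ethtool -K eth0 lro off" ends up here and
 * is translated into a VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET command --
 * off pushes an empty offload set, on restores everything the device
 * advertised at probe time (guest_offloads_capable).  The toggle is
 * refused with -EBUSY while an XDP program is attached.
 */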
| 2549 | |
Stephen Hemminger | 76288b4 | 2009-01-06 10:44:22 -0800 | [diff] [blame] | 2550 | static const struct net_device_ops virtnet_netdev = { |
| 2551 | .ndo_open = virtnet_open, |
| 2552 | .ndo_stop = virtnet_close, |
| 2553 | .ndo_start_xmit = start_xmit, |
| 2554 | .ndo_validate_addr = eth_validate_addr, |
Alex Williamson | 9c46f6d | 2009-02-04 16:36:34 -0800 | [diff] [blame] | 2555 | .ndo_set_mac_address = virtnet_set_mac_address, |
Alex Williamson | 2af7698 | 2009-02-04 09:02:40 +0000 | [diff] [blame] | 2556 | .ndo_set_rx_mode = virtnet_set_rx_mode, |
stephen hemminger | 3fa2a1d | 2011-06-15 06:36:29 +0000 | [diff] [blame] | 2557 | .ndo_get_stats64 = virtnet_stats, |
Alex Williamson | 1824a98 | 2009-05-01 17:31:10 +0000 | [diff] [blame] | 2558 | .ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid, |
| 2559 | .ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid, |
Jakub Kicinski | f4e6352 | 2017-11-03 13:56:16 -0700 | [diff] [blame] | 2560 | .ndo_bpf = virtnet_xdp, |
Jason Wang | 186b3c9 | 2017-09-19 17:42:43 +0800 | [diff] [blame] | 2561 | .ndo_xdp_xmit = virtnet_xdp_xmit, |
Vlad Yasevich | 2836b4f | 2017-05-23 13:38:43 -0400 | [diff] [blame] | 2562 | .ndo_features_check = passthru_features_check, |
Sridhar Samudrala | ba5e442 | 2018-05-24 09:55:17 -0700 | [diff] [blame] | 2563 | .ndo_get_phys_port_name = virtnet_get_phys_port_name, |
Willem de Bruijn | a02e896 | 2018-12-20 17:14:54 -0500 | [diff] [blame] | 2564 | .ndo_set_features = virtnet_set_features, |
Stephen Hemminger | 76288b4 | 2009-01-06 10:44:22 -0800 | [diff] [blame] | 2565 | }; |
| 2566 | |
Jason Wang | 586d17c | 2012-04-11 20:43:52 +0000 | [diff] [blame] | 2567 | static void virtnet_config_changed_work(struct work_struct *work) |
Mark McLoughlin | 9f4d26d | 2009-01-19 17:09:49 -0800 | [diff] [blame] | 2568 | { |
Jason Wang | 586d17c | 2012-04-11 20:43:52 +0000 | [diff] [blame] | 2569 | struct virtnet_info *vi = |
| 2570 | container_of(work, struct virtnet_info, config_work); |
Mark McLoughlin | 9f4d26d | 2009-01-19 17:09:49 -0800 | [diff] [blame] | 2571 | u16 v; |
| 2572 | |
Rusty Russell | 855e0c5 | 2013-10-14 18:11:51 +1030 | [diff] [blame] | 2573 | if (virtio_cread_feature(vi->vdev, VIRTIO_NET_F_STATUS, |
| 2574 | struct virtio_net_config, status, &v) < 0) |
Michael S. Tsirkin | 507613b | 2014-10-15 10:22:30 +1030 | [diff] [blame] | 2575 | return; |
Jason Wang | 586d17c | 2012-04-11 20:43:52 +0000 | [diff] [blame] | 2576 | |
| 2577 | if (v & VIRTIO_NET_S_ANNOUNCE) { |
Amerigo Wang | ee89bab | 2012-08-09 22:14:56 +0000 | [diff] [blame] | 2578 | netdev_notify_peers(vi->dev); |
Jason Wang | 586d17c | 2012-04-11 20:43:52 +0000 | [diff] [blame] | 2579 | virtnet_ack_link_announce(vi); |
| 2580 | } |
Mark McLoughlin | 9f4d26d | 2009-01-19 17:09:49 -0800 | [diff] [blame] | 2581 | |
| 2582 | /* Ignore unknown (future) status bits */ |
| 2583 | v &= VIRTIO_NET_S_LINK_UP; |
| 2584 | |
| 2585 | if (vi->status == v) |
Michael S. Tsirkin | 507613b | 2014-10-15 10:22:30 +1030 | [diff] [blame] | 2586 | return; |
Mark McLoughlin | 9f4d26d | 2009-01-19 17:09:49 -0800 | [diff] [blame] | 2587 | |
| 2588 | vi->status = v; |
| 2589 | |
| 2590 | if (vi->status & VIRTIO_NET_S_LINK_UP) { |
Jason Baron | faa9b39 | 2018-01-05 17:44:54 -0500 | [diff] [blame] | 2591 | virtnet_update_settings(vi); |
Mark McLoughlin | 9f4d26d | 2009-01-19 17:09:49 -0800 | [diff] [blame] | 2592 | netif_carrier_on(vi->dev); |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2593 | netif_tx_wake_all_queues(vi->dev); |
Mark McLoughlin | 9f4d26d | 2009-01-19 17:09:49 -0800 | [diff] [blame] | 2594 | } else { |
| 2595 | netif_carrier_off(vi->dev); |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2596 | netif_tx_stop_all_queues(vi->dev); |
Mark McLoughlin | 9f4d26d | 2009-01-19 17:09:49 -0800 | [diff] [blame] | 2597 | } |
| 2598 | } |
| 2599 | |
| 2600 | static void virtnet_config_changed(struct virtio_device *vdev) |
| 2601 | { |
| 2602 | struct virtnet_info *vi = vdev->priv; |
| 2603 | |
Tejun Heo | 3b07e9c | 2012-08-20 14:51:24 -0700 | [diff] [blame] | 2604 | schedule_work(&vi->config_work); |
Mark McLoughlin | 9f4d26d | 2009-01-19 17:09:49 -0800 | [diff] [blame] | 2605 | } |
| 2606 | |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2607 | static void virtnet_free_queues(struct virtnet_info *vi) |
| 2608 | { |
Andrey Vagin | d4fb84e | 2013-12-05 18:36:21 +0400 | [diff] [blame] | 2609 | int i; |
| 2610 | |
Jason Wang | ab3971b | 2015-03-12 13:57:44 +0800 | [diff] [blame] | 2611 | for (i = 0; i < vi->max_queue_pairs; i++) { |
| 2612 | napi_hash_del(&vi->rq[i].napi); |
Andrey Vagin | d4fb84e | 2013-12-05 18:36:21 +0400 | [diff] [blame] | 2613 | netif_napi_del(&vi->rq[i].napi); |
Willem de Bruijn | b92f1e6 | 2017-04-24 13:49:27 -0400 | [diff] [blame] | 2614 | netif_napi_del(&vi->sq[i].napi); |
Jason Wang | ab3971b | 2015-03-12 13:57:44 +0800 | [diff] [blame] | 2615 | } |
Andrey Vagin | d4fb84e | 2013-12-05 18:36:21 +0400 | [diff] [blame] | 2616 | |
Eric Dumazet | 963abe5 | 2016-11-15 22:24:12 -0800 | [diff] [blame] | 2617 | /* We called napi_hash_del() before netif_napi_del(), |
| 2618 | * so we need to respect an RCU grace period before freeing vi->rq. |
| 2619 | */ |
| 2620 | synchronize_net(); |
| 2621 | |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2622 | kfree(vi->rq); |
| 2623 | kfree(vi->sq); |
Michael S. Tsirkin | 12e5716 | 2018-04-19 08:30:48 +0300 | [diff] [blame] | 2624 | kfree(vi->ctrl); |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2625 | } |
| 2626 | |
John Fastabend | 47315329 | 2017-02-02 19:14:32 -0800 | [diff] [blame] | 2627 | static void _free_receive_bufs(struct virtnet_info *vi) |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2628 | { |
John Fastabend | f600b69 | 2016-12-15 12:13:24 -0800 | [diff] [blame] | 2629 | struct bpf_prog *old_prog; |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2630 | int i; |
| 2631 | |
| 2632 | for (i = 0; i < vi->max_queue_pairs; i++) { |
| 2633 | while (vi->rq[i].pages) |
| 2634 | __free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0); |
John Fastabend | f600b69 | 2016-12-15 12:13:24 -0800 | [diff] [blame] | 2635 | |
| 2636 | old_prog = rtnl_dereference(vi->rq[i].xdp_prog); |
| 2637 | RCU_INIT_POINTER(vi->rq[i].xdp_prog, NULL); |
| 2638 | if (old_prog) |
| 2639 | bpf_prog_put(old_prog); |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2640 | } |
John Fastabend | 47315329 | 2017-02-02 19:14:32 -0800 | [diff] [blame] | 2641 | } |
| 2642 | |
| 2643 | static void free_receive_bufs(struct virtnet_info *vi) |
| 2644 | { |
| 2645 | rtnl_lock(); |
| 2646 | _free_receive_bufs(vi); |
John Fastabend | f600b69 | 2016-12-15 12:13:24 -0800 | [diff] [blame] | 2647 | rtnl_unlock(); |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2648 | } |
| 2649 | |
Michael Dalton | fb51879 | 2014-01-16 22:23:26 -0800 | [diff] [blame] | 2650 | static void free_receive_page_frags(struct virtnet_info *vi) |
| 2651 | { |
| 2652 | int i; |
| | |
| 2653 | for (i = 0; i < vi->max_queue_pairs; i++) |
| 2654 | if (vi->rq[i].alloc_frag.page) |
| 2655 | put_page(vi->rq[i].alloc_frag.page); |
| 2656 | } |
| 2657 | |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2658 | static void free_unused_bufs(struct virtnet_info *vi) |
| 2659 | { |
| 2660 | void *buf; |
| 2661 | int i; |
| 2662 | |
| 2663 | for (i = 0; i < vi->max_queue_pairs; i++) { |
| 2664 | struct virtqueue *vq = vi->sq[i].vq; |
John Fastabend | 56434a0 | 2016-12-15 12:14:13 -0800 | [diff] [blame] | 2665 | while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) { |
Toshiaki Makita | 5050471 | 2019-01-29 09:45:59 +0900 | [diff] [blame] | 2666 | if (!is_xdp_frame(buf)) |
John Fastabend | 56434a0 | 2016-12-15 12:14:13 -0800 | [diff] [blame] | 2667 | dev_kfree_skb(buf); |
| 2668 | else |
Toshiaki Makita | 5050471 | 2019-01-29 09:45:59 +0900 | [diff] [blame] | 2669 | xdp_return_frame(ptr_to_xdp(buf)); |
John Fastabend | 56434a0 | 2016-12-15 12:14:13 -0800 | [diff] [blame] | 2670 | } |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2671 | } |
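| | /* Editor's note: is_xdp_frame()/ptr_to_xdp() above rely on a pointer- |
| |  * tagging scheme defined earlier in this file: the low bit of the token |
| |  * queued on the send virtqueue marks an XDP frame (both skbs and |
| |  * xdp_frames are at least word-aligned, so that bit is otherwise free), |
| |  * and ptr_to_xdp() masks the bit off before returning the pointer. |
| |  */ |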
| 2672 | |
| 2673 | for (i = 0; i < vi->max_queue_pairs; i++) { |
| 2674 | struct virtqueue *vq = vi->rq[i].vq; |
| 2675 | |
| 2676 | while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) { |
Michael Dalton | ab7db91 | 2014-01-16 22:23:27 -0800 | [diff] [blame] | 2677 | if (vi->mergeable_rx_bufs) { |
Michael S. Tsirkin | 680557c | 2017-03-06 21:29:47 +0200 | [diff] [blame] | 2678 | put_page(virt_to_head_page(buf)); |
Michael Dalton | ab7db91 | 2014-01-16 22:23:27 -0800 | [diff] [blame] | 2679 | } else if (vi->big_packets) { |
Andrey Vagin | fa9fac1 | 2013-12-05 18:36:20 +0400 | [diff] [blame] | 2680 | give_pages(&vi->rq[i], buf); |
Michael Dalton | ab7db91 | 2014-01-16 22:23:27 -0800 | [diff] [blame] | 2681 | } else { |
Jason Wang | f6b1020 | 2017-02-21 16:46:28 +0800 | [diff] [blame] | 2682 | put_page(virt_to_head_page(buf)); |
Michael Dalton | ab7db91 | 2014-01-16 22:23:27 -0800 | [diff] [blame] | 2683 | } |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2684 | } |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2685 | } |
| 2686 | } |
| 2687 | |
Jason Wang | e9d7417 | 2012-12-07 07:04:55 +0000 | [diff] [blame] | 2688 | static void virtnet_del_vqs(struct virtnet_info *vi) |
| 2689 | { |
| 2690 | struct virtio_device *vdev = vi->vdev; |
| 2691 | |
Peter Xu | 310974f | 2019-03-18 14:56:06 +0800 | [diff] [blame] | 2692 | virtnet_clean_affinity(vi); |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2693 | |
Jason Wang | e9d7417 | 2012-12-07 07:04:55 +0000 | [diff] [blame] | 2694 | vdev->config->del_vqs(vdev); |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2695 | |
| 2696 | virtnet_free_queues(vi); |
| 2697 | } |
| 2698 | |
Michael S. Tsirkin | d85b758f7 | 2017-03-09 02:21:21 +0200 | [diff] [blame] | 2699 | /* How large should a single buffer be so a queue full of these can fit at |
| 2700 | * least one full packet? |
| 2701 | * Logic below assumes the mergeable buffer header is used. |
| 2702 | */ |
| 2703 | static unsigned int mergeable_min_buf_len(struct virtnet_info *vi, struct virtqueue *vq) |
| 2704 | { |
| 2705 | const unsigned int hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf); |
| 2706 | unsigned int rq_size = virtqueue_get_vring_size(vq); |
| 2707 | unsigned int packet_len = vi->big_packets ? IP_MAX_MTU : vi->dev->max_mtu; |
| 2708 | unsigned int buf_len = hdr_len + ETH_HLEN + VLAN_HLEN + packet_len; |
| 2709 | unsigned int min_buf_len = DIV_ROUND_UP(buf_len, rq_size); |
| 2710 | |
Michael S. Tsirkin | f0c3192 | 2017-06-02 17:54:33 +0300 | [diff] [blame] | 2711 | return max(max(min_buf_len, hdr_len) - hdr_len, |
| 2712 | (unsigned int)GOOD_PACKET_LEN); |
Michael S. Tsirkin | d85b758f7 | 2017-03-09 02:21:21 +0200 | [diff] [blame] | 2713 | } |
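| | |
| | /* Worked example (editor's illustration, assuming the usual 12-byte |
| |  * mergeable header, ETH_HLEN == 14 and VLAN_HLEN == 4): with big packets |
| |  * the worst case is buf_len = 12 + 14 + 4 + 65535 = 65565. A 256-entry |
| |  * ring gives min_buf_len = DIV_ROUND_UP(65565, 256) = 257, and the final |
| |  * max() clamps 257 - 12 = 245 up to GOOD_PACKET_LEN (1518). A tiny |
| |  * 16-entry ring gives DIV_ROUND_UP(65565, 16) = 4098, so buffers grow to |
| |  * 4098 - 12 = 4086 bytes to keep one full packet per ring. |
| |  */ |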
| 2714 | |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2715 | static int virtnet_find_vqs(struct virtnet_info *vi) |
| 2716 | { |
| 2717 | vq_callback_t **callbacks; |
| 2718 | struct virtqueue **vqs; |
| 2719 | int ret = -ENOMEM; |
| 2720 | int i, total_vqs; |
| 2721 | const char **names; |
Michael S. Tsirkin | d45b897 | 2017-03-06 20:31:21 +0200 | [diff] [blame] | 2722 | bool *ctx; |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2723 | |
| 2724 | /* We expect 1 RX virtqueue followed by 1 TX virtqueue, followed by |
| 2725 | * possible N-1 RX/TX queue pairs used in multiqueue mode, followed by |
| 2726 | * possible control vq. |
| 2727 | */ |
| 2728 | total_vqs = vi->max_queue_pairs * 2 + |
| 2729 | virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ); |
| 2730 | |
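| | /* Layout example (editor's illustration): with max_queue_pairs == 4 and |
| |  * a control vq, total_vqs == 4 * 2 + 1 == 9. Assuming the rxq2vq()/ |
| |  * txq2vq() helpers earlier in this file map pair i to indices 2*i and |
| |  * 2*i + 1, the vqs land as rx0=0, tx0=1, rx1=2, tx1=3, ..., with the |
| |  * control vq last at index total_vqs - 1 == 8, matching the cvq setup |
| |  * below. |
| |  */ |
| | |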
| 2731 | /* Allocate space for find_vqs parameters */ |
Kees Cook | 6396bb2 | 2018-06-12 14:03:40 -0700 | [diff] [blame] | 2732 | vqs = kcalloc(total_vqs, sizeof(*vqs), GFP_KERNEL); |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2733 | if (!vqs) |
| 2734 | goto err_vq; |
Kees Cook | 6da2ec5 | 2018-06-12 13:55:00 -0700 | [diff] [blame] | 2735 | callbacks = kmalloc_array(total_vqs, sizeof(*callbacks), GFP_KERNEL); |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2736 | if (!callbacks) |
| 2737 | goto err_callback; |
Kees Cook | 6da2ec5 | 2018-06-12 13:55:00 -0700 | [diff] [blame] | 2738 | names = kmalloc_array(total_vqs, sizeof(*names), GFP_KERNEL); |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2739 | if (!names) |
| 2740 | goto err_names; |
Jason Wang | 192f68c | 2017-07-19 16:54:47 +0800 | [diff] [blame] | 2741 | if (!vi->big_packets || vi->mergeable_rx_bufs) { |
Kees Cook | 6396bb2 | 2018-06-12 14:03:40 -0700 | [diff] [blame] | 2742 | ctx = kcalloc(total_vqs, sizeof(*ctx), GFP_KERNEL); |
Michael S. Tsirkin | d45b897 | 2017-03-06 20:31:21 +0200 | [diff] [blame] | 2743 | if (!ctx) |
| 2744 | goto err_ctx; |
| 2745 | } else { |
| 2746 | ctx = NULL; |
| 2747 | } |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2748 | |
| 2749 | /* Parameters for control virtqueue, if any */ |
| 2750 | if (vi->has_cvq) { |
| 2751 | callbacks[total_vqs - 1] = NULL; |
| 2752 | names[total_vqs - 1] = "control"; |
| 2753 | } |
| 2754 | |
| 2755 | /* Allocate/initialize parameters for send/receive virtqueues */ |
| 2756 | for (i = 0; i < vi->max_queue_pairs; i++) { |
| 2757 | callbacks[rxq2vq(i)] = skb_recv_done; |
| 2758 | callbacks[txq2vq(i)] = skb_xmit_done; |
| 2759 | sprintf(vi->rq[i].name, "input.%d", i); |
| 2760 | sprintf(vi->sq[i].name, "output.%d", i); |
| 2761 | names[rxq2vq(i)] = vi->rq[i].name; |
| 2762 | names[txq2vq(i)] = vi->sq[i].name; |
Michael S. Tsirkin | d45b897 | 2017-03-06 20:31:21 +0200 | [diff] [blame] | 2763 | if (ctx) |
| 2764 | ctx[rxq2vq(i)] = true; |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2765 | } |
| 2766 | |
| 2767 | ret = vi->vdev->config->find_vqs(vi->vdev, total_vqs, vqs, callbacks, |
Michael S. Tsirkin | d45b897 | 2017-03-06 20:31:21 +0200 | [diff] [blame] | 2768 | names, ctx, NULL); |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2769 | if (ret) |
| 2770 | goto err_find; |
| 2771 | |
| 2772 | if (vi->has_cvq) { |
| 2773 | vi->cvq = vqs[total_vqs - 1]; |
| 2774 | if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN)) |
Patrick McHardy | f646968 | 2013-04-19 02:04:27 +0000 | [diff] [blame] | 2775 | vi->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2776 | } |
| 2777 | |
| 2778 | for (i = 0; i < vi->max_queue_pairs; i++) { |
| 2779 | vi->rq[i].vq = vqs[rxq2vq(i)]; |
Michael S. Tsirkin | d85b758f7 | 2017-03-09 02:21:21 +0200 | [diff] [blame] | 2780 | vi->rq[i].min_buf_len = mergeable_min_buf_len(vi, vi->rq[i].vq); |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2781 | vi->sq[i].vq = vqs[txq2vq(i)]; |
| 2782 | } |
| 2783 | |
Tonghao Zhang | 2fa3c8a | 2018-05-31 07:16:32 -0700 | [diff] [blame] | 2784 | /* Success: ret == 0. Fall through to free the temporary parameter |
| | * arrays, which are no longer needed once the vqs have been found. |
| | */ |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2785 | |
| 2787 | err_find: |
Michael S. Tsirkin | d45b897 | 2017-03-06 20:31:21 +0200 | [diff] [blame] | 2788 | kfree(ctx); |
| 2789 | err_ctx: |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2790 | kfree(names); |
| 2791 | err_names: |
| 2792 | kfree(callbacks); |
| 2793 | err_callback: |
| 2794 | kfree(vqs); |
| 2795 | err_vq: |
| 2796 | return ret; |
| 2797 | } |
| 2798 | |
| 2799 | static int virtnet_alloc_queues(struct virtnet_info *vi) |
| 2800 | { |
| 2801 | int i; |
| 2802 | |
Michael S. Tsirkin | 12e5716 | 2018-04-19 08:30:48 +0300 | [diff] [blame] | 2803 | vi->ctrl = kzalloc(sizeof(*vi->ctrl), GFP_KERNEL); |
| 2804 | if (!vi->ctrl) |
| 2805 | goto err_ctrl; |
Kees Cook | 6396bb2 | 2018-06-12 14:03:40 -0700 | [diff] [blame] | 2806 | vi->sq = kcalloc(vi->max_queue_pairs, sizeof(*vi->sq), GFP_KERNEL); |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2807 | if (!vi->sq) |
| 2808 | goto err_sq; |
Kees Cook | 6396bb2 | 2018-06-12 14:03:40 -0700 | [diff] [blame] | 2809 | vi->rq = kcalloc(vi->max_queue_pairs, sizeof(*vi->rq), GFP_KERNEL); |
Amerigo Wang | 008d427 | 2012-12-10 02:24:08 +0000 | [diff] [blame] | 2810 | if (!vi->rq) |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2811 | goto err_rq; |
| 2812 | |
| 2813 | INIT_DELAYED_WORK(&vi->refill, refill_work); |
| 2814 | for (i = 0; i < vi->max_queue_pairs; i++) { |
| 2815 | vi->rq[i].pages = NULL; |
| 2816 | netif_napi_add(vi->dev, &vi->rq[i].napi, virtnet_poll, |
| 2817 | napi_weight); |
Willem de Bruijn | 1d11e73 | 2017-04-27 20:37:58 -0400 | [diff] [blame] | 2818 | netif_tx_napi_add(vi->dev, &vi->sq[i].napi, virtnet_poll_tx, |
| 2819 | napi_tx ? napi_weight : 0); |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2820 | |
| 2821 | sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg)); |
Johannes Berg | 5377d758 | 2015-08-19 09:48:40 +0200 | [diff] [blame] | 2822 | ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len); |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2823 | sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg)); |
Toshiaki Makita | d7dfc5c | 2018-01-17 15:38:25 +0900 | [diff] [blame] | 2824 | |
| 2825 | u64_stats_init(&vi->rq[i].stats.syncp); |
| 2826 | u64_stats_init(&vi->sq[i].stats.syncp); |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2827 | } |
| 2828 | |
| 2829 | return 0; |
| 2830 | |
| 2831 | err_rq: |
| 2832 | kfree(vi->sq); |
| 2833 | err_sq: |
Michael S. Tsirkin | 12e5716 | 2018-04-19 08:30:48 +0300 | [diff] [blame] | 2834 | kfree(vi->ctrl); |
| 2835 | err_ctrl: |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2836 | return -ENOMEM; |
Jason Wang | e9d7417 | 2012-12-07 07:04:55 +0000 | [diff] [blame] | 2837 | } |
| 2838 | |
Amit Shah | 3f9c10b | 2011-12-22 16:58:31 +0530 | [diff] [blame] | 2839 | static int init_vqs(struct virtnet_info *vi) |
| 2840 | { |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2841 | int ret; |
Amit Shah | 3f9c10b | 2011-12-22 16:58:31 +0530 | [diff] [blame] | 2842 | |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2843 | /* Allocate send & receive queues */ |
| 2844 | ret = virtnet_alloc_queues(vi); |
| 2845 | if (ret) |
| 2846 | goto err; |
Amit Shah | 3f9c10b | 2011-12-22 16:58:31 +0530 | [diff] [blame] | 2847 | |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2848 | ret = virtnet_find_vqs(vi); |
| 2849 | if (ret) |
| 2850 | goto err_free; |
Amit Shah | 3f9c10b | 2011-12-22 16:58:31 +0530 | [diff] [blame] | 2851 | |
Wanlong Gao | 47be247 | 2013-01-24 23:51:29 +0000 | [diff] [blame] | 2852 | get_online_cpus(); |
Wanlong Gao | 8898c21 | 2013-01-24 23:51:30 +0000 | [diff] [blame] | 2853 | virtnet_set_affinity(vi); |
Wanlong Gao | 47be247 | 2013-01-24 23:51:29 +0000 | [diff] [blame] | 2854 | put_online_cpus(); |
| 2855 | |
Amit Shah | 3f9c10b | 2011-12-22 16:58:31 +0530 | [diff] [blame] | 2856 | return 0; |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2857 | |
| 2858 | err_free: |
| 2859 | virtnet_free_queues(vi); |
| 2860 | err: |
| 2861 | return ret; |
Amit Shah | 3f9c10b | 2011-12-22 16:58:31 +0530 | [diff] [blame] | 2862 | } |
| 2863 | |
Michael Dalton | fbf28d7 | 2014-01-16 22:23:30 -0800 | [diff] [blame] | 2864 | #ifdef CONFIG_SYSFS |
| 2865 | static ssize_t mergeable_rx_buffer_size_show(struct netdev_rx_queue *queue, |
stephen hemminger | 718ad68 | 2017-08-18 13:46:24 -0700 | [diff] [blame] | 2866 | char *buf) |
Michael Dalton | fbf28d7 | 2014-01-16 22:23:30 -0800 | [diff] [blame] | 2867 | { |
| 2868 | struct virtnet_info *vi = netdev_priv(queue->dev); |
| 2869 | unsigned int queue_index = get_netdev_rx_queue_index(queue); |
Jason Wang | 3cc81a9 | 2018-03-02 17:29:14 +0800 | [diff] [blame] | 2870 | unsigned int headroom = virtnet_get_headroom(vi); |
| 2871 | unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0; |
Johannes Berg | 5377d758 | 2015-08-19 09:48:40 +0200 | [diff] [blame] | 2872 | struct ewma_pkt_len *avg; |
Michael Dalton | fbf28d7 | 2014-01-16 22:23:30 -0800 | [diff] [blame] | 2873 | |
| 2874 | BUG_ON(queue_index >= vi->max_queue_pairs); |
| 2875 | avg = &vi->rq[queue_index].mrg_avg_pkt_len; |
Michael S. Tsirkin | d85b758f7 | 2017-03-09 02:21:21 +0200 | [diff] [blame] | 2876 | return sprintf(buf, "%u\n", |
Jason Wang | 3cc81a9 | 2018-03-02 17:29:14 +0800 | [diff] [blame] | 2877 | get_mergeable_buf_len(&vi->rq[queue_index], avg, |
| 2878 | SKB_DATA_ALIGN(headroom + tailroom))); |
Michael Dalton | fbf28d7 | 2014-01-16 22:23:30 -0800 | [diff] [blame] | 2879 | } |
| 2880 | |
| 2881 | static struct rx_queue_attribute mergeable_rx_buffer_size_attribute = |
| 2882 | __ATTR_RO(mergeable_rx_buffer_size); |
| 2883 | |
| 2884 | static struct attribute *virtio_net_mrg_rx_attrs[] = { |
| 2885 | &mergeable_rx_buffer_size_attribute.attr, |
| 2886 | NULL |
| 2887 | }; |
| 2888 | |
| 2889 | static const struct attribute_group virtio_net_mrg_rx_group = { |
| 2890 | .name = "virtio_net", |
| 2891 | .attrs = virtio_net_mrg_rx_attrs |
| 2892 | }; |
| 2893 | #endif |
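| | |
| | /* Usage note (editor's addition): with mergeable RX buffers the attribute |
| |  * is expected to show up per RX queue, e.g. |
| |  *   /sys/class/net/<dev>/queues/rx-<n>/virtio_net/mergeable_rx_buffer_size |
| |  * and reading it reports the current EWMA-based buffer size estimate. |
| |  */ |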
| 2894 | |
Jason Wang | 892d6eb | 2014-11-20 17:03:05 +0800 | [diff] [blame] | 2895 | static bool virtnet_fail_on_feature(struct virtio_device *vdev, |
| 2896 | unsigned int fbit, |
| 2897 | const char *fname, const char *dname) |
| 2898 | { |
| 2899 | if (!virtio_has_feature(vdev, fbit)) |
| 2900 | return false; |
| 2901 | |
| 2902 | dev_err(&vdev->dev, "device advertises feature %s but not %s", |
| 2903 | fname, dname); |
| 2904 | |
| 2905 | return true; |
| 2906 | } |
| 2907 | |
| 2908 | #define VIRTNET_FAIL_ON(vdev, fbit, dbit) \ |
| 2909 | virtnet_fail_on_feature(vdev, fbit, #fbit, dbit) |
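| | |
| | /* Expansion example (editor's illustration): #fbit stringizes the feature |
| |  * name, so |
| |  *   VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_RX, "VIRTIO_NET_F_CTRL_VQ") |
| |  * becomes |
| |  *   virtnet_fail_on_feature(vdev, VIRTIO_NET_F_CTRL_RX, |
| |  *                           "VIRTIO_NET_F_CTRL_RX", "VIRTIO_NET_F_CTRL_VQ") |
| |  * which logs the offending feature by name when its dependency is absent. |
| |  */ |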
| 2910 | |
| 2911 | static bool virtnet_validate_features(struct virtio_device *vdev) |
| 2912 | { |
| 2913 | if (!virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) && |
| 2914 | (VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_RX, |
| 2915 | "VIRTIO_NET_F_CTRL_VQ") || |
| 2916 | VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_VLAN, |
| 2917 | "VIRTIO_NET_F_CTRL_VQ") || |
| 2918 | VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE, |
| 2919 | "VIRTIO_NET_F_CTRL_VQ") || |
| 2920 | VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_MQ, "VIRTIO_NET_F_CTRL_VQ") || |
| 2921 | VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR, |
| 2922 | "VIRTIO_NET_F_CTRL_VQ"))) { |
| 2923 | return false; |
| 2924 | } |
| 2925 | |
| 2926 | return true; |
| 2927 | } |
| 2928 | |
Jarod Wilson | d0c2c99 | 2016-10-20 13:55:21 -0400 | [diff] [blame] | 2929 | #define MIN_MTU ETH_MIN_MTU |
| 2930 | #define MAX_MTU ETH_MAX_MTU |
| 2931 | |
Michael S. Tsirkin | fe36cbe | 2017-03-29 19:09:14 +0300 | [diff] [blame] | 2932 | static int virtnet_validate(struct virtio_device *vdev) |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 2933 | { |
Michael S. Tsirkin | 6ba4224 | 2015-01-12 16:23:37 +0200 | [diff] [blame] | 2934 | if (!vdev->config->get) { |
| 2935 | dev_err(&vdev->dev, "%s failure: config access disabled\n", |
| 2936 | __func__); |
| 2937 | return -EINVAL; |
| 2938 | } |
| 2939 | |
Jason Wang | 892d6eb | 2014-11-20 17:03:05 +0800 | [diff] [blame] | 2940 | if (!virtnet_validate_features(vdev)) |
| 2941 | return -EINVAL; |
| 2942 | |
Michael S. Tsirkin | fe36cbe | 2017-03-29 19:09:14 +0300 | [diff] [blame] | 2943 | if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) { |
| 2944 | int mtu = virtio_cread16(vdev, |
| 2945 | offsetof(struct virtio_net_config, |
| 2946 | mtu)); |
| 2947 | if (mtu < MIN_MTU) |
| 2948 | __virtio_clear_bit(vdev, VIRTIO_NET_F_MTU); |
| 2949 | } |
| 2950 | |
| 2951 | return 0; |
| 2952 | } |
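| | |
| | /* Editor's note: clearing VIRTIO_NET_F_MTU here, rather than failing the |
| |  * probe, makes a device advertising an illegal MTU (e.g. 60 < MIN_MTU of |
| |  * 68) look like one with no MTU hint at all; virtnet_probe() then keeps |
| |  * the default MTU range instead of trusting the bad value. |
| |  */ |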
| 2953 | |
| 2954 | static int virtnet_probe(struct virtio_device *vdev) |
| 2955 | { |
Toshiaki Makita | d7dfc5c | 2018-01-17 15:38:25 +0900 | [diff] [blame] | 2956 | int i, err = -ENOMEM; |
Michael S. Tsirkin | fe36cbe | 2017-03-29 19:09:14 +0300 | [diff] [blame] | 2957 | struct net_device *dev; |
| 2958 | struct virtnet_info *vi; |
| 2959 | u16 max_queue_pairs; |
| 2960 | int mtu; |
| 2961 | |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2962 | /* Find out whether the host supports a multiqueue virtio_net device */ |
Rusty Russell | 855e0c5 | 2013-10-14 18:11:51 +1030 | [diff] [blame] | 2963 | err = virtio_cread_feature(vdev, VIRTIO_NET_F_MQ, |
| 2964 | struct virtio_net_config, |
| 2965 | max_virtqueue_pairs, &max_queue_pairs); |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2966 | |
| 2967 | /* We need at least 2 queue pairs for multiqueue */ |
| 2968 | if (err || max_queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN || |
| 2969 | max_queue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX || |
| 2970 | !virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) |
| 2971 | max_queue_pairs = 1; |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 2972 | |
| 2973 | /* Allocate ourselves a network device with room for our info */ |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2974 | dev = alloc_etherdev_mq(sizeof(struct virtnet_info), max_queue_pairs); |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 2975 | if (!dev) |
| 2976 | return -ENOMEM; |
| 2977 | |
| 2978 | /* Set up network device as normal. */ |
Jiri Pirko | f2f2c8b | 2012-06-29 05:10:06 +0000 | [diff] [blame] | 2979 | dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE; |
Stephen Hemminger | 76288b4 | 2009-01-06 10:44:22 -0800 | [diff] [blame] | 2980 | dev->netdev_ops = &virtnet_netdev; |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 2981 | dev->features = NETIF_F_HIGHDMA; |
stephen hemminger | 3fa2a1d | 2011-06-15 06:36:29 +0000 | [diff] [blame] | 2982 | |
Wilfried Klaebe | 7ad24ea | 2014-05-11 00:12:32 +0000 | [diff] [blame] | 2983 | dev->ethtool_ops = &virtnet_ethtool_ops; |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 2984 | SET_NETDEV_DEV(dev, &vdev->dev); |
| 2985 | |
| 2986 | /* Do we support "hardware" checksums? */ |
Michał Mirosław | 98e778c | 2011-03-31 01:01:35 +0000 | [diff] [blame] | 2987 | if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) { |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 2988 | /* This opens up the world of extra features. */ |
Jason Wang | 48900cb | 2015-08-05 10:34:04 +0800 | [diff] [blame] | 2989 | dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_SG; |
Michał Mirosław | 98e778c | 2011-03-31 01:01:35 +0000 | [diff] [blame] | 2990 | if (csum) |
Jason Wang | 48900cb | 2015-08-05 10:34:04 +0800 | [diff] [blame] | 2991 | dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG; |
Michał Mirosław | 98e778c | 2011-03-31 01:01:35 +0000 | [diff] [blame] | 2992 | |
| 2993 | if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) { |
David S. Miller | e078de0 | 2017-07-03 06:37:32 -0700 | [diff] [blame] | 2994 | dev->hw_features |= NETIF_F_TSO |
Rusty Russell | 34a4857 | 2008-02-04 23:50:02 -0500 | [diff] [blame] | 2995 | | NETIF_F_TSO_ECN | NETIF_F_TSO6; |
| 2996 | } |
Rusty Russell | 5539ae96 | 2008-05-02 21:50:46 -0500 | [diff] [blame] | 2997 | /* Individual feature bits: what can the host handle? */ |
Michał Mirosław | 98e778c | 2011-03-31 01:01:35 +0000 | [diff] [blame] | 2998 | if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4)) |
| 2999 | dev->hw_features |= NETIF_F_TSO; |
| 3000 | if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6)) |
| 3001 | dev->hw_features |= NETIF_F_TSO6; |
| 3002 | if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN)) |
| 3003 | dev->hw_features |= NETIF_F_TSO_ECN; |
Michał Mirosław | 98e778c | 2011-03-31 01:01:35 +0000 | [diff] [blame] | 3004 | |
Jason Wang | 41f2f12 | 2014-12-24 11:03:52 +0800 | [diff] [blame] | 3005 | dev->features |= NETIF_F_GSO_ROBUST; |
| 3006 | |
Michał Mirosław | 98e778c | 2011-03-31 01:01:35 +0000 | [diff] [blame] | 3007 | if (gso) |
David S. Miller | e078de0 | 2017-07-03 06:37:32 -0700 | [diff] [blame] | 3008 | dev->features |= dev->hw_features & NETIF_F_ALL_TSO; |
Michał Mirosław | 98e778c | 2011-03-31 01:01:35 +0000 | [diff] [blame] | 3009 | /* (!csum && gso) case will be fixed by register_netdev() */ |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 3010 | } |
Thomas Huth | 4f49129 | 2013-08-27 17:09:02 +0200 | [diff] [blame] | 3011 | if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM)) |
| 3012 | dev->features |= NETIF_F_RXCSUM; |
Willem de Bruijn | a02e896 | 2018-12-20 17:14:54 -0500 | [diff] [blame] | 3013 | if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) || |
| 3014 | virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6)) |
| 3015 | dev->features |= NETIF_F_LRO; |
| 3016 | if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) |
| 3017 | dev->hw_features |= NETIF_F_LRO; |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 3018 | |
Jason Wang | 4fda830 | 2013-04-10 23:32:21 +0000 | [diff] [blame] | 3019 | dev->vlan_features = dev->features; |
| 3020 | |
Jarod Wilson | d0c2c99 | 2016-10-20 13:55:21 -0400 | [diff] [blame] | 3021 | /* MTU range: 68 - 65535 */ |
| 3022 | dev->min_mtu = MIN_MTU; |
| 3023 | dev->max_mtu = MAX_MTU; |
| 3024 | |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 3025 | /* Configuration may specify what MAC to use. Otherwise random. */ |
Rusty Russell | 855e0c5 | 2013-10-14 18:11:51 +1030 | [diff] [blame] | 3026 | if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) |
| 3027 | virtio_cread_bytes(vdev, |
| 3028 | offsetof(struct virtio_net_config, mac), |
| 3029 | dev->dev_addr, dev->addr_len); |
| 3030 | else |
Danny Kukawka | f2cedb6 | 2012-02-15 06:45:39 +0000 | [diff] [blame] | 3031 | eth_hw_addr_random(dev); |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 3032 | |
| 3033 | /* Set up our device-specific information */ |
| 3034 | vi = netdev_priv(dev); |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 3035 | vi->dev = dev; |
| 3036 | vi->vdev = vdev; |
Christian Borntraeger | d9d5dcc | 2008-02-18 10:02:51 +0100 | [diff] [blame] | 3037 | vdev->priv = vi; |
John Stultz | 827da44 | 2013-10-07 15:51:58 -0700 | [diff] [blame] | 3038 | |
Jason Wang | 586d17c | 2012-04-11 20:43:52 +0000 | [diff] [blame] | 3039 | INIT_WORK(&vi->config_work, virtnet_config_changed_work); |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 3040 | |
Herbert Xu | 97402b9 | 2008-04-18 11:24:27 +0800 | [diff] [blame] | 3041 | /* If we can receive ANY GSO packets, we must allocate large ones. */ |
Joe Perches | 8e95a20 | 2009-12-03 07:58:21 +0000 | [diff] [blame] | 3042 | if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) || |
| 3043 | virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) || |
Vlad Yasevich | e3e3c42 | 2015-02-03 16:36:17 -0500 | [diff] [blame] | 3044 | virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN) || |
| 3045 | virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UFO)) |
Herbert Xu | 97402b9 | 2008-04-18 11:24:27 +0800 | [diff] [blame] | 3046 | vi->big_packets = true; |
| 3047 | |
Mark McLoughlin | 3f2c31d | 2008-11-16 22:41:34 -0800 | [diff] [blame] | 3048 | if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) |
| 3049 | vi->mergeable_rx_bufs = true; |
| 3050 | |
Michael S. Tsirkin | d04302b | 2014-10-24 00:24:03 +0300 | [diff] [blame] | 3051 | if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF) || |
| 3052 | virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) |
Michael S. Tsirkin | 012873d | 2014-10-24 16:55:57 +0300 | [diff] [blame] | 3053 | vi->hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf); |
| 3054 | else |
| 3055 | vi->hdr_len = sizeof(struct virtio_net_hdr); |
| 3056 | |
Michael S. Tsirkin | 7599330 | 2015-07-15 15:26:19 +0300 | [diff] [blame] | 3057 | if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT) || |
| 3058 | virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) |
Michael S. Tsirkin | e7428e9 | 2013-07-25 10:20:23 +0930 | [diff] [blame] | 3059 | vi->any_header_sg = true; |
| 3060 | |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 3061 | if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) |
| 3062 | vi->has_cvq = true; |
| 3063 | |
Aaron Conole | 14de9d1 | 2016-06-03 16:57:12 -0400 | [diff] [blame] | 3064 | if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) { |
| 3065 | mtu = virtio_cread16(vdev, |
| 3066 | offsetof(struct virtio_net_config, |
| 3067 | mtu)); |
Aaron Conole | 93a205e | 2016-10-25 16:12:12 -0400 | [diff] [blame] | 3068 | if (mtu < dev->min_mtu) { |
Michael S. Tsirkin | fe36cbe | 2017-03-29 19:09:14 +0300 | [diff] [blame] | 3069 | /* Should never trigger: MTU was previously validated |
| 3070 | * in virtnet_validate. |
| 3071 | */ |
Yuval Shaia | 7934b48 | 2019-04-03 12:10:13 +0300 | [diff] [blame] | 3072 | dev_err(&vdev->dev, |
| 3073 | "device MTU appears to have changed it is now %d < %d", |
| 3074 | mtu, dev->min_mtu); |
Toshiaki Makita | d7dfc5c | 2018-01-17 15:38:25 +0900 | [diff] [blame] | 3075 | goto free; |
Aaron Conole | 93a205e | 2016-10-25 16:12:12 -0400 | [diff] [blame] | 3076 | } |
Michael S. Tsirkin | 2e123b4 | 2017-03-08 02:14:25 +0200 | [diff] [blame] | 3077 | |
Michael S. Tsirkin | fe36cbe | 2017-03-29 19:09:14 +0300 | [diff] [blame] | 3078 | dev->mtu = mtu; |
| 3079 | dev->max_mtu = mtu; |
| 3080 | |
Michael S. Tsirkin | 2e123b4 | 2017-03-08 02:14:25 +0200 | [diff] [blame] | 3081 | /* TODO: size buffers correctly in this case. */ |
| 3082 | if (dev->mtu > ETH_DATA_LEN) |
| 3083 | vi->big_packets = true; |
Aaron Conole | 14de9d1 | 2016-06-03 16:57:12 -0400 | [diff] [blame] | 3084 | } |
| 3085 | |
Michael S. Tsirkin | 012873d | 2014-10-24 16:55:57 +0300 | [diff] [blame] | 3086 | if (vi->any_header_sg) |
| 3087 | dev->needed_headroom = vi->hdr_len; |
Zhangjie \(HZ\) | 6ebbc1a | 2014-04-29 18:43:22 +0800 | [diff] [blame] | 3088 | |
Jason Wang | 4490001 | 2016-11-25 12:37:26 +0800 | [diff] [blame] | 3089 | /* Enable multiqueue by default */ |
| 3090 | if (num_online_cpus() >= max_queue_pairs) |
| 3091 | vi->curr_queue_pairs = max_queue_pairs; |
| 3092 | else |
| 3093 | vi->curr_queue_pairs = num_online_cpus(); |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 3094 | vi->max_queue_pairs = max_queue_pairs; |
| 3095 | |
| 3096 | /* Allocate/initialize the rx/tx queues, and invoke find_vqs */ |
Amit Shah | 3f9c10b | 2011-12-22 16:58:31 +0530 | [diff] [blame] | 3097 | err = init_vqs(vi); |
Michael S. Tsirkin | d2a7ddd | 2009-06-12 22:16:36 -0600 | [diff] [blame] | 3098 | if (err) |
Toshiaki Makita | d7dfc5c | 2018-01-17 15:38:25 +0900 | [diff] [blame] | 3099 | goto free; |
Michael S. Tsirkin | d2a7ddd | 2009-06-12 22:16:36 -0600 | [diff] [blame] | 3100 | |
Michael Dalton | fbf28d7 | 2014-01-16 22:23:30 -0800 | [diff] [blame] | 3101 | #ifdef CONFIG_SYSFS |
| 3102 | if (vi->mergeable_rx_bufs) |
| 3103 | dev->sysfs_rx_queue_group = &virtio_net_mrg_rx_group; |
| 3104 | #endif |
Zhi Yong Wu | 0f13b66b | 2013-11-18 21:19:27 +0800 | [diff] [blame] | 3105 | netif_set_real_num_tx_queues(dev, vi->curr_queue_pairs); |
| 3106 | netif_set_real_num_rx_queues(dev, vi->curr_queue_pairs); |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 3107 | |
Nikolay Aleksandrov | 16032be | 2016-02-03 04:04:37 +0100 | [diff] [blame] | 3108 | virtnet_init_settings(dev); |
| 3109 | |
Sridhar Samudrala | ba5e442 | 2018-05-24 09:55:17 -0700 | [diff] [blame] | 3110 | if (virtio_has_feature(vdev, VIRTIO_NET_F_STANDBY)) { |
| 3111 | vi->failover = net_failover_create(vi->dev); |
Wei Yongjun | 4b8e6ac | 2018-05-31 02:05:07 +0000 | [diff] [blame] | 3112 | if (IS_ERR(vi->failover)) { |
| 3113 | err = PTR_ERR(vi->failover); |
Sridhar Samudrala | ba5e442 | 2018-05-24 09:55:17 -0700 | [diff] [blame] | 3114 | goto free_vqs; |
Wei Yongjun | 4b8e6ac | 2018-05-31 02:05:07 +0000 | [diff] [blame] | 3115 | } |
Sridhar Samudrala | ba5e442 | 2018-05-24 09:55:17 -0700 | [diff] [blame] | 3116 | } |
| 3117 | |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 3118 | err = register_netdev(dev); |
| 3119 | if (err) { |
| 3120 | pr_debug("virtio_net: registering device failed\n"); |
Sridhar Samudrala | ba5e442 | 2018-05-24 09:55:17 -0700 | [diff] [blame] | 3121 | goto free_failover; |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 3122 | } |
Rusty Russell | b3369c1 | 2008-02-04 23:50:02 -0500 | [diff] [blame] | 3123 | |
Michael S. Tsirkin | 4baf1e3 | 2014-10-15 10:22:30 +1030 | [diff] [blame] | 3124 | virtio_device_ready(vdev); |
| 3125 | |
Sebastian Andrzej Siewior | 8017c27 | 2016-08-12 19:49:43 +0200 | [diff] [blame] | 3126 | err = virtnet_cpu_notif_add(vi); |
Wanlong Gao | 8de4b2f | 2013-01-24 23:51:31 +0000 | [diff] [blame] | 3127 | if (err) { |
| 3128 | pr_debug("virtio_net: registering cpu notifier failed\n"); |
wangyunjian | f00e35e | 2016-05-31 11:52:43 +0800 | [diff] [blame] | 3129 | goto free_unregister_netdev; |
Wanlong Gao | 8de4b2f | 2013-01-24 23:51:31 +0000 | [diff] [blame] | 3130 | } |
| 3131 | |
Jason Wang | a220871 | 2016-12-13 14:23:05 +0800 | [diff] [blame] | 3132 | virtnet_set_queues(vi, vi->curr_queue_pairs); |
Jason Wang | 4490001 | 2016-11-25 12:37:26 +0800 | [diff] [blame] | 3133 | |
Jason Wang | 167c25e | 2010-11-10 14:45:41 +0000 | [diff] [blame] | 3134 | /* Assume link up if device can't report link status; |
| 3135 |  * otherwise get link status from config. |
| |  */ |
Jay Vosburgh | bda7fab | 2018-03-22 14:42:41 +0000 | [diff] [blame] | 3136 | netif_carrier_off(dev); |
Jason Wang | 167c25e | 2010-11-10 14:45:41 +0000 | [diff] [blame] | 3137 | if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) { |
Tejun Heo | 3b07e9c | 2012-08-20 14:51:24 -0700 | [diff] [blame] | 3138 | schedule_work(&vi->config_work); |
Jason Wang | 167c25e | 2010-11-10 14:45:41 +0000 | [diff] [blame] | 3139 | } else { |
| 3140 | vi->status = VIRTIO_NET_S_LINK_UP; |
Jason Baron | faa9b39 | 2018-01-05 17:44:54 -0500 | [diff] [blame] | 3141 | virtnet_update_settings(vi); |
Jason Wang | 167c25e | 2010-11-10 14:45:41 +0000 | [diff] [blame] | 3142 | netif_carrier_on(dev); |
| 3143 | } |
Mark McLoughlin | 9f4d26d | 2009-01-19 17:09:49 -0800 | [diff] [blame] | 3144 | |
Jason Wang | 3f93522 | 2017-07-19 16:54:49 +0800 | [diff] [blame] | 3145 | for (i = 0; i < ARRAY_SIZE(guest_offloads); i++) |
| 3146 | if (virtio_has_feature(vi->vdev, guest_offloads[i])) |
| 3147 | set_bit(guest_offloads[i], &vi->guest_offloads); |
Willem de Bruijn | a02e896 | 2018-12-20 17:14:54 -0500 | [diff] [blame] | 3148 | vi->guest_offloads_capable = vi->guest_offloads; |
Jason Wang | 3f93522 | 2017-07-19 16:54:49 +0800 | [diff] [blame] | 3149 | |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 3150 | pr_debug("virtnet: registered device %s with %d RX and TX vqs\n", |
| 3151 | dev->name, max_queue_pairs); |
| 3152 | |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 3153 | return 0; |
| 3154 | |
wangyunjian | f00e35e | 2016-05-31 11:52:43 +0800 | [diff] [blame] | 3155 | free_unregister_netdev: |
Michael S. Tsirkin | 0246555 | 2014-10-15 10:22:31 +1030 | [diff] [blame] | 3156 | vi->vdev->config->reset(vdev); |
| 3157 | |
Rusty Russell | b3369c1 | 2008-02-04 23:50:02 -0500 | [diff] [blame] | 3158 | unregister_netdev(dev); |
Sridhar Samudrala | ba5e442 | 2018-05-24 09:55:17 -0700 | [diff] [blame] | 3159 | free_failover: |
| 3160 | net_failover_destroy(vi->failover); |
Michael S. Tsirkin | d2a7ddd | 2009-06-12 22:16:36 -0600 | [diff] [blame] | 3161 | free_vqs: |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 3162 | cancel_delayed_work_sync(&vi->refill); |
Michael Dalton | fb51879 | 2014-01-16 22:23:26 -0800 | [diff] [blame] | 3163 | free_receive_page_frags(vi); |
Jason Wang | e9d7417 | 2012-12-07 07:04:55 +0000 | [diff] [blame] | 3164 | virtnet_del_vqs(vi); |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 3165 | free: |
| 3166 | free_netdev(dev); |
| 3167 | return err; |
| 3168 | } |
| 3169 | |
Amit Shah | 04486ed | 2011-12-22 16:58:32 +0530 | [diff] [blame] | 3170 | static void remove_vq_common(struct virtnet_info *vi) |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 3171 | { |
Amit Shah | 04486ed | 2011-12-22 16:58:32 +0530 | [diff] [blame] | 3172 | vi->vdev->config->reset(vi->vdev); |
Shirley Ma | 830a8a9 | 2010-02-08 14:14:42 +0000 | [diff] [blame] | 3173 | |
| 3174 | /* Free unused buffers in both send and recv, if any. */ |
Shirley Ma | 9ab86bb | 2010-01-29 03:20:04 +0000 | [diff] [blame] | 3175 | free_unused_bufs(vi); |
Rusty Russell | fb6813f | 2008-07-25 12:06:01 -0500 | [diff] [blame] | 3176 | |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 3177 | free_receive_bufs(vi); |
Michael S. Tsirkin | d2a7ddd | 2009-06-12 22:16:36 -0600 | [diff] [blame] | 3178 | |
Michael Dalton | fb51879 | 2014-01-16 22:23:26 -0800 | [diff] [blame] | 3179 | free_receive_page_frags(vi); |
| 3180 | |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 3181 | virtnet_del_vqs(vi); |
Amit Shah | 04486ed | 2011-12-22 16:58:32 +0530 | [diff] [blame] | 3182 | } |
| 3183 | |
Bill Pemberton | 8cc085d | 2012-12-03 09:24:15 -0500 | [diff] [blame] | 3184 | static void virtnet_remove(struct virtio_device *vdev) |
Amit Shah | 04486ed | 2011-12-22 16:58:32 +0530 | [diff] [blame] | 3185 | { |
| 3186 | struct virtnet_info *vi = vdev->priv; |
| 3187 | |
Sebastian Andrzej Siewior | 8017c27 | 2016-08-12 19:49:43 +0200 | [diff] [blame] | 3188 | virtnet_cpu_notif_remove(vi); |
Wanlong Gao | 8de4b2f | 2013-01-24 23:51:31 +0000 | [diff] [blame] | 3189 | |
Michael S. Tsirkin | 102a278 | 2014-10-15 10:22:29 +1030 | [diff] [blame] | 3190 | /* Make sure no work handler is accessing the device. */ |
| 3191 | flush_work(&vi->config_work); |
Jason Wang | 586d17c | 2012-04-11 20:43:52 +0000 | [diff] [blame] | 3192 | |
Amit Shah | 04486ed | 2011-12-22 16:58:32 +0530 | [diff] [blame] | 3193 | unregister_netdev(vi->dev); |
| 3194 | |
Sridhar Samudrala | ba5e442 | 2018-05-24 09:55:17 -0700 | [diff] [blame] | 3195 | net_failover_destroy(vi->failover); |
| 3196 | |
Amit Shah | 04486ed | 2011-12-22 16:58:32 +0530 | [diff] [blame] | 3197 | remove_vq_common(vi); |
Rusty Russell | fb6813f | 2008-07-25 12:06:01 -0500 | [diff] [blame] | 3198 | |
Rusty Russell | 74b2553 | 2007-11-19 11:20:42 -0500 | [diff] [blame] | 3199 | free_netdev(vi->dev); |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 3200 | } |
| 3201 | |
Arnd Bergmann | 67a7519 | 2017-07-25 17:35:50 +0200 | [diff] [blame] | 3202 | static __maybe_unused int virtnet_freeze(struct virtio_device *vdev) |
Amit Shah | 0741bcb | 2011-12-22 16:58:33 +0530 | [diff] [blame] | 3203 | { |
| 3204 | struct virtnet_info *vi = vdev->priv; |
| 3205 | |
Sebastian Andrzej Siewior | 8017c27 | 2016-08-12 19:49:43 +0200 | [diff] [blame] | 3206 | virtnet_cpu_notif_remove(vi); |
John Fastabend | 9fe7bfc | 2017-02-02 19:16:01 -0800 | [diff] [blame] | 3207 | virtnet_freeze_down(vdev); |
Amit Shah | 0741bcb | 2011-12-22 16:58:33 +0530 | [diff] [blame] | 3208 | remove_vq_common(vi); |
| 3209 | |
| 3210 | return 0; |
| 3211 | } |
| 3212 | |
Arnd Bergmann | 67a7519 | 2017-07-25 17:35:50 +0200 | [diff] [blame] | 3213 | static __maybe_unused int virtnet_restore(struct virtio_device *vdev) |
Amit Shah | 0741bcb | 2011-12-22 16:58:33 +0530 | [diff] [blame] | 3214 | { |
| 3215 | struct virtnet_info *vi = vdev->priv; |
John Fastabend | 9fe7bfc | 2017-02-02 19:16:01 -0800 | [diff] [blame] | 3216 | int err; |
Amit Shah | 0741bcb | 2011-12-22 16:58:33 +0530 | [diff] [blame] | 3217 | |
John Fastabend | 9fe7bfc | 2017-02-02 19:16:01 -0800 | [diff] [blame] | 3218 | err = virtnet_restore_up(vdev); |
Amit Shah | 0741bcb | 2011-12-22 16:58:33 +0530 | [diff] [blame] | 3219 | if (err) |
| 3220 | return err; |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 3221 | virtnet_set_queues(vi, vi->curr_queue_pairs); |
| 3222 | |
Sebastian Andrzej Siewior | 8017c27 | 2016-08-12 19:49:43 +0200 | [diff] [blame] | 3223 | err = virtnet_cpu_notif_add(vi); |
Jason Wang | ec9debb | 2013-10-29 15:11:07 +0800 | [diff] [blame] | 3224 | if (err) |
| 3225 | return err; |
| 3226 | |
Amit Shah | 0741bcb | 2011-12-22 16:58:33 +0530 | [diff] [blame] | 3227 | return 0; |
| 3228 | } |
Amit Shah | 0741bcb | 2011-12-22 16:58:33 +0530 | [diff] [blame] | 3229 | |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 3230 | static struct virtio_device_id id_table[] = { |
| 3231 | { VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID }, |
| 3232 | { 0 }, |
| 3233 | }; |
| 3234 | |
Michael S. Tsirkin | f335850 | 2016-11-04 12:55:36 +0200 | [diff] [blame] | 3235 | #define VIRTNET_FEATURES \ |
| 3236 | VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, \ |
| 3237 | VIRTIO_NET_F_MAC, \ |
| 3238 | VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, \ |
| 3239 | VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, \ |
| 3240 | VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, \ |
| 3241 | VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, \ |
| 3242 | VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, \ |
| 3243 | VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, \ |
| 3244 | VIRTIO_NET_F_CTRL_MAC_ADDR, \ |
Jason Baron | faa9b39 | 2018-01-05 17:44:54 -0500 | [diff] [blame] | 3245 | VIRTIO_NET_F_MTU, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, \ |
Sridhar Samudrala | 9805069 | 2018-05-24 09:55:16 -0700 | [diff] [blame] | 3246 | VIRTIO_NET_F_SPEED_DUPLEX, VIRTIO_NET_F_STANDBY |
Michael S. Tsirkin | f335850 | 2016-11-04 12:55:36 +0200 | [diff] [blame] | 3247 | |
Rusty Russell | c45a681 | 2008-05-02 21:50:50 -0500 | [diff] [blame] | 3248 | static unsigned int features[] = { |
Michael S. Tsirkin | f335850 | 2016-11-04 12:55:36 +0200 | [diff] [blame] | 3249 | VIRTNET_FEATURES, |
| 3250 | }; |
| 3251 | |
| 3252 | static unsigned int features_legacy[] = { |
| 3253 | VIRTNET_FEATURES, |
| 3254 | VIRTIO_NET_F_GSO, |
Michael S. Tsirkin | e7428e9 | 2013-07-25 10:20:23 +0930 | [diff] [blame] | 3255 | VIRTIO_F_ANY_LAYOUT, |
Rusty Russell | c45a681 | 2008-05-02 21:50:50 -0500 | [diff] [blame] | 3256 | }; |
| 3257 | |
Uwe Kleine-König | 2240252 | 2009-11-05 01:32:44 -0800 | [diff] [blame] | 3258 | static struct virtio_driver virtio_net_driver = { |
Rusty Russell | c45a681 | 2008-05-02 21:50:50 -0500 | [diff] [blame] | 3259 | .feature_table = features, |
| 3260 | .feature_table_size = ARRAY_SIZE(features), |
Michael S. Tsirkin | f335850 | 2016-11-04 12:55:36 +0200 | [diff] [blame] | 3261 | .feature_table_legacy = features_legacy, |
| 3262 | .feature_table_size_legacy = ARRAY_SIZE(features_legacy), |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 3263 | .driver.name = KBUILD_MODNAME, |
| 3264 | .driver.owner = THIS_MODULE, |
| 3265 | .id_table = id_table, |
Michael S. Tsirkin | fe36cbe | 2017-03-29 19:09:14 +0300 | [diff] [blame] | 3266 | .validate = virtnet_validate, |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 3267 | .probe = virtnet_probe, |
Bill Pemberton | 8cc085d | 2012-12-03 09:24:15 -0500 | [diff] [blame] | 3268 | .remove = virtnet_remove, |
Mark McLoughlin | 9f4d26d | 2009-01-19 17:09:49 -0800 | [diff] [blame] | 3269 | .config_changed = virtnet_config_changed, |
Aaron Lu | 8910700 | 2013-09-17 09:25:23 +0930 | [diff] [blame] | 3270 | #ifdef CONFIG_PM_SLEEP |
Amit Shah | 0741bcb | 2011-12-22 16:58:33 +0530 | [diff] [blame] | 3271 | .freeze = virtnet_freeze, |
| 3272 | .restore = virtnet_restore, |
| 3273 | #endif |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 3274 | }; |
| 3275 | |
Sebastian Andrzej Siewior | 8017c27 | 2016-08-12 19:49:43 +0200 | [diff] [blame] | 3276 | static __init int virtio_net_driver_init(void) |
| 3277 | { |
| 3278 | int ret; |
| 3279 | |
Thomas Gleixner | 73c1b41 | 2016-12-21 20:19:54 +0100 | [diff] [blame] | 3280 | ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "virtio/net:online", |
Sebastian Andrzej Siewior | 8017c27 | 2016-08-12 19:49:43 +0200 | [diff] [blame] | 3281 | virtnet_cpu_online, |
| 3282 | virtnet_cpu_down_prep); |
| 3283 | if (ret < 0) |
| 3284 | goto out; |
| 3285 | virtionet_online = ret; |
Thomas Gleixner | 73c1b41 | 2016-12-21 20:19:54 +0100 | [diff] [blame] | 3286 | ret = cpuhp_setup_state_multi(CPUHP_VIRT_NET_DEAD, "virtio/net:dead", |
Sebastian Andrzej Siewior | 8017c27 | 2016-08-12 19:49:43 +0200 | [diff] [blame] | 3287 | NULL, virtnet_cpu_dead); |
| 3288 | if (ret) |
| 3289 | goto err_dead; |
| 3290 | |
| 3291 | ret = register_virtio_driver(&virtio_net_driver); |
| 3292 | if (ret) |
| 3293 | goto err_virtio; |
| 3294 | return 0; |
| 3295 | err_virtio: |
| 3296 | cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD); |
| 3297 | err_dead: |
| 3298 | cpuhp_remove_multi_state(virtionet_online); |
| 3299 | out: |
| 3300 | return ret; |
| 3301 | } |
| 3302 | module_init(virtio_net_driver_init); |
| 3303 | |
| 3304 | static __exit void virtio_net_driver_exit(void) |
| 3305 | { |
Andrew Jones | cfa0ebc | 2017-07-24 15:38:32 +0200 | [diff] [blame] | 3306 | unregister_virtio_driver(&virtio_net_driver); |
Sebastian Andrzej Siewior | 8017c27 | 2016-08-12 19:49:43 +0200 | [diff] [blame] | 3307 | cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD); |
| 3308 | cpuhp_remove_multi_state(virtionet_online); |
Sebastian Andrzej Siewior | 8017c27 | 2016-08-12 19:49:43 +0200 | [diff] [blame] | 3309 | } |
| 3310 | module_exit(virtio_net_driver_exit); |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 3311 | |
| 3312 | MODULE_DEVICE_TABLE(virtio, id_table); |
| 3313 | MODULE_DESCRIPTION("Virtio network driver"); |
| 3314 | MODULE_LICENSE("GPL"); |