/* A network driver using virtio.
 *
 * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
//#define DEBUG
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_net.h>
#include <linux/scatterlist.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/average.h>
#include <net/busy_poll.h>

static int napi_weight = NAPI_POLL_WEIGHT;
module_param(napi_weight, int, 0444);

static bool csum = true, gso = true;
module_param(csum, bool, 0444);
module_param(gso, bool, 0444);

/* FIXME: MTU in config. */
#define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
#define GOOD_COPY_LEN	128

/* Weight used for the RX packet size EWMA.  The average packet size is used to
 * determine the packet buffer size when refilling RX rings.  As the entire RX
 * ring may be refilled at once, the weight is chosen so that the EWMA will be
 * insensitive to short-term, transient changes in packet size.
 */
#define RECEIVE_AVG_WEIGHT 64
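
/* Illustrative note (not in the original source): with a weight of 64, each
 * new sample moves the average by roughly 1/64 of the difference.  For
 * example, an average of 1500 bytes followed by a single 500-byte packet
 * only drops to about 1500 - (1000 / 64) ~= 1484 bytes, so one small packet
 * barely perturbs the buffer size chosen for the next refill.
 */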

/* Minimum alignment for mergeable packet buffers. */
#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, 256)

#define VIRTNET_DRIVER_VERSION "1.0.0"

struct virtnet_stats {
	struct u64_stats_sync tx_syncp;
	struct u64_stats_sync rx_syncp;
	u64 tx_bytes;
	u64 tx_packets;

	u64 rx_bytes;
	u64 rx_packets;
};
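
/* Reader-side sketch for these per-cpu counters (an assumption for
 * illustration; the actual .ndo_get_stats64 handler lives elsewhere in this
 * file): a reader retries until the writer-side seqcount is stable, e.g.
 *
 *	unsigned int start;
 *	u64 tpackets;
 *
 *	do {
 *		start = u64_stats_fetch_begin_irq(&stats->tx_syncp);
 *		tpackets = stats->tx_packets;
 *	} while (u64_stats_fetch_retry_irq(&stats->tx_syncp, start));
 */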

/* Internal representation of a send virtqueue */
struct send_queue {
	/* Virtqueue associated with this send_queue */
	struct virtqueue *vq;

	/* TX: fragments + linear part + virtio header */
	struct scatterlist sg[MAX_SKB_FRAGS + 2];

	/* Name of the send queue: output.$index */
	char name[40];
};

/* Internal representation of a receive virtqueue */
struct receive_queue {
	/* Virtqueue associated with this receive_queue */
	struct virtqueue *vq;

	struct napi_struct napi;

	/* Chain pages by the private ptr. */
	struct page *pages;

	/* Average packet length for mergeable receive buffers. */
	struct ewma mrg_avg_pkt_len;

	/* Page frag for packet buffer allocation. */
	struct page_frag alloc_frag;

	/* RX: fragments + linear part + virtio header */
	struct scatterlist sg[MAX_SKB_FRAGS + 2];

	/* Name of this receive queue: input.$index */
	char name[40];
};

struct virtnet_info {
	struct virtio_device *vdev;
	struct virtqueue *cvq;
	struct net_device *dev;
	struct send_queue *sq;
	struct receive_queue *rq;
	unsigned int status;

	/* Max # of queue pairs supported by the device */
	u16 max_queue_pairs;

	/* # of queue pairs currently used by the driver */
	u16 curr_queue_pairs;

	/* I like... big packets and I cannot lie! */
	bool big_packets;

	/* Host will merge rx buffers for big packets (shake it! shake it!) */
	bool mergeable_rx_bufs;

	/* Has control virtqueue */
	bool has_cvq;

	/* Host can handle any s/g split between our header and packet data */
	bool any_header_sg;

	/* Active statistics */
	struct virtnet_stats __percpu *stats;

	/* Work struct for refilling if we run low on memory. */
	struct delayed_work refill;

	/* Work struct for config space updates */
	struct work_struct config_work;

	/* Is the affinity hint set for the virtqueues? */
	bool affinity_hint_set;

	/* CPU hot plug notifier */
	struct notifier_block nb;
};

struct skb_vnet_hdr {
	union {
		struct virtio_net_hdr hdr;
		struct virtio_net_hdr_mrg_rxbuf mhdr;
	};
};

struct padded_vnet_hdr {
	struct virtio_net_hdr hdr;
	/*
	 * virtio_net_hdr should be in a separate sg buffer because of a
	 * QEMU bug, and the data sg buffer shares the same page with this
	 * header sg.  This padding makes the next sg 16 byte aligned after
	 * virtio_net_hdr.
	 */
	char padding[6];
};
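
/* Size check (illustrative, assuming the legacy 10-byte header): struct
 * virtio_net_hdr packs two u8 and four __u16 fields into 10 bytes, so the
 * 6 bytes of padding round struct padded_vnet_hdr up to 16 bytes, which is
 * why the data sg that follows it in the shared page starts 16-byte aligned.
 */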

/* Converting between virtqueue no. and kernel tx/rx queue no.
 * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq
 */
static int vq2txq(struct virtqueue *vq)
{
	return (vq->index - 1) / 2;
}

static int txq2vq(int txq)
{
	return txq * 2 + 1;
}

static int vq2rxq(struct virtqueue *vq)
{
	return vq->index / 2;
}

static int rxq2vq(int rxq)
{
	return rxq * 2;
}
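
/* Worked example (not in the original source): with two queue pairs the
 * layout is vq0:rx0, vq1:tx0, vq2:rx1, vq3:tx1, vq4:cvq, so txq2vq(1) == 3
 * and vq2rxq() of the virtqueue with index 2 == 1, matching the comment
 * above.
 */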

static inline struct skb_vnet_hdr *skb_vnet_hdr(struct sk_buff *skb)
{
	return (struct skb_vnet_hdr *)skb->cb;
}

/*
 * private is used to chain pages for big packets; put the whole
 * most recently used list at the beginning for reuse.
 */
static void give_pages(struct receive_queue *rq, struct page *page)
{
	struct page *end;

	/* Find end of list, sew whole thing into vi->rq.pages. */
	for (end = page; end->private; end = (struct page *)end->private);
	end->private = (unsigned long)rq->pages;
	rq->pages = page;
}

static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
{
	struct page *p = rq->pages;

	if (p) {
		rq->pages = (struct page *)p->private;
		/* clear private here, it is used to chain pages */
		p->private = 0;
	} else
		p = alloc_page(gfp_mask);
	return p;
}

static void skb_xmit_done(struct virtqueue *vq)
{
	struct virtnet_info *vi = vq->vdev->priv;

	/* Suppress further interrupts. */
	virtqueue_disable_cb(vq);

	/* We were probably waiting for more output buffers. */
	netif_wake_subqueue(vi->dev, vq2txq(vq));
}

static unsigned int mergeable_ctx_to_buf_truesize(unsigned long mrg_ctx)
{
	unsigned int truesize = mrg_ctx & (MERGEABLE_BUFFER_ALIGN - 1);
	return (truesize + 1) * MERGEABLE_BUFFER_ALIGN;
}

static void *mergeable_ctx_to_buf_address(unsigned long mrg_ctx)
{
	return (void *)(mrg_ctx & -MERGEABLE_BUFFER_ALIGN);
}

static unsigned long mergeable_buf_to_ctx(void *buf, unsigned int truesize)
{
	unsigned int size = truesize / MERGEABLE_BUFFER_ALIGN;
	return (unsigned long)buf | (size - 1);
}
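
/* Worked example (illustrative, assuming MERGEABLE_BUFFER_ALIGN == 256):
 * a 256-byte-aligned buffer with truesize 1536 encodes as
 * mergeable_buf_to_ctx(buf, 1536) == buf | (1536/256 - 1) == buf | 5;
 * decoding gives mergeable_ctx_to_buf_address(ctx) == ctx & ~255UL == buf
 * and mergeable_ctx_to_buf_truesize(ctx) == (5 + 1) * 256 == 1536.  The
 * otherwise-unused low alignment bits thus carry the truesize for free.
 */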

/* Called from bottom half context */
static struct sk_buff *page_to_skb(struct receive_queue *rq,
				   struct page *page, unsigned int offset,
				   unsigned int len, unsigned int truesize)
{
	struct virtnet_info *vi = rq->vq->vdev->priv;
	struct sk_buff *skb;
	struct skb_vnet_hdr *hdr;
	unsigned int copy, hdr_len, hdr_padded_len;
	char *p;

	p = page_address(page) + offset;

	/* copy small packet so we can reuse these pages for small data */
	skb = netdev_alloc_skb_ip_align(vi->dev, GOOD_COPY_LEN);
	if (unlikely(!skb))
		return NULL;

	hdr = skb_vnet_hdr(skb);

	if (vi->mergeable_rx_bufs) {
		hdr_len = sizeof hdr->mhdr;
		hdr_padded_len = sizeof hdr->mhdr;
	} else {
		hdr_len = sizeof hdr->hdr;
		hdr_padded_len = sizeof(struct padded_vnet_hdr);
	}

	memcpy(hdr, p, hdr_len);

	len -= hdr_len;
	offset += hdr_padded_len;
	p += hdr_padded_len;

	copy = len;
	if (copy > skb_tailroom(skb))
		copy = skb_tailroom(skb);
	memcpy(skb_put(skb, copy), p, copy);

	len -= copy;
	offset += copy;

	if (vi->mergeable_rx_bufs) {
		if (len)
			skb_add_rx_frag(skb, 0, page, offset, len, truesize);
		else
			put_page(page);
		return skb;
	}

	/*
	 * Verify that we can indeed put this data into a skb.
	 * This is here to handle cases when the device erroneously
	 * tries to receive more than is possible.  This is usually
	 * the case of a broken device.
	 */
	if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) {
		net_dbg_ratelimited("%s: too much data\n", skb->dev->name);
		dev_kfree_skb(skb);
		return NULL;
	}
	BUG_ON(offset >= PAGE_SIZE);
	while (len) {
		unsigned int frag_size = min((unsigned)PAGE_SIZE - offset, len);
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset,
				frag_size, truesize);
		len -= frag_size;
		page = (struct page *)page->private;
		offset = 0;
	}

	if (page)
		give_pages(rq, page);

	return skb;
}
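
/* Behavioural note (illustrative, not in the original source): with
 * GOOD_COPY_LEN == 128, a 90-byte packet is copied entirely into the skb
 * linear area and its page released for reuse, while a 1500-byte packet
 * has only its first bytes copied (up to the actual tailroom, at least
 * GOOD_COPY_LEN) and the remainder attached as page fragments, avoiding a
 * large memcpy on the fast path.
 */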

static struct sk_buff *receive_small(void *buf, unsigned int len)
{
	struct sk_buff *skb = buf;

	len -= sizeof(struct virtio_net_hdr);
	skb_trim(skb, len);

	return skb;
}

static struct sk_buff *receive_big(struct net_device *dev,
				   struct receive_queue *rq,
				   void *buf,
				   unsigned int len)
{
	struct page *page = buf;
	struct sk_buff *skb = page_to_skb(rq, page, 0, len, PAGE_SIZE);

	if (unlikely(!skb))
		goto err;

	return skb;

err:
	dev->stats.rx_dropped++;
	give_pages(rq, page);
	return NULL;
}

static struct sk_buff *receive_mergeable(struct net_device *dev,
					 struct receive_queue *rq,
					 unsigned long ctx,
					 unsigned int len)
{
	void *buf = mergeable_ctx_to_buf_address(ctx);
	struct skb_vnet_hdr *hdr = buf;
	int num_buf = hdr->mhdr.num_buffers;
	struct page *page = virt_to_head_page(buf);
	int offset = buf - page_address(page);
	unsigned int truesize = max(len, mergeable_ctx_to_buf_truesize(ctx));

	struct sk_buff *head_skb = page_to_skb(rq, page, offset, len, truesize);
	struct sk_buff *curr_skb = head_skb;

	if (unlikely(!curr_skb))
		goto err_skb;
	while (--num_buf) {
		int num_skb_frags;

		ctx = (unsigned long)virtqueue_get_buf(rq->vq, &len);
		if (unlikely(!ctx)) {
			pr_debug("%s: rx error: %d buffers out of %d missing\n",
				 dev->name, num_buf, hdr->mhdr.num_buffers);
			dev->stats.rx_length_errors++;
			goto err_buf;
		}

		buf = mergeable_ctx_to_buf_address(ctx);
		page = virt_to_head_page(buf);

		num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
		if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) {
			struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC);

			if (unlikely(!nskb))
				goto err_skb;
			if (curr_skb == head_skb)
				skb_shinfo(curr_skb)->frag_list = nskb;
			else
				curr_skb->next = nskb;
			curr_skb = nskb;
			head_skb->truesize += nskb->truesize;
			num_skb_frags = 0;
		}
		truesize = max(len, mergeable_ctx_to_buf_truesize(ctx));
		if (curr_skb != head_skb) {
			head_skb->data_len += len;
			head_skb->len += len;
			head_skb->truesize += truesize;
		}
		offset = buf - page_address(page);
		if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) {
			put_page(page);
			skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1,
					     len, truesize);
		} else {
			skb_add_rx_frag(curr_skb, num_skb_frags, page,
					offset, len, truesize);
		}
	}

	ewma_add(&rq->mrg_avg_pkt_len, head_skb->len);
	return head_skb;

err_skb:
	put_page(page);
	while (--num_buf) {
		ctx = (unsigned long)virtqueue_get_buf(rq->vq, &len);
		if (unlikely(!ctx)) {
			pr_debug("%s: rx error: %d buffers missing\n",
				 dev->name, num_buf);
			dev->stats.rx_length_errors++;
			break;
		}
		page = virt_to_head_page(mergeable_ctx_to_buf_address(ctx));
		put_page(page);
	}
err_buf:
	dev->stats.rx_dropped++;
	dev_kfree_skb(head_skb);
	return NULL;
}
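
/* Flow sketch (illustrative): a 4000-byte frame received into three
 * mergeable buffers arrives with num_buffers == 3 in the first buffer's
 * header; the loop above then pulls the remaining two buffers off the
 * virtqueue and attaches them to head_skb as page frags (or to a
 * frag_list skb once MAX_SKB_FRAGS frags are in use).
 */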

static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
{
	struct virtnet_info *vi = rq->vq->vdev->priv;
	struct net_device *dev = vi->dev;
	struct virtnet_stats *stats = this_cpu_ptr(vi->stats);
	struct sk_buff *skb;
	struct skb_vnet_hdr *hdr;

	if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
		pr_debug("%s: short packet %i\n", dev->name, len);
		dev->stats.rx_length_errors++;
		if (vi->mergeable_rx_bufs) {
			unsigned long ctx = (unsigned long)buf;
			void *base = mergeable_ctx_to_buf_address(ctx);
			put_page(virt_to_head_page(base));
		} else if (vi->big_packets) {
			give_pages(rq, buf);
		} else {
			dev_kfree_skb(buf);
		}
		return;
	}

	if (vi->mergeable_rx_bufs)
		skb = receive_mergeable(dev, rq, (unsigned long)buf, len);
	else if (vi->big_packets)
		skb = receive_big(dev, rq, buf, len);
	else
		skb = receive_small(buf, len);

	if (unlikely(!skb))
		return;

	hdr = skb_vnet_hdr(skb);

	u64_stats_update_begin(&stats->rx_syncp);
	stats->rx_bytes += skb->len;
	stats->rx_packets++;
	u64_stats_update_end(&stats->rx_syncp);

	if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
		pr_debug("Needs csum!\n");
		if (!skb_partial_csum_set(skb,
					  hdr->hdr.csum_start,
					  hdr->hdr.csum_offset))
			goto frame_err;
	} else if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	skb->protocol = eth_type_trans(skb, dev);
	pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
		 ntohs(skb->protocol), skb->len, skb->pkt_type);

	if (hdr->hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
		pr_debug("GSO!\n");
		switch (hdr->hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
		case VIRTIO_NET_HDR_GSO_TCPV4:
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
			break;
		case VIRTIO_NET_HDR_GSO_UDP:
			skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
			break;
		case VIRTIO_NET_HDR_GSO_TCPV6:
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
			break;
		default:
			net_warn_ratelimited("%s: bad gso type %u.\n",
					     dev->name, hdr->hdr.gso_type);
			goto frame_err;
		}

		if (hdr->hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN)
			skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;

		skb_shinfo(skb)->gso_size = hdr->hdr.gso_size;
		if (skb_shinfo(skb)->gso_size == 0) {
			net_warn_ratelimited("%s: zero gso size.\n", dev->name);
			goto frame_err;
		}

		/* Header must be checked, and gso_segs computed. */
		skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
		skb_shinfo(skb)->gso_segs = 0;
	}

	skb_mark_napi_id(skb, &rq->napi);

	netif_receive_skb(skb);
	return;

frame_err:
	dev->stats.rx_frame_errors++;
	dev_kfree_skb(skb);
}

static int add_recvbuf_small(struct receive_queue *rq, gfp_t gfp)
{
	struct virtnet_info *vi = rq->vq->vdev->priv;
	struct sk_buff *skb;
	struct skb_vnet_hdr *hdr;
	int err;

	skb = __netdev_alloc_skb_ip_align(vi->dev, GOOD_PACKET_LEN, gfp);
	if (unlikely(!skb))
		return -ENOMEM;

	skb_put(skb, GOOD_PACKET_LEN);

	hdr = skb_vnet_hdr(skb);
	sg_set_buf(rq->sg, &hdr->hdr, sizeof hdr->hdr);

	skb_to_sgvec(skb, rq->sg + 1, 0, skb->len);

	err = virtqueue_add_inbuf(rq->vq, rq->sg, 2, skb, gfp);
	if (err < 0)
		dev_kfree_skb(skb);

	return err;
}

static int add_recvbuf_big(struct receive_queue *rq, gfp_t gfp)
{
	struct page *first, *list = NULL;
	char *p;
	int i, err, offset;

	/* page in rq->sg[MAX_SKB_FRAGS + 1] is list tail */
	for (i = MAX_SKB_FRAGS + 1; i > 1; --i) {
		first = get_a_page(rq, gfp);
		if (!first) {
			if (list)
				give_pages(rq, list);
			return -ENOMEM;
		}
		sg_set_buf(&rq->sg[i], page_address(first), PAGE_SIZE);

		/* chain new page in list head to match sg */
		first->private = (unsigned long)list;
		list = first;
	}

	first = get_a_page(rq, gfp);
	if (!first) {
		give_pages(rq, list);
		return -ENOMEM;
	}
	p = page_address(first);

	/* rq->sg[0], rq->sg[1] share the same page */
	/* a separate rq->sg[0] for virtio_net_hdr only due to QEMU bug */
	sg_set_buf(&rq->sg[0], p, sizeof(struct virtio_net_hdr));

	/* rq->sg[1] for data packet, from offset */
	offset = sizeof(struct padded_vnet_hdr);
	sg_set_buf(&rq->sg[1], p + offset, PAGE_SIZE - offset);

	/* chain first in list head */
	first->private = (unsigned long)list;
	err = virtqueue_add_inbuf(rq->vq, rq->sg, MAX_SKB_FRAGS + 2,
				  first, gfp);
	if (err < 0)
		give_pages(rq, first);

	return err;
}

static unsigned int get_mergeable_buf_len(struct ewma *avg_pkt_len)
{
	const size_t hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
	unsigned int len;

	len = hdr_len + clamp_t(unsigned int, ewma_read(avg_pkt_len),
				GOOD_PACKET_LEN, PAGE_SIZE - hdr_len);
	return ALIGN(len, MERGEABLE_BUFFER_ALIGN);
}
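
/* Worked example (illustrative, assuming a 4K page and 256-byte alignment):
 * with an EWMA average of 1400 bytes the value is first clamped up to
 * GOOD_PACKET_LEN (14 + 4 + 1500 = 1518), then hdr_len (12 bytes for the
 * mergeable header) is added to give 1530, and ALIGN(1530, 256) yields a
 * 1536-byte buffer for the next refill.
 */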

static int add_recvbuf_mergeable(struct receive_queue *rq, gfp_t gfp)
{
	struct page_frag *alloc_frag = &rq->alloc_frag;
	char *buf;
	unsigned long ctx;
	int err;
	unsigned int len, hole;

	len = get_mergeable_buf_len(&rq->mrg_avg_pkt_len);
	if (unlikely(!skb_page_frag_refill(len, alloc_frag, gfp)))
		return -ENOMEM;

	buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
	ctx = mergeable_buf_to_ctx(buf, len);
	get_page(alloc_frag->page);
	alloc_frag->offset += len;
	hole = alloc_frag->size - alloc_frag->offset;
	if (hole < len) {
		/* To avoid internal fragmentation, if there is very likely not
		 * enough space for another buffer, add the remaining space to
		 * the current buffer. This extra space is not included in
		 * the truesize stored in ctx.
		 */
		len += hole;
		alloc_frag->offset += hole;
	}

	sg_init_one(rq->sg, buf, len);
	err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, (void *)ctx, gfp);
	if (err < 0)
		put_page(virt_to_head_page(buf));

	return err;
}
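
/* Worked example (illustrative, assuming skb_page_frag_refill() hands out
 * a 32KB frag and get_mergeable_buf_len() returns 1536): the 21st buffer
 * ends at offset 32256, leaving a 512-byte hole too small for another
 * 1536-byte buffer, so the hole is folded into that buffer (len becomes
 * 2048) while the ctx still records truesize 1536; receive_mergeable()
 * takes max(len, truesize) so a packet that spills into the folded space
 * is still charged correctly.
 */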

/*
 * Returns false if we couldn't fill entirely (OOM).
 *
 * Normally run in the receive path, but can also be run from ndo_open
 * before we're receiving packets, or from refill_work which is
 * careful to disable receiving (using napi_disable).
 */
static bool try_fill_recv(struct receive_queue *rq, gfp_t gfp)
{
	struct virtnet_info *vi = rq->vq->vdev->priv;
	int err;
	bool oom;

	gfp |= __GFP_COLD;
	do {
		if (vi->mergeable_rx_bufs)
			err = add_recvbuf_mergeable(rq, gfp);
		else if (vi->big_packets)
			err = add_recvbuf_big(rq, gfp);
		else
			err = add_recvbuf_small(rq, gfp);

		oom = err == -ENOMEM;
		if (err)
			break;
	} while (rq->vq->num_free);
	virtqueue_kick(rq->vq);
	return !oom;
}

static void skb_recv_done(struct virtqueue *rvq)
{
	struct virtnet_info *vi = rvq->vdev->priv;
	struct receive_queue *rq = &vi->rq[vq2rxq(rvq)];

	/* Schedule NAPI, suppress further interrupts if successful. */
	if (napi_schedule_prep(&rq->napi)) {
		virtqueue_disable_cb(rvq);
		__napi_schedule(&rq->napi);
	}
}

static void virtnet_napi_enable(struct receive_queue *rq)
{
	napi_enable(&rq->napi);

	/* If all buffers were filled by the other side before we napi_enabled,
	 * we won't get another interrupt, so process any outstanding packets
	 * now.  virtnet_poll wants to re-enable the queue, so we disable here.
	 * We synchronize against interrupts via NAPI_STATE_SCHED */
	if (napi_schedule_prep(&rq->napi)) {
		virtqueue_disable_cb(rq->vq);
		local_bh_disable();
		__napi_schedule(&rq->napi);
		local_bh_enable();
	}
}

static void refill_work(struct work_struct *work)
{
	struct virtnet_info *vi =
		container_of(work, struct virtnet_info, refill.work);
	bool still_empty;
	int i;

	for (i = 0; i < vi->curr_queue_pairs; i++) {
		struct receive_queue *rq = &vi->rq[i];

		napi_disable(&rq->napi);
		still_empty = !try_fill_recv(rq, GFP_KERNEL);
		virtnet_napi_enable(rq);

		/* In theory, this can happen: if we don't get any buffers in
		 * we will *never* try to fill again.
		 */
		if (still_empty)
			schedule_delayed_work(&vi->refill, HZ/2);
	}
}

static int virtnet_receive(struct receive_queue *rq, int budget)
{
	struct virtnet_info *vi = rq->vq->vdev->priv;
	unsigned int len, received = 0;
	void *buf;

	while (received < budget &&
	       (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
		receive_buf(rq, buf, len);
		received++;
	}

	if (rq->vq->num_free > virtqueue_get_vring_size(rq->vq) / 2) {
		if (!try_fill_recv(rq, GFP_ATOMIC))
			schedule_delayed_work(&vi->refill, 0);
	}

	return received;
}

static int virtnet_poll(struct napi_struct *napi, int budget)
{
	struct receive_queue *rq =
		container_of(napi, struct receive_queue, napi);
	unsigned int r, received = 0;

again:
	received += virtnet_receive(rq, budget - received);

	/* Out of packets? */
	if (received < budget) {
		r = virtqueue_enable_cb_prepare(rq->vq);
		napi_complete(napi);
		if (unlikely(virtqueue_poll(rq->vq, r)) &&
		    napi_schedule_prep(napi)) {
			virtqueue_disable_cb(rq->vq);
			__napi_schedule(napi);
			goto again;
		}
	}

	return received;
}

#ifdef CONFIG_NET_RX_BUSY_POLL
/* must be called with local_bh_disable()d */
static int virtnet_busy_poll(struct napi_struct *napi)
{
	struct receive_queue *rq =
		container_of(napi, struct receive_queue, napi);
	struct virtnet_info *vi = rq->vq->vdev->priv;
	int r, received = 0, budget = 4;

	if (!(vi->status & VIRTIO_NET_S_LINK_UP))
		return LL_FLUSH_FAILED;

	if (!napi_schedule_prep(napi))
		return LL_FLUSH_BUSY;

	virtqueue_disable_cb(rq->vq);

again:
	received += virtnet_receive(rq, budget);

	r = virtqueue_enable_cb_prepare(rq->vq);
	clear_bit(NAPI_STATE_SCHED, &napi->state);
	if (unlikely(virtqueue_poll(rq->vq, r)) &&
	    napi_schedule_prep(napi)) {
		virtqueue_disable_cb(rq->vq);
		if (received < budget) {
			budget -= received;
			goto again;
		} else {
			__napi_schedule(napi);
		}
	}

	return received;
}
#endif	/* CONFIG_NET_RX_BUSY_POLL */

Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 806 | static int virtnet_open(struct net_device *dev) |
| 807 | { |
| 808 | struct virtnet_info *vi = netdev_priv(dev); |
| 809 | int i; |
| 810 | |
Jason Wang | e416662 | 2013-05-21 20:03:58 +0000 | [diff] [blame] | 811 | for (i = 0; i < vi->max_queue_pairs; i++) { |
| 812 | if (i < vi->curr_queue_pairs) |
| 813 | /* Make sure we have some buffers: if oom use wq. */ |
| 814 | if (!try_fill_recv(&vi->rq[i], GFP_KERNEL)) |
| 815 | schedule_delayed_work(&vi->refill, 0); |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 816 | virtnet_napi_enable(&vi->rq[i]); |
| 817 | } |
| 818 | |
| 819 | return 0; |
| 820 | } |
| 821 | |
Linus Torvalds | b7dfde9 | 2012-12-20 08:37:04 -0800 | [diff] [blame] | 822 | static void free_old_xmit_skbs(struct send_queue *sq) |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 823 | { |
| 824 | struct sk_buff *skb; |
Michael S. Tsirkin | 6ee57bc | 2012-10-16 23:56:14 +1030 | [diff] [blame] | 825 | unsigned int len; |
Jason Wang | e9d7417 | 2012-12-07 07:04:55 +0000 | [diff] [blame] | 826 | struct virtnet_info *vi = sq->vq->vdev->priv; |
Eric Dumazet | 58472a7 | 2012-02-13 06:53:41 +0000 | [diff] [blame] | 827 | struct virtnet_stats *stats = this_cpu_ptr(vi->stats); |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 828 | |
Jason Wang | e9d7417 | 2012-12-07 07:04:55 +0000 | [diff] [blame] | 829 | while ((skb = virtqueue_get_buf(sq->vq, &len)) != NULL) { |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 830 | pr_debug("Sent skb %p\n", skb); |
stephen hemminger | 3fa2a1d | 2011-06-15 06:36:29 +0000 | [diff] [blame] | 831 | |
Eric Dumazet | 83a2705 | 2012-06-05 22:35:24 +0000 | [diff] [blame] | 832 | u64_stats_update_begin(&stats->tx_syncp); |
stephen hemminger | 3fa2a1d | 2011-06-15 06:36:29 +0000 | [diff] [blame] | 833 | stats->tx_bytes += skb->len; |
| 834 | stats->tx_packets++; |
Eric Dumazet | 83a2705 | 2012-06-05 22:35:24 +0000 | [diff] [blame] | 835 | u64_stats_update_end(&stats->tx_syncp); |
stephen hemminger | 3fa2a1d | 2011-06-15 06:36:29 +0000 | [diff] [blame] | 836 | |
Eric Dumazet | ed79bab | 2009-10-14 14:36:43 +0000 | [diff] [blame] | 837 | dev_kfree_skb_any(skb); |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 838 | } |
| 839 | } |
| 840 | |
Jason Wang | e9d7417 | 2012-12-07 07:04:55 +0000 | [diff] [blame] | 841 | static int xmit_skb(struct send_queue *sq, struct sk_buff *skb) |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 842 | { |
Michael S. Tsirkin | e7428e9 | 2013-07-25 10:20:23 +0930 | [diff] [blame] | 843 | struct skb_vnet_hdr *hdr; |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 844 | const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest; |
Jason Wang | e9d7417 | 2012-12-07 07:04:55 +0000 | [diff] [blame] | 845 | struct virtnet_info *vi = sq->vq->vdev->priv; |
Michael S. Tsirkin | 7bedc7d | 2012-10-16 23:56:14 +1030 | [diff] [blame] | 846 | unsigned num_sg; |
Michael S. Tsirkin | e7428e9 | 2013-07-25 10:20:23 +0930 | [diff] [blame] | 847 | unsigned hdr_len; |
| 848 | bool can_push; |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 849 | |
Johannes Berg | e174961 | 2008-10-27 15:59:26 -0700 | [diff] [blame] | 850 | pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest); |
Michael S. Tsirkin | e7428e9 | 2013-07-25 10:20:23 +0930 | [diff] [blame] | 851 | if (vi->mergeable_rx_bufs) |
| 852 | hdr_len = sizeof hdr->mhdr; |
| 853 | else |
| 854 | hdr_len = sizeof hdr->hdr; |
| 855 | |
| 856 | can_push = vi->any_header_sg && |
| 857 | !((unsigned long)skb->data & (__alignof__(*hdr) - 1)) && |
| 858 | !skb_header_cloned(skb) && skb_headroom(skb) >= hdr_len; |
| 859 | /* Even if we can, don't push here yet as this would skew |
| 860 | * csum_start offset below. */ |
| 861 | if (can_push) |
| 862 | hdr = (struct skb_vnet_hdr *)(skb->data - hdr_len); |
| 863 | else |
| 864 | hdr = skb_vnet_hdr(skb); |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 865 | |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 866 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
Rusty Russell | b3f2469 | 2009-09-24 09:59:19 -0600 | [diff] [blame] | 867 | hdr->hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM; |
Michał Mirosław | 55508d6 | 2010-12-14 15:24:08 +0000 | [diff] [blame] | 868 | hdr->hdr.csum_start = skb_checksum_start_offset(skb); |
Rusty Russell | b3f2469 | 2009-09-24 09:59:19 -0600 | [diff] [blame] | 869 | hdr->hdr.csum_offset = skb->csum_offset; |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 870 | } else { |
Rusty Russell | b3f2469 | 2009-09-24 09:59:19 -0600 | [diff] [blame] | 871 | hdr->hdr.flags = 0; |
| 872 | hdr->hdr.csum_offset = hdr->hdr.csum_start = 0; |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 873 | } |
| 874 | |
| 875 | if (skb_is_gso(skb)) { |
Rusty Russell | b3f2469 | 2009-09-24 09:59:19 -0600 | [diff] [blame] | 876 | hdr->hdr.hdr_len = skb_headlen(skb); |
| 877 | hdr->hdr.gso_size = skb_shinfo(skb)->gso_size; |
Rusty Russell | 34a4857 | 2008-02-04 23:50:02 -0500 | [diff] [blame] | 878 | if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) |
Rusty Russell | b3f2469 | 2009-09-24 09:59:19 -0600 | [diff] [blame] | 879 | hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4; |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 880 | else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) |
Rusty Russell | b3f2469 | 2009-09-24 09:59:19 -0600 | [diff] [blame] | 881 | hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6; |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 882 | else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP) |
Rusty Russell | b3f2469 | 2009-09-24 09:59:19 -0600 | [diff] [blame] | 883 | hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP; |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 884 | else |
| 885 | BUG(); |
Rusty Russell | 34a4857 | 2008-02-04 23:50:02 -0500 | [diff] [blame] | 886 | if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN) |
Rusty Russell | b3f2469 | 2009-09-24 09:59:19 -0600 | [diff] [blame] | 887 | hdr->hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN; |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 888 | } else { |
Rusty Russell | b3f2469 | 2009-09-24 09:59:19 -0600 | [diff] [blame] | 889 | hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE; |
| 890 | hdr->hdr.gso_size = hdr->hdr.hdr_len = 0; |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 891 | } |
| 892 | |
Mark McLoughlin | 3f2c31d | 2008-11-16 22:41:34 -0800 | [diff] [blame] | 893 | if (vi->mergeable_rx_bufs) |
Michael S. Tsirkin | e7428e9 | 2013-07-25 10:20:23 +0930 | [diff] [blame] | 894 | hdr->mhdr.num_buffers = 0; |
Mark McLoughlin | 3f2c31d | 2008-11-16 22:41:34 -0800 | [diff] [blame] | 895 | |
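| | /* Two scatterlist layouts: with a pushed header, one skb_to_sgvec()
| | * mapping covers header and data together; otherwise the header
| | * occupies its own sg entry ahead of the data. */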
Michael S. Tsirkin | e7428e9 | 2013-07-25 10:20:23 +0930 | [diff] [blame] | 896 | if (can_push) { |
| 897 | __skb_push(skb, hdr_len); |
| 898 | num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len); |
| 899 | /* Pull header back to avoid skew in tx bytes calculations. */ |
| 900 | __skb_pull(skb, hdr_len); |
| 901 | } else { |
| 902 | sg_set_buf(sq->sg, hdr, hdr_len); |
| 903 | num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len) + 1; |
| 904 | } |
Rusty Russell | 9dc7b9e | 2013-03-20 15:44:28 +1030 | [diff] [blame] | 905 | return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC); |
Rusty Russell | 11a3a15 | 2008-05-26 17:48:13 +1000 | [diff] [blame] | 906 | } |
| 907 | |
Stephen Hemminger | 424efe9 | 2009-08-31 19:50:51 +0000 | [diff] [blame] | 908 | static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev) |
Rusty Russell | 99ffc69 | 2008-05-02 21:50:46 -0500 | [diff] [blame] | 909 | { |
| 910 | struct virtnet_info *vi = netdev_priv(dev); |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 911 | int qnum = skb_get_queue_mapping(skb); |
| 912 | struct send_queue *sq = &vi->sq[qnum]; |
Rusty Russell | 9ed4cb0 | 2012-10-16 23:56:14 +1030 | [diff] [blame] | 913 | int err; |
Rusty Russell | 2cb9c6b | 2008-02-04 23:50:07 -0500 | [diff] [blame] | 914 | |
Rusty Russell | 2cb9c6b | 2008-02-04 23:50:07 -0500 | [diff] [blame] | 915 | /* Free up any pending old buffers before queueing new ones. */ |
Jason Wang | e9d7417 | 2012-12-07 07:04:55 +0000 | [diff] [blame] | 916 | free_old_xmit_skbs(sq); |
Rusty Russell | 2cb9c6b | 2008-02-04 23:50:07 -0500 | [diff] [blame] | 917 | |
Michael S. Tsirkin | 03f191b | 2009-10-28 04:03:38 -0700 | [diff] [blame] | 918 | /* Try to transmit */ |
Linus Torvalds | b7dfde9 | 2012-12-20 08:37:04 -0800 | [diff] [blame] | 919 | err = xmit_skb(sq, skb); |
Rusty Russell | 48925e3 | 2009-09-24 09:59:20 -0600 | [diff] [blame] | 920 | |
Rusty Russell | 9ed4cb0 | 2012-10-16 23:56:14 +1030 | [diff] [blame] | 921 | /* This should not happen! */ |
Jason Wang | 681daee2 | 2014-03-26 13:03:00 +0800 | [diff] [blame] | 922 | if (unlikely(err)) { |
Rusty Russell | 9ed4cb0 | 2012-10-16 23:56:14 +1030 | [diff] [blame] | 923 | dev->stats.tx_fifo_errors++; |
| 924 | if (net_ratelimit()) |
| 925 | dev_warn(&dev->dev, |
Linus Torvalds | b7dfde9 | 2012-12-20 08:37:04 -0800 | [diff] [blame] | 926 | "Unexpected TXQ (%d) queue failure: %d\n", qnum, err); |
Rusty Russell | 58eba97d | 2010-07-02 16:34:01 +0000 | [diff] [blame] | 927 | dev->stats.tx_dropped++; |
Eric W. Biederman | 85e9452 | 2014-03-15 18:43:33 -0700 | [diff] [blame] | 928 | dev_kfree_skb_any(skb); |
Rusty Russell | 58eba97d | 2010-07-02 16:34:01 +0000 | [diff] [blame] | 929 | return NETDEV_TX_OK; |
Rusty Russell | 99ffc69 | 2008-05-02 21:50:46 -0500 | [diff] [blame] | 930 | } |
Jason Wang | 681daee2 | 2014-03-26 13:03:00 +0800 | [diff] [blame] | 931 | virtqueue_kick(sq->vq); |
Michael S. Tsirkin | 03f191b | 2009-10-28 04:03:38 -0700 | [diff] [blame] | 932 | |
Rusty Russell | 48925e3 | 2009-09-24 09:59:20 -0600 | [diff] [blame] | 933 | /* Don't wait up for transmitted skbs to be freed. */ |
| 934 | skb_orphan(skb); |
| 935 | nf_reset(skb); |
Rusty Russell | 99ffc69 | 2008-05-02 21:50:46 -0500 | [diff] [blame] | 936 | |
Rusty Russell | 48925e3 | 2009-09-24 09:59:20 -0600 | [diff] [blame] | 937 | /* Apparently nice girls don't return TX_BUSY; stop the queue |
| 938 | * before it gets out of hand. Naturally, this wastes entries. */ |
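| | /* 2+MAX_SKB_FRAGS is the worst case for a single skb: one descriptor
| | * for the virtio header, one for the linear area, one per fragment. */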
Linus Torvalds | b7dfde9 | 2012-12-20 08:37:04 -0800 | [diff] [blame] | 939 | if (sq->vq->num_free < 2+MAX_SKB_FRAGS) { |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 940 | netif_stop_subqueue(dev, qnum); |
Jason Wang | e9d7417 | 2012-12-07 07:04:55 +0000 | [diff] [blame] | 941 | if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) { |
Rusty Russell | 48925e3 | 2009-09-24 09:59:20 -0600 | [diff] [blame] | 942 | /* More just got used, free them then recheck. */ |
Linus Torvalds | b7dfde9 | 2012-12-20 08:37:04 -0800 | [diff] [blame] | 943 | free_old_xmit_skbs(sq); |
| 944 | if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) { |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 945 | netif_start_subqueue(dev, qnum); |
Jason Wang | e9d7417 | 2012-12-07 07:04:55 +0000 | [diff] [blame] | 946 | virtqueue_disable_cb(sq->vq); |
Rusty Russell | 48925e3 | 2009-09-24 09:59:20 -0600 | [diff] [blame] | 947 | } |
| 948 | } |
Rusty Russell | 99ffc69 | 2008-05-02 21:50:46 -0500 | [diff] [blame] | 949 | } |
Rusty Russell | 48925e3 | 2009-09-24 09:59:20 -0600 | [diff] [blame] | 950 | |
| 951 | return NETDEV_TX_OK; |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 952 | } |
| 953 | |
Amos Kong | 40cbfc3 | 2013-01-21 01:17:21 +0000 | [diff] [blame] | 954 | /* |
| 955 | * Send command via the control virtqueue and check status. Commands |
| 956 | * supported by the hypervisor, as indicated by feature bits, should |
stephen hemminger | 788a8b6 | 2013-12-09 16:18:45 -0800 | [diff] [blame] | 957 | * never fail unless improperly formatted. |
Amos Kong | 40cbfc3 | 2013-01-21 01:17:21 +0000 | [diff] [blame] | 958 | */ |
| 959 | static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd, |
stephen hemminger | d24bae3 | 2013-12-09 16:17:40 -0800 | [diff] [blame] | 960 | struct scatterlist *out) |
Amos Kong | 40cbfc3 | 2013-01-21 01:17:21 +0000 | [diff] [blame] | 961 | { |
Rusty Russell | f7bc959 | 2013-03-20 15:44:28 +1030 | [diff] [blame] | 962 | struct scatterlist *sgs[4], hdr, stat; |
Amos Kong | 40cbfc3 | 2013-01-21 01:17:21 +0000 | [diff] [blame] | 963 | struct virtio_net_ctrl_hdr ctrl; |
| 964 | virtio_net_ctrl_ack status = ~0; |
stephen hemminger | d24bae3 | 2013-12-09 16:17:40 -0800 | [diff] [blame] | 965 | unsigned out_num = 0, tmp; |
Amos Kong | 40cbfc3 | 2013-01-21 01:17:21 +0000 | [diff] [blame] | 966 | |
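| | /* sgs layout: [0] the command header (out), [1] optional command
| | * data (out), last entry the status byte the device writes (in). */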
| 967 | /* Caller should know better */ |
Rusty Russell | f7bc959 | 2013-03-20 15:44:28 +1030 | [diff] [blame] | 968 | BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)); |
Amos Kong | 40cbfc3 | 2013-01-21 01:17:21 +0000 | [diff] [blame] | 969 | |
| 970 | ctrl.class = class; |
| 971 | ctrl.cmd = cmd; |
Rusty Russell | f7bc959 | 2013-03-20 15:44:28 +1030 | [diff] [blame] | 972 | /* Add header */ |
| 973 | sg_init_one(&hdr, &ctrl, sizeof(ctrl)); |
| 974 | sgs[out_num++] = &hdr; |
Amos Kong | 40cbfc3 | 2013-01-21 01:17:21 +0000 | [diff] [blame] | 975 | |
Rusty Russell | f7bc959 | 2013-03-20 15:44:28 +1030 | [diff] [blame] | 976 | if (out) |
| 977 | sgs[out_num++] = out; |
Amos Kong | 40cbfc3 | 2013-01-21 01:17:21 +0000 | [diff] [blame] | 978 | |
Rusty Russell | f7bc959 | 2013-03-20 15:44:28 +1030 | [diff] [blame] | 979 | /* Add return status. */ |
| 980 | sg_init_one(&stat, &status, sizeof(status)); |
stephen hemminger | d24bae3 | 2013-12-09 16:17:40 -0800 | [diff] [blame] | 981 | sgs[out_num] = &stat; |
Amos Kong | 40cbfc3 | 2013-01-21 01:17:21 +0000 | [diff] [blame] | 982 | |
stephen hemminger | d24bae3 | 2013-12-09 16:17:40 -0800 | [diff] [blame] | 983 | BUG_ON(out_num + 1 > ARRAY_SIZE(sgs)); |
Rusty Russell | a7c5814 | 2014-03-13 11:23:39 +1030 | [diff] [blame] | 984 | virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC); |
Amos Kong | 40cbfc3 | 2013-01-21 01:17:21 +0000 | [diff] [blame] | 985 | |
Heinz Graalfs | 6797590 | 2013-10-29 09:40:02 +1030 | [diff] [blame] | 986 | if (unlikely(!virtqueue_kick(vi->cvq))) |
| 987 | return status == VIRTIO_NET_OK; |
Amos Kong | 40cbfc3 | 2013-01-21 01:17:21 +0000 | [diff] [blame] | 988 | |
| 989 | /* Spin for a response; the kick causes an ioport write, trapping
| 990 | * into the hypervisor, so the request should be handled immediately. |
| 991 | */ |
Heinz Graalfs | 047b9b9 | 2013-10-29 09:40:47 +1030 | [diff] [blame] | 992 | while (!virtqueue_get_buf(vi->cvq, &tmp) && |
| 993 | !virtqueue_is_broken(vi->cvq)) |
Amos Kong | 40cbfc3 | 2013-01-21 01:17:21 +0000 | [diff] [blame] | 994 | cpu_relax(); |
| 995 | |
| 996 | return status == VIRTIO_NET_OK; |
| 997 | } |
| 998 | |
Alex Williamson | 9c46f6d | 2009-02-04 16:36:34 -0800 | [diff] [blame] | 999 | static int virtnet_set_mac_address(struct net_device *dev, void *p) |
| 1000 | { |
| 1001 | struct virtnet_info *vi = netdev_priv(dev); |
| 1002 | struct virtio_device *vdev = vi->vdev; |
Jiri Pirko | f2f2c8b | 2012-06-29 05:10:06 +0000 | [diff] [blame] | 1003 | int ret; |
Amos Kong | 7e58d5a | 2013-01-21 01:17:23 +0000 | [diff] [blame] | 1004 | struct sockaddr *addr = p; |
| 1005 | struct scatterlist sg; |
Alex Williamson | 9c46f6d | 2009-02-04 16:36:34 -0800 | [diff] [blame] | 1006 | |
Amos Kong | 7e58d5a | 2013-01-21 01:17:23 +0000 | [diff] [blame] | 1007 | ret = eth_prepare_mac_addr_change(dev, p); |
Jiri Pirko | f2f2c8b | 2012-06-29 05:10:06 +0000 | [diff] [blame] | 1008 | if (ret) |
| 1009 | return ret; |
Alex Williamson | 9c46f6d | 2009-02-04 16:36:34 -0800 | [diff] [blame] | 1010 | |
Amos Kong | 7e58d5a | 2013-01-21 01:17:23 +0000 | [diff] [blame] | 1011 | if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) { |
| 1012 | sg_init_one(&sg, addr->sa_data, dev->addr_len); |
| 1013 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC, |
stephen hemminger | d24bae3 | 2013-12-09 16:17:40 -0800 | [diff] [blame] | 1014 | VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) { |
Amos Kong | 7e58d5a | 2013-01-21 01:17:23 +0000 | [diff] [blame] | 1015 | dev_warn(&vdev->dev, |
| 1016 | "Failed to set mac address by vq command.\n"); |
| 1017 | return -EINVAL; |
| 1018 | } |
| 1019 | } else if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) { |
Rusty Russell | 855e0c5 | 2013-10-14 18:11:51 +1030 | [diff] [blame] | 1020 | unsigned int i; |
| 1021 | |
| 1022 | /* Naturally, this has an atomicity problem. */ |
| 1023 | for (i = 0; i < dev->addr_len; i++) |
| 1024 | virtio_cwrite8(vdev, |
| 1025 | offsetof(struct virtio_net_config, mac) + |
| 1026 | i, addr->sa_data[i]); |
Amos Kong | 7e58d5a | 2013-01-21 01:17:23 +0000 | [diff] [blame] | 1027 | } |
| 1028 | |
| 1029 | eth_commit_mac_addr_change(dev, p); |
Alex Williamson | 9c46f6d | 2009-02-04 16:36:34 -0800 | [diff] [blame] | 1030 | |
| 1031 | return 0; |
| 1032 | } |
| 1033 | |
stephen hemminger | 3fa2a1d | 2011-06-15 06:36:29 +0000 | [diff] [blame] | 1034 | static struct rtnl_link_stats64 *virtnet_stats(struct net_device *dev, |
| 1035 | struct rtnl_link_stats64 *tot) |
| 1036 | { |
| 1037 | struct virtnet_info *vi = netdev_priv(dev); |
| 1038 | int cpu; |
| 1039 | unsigned int start; |
| 1040 | |
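| | /* Sum the per-cpu counters; each fetch spins under the seqcount
| | * until it sees a snapshot no writer has torn. */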
| 1041 | for_each_possible_cpu(cpu) { |
Eric Dumazet | 58472a7 | 2012-02-13 06:53:41 +0000 | [diff] [blame] | 1042 | struct virtnet_stats *stats = per_cpu_ptr(vi->stats, cpu); |
stephen hemminger | 3fa2a1d | 2011-06-15 06:36:29 +0000 | [diff] [blame] | 1043 | u64 tpackets, tbytes, rpackets, rbytes; |
| 1044 | |
| 1045 | do { |
Eric W. Biederman | 57a7744 | 2014-03-13 21:26:42 -0700 | [diff] [blame] | 1046 | start = u64_stats_fetch_begin_irq(&stats->tx_syncp); |
stephen hemminger | 3fa2a1d | 2011-06-15 06:36:29 +0000 | [diff] [blame] | 1047 | tpackets = stats->tx_packets; |
| 1048 | tbytes = stats->tx_bytes; |
Eric W. Biederman | 57a7744 | 2014-03-13 21:26:42 -0700 | [diff] [blame] | 1049 | } while (u64_stats_fetch_retry_irq(&stats->tx_syncp, start)); |
Eric Dumazet | 83a2705 | 2012-06-05 22:35:24 +0000 | [diff] [blame] | 1050 | |
| 1051 | do { |
Eric W. Biederman | 57a7744 | 2014-03-13 21:26:42 -0700 | [diff] [blame] | 1052 | start = u64_stats_fetch_begin_irq(&stats->rx_syncp); |
stephen hemminger | 3fa2a1d | 2011-06-15 06:36:29 +0000 | [diff] [blame] | 1053 | rpackets = stats->rx_packets; |
| 1054 | rbytes = stats->rx_bytes; |
Eric W. Biederman | 57a7744 | 2014-03-13 21:26:42 -0700 | [diff] [blame] | 1055 | } while (u64_stats_fetch_retry_irq(&stats->rx_syncp, start)); |
stephen hemminger | 3fa2a1d | 2011-06-15 06:36:29 +0000 | [diff] [blame] | 1056 | |
| 1057 | tot->rx_packets += rpackets; |
| 1058 | tot->tx_packets += tpackets; |
| 1059 | tot->rx_bytes += rbytes; |
| 1060 | tot->tx_bytes += tbytes; |
| 1061 | } |
| 1062 | |
| 1063 | tot->tx_dropped = dev->stats.tx_dropped; |
Rick Jones | 021ac8d | 2011-11-21 09:28:17 +0000 | [diff] [blame] | 1064 | tot->tx_fifo_errors = dev->stats.tx_fifo_errors; |
stephen hemminger | 3fa2a1d | 2011-06-15 06:36:29 +0000 | [diff] [blame] | 1065 | tot->rx_dropped = dev->stats.rx_dropped; |
| 1066 | tot->rx_length_errors = dev->stats.rx_length_errors; |
| 1067 | tot->rx_frame_errors = dev->stats.rx_frame_errors; |
| 1068 | |
| 1069 | return tot; |
| 1070 | } |
| 1071 | |
Amit Shah | da74e89 | 2008-02-29 16:24:50 +0530 | [diff] [blame] | 1072 | #ifdef CONFIG_NET_POLL_CONTROLLER |
| 1073 | static void virtnet_netpoll(struct net_device *dev) |
| 1074 | { |
| 1075 | struct virtnet_info *vi = netdev_priv(dev); |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1076 | int i; |
Amit Shah | da74e89 | 2008-02-29 16:24:50 +0530 | [diff] [blame] | 1077 | |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1078 | for (i = 0; i < vi->curr_queue_pairs; i++) |
| 1079 | napi_schedule(&vi->rq[i].napi); |
Amit Shah | da74e89 | 2008-02-29 16:24:50 +0530 | [diff] [blame] | 1080 | } |
| 1081 | #endif |
| 1082 | |
Jason Wang | 586d17c | 2012-04-11 20:43:52 +0000 | [diff] [blame] | 1083 | static void virtnet_ack_link_announce(struct virtnet_info *vi) |
| 1084 | { |
| 1085 | rtnl_lock(); |
| 1086 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE, |
stephen hemminger | d24bae3 | 2013-12-09 16:17:40 -0800 | [diff] [blame] | 1087 | VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL)) |
Jason Wang | 586d17c | 2012-04-11 20:43:52 +0000 | [diff] [blame] | 1088 | dev_warn(&vi->dev->dev, "Failed to ack link announce.\n"); |
| 1089 | rtnl_unlock(); |
| 1090 | } |
| 1091 | |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1092 | static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs) |
| 1093 | { |
| 1094 | struct scatterlist sg; |
| 1095 | struct virtio_net_ctrl_mq s; |
| 1096 | struct net_device *dev = vi->dev; |
| 1097 | |
| 1098 | if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ)) |
| 1099 | return 0; |
| 1100 | |
| 1101 | s.virtqueue_pairs = queue_pairs; |
| 1102 | sg_init_one(&sg, &s, sizeof(s)); |
| 1103 | |
| 1104 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ, |
stephen hemminger | d24bae3 | 2013-12-09 16:17:40 -0800 | [diff] [blame] | 1105 | VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) { |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1106 | dev_warn(&dev->dev, "Failed to set the number of queue pairs to %d\n",
| 1107 | queue_pairs); |
| 1108 | return -EINVAL; |
Sasha Levin | 55257d7 | 2013-04-29 12:00:08 +0930 | [diff] [blame] | 1109 | } else { |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1110 | vi->curr_queue_pairs = queue_pairs; |
Jason Wang | 35ed159 | 2013-10-15 11:18:59 +0800 | [diff] [blame] | 1111 | /* virtnet_open() will refill when the device comes up. */
| 1112 | if (dev->flags & IFF_UP) |
| 1113 | schedule_delayed_work(&vi->refill, 0); |
Sasha Levin | 55257d7 | 2013-04-29 12:00:08 +0930 | [diff] [blame] | 1114 | } |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1115 | |
| 1116 | return 0; |
| 1117 | } |
| 1118 | |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1119 | static int virtnet_close(struct net_device *dev) |
| 1120 | { |
| 1121 | struct virtnet_info *vi = netdev_priv(dev); |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1122 | int i; |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1123 | |
Rusty Russell | b2baed6 | 2011-12-29 00:42:38 +0000 | [diff] [blame] | 1124 | /* Make sure refill_work doesn't re-enable napi! */ |
| 1125 | cancel_delayed_work_sync(&vi->refill); |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1126 | |
| 1127 | for (i = 0; i < vi->max_queue_pairs; i++) |
| 1128 | napi_disable(&vi->rq[i].napi); |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1129 | |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1130 | return 0; |
| 1131 | } |
| 1132 | |
Alex Williamson | 2af7698 | 2009-02-04 09:02:40 +0000 | [diff] [blame] | 1133 | static void virtnet_set_rx_mode(struct net_device *dev) |
| 1134 | { |
| 1135 | struct virtnet_info *vi = netdev_priv(dev); |
Alex Williamson | f565a7c | 2009-02-04 09:02:45 +0000 | [diff] [blame] | 1136 | struct scatterlist sg[2]; |
Alex Williamson | 2af7698 | 2009-02-04 09:02:40 +0000 | [diff] [blame] | 1137 | u8 promisc, allmulti; |
Alex Williamson | f565a7c | 2009-02-04 09:02:45 +0000 | [diff] [blame] | 1138 | struct virtio_net_ctrl_mac *mac_data; |
Jiri Pirko | ccffad25 | 2009-05-22 23:22:17 +0000 | [diff] [blame] | 1139 | struct netdev_hw_addr *ha; |
Jiri Pirko | 32e7bfc | 2010-01-25 13:36:10 -0800 | [diff] [blame] | 1140 | int uc_count; |
Jiri Pirko | 4cd24ea | 2010-02-08 04:30:35 +0000 | [diff] [blame] | 1141 | int mc_count; |
Alex Williamson | f565a7c | 2009-02-04 09:02:45 +0000 | [diff] [blame] | 1142 | void *buf; |
| 1143 | int i; |
Alex Williamson | 2af7698 | 2009-02-04 09:02:40 +0000 | [diff] [blame] | 1144 | |
stephen hemminger | 788a8b6 | 2013-12-09 16:18:45 -0800 | [diff] [blame] | 1145 | /* We can't dynamically set ndo_set_rx_mode, so return gracefully */ |
Alex Williamson | 2af7698 | 2009-02-04 09:02:40 +0000 | [diff] [blame] | 1146 | if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX)) |
| 1147 | return; |
| 1148 | |
Alex Williamson | f565a7c | 2009-02-04 09:02:45 +0000 | [diff] [blame] | 1149 | promisc = ((dev->flags & IFF_PROMISC) != 0); |
| 1150 | allmulti = ((dev->flags & IFF_ALLMULTI) != 0); |
Alex Williamson | 2af7698 | 2009-02-04 09:02:40 +0000 | [diff] [blame] | 1151 | |
Alex Williamson | 23e258e | 2009-05-01 17:27:56 +0000 | [diff] [blame] | 1152 | sg_init_one(sg, &promisc, sizeof(promisc)); |
Alex Williamson | 2af7698 | 2009-02-04 09:02:40 +0000 | [diff] [blame] | 1153 | |
| 1154 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, |
stephen hemminger | d24bae3 | 2013-12-09 16:17:40 -0800 | [diff] [blame] | 1155 | VIRTIO_NET_CTRL_RX_PROMISC, sg)) |
Alex Williamson | 2af7698 | 2009-02-04 09:02:40 +0000 | [diff] [blame] | 1156 | dev_warn(&dev->dev, "Failed to %sable promisc mode.\n", |
| 1157 | promisc ? "en" : "dis"); |
| 1158 | |
Alex Williamson | 23e258e | 2009-05-01 17:27:56 +0000 | [diff] [blame] | 1159 | sg_init_one(sg, &allmulti, sizeof(allmulti)); |
Alex Williamson | 2af7698 | 2009-02-04 09:02:40 +0000 | [diff] [blame] | 1160 | |
| 1161 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, |
stephen hemminger | d24bae3 | 2013-12-09 16:17:40 -0800 | [diff] [blame] | 1162 | VIRTIO_NET_CTRL_RX_ALLMULTI, sg)) |
Alex Williamson | 2af7698 | 2009-02-04 09:02:40 +0000 | [diff] [blame] | 1163 | dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n", |
| 1164 | allmulti ? "en" : "dis"); |
Alex Williamson | f565a7c | 2009-02-04 09:02:45 +0000 | [diff] [blame] | 1165 | |
Jiri Pirko | 32e7bfc | 2010-01-25 13:36:10 -0800 | [diff] [blame] | 1166 | uc_count = netdev_uc_count(dev); |
Jiri Pirko | 4cd24ea | 2010-02-08 04:30:35 +0000 | [diff] [blame] | 1167 | mc_count = netdev_mc_count(dev); |
Alex Williamson | f565a7c | 2009-02-04 09:02:45 +0000 | [diff] [blame] | 1168 | /* MAC filter - use one buffer for both lists */ |
Jiri Pirko | 4cd24ea | 2010-02-08 04:30:35 +0000 | [diff] [blame] | 1169 | buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) + |
| 1170 | (2 * sizeof(mac_data->entries)), GFP_ATOMIC); |
| 1171 | mac_data = buf; |
Joe Perches | e68ed8f | 2013-02-03 17:28:15 +0000 | [diff] [blame] | 1172 | if (!buf) |
Alex Williamson | f565a7c | 2009-02-04 09:02:45 +0000 | [diff] [blame] | 1173 | return; |
Alex Williamson | f565a7c | 2009-02-04 09:02:45 +0000 | [diff] [blame] | 1174 | |
Alex Williamson | 23e258e | 2009-05-01 17:27:56 +0000 | [diff] [blame] | 1175 | sg_init_table(sg, 2); |
| 1176 | |
Alex Williamson | f565a7c | 2009-02-04 09:02:45 +0000 | [diff] [blame] | 1177 | /* Store the unicast list and count in the front of the buffer */ |
Jiri Pirko | 32e7bfc | 2010-01-25 13:36:10 -0800 | [diff] [blame] | 1178 | mac_data->entries = uc_count; |
Jiri Pirko | ccffad25 | 2009-05-22 23:22:17 +0000 | [diff] [blame] | 1179 | i = 0; |
Jiri Pirko | 32e7bfc | 2010-01-25 13:36:10 -0800 | [diff] [blame] | 1180 | netdev_for_each_uc_addr(ha, dev) |
Jiri Pirko | ccffad25 | 2009-05-22 23:22:17 +0000 | [diff] [blame] | 1181 | memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN); |
Alex Williamson | f565a7c | 2009-02-04 09:02:45 +0000 | [diff] [blame] | 1182 | |
| 1183 | sg_set_buf(&sg[0], mac_data, |
Jiri Pirko | 32e7bfc | 2010-01-25 13:36:10 -0800 | [diff] [blame] | 1184 | sizeof(mac_data->entries) + (uc_count * ETH_ALEN)); |
Alex Williamson | f565a7c | 2009-02-04 09:02:45 +0000 | [diff] [blame] | 1185 | |
| 1186 | /* multicast list and count fill the end */ |
Jiri Pirko | 32e7bfc | 2010-01-25 13:36:10 -0800 | [diff] [blame] | 1187 | mac_data = (void *)&mac_data->macs[uc_count][0]; |
Alex Williamson | f565a7c | 2009-02-04 09:02:45 +0000 | [diff] [blame] | 1188 | |
Jiri Pirko | 4cd24ea | 2010-02-08 04:30:35 +0000 | [diff] [blame] | 1189 | mac_data->entries = mc_count; |
Jiri Pirko | 567ec87 | 2010-02-23 23:17:07 +0000 | [diff] [blame] | 1190 | i = 0; |
Jiri Pirko | 22bedad3 | 2010-04-01 21:22:57 +0000 | [diff] [blame] | 1191 | netdev_for_each_mc_addr(ha, dev) |
| 1192 | memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN); |
Alex Williamson | f565a7c | 2009-02-04 09:02:45 +0000 | [diff] [blame] | 1193 | |
| 1194 | sg_set_buf(&sg[1], mac_data, |
Jiri Pirko | 4cd24ea | 2010-02-08 04:30:35 +0000 | [diff] [blame] | 1195 | sizeof(mac_data->entries) + (mc_count * ETH_ALEN)); |
Alex Williamson | f565a7c | 2009-02-04 09:02:45 +0000 | [diff] [blame] | 1196 | |
| 1197 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC, |
stephen hemminger | d24bae3 | 2013-12-09 16:17:40 -0800 | [diff] [blame] | 1198 | VIRTIO_NET_CTRL_MAC_TABLE_SET, sg)) |
Thomas Huth | 99e872a | 2013-11-29 10:02:19 +0100 | [diff] [blame] | 1199 | dev_warn(&dev->dev, "Failed to set MAC filter table.\n"); |
Alex Williamson | f565a7c | 2009-02-04 09:02:45 +0000 | [diff] [blame] | 1200 | |
| 1201 | kfree(buf); |
Alex Williamson | 2af7698 | 2009-02-04 09:02:40 +0000 | [diff] [blame] | 1202 | } |
| 1203 | |
Patrick McHardy | 80d5c36 | 2013-04-19 02:04:28 +0000 | [diff] [blame] | 1204 | static int virtnet_vlan_rx_add_vid(struct net_device *dev, |
| 1205 | __be16 proto, u16 vid) |
Alex Williamson | 0bde9569 | 2009-02-04 09:02:50 +0000 | [diff] [blame] | 1206 | { |
| 1207 | struct virtnet_info *vi = netdev_priv(dev); |
| 1208 | struct scatterlist sg; |
| 1209 | |
Alex Williamson | 23e258e | 2009-05-01 17:27:56 +0000 | [diff] [blame] | 1210 | sg_init_one(&sg, &vid, sizeof(vid)); |
Alex Williamson | 0bde9569 | 2009-02-04 09:02:50 +0000 | [diff] [blame] | 1211 | |
| 1212 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, |
stephen hemminger | d24bae3 | 2013-12-09 16:17:40 -0800 | [diff] [blame] | 1213 | VIRTIO_NET_CTRL_VLAN_ADD, &sg)) |
Alex Williamson | 0bde9569 | 2009-02-04 09:02:50 +0000 | [diff] [blame] | 1214 | dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid); |
Jiri Pirko | 8e58613 | 2011-12-08 19:52:37 -0500 | [diff] [blame] | 1215 | return 0; |
Alex Williamson | 0bde9569 | 2009-02-04 09:02:50 +0000 | [diff] [blame] | 1216 | } |
| 1217 | |
Patrick McHardy | 80d5c36 | 2013-04-19 02:04:28 +0000 | [diff] [blame] | 1218 | static int virtnet_vlan_rx_kill_vid(struct net_device *dev, |
| 1219 | __be16 proto, u16 vid) |
Alex Williamson | 0bde9569 | 2009-02-04 09:02:50 +0000 | [diff] [blame] | 1220 | { |
| 1221 | struct virtnet_info *vi = netdev_priv(dev); |
| 1222 | struct scatterlist sg; |
| 1223 | |
Alex Williamson | 23e258e | 2009-05-01 17:27:56 +0000 | [diff] [blame] | 1224 | sg_init_one(&sg, &vid, sizeof(vid)); |
Alex Williamson | 0bde9569 | 2009-02-04 09:02:50 +0000 | [diff] [blame] | 1225 | |
| 1226 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, |
stephen hemminger | d24bae3 | 2013-12-09 16:17:40 -0800 | [diff] [blame] | 1227 | VIRTIO_NET_CTRL_VLAN_DEL, &sg)) |
Alex Williamson | 0bde9569 | 2009-02-04 09:02:50 +0000 | [diff] [blame] | 1228 | dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid); |
Jiri Pirko | 8e58613 | 2011-12-08 19:52:37 -0500 | [diff] [blame] | 1229 | return 0; |
Alex Williamson | 0bde9569 | 2009-02-04 09:02:50 +0000 | [diff] [blame] | 1230 | } |
| 1231 | |
Wanlong Gao | 8898c21 | 2013-01-24 23:51:30 +0000 | [diff] [blame] | 1232 | static void virtnet_clean_affinity(struct virtnet_info *vi, long hcpu) |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1233 | { |
| 1234 | int i; |
Wanlong Gao | 8898c21 | 2013-01-24 23:51:30 +0000 | [diff] [blame] | 1235 | |
| 1236 | if (vi->affinity_hint_set) { |
| 1237 | for (i = 0; i < vi->max_queue_pairs; i++) { |
| 1238 | virtqueue_set_affinity(vi->rq[i].vq, -1); |
| 1239 | virtqueue_set_affinity(vi->sq[i].vq, -1); |
| 1240 | } |
| 1241 | |
| 1242 | vi->affinity_hint_set = false; |
| 1243 | } |
Wanlong Gao | 8898c21 | 2013-01-24 23:51:30 +0000 | [diff] [blame] | 1244 | } |
| 1245 | |
| 1246 | static void virtnet_set_affinity(struct virtnet_info *vi) |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1247 | { |
| 1248 | int i; |
Wanlong Gao | 47be247 | 2013-01-24 23:51:29 +0000 | [diff] [blame] | 1249 | int cpu; |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1250 | |
| 1251 | /* In multiqueue mode, when the number of CPUs equals the number of
| 1252 | * queue pairs, make each queue pair private to one CPU by setting
| 1253 | * the affinity hint, eliminating contention.
| 1254 | */ |
Wanlong Gao | 8898c21 | 2013-01-24 23:51:30 +0000 | [diff] [blame] | 1255 | if (vi->curr_queue_pairs == 1 || |
| 1256 | vi->max_queue_pairs != num_online_cpus()) { |
| 1257 | virtnet_clean_affinity(vi, -1); |
| 1258 | return; |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1259 | } |
| 1260 | |
Wanlong Gao | 8898c21 | 2013-01-24 23:51:30 +0000 | [diff] [blame] | 1261 | i = 0; |
| 1262 | for_each_online_cpu(cpu) { |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1263 | virtqueue_set_affinity(vi->rq[i].vq, cpu); |
| 1264 | virtqueue_set_affinity(vi->sq[i].vq, cpu); |
Jason Wang | 9bb8ca8 | 2013-11-05 18:19:45 +0800 | [diff] [blame] | 1265 | netif_set_xps_queue(vi->dev, cpumask_of(cpu), i); |
Wanlong Gao | 8898c21 | 2013-01-24 23:51:30 +0000 | [diff] [blame] | 1266 | i++; |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1267 | } |
| 1268 | |
Wanlong Gao | 8898c21 | 2013-01-24 23:51:30 +0000 | [diff] [blame] | 1269 | vi->affinity_hint_set = true; |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1270 | } |
| 1271 | |
Wanlong Gao | 8de4b2f | 2013-01-24 23:51:31 +0000 | [diff] [blame] | 1272 | static int virtnet_cpu_callback(struct notifier_block *nfb, |
| 1273 | unsigned long action, void *hcpu) |
| 1274 | { |
| 1275 | struct virtnet_info *vi = container_of(nfb, struct virtnet_info, nb); |
| 1276 | |
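| | /* Rebalance the virtqueue affinity hints as CPUs come and go. */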
| 1277 | switch(action & ~CPU_TASKS_FROZEN) { |
| 1278 | case CPU_ONLINE: |
| 1279 | case CPU_DOWN_FAILED: |
| 1280 | case CPU_DEAD: |
| 1281 | virtnet_set_affinity(vi); |
| 1282 | break; |
| 1283 | case CPU_DOWN_PREPARE: |
| 1284 | virtnet_clean_affinity(vi, (long)hcpu); |
| 1285 | break; |
| 1286 | default: |
| 1287 | break; |
| 1288 | } |
Jason Wang | 3ab098d | 2013-10-15 11:18:58 +0800 | [diff] [blame] | 1289 | |
Wanlong Gao | 8de4b2f | 2013-01-24 23:51:31 +0000 | [diff] [blame] | 1290 | return NOTIFY_OK; |
Herbert Xu | a9ea3fc | 2008-04-18 11:21:42 +0800 | [diff] [blame] | 1291 | } |
| 1292 | |
Rick Jones | 8f9f466 | 2011-10-19 08:10:59 +0000 | [diff] [blame] | 1293 | static void virtnet_get_ringparam(struct net_device *dev, |
| 1294 | struct ethtool_ringparam *ring) |
| 1295 | { |
| 1296 | struct virtnet_info *vi = netdev_priv(dev); |
| 1297 | |
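| | /* Ring resize is not supported: report the fixed vring size as
| | * both current and maximum. */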
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1298 | ring->rx_max_pending = virtqueue_get_vring_size(vi->rq[0].vq); |
| 1299 | ring->tx_max_pending = virtqueue_get_vring_size(vi->sq[0].vq); |
Rick Jones | 8f9f466 | 2011-10-19 08:10:59 +0000 | [diff] [blame] | 1300 | ring->rx_pending = ring->rx_max_pending; |
| 1301 | ring->tx_pending = ring->tx_max_pending; |
Rick Jones | 8f9f466 | 2011-10-19 08:10:59 +0000 | [diff] [blame] | 1302 | } |
| 1303 | |
| 1305 | static void virtnet_get_drvinfo(struct net_device *dev, |
| 1306 | struct ethtool_drvinfo *info) |
| 1307 | { |
| 1308 | struct virtnet_info *vi = netdev_priv(dev); |
| 1309 | struct virtio_device *vdev = vi->vdev; |
| 1310 | |
| 1311 | strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); |
| 1312 | strlcpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version)); |
| 1313 | strlcpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info)); |
| 1315 | } |
| 1316 | |
Jason Wang | d73bcd2 | 2012-12-07 07:04:57 +0000 | [diff] [blame] | 1317 | /* TODO: Eliminate OOO packets during switching */ |
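| | /* Reached via e.g. "ethtool -L <iface> combined 4"; only symmetric
| | * (combined) channels are supported. */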
| 1318 | static int virtnet_set_channels(struct net_device *dev, |
| 1319 | struct ethtool_channels *channels) |
| 1320 | { |
| 1321 | struct virtnet_info *vi = netdev_priv(dev); |
| 1322 | u16 queue_pairs = channels->combined_count; |
| 1323 | int err; |
| 1324 | |
| 1325 | /* We don't support separate rx/tx channels. |
| 1326 | * We don't allow setting 'other' channels. |
| 1327 | */ |
| 1328 | if (channels->rx_count || channels->tx_count || channels->other_count) |
| 1329 | return -EINVAL; |
| 1330 | |
Amos Kong | c18e9cd | 2014-04-18 13:45:41 +0800 | [diff] [blame] | 1331 | if (queue_pairs > vi->max_queue_pairs || queue_pairs == 0) |
Jason Wang | d73bcd2 | 2012-12-07 07:04:57 +0000 | [diff] [blame] | 1332 | return -EINVAL; |
| 1333 | |
Wanlong Gao | 47be247 | 2013-01-24 23:51:29 +0000 | [diff] [blame] | 1334 | get_online_cpus(); |
Jason Wang | d73bcd2 | 2012-12-07 07:04:57 +0000 | [diff] [blame] | 1335 | err = virtnet_set_queues(vi, queue_pairs); |
| 1336 | if (!err) { |
| 1337 | netif_set_real_num_tx_queues(dev, queue_pairs); |
| 1338 | netif_set_real_num_rx_queues(dev, queue_pairs); |
| 1339 | |
Wanlong Gao | 8898c21 | 2013-01-24 23:51:30 +0000 | [diff] [blame] | 1340 | virtnet_set_affinity(vi); |
Jason Wang | d73bcd2 | 2012-12-07 07:04:57 +0000 | [diff] [blame] | 1341 | } |
Wanlong Gao | 47be247 | 2013-01-24 23:51:29 +0000 | [diff] [blame] | 1342 | put_online_cpus(); |
Jason Wang | d73bcd2 | 2012-12-07 07:04:57 +0000 | [diff] [blame] | 1343 | |
| 1344 | return err; |
| 1345 | } |
| 1346 | |
| 1347 | static void virtnet_get_channels(struct net_device *dev, |
| 1348 | struct ethtool_channels *channels) |
| 1349 | { |
| 1350 | struct virtnet_info *vi = netdev_priv(dev); |
| 1351 | |
| 1352 | channels->combined_count = vi->curr_queue_pairs; |
| 1353 | channels->max_combined = vi->max_queue_pairs; |
| 1354 | channels->max_other = 0; |
| 1355 | channels->rx_count = 0; |
| 1356 | channels->tx_count = 0; |
| 1357 | channels->other_count = 0; |
| 1358 | } |
| 1359 | |
Stephen Hemminger | 0fc0b73 | 2009-09-02 01:03:33 -0700 | [diff] [blame] | 1360 | static const struct ethtool_ops virtnet_ethtool_ops = { |
Rick Jones | 6684604 | 2011-11-14 14:17:08 +0000 | [diff] [blame] | 1361 | .get_drvinfo = virtnet_get_drvinfo, |
Mark McLoughlin | 9f4d26d | 2009-01-19 17:09:49 -0800 | [diff] [blame] | 1362 | .get_link = ethtool_op_get_link, |
Rick Jones | 8f9f466 | 2011-10-19 08:10:59 +0000 | [diff] [blame] | 1363 | .get_ringparam = virtnet_get_ringparam, |
Jason Wang | d73bcd2 | 2012-12-07 07:04:57 +0000 | [diff] [blame] | 1364 | .set_channels = virtnet_set_channels, |
| 1365 | .get_channels = virtnet_get_channels, |
Herbert Xu | a9ea3fc | 2008-04-18 11:21:42 +0800 | [diff] [blame] | 1366 | }; |
| 1367 | |
Mark McLoughlin | 39da581 | 2008-11-26 13:58:11 +0000 | [diff] [blame] | 1368 | #define MIN_MTU 68 |
| 1369 | #define MAX_MTU 65535 |
| 1370 | |
| 1371 | static int virtnet_change_mtu(struct net_device *dev, int new_mtu) |
| 1372 | { |
| 1373 | if (new_mtu < MIN_MTU || new_mtu > MAX_MTU) |
| 1374 | return -EINVAL; |
| 1375 | dev->mtu = new_mtu; |
| 1376 | return 0; |
| 1377 | } |
| 1378 | |
Stephen Hemminger | 76288b4 | 2009-01-06 10:44:22 -0800 | [diff] [blame] | 1379 | static const struct net_device_ops virtnet_netdev = { |
| 1380 | .ndo_open = virtnet_open, |
| 1381 | .ndo_stop = virtnet_close, |
| 1382 | .ndo_start_xmit = start_xmit, |
| 1383 | .ndo_validate_addr = eth_validate_addr, |
Alex Williamson | 9c46f6d | 2009-02-04 16:36:34 -0800 | [diff] [blame] | 1384 | .ndo_set_mac_address = virtnet_set_mac_address, |
Alex Williamson | 2af7698 | 2009-02-04 09:02:40 +0000 | [diff] [blame] | 1385 | .ndo_set_rx_mode = virtnet_set_rx_mode, |
Stephen Hemminger | 76288b4 | 2009-01-06 10:44:22 -0800 | [diff] [blame] | 1386 | .ndo_change_mtu = virtnet_change_mtu, |
stephen hemminger | 3fa2a1d | 2011-06-15 06:36:29 +0000 | [diff] [blame] | 1387 | .ndo_get_stats64 = virtnet_stats, |
Alex Williamson | 1824a98 | 2009-05-01 17:31:10 +0000 | [diff] [blame] | 1388 | .ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid, |
| 1389 | .ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid, |
Stephen Hemminger | 76288b4 | 2009-01-06 10:44:22 -0800 | [diff] [blame] | 1390 | #ifdef CONFIG_NET_POLL_CONTROLLER |
| 1391 | .ndo_poll_controller = virtnet_netpoll, |
| 1392 | #endif |
Jason Wang | 9181563 | 2014-07-23 16:33:55 +0800 | [diff] [blame] | 1393 | #ifdef CONFIG_NET_RX_BUSY_POLL |
| 1394 | .ndo_busy_poll = virtnet_busy_poll, |
| 1395 | #endif |
Stephen Hemminger | 76288b4 | 2009-01-06 10:44:22 -0800 | [diff] [blame] | 1396 | }; |
| 1397 | |
Jason Wang | 586d17c | 2012-04-11 20:43:52 +0000 | [diff] [blame] | 1398 | static void virtnet_config_changed_work(struct work_struct *work) |
Mark McLoughlin | 9f4d26d | 2009-01-19 17:09:49 -0800 | [diff] [blame] | 1399 | { |
Jason Wang | 586d17c | 2012-04-11 20:43:52 +0000 | [diff] [blame] | 1400 | struct virtnet_info *vi = |
| 1401 | container_of(work, struct virtnet_info, config_work); |
Mark McLoughlin | 9f4d26d | 2009-01-19 17:09:49 -0800 | [diff] [blame] | 1402 | u16 v; |
| 1403 | |
Rusty Russell | 855e0c5 | 2013-10-14 18:11:51 +1030 | [diff] [blame] | 1404 | if (virtio_cread_feature(vi->vdev, VIRTIO_NET_F_STATUS, |
| 1405 | struct virtio_net_config, status, &v) < 0) |
Michael S. Tsirkin | 507613b | 2014-10-15 10:22:30 +1030 | [diff] [blame^] | 1406 | return; |
Jason Wang | 586d17c | 2012-04-11 20:43:52 +0000 | [diff] [blame] | 1407 | |
| 1408 | if (v & VIRTIO_NET_S_ANNOUNCE) { |
Amerigo Wang | ee89bab | 2012-08-09 22:14:56 +0000 | [diff] [blame] | 1409 | netdev_notify_peers(vi->dev); |
Jason Wang | 586d17c | 2012-04-11 20:43:52 +0000 | [diff] [blame] | 1410 | virtnet_ack_link_announce(vi); |
| 1411 | } |
Mark McLoughlin | 9f4d26d | 2009-01-19 17:09:49 -0800 | [diff] [blame] | 1412 | |
| 1413 | /* Ignore unknown (future) status bits */ |
| 1414 | v &= VIRTIO_NET_S_LINK_UP; |
| 1415 | |
| 1416 | if (vi->status == v) |
Michael S. Tsirkin | 507613b | 2014-10-15 10:22:30 +1030 | [diff] [blame^] | 1417 | return; |
Mark McLoughlin | 9f4d26d | 2009-01-19 17:09:49 -0800 | [diff] [blame] | 1418 | |
| 1419 | vi->status = v; |
| 1420 | |
| 1421 | if (vi->status & VIRTIO_NET_S_LINK_UP) { |
| 1422 | netif_carrier_on(vi->dev); |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1423 | netif_tx_wake_all_queues(vi->dev); |
Mark McLoughlin | 9f4d26d | 2009-01-19 17:09:49 -0800 | [diff] [blame] | 1424 | } else { |
| 1425 | netif_carrier_off(vi->dev); |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1426 | netif_tx_stop_all_queues(vi->dev); |
Mark McLoughlin | 9f4d26d | 2009-01-19 17:09:49 -0800 | [diff] [blame] | 1427 | } |
| 1428 | } |
| 1429 | |
| 1430 | static void virtnet_config_changed(struct virtio_device *vdev) |
| 1431 | { |
| 1432 | struct virtnet_info *vi = vdev->priv; |
| 1433 | |
Tejun Heo | 3b07e9c | 2012-08-20 14:51:24 -0700 | [diff] [blame] | 1434 | schedule_work(&vi->config_work); |
Mark McLoughlin | 9f4d26d | 2009-01-19 17:09:49 -0800 | [diff] [blame] | 1435 | } |
| 1436 | |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1437 | static void virtnet_free_queues(struct virtnet_info *vi) |
| 1438 | { |
Andrey Vagin | d4fb84e | 2013-12-05 18:36:21 +0400 | [diff] [blame] | 1439 | int i; |
| 1440 | |
| 1441 | for (i = 0; i < vi->max_queue_pairs; i++) |
| 1442 | netif_napi_del(&vi->rq[i].napi); |
| 1443 | |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1444 | kfree(vi->rq); |
| 1445 | kfree(vi->sq); |
| 1446 | } |
| 1447 | |
| 1448 | static void free_receive_bufs(struct virtnet_info *vi) |
| 1449 | { |
| 1450 | int i; |
| 1451 | |
| 1452 | for (i = 0; i < vi->max_queue_pairs; i++) { |
| 1453 | while (vi->rq[i].pages) |
| 1454 | __free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0); |
| 1455 | } |
| 1456 | } |
| 1457 | |
Michael Dalton | fb51879 | 2014-01-16 22:23:26 -0800 | [diff] [blame] | 1458 | static void free_receive_page_frags(struct virtnet_info *vi) |
| 1459 | { |
| 1460 | int i; |
| 1461 | for (i = 0; i < vi->max_queue_pairs; i++) |
| 1462 | if (vi->rq[i].alloc_frag.page) |
| 1463 | put_page(vi->rq[i].alloc_frag.page); |
| 1464 | } |
| 1465 | |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1466 | static void free_unused_bufs(struct virtnet_info *vi) |
| 1467 | { |
| 1468 | void *buf; |
| 1469 | int i; |
| 1470 | |
| 1471 | for (i = 0; i < vi->max_queue_pairs; i++) { |
| 1472 | struct virtqueue *vq = vi->sq[i].vq; |
| 1473 | while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) |
| 1474 | dev_kfree_skb(buf); |
| 1475 | } |
| 1476 | |
| 1477 | for (i = 0; i < vi->max_queue_pairs; i++) { |
| 1478 | struct virtqueue *vq = vi->rq[i].vq; |
| 1479 | |
| 1480 | while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) { |
Michael Dalton | ab7db91 | 2014-01-16 22:23:27 -0800 | [diff] [blame] | 1481 | if (vi->mergeable_rx_bufs) { |
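| | /* For mergeable buffers the token is an opaque context encoding
| | * the buffer address and truesize; recover the address and drop
| | * the page reference. */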
| 1482 | unsigned long ctx = (unsigned long)buf; |
| 1483 | void *base = mergeable_ctx_to_buf_address(ctx); |
| 1484 | put_page(virt_to_head_page(base)); |
| 1485 | } else if (vi->big_packets) { |
Andrey Vagin | fa9fac1 | 2013-12-05 18:36:20 +0400 | [diff] [blame] | 1486 | give_pages(&vi->rq[i], buf); |
Michael Dalton | ab7db91 | 2014-01-16 22:23:27 -0800 | [diff] [blame] | 1487 | } else { |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1488 | dev_kfree_skb(buf); |
Michael Dalton | ab7db91 | 2014-01-16 22:23:27 -0800 | [diff] [blame] | 1489 | } |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1490 | } |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1491 | } |
| 1492 | } |
| 1493 | |
Jason Wang | e9d7417 | 2012-12-07 07:04:55 +0000 | [diff] [blame] | 1494 | static void virtnet_del_vqs(struct virtnet_info *vi) |
| 1495 | { |
| 1496 | struct virtio_device *vdev = vi->vdev; |
| 1497 | |
Wanlong Gao | 8898c21 | 2013-01-24 23:51:30 +0000 | [diff] [blame] | 1498 | virtnet_clean_affinity(vi, -1); |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1499 | |
Jason Wang | e9d7417 | 2012-12-07 07:04:55 +0000 | [diff] [blame] | 1500 | vdev->config->del_vqs(vdev); |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1501 | |
| 1502 | virtnet_free_queues(vi); |
| 1503 | } |
| 1504 | |
| 1505 | static int virtnet_find_vqs(struct virtnet_info *vi) |
| 1506 | { |
| 1507 | vq_callback_t **callbacks; |
| 1508 | struct virtqueue **vqs; |
| 1509 | int ret = -ENOMEM; |
| 1510 | int i, total_vqs; |
| 1511 | const char **names; |
| 1512 | |
| 1513 | /* We expect 1 RX virtqueue followed by 1 TX virtqueue, followed by |
| 1514 | * up to N-1 more RX/TX queue pairs used in multiqueue mode, followed
| 1515 | * by an optional control vq.
| 1516 | */ |
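| | /* With two queue pairs plus a control vq, for example, the order
| | * is rx0, tx0, rx1, tx1, ctrl. */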
| 1517 | total_vqs = vi->max_queue_pairs * 2 + |
| 1518 | virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ); |
| 1519 | |
| 1520 | /* Allocate space for find_vqs parameters */ |
| 1521 | vqs = kzalloc(total_vqs * sizeof(*vqs), GFP_KERNEL); |
| 1522 | if (!vqs) |
| 1523 | goto err_vq; |
| 1524 | callbacks = kmalloc(total_vqs * sizeof(*callbacks), GFP_KERNEL); |
| 1525 | if (!callbacks) |
| 1526 | goto err_callback; |
| 1527 | names = kmalloc(total_vqs * sizeof(*names), GFP_KERNEL); |
| 1528 | if (!names) |
| 1529 | goto err_names; |
| 1530 | |
| 1531 | /* Parameters for control virtqueue, if any */ |
| 1532 | if (vi->has_cvq) { |
| 1533 | callbacks[total_vqs - 1] = NULL; |
| 1534 | names[total_vqs - 1] = "control"; |
| 1535 | } |
| 1536 | |
| 1537 | /* Allocate/initialize parameters for send/receive virtqueues */ |
| 1538 | for (i = 0; i < vi->max_queue_pairs; i++) { |
| 1539 | callbacks[rxq2vq(i)] = skb_recv_done; |
| 1540 | callbacks[txq2vq(i)] = skb_xmit_done; |
| 1541 | sprintf(vi->rq[i].name, "input.%d", i); |
| 1542 | sprintf(vi->sq[i].name, "output.%d", i); |
| 1543 | names[rxq2vq(i)] = vi->rq[i].name; |
| 1544 | names[txq2vq(i)] = vi->sq[i].name; |
| 1545 | } |
| 1546 | |
| 1547 | ret = vi->vdev->config->find_vqs(vi->vdev, total_vqs, vqs, callbacks, |
| 1548 | names); |
| 1549 | if (ret) |
| 1550 | goto err_find; |
| 1551 | |
| 1552 | if (vi->has_cvq) { |
| 1553 | vi->cvq = vqs[total_vqs - 1]; |
| 1554 | if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN)) |
Patrick McHardy | f646968 | 2013-04-19 02:04:27 +0000 | [diff] [blame] | 1555 | vi->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1556 | } |
| 1557 | |
| 1558 | for (i = 0; i < vi->max_queue_pairs; i++) { |
| 1559 | vi->rq[i].vq = vqs[rxq2vq(i)]; |
| 1560 | vi->sq[i].vq = vqs[txq2vq(i)]; |
| 1561 | } |
| 1562 | |
| 1563 | kfree(names); |
| 1564 | kfree(callbacks); |
| 1565 | kfree(vqs); |
| 1566 | |
| 1567 | return 0; |
| 1568 | |
| 1569 | err_find: |
| 1570 | kfree(names); |
| 1571 | err_names: |
| 1572 | kfree(callbacks); |
| 1573 | err_callback: |
| 1574 | kfree(vqs); |
| 1575 | err_vq: |
| 1576 | return ret; |
| 1577 | } |
| 1578 | |
| 1579 | static int virtnet_alloc_queues(struct virtnet_info *vi) |
| 1580 | { |
| 1581 | int i; |
| 1582 | |
| 1583 | vi->sq = kzalloc(sizeof(*vi->sq) * vi->max_queue_pairs, GFP_KERNEL); |
| 1584 | if (!vi->sq) |
| 1585 | goto err_sq; |
| 1586 | vi->rq = kzalloc(sizeof(*vi->rq) * vi->max_queue_pairs, GFP_KERNEL); |
Amerigo Wang | 008d427 | 2012-12-10 02:24:08 +0000 | [diff] [blame] | 1587 | if (!vi->rq) |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1588 | goto err_rq; |
| 1589 | |
| 1590 | INIT_DELAYED_WORK(&vi->refill, refill_work); |
| 1591 | for (i = 0; i < vi->max_queue_pairs; i++) { |
| 1592 | vi->rq[i].pages = NULL; |
| 1593 | netif_napi_add(vi->dev, &vi->rq[i].napi, virtnet_poll, |
| 1594 | napi_weight); |
Jason Wang | 9181563 | 2014-07-23 16:33:55 +0800 | [diff] [blame] | 1595 | napi_hash_add(&vi->rq[i].napi); |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1596 | |
| 1597 | sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg)); |
Michael Dalton | ab7db91 | 2014-01-16 22:23:27 -0800 | [diff] [blame] | 1598 | ewma_init(&vi->rq[i].mrg_avg_pkt_len, 1, RECEIVE_AVG_WEIGHT); |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1599 | sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg)); |
| 1600 | } |
| 1601 | |
| 1602 | return 0; |
| 1603 | |
| 1604 | err_rq: |
| 1605 | kfree(vi->sq); |
| 1606 | err_sq: |
| 1607 | return -ENOMEM; |
Jason Wang | e9d7417 | 2012-12-07 07:04:55 +0000 | [diff] [blame] | 1608 | } |
| 1609 | |
Amit Shah | 3f9c10b | 2011-12-22 16:58:31 +0530 | [diff] [blame] | 1610 | static int init_vqs(struct virtnet_info *vi) |
| 1611 | { |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1612 | int ret; |
Amit Shah | 3f9c10b | 2011-12-22 16:58:31 +0530 | [diff] [blame] | 1613 | |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1614 | /* Allocate send & receive queues */ |
| 1615 | ret = virtnet_alloc_queues(vi); |
| 1616 | if (ret) |
| 1617 | goto err; |
Amit Shah | 3f9c10b | 2011-12-22 16:58:31 +0530 | [diff] [blame] | 1618 | |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1619 | ret = virtnet_find_vqs(vi); |
| 1620 | if (ret) |
| 1621 | goto err_free; |
Amit Shah | 3f9c10b | 2011-12-22 16:58:31 +0530 | [diff] [blame] | 1622 | |
Wanlong Gao | 47be247 | 2013-01-24 23:51:29 +0000 | [diff] [blame] | 1623 | get_online_cpus(); |
Wanlong Gao | 8898c21 | 2013-01-24 23:51:30 +0000 | [diff] [blame] | 1624 | virtnet_set_affinity(vi); |
Wanlong Gao | 47be247 | 2013-01-24 23:51:29 +0000 | [diff] [blame] | 1625 | put_online_cpus(); |
| 1626 | |
Amit Shah | 3f9c10b | 2011-12-22 16:58:31 +0530 | [diff] [blame] | 1627 | return 0; |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1628 | |
| 1629 | err_free: |
| 1630 | virtnet_free_queues(vi); |
| 1631 | err: |
| 1632 | return ret; |
Amit Shah | 3f9c10b | 2011-12-22 16:58:31 +0530 | [diff] [blame] | 1633 | } |
| 1634 | |
Michael Dalton | fbf28d7 | 2014-01-16 22:23:30 -0800 | [diff] [blame] | 1635 | #ifdef CONFIG_SYSFS |
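| | /* Exposes the EWMA-estimated buffer size, e.g. via
| | * /sys/class/net/<iface>/queues/rx-0/virtio_net/mergeable_rx_buffer_size. */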
| 1636 | static ssize_t mergeable_rx_buffer_size_show(struct netdev_rx_queue *queue, |
| 1637 | struct rx_queue_attribute *attribute, char *buf) |
| 1638 | { |
| 1639 | struct virtnet_info *vi = netdev_priv(queue->dev); |
| 1640 | unsigned int queue_index = get_netdev_rx_queue_index(queue); |
| 1641 | struct ewma *avg; |
| 1642 | |
| 1643 | BUG_ON(queue_index >= vi->max_queue_pairs); |
| 1644 | avg = &vi->rq[queue_index].mrg_avg_pkt_len; |
| 1645 | return sprintf(buf, "%u\n", get_mergeable_buf_len(avg)); |
| 1646 | } |
| 1647 | |
| 1648 | static struct rx_queue_attribute mergeable_rx_buffer_size_attribute = |
| 1649 | __ATTR_RO(mergeable_rx_buffer_size); |
| 1650 | |
| 1651 | static struct attribute *virtio_net_mrg_rx_attrs[] = { |
| 1652 | &mergeable_rx_buffer_size_attribute.attr, |
| 1653 | NULL |
| 1654 | }; |
| 1655 | |
| 1656 | static const struct attribute_group virtio_net_mrg_rx_group = { |
| 1657 | .name = "virtio_net", |
| 1658 | .attrs = virtio_net_mrg_rx_attrs |
| 1659 | }; |
| 1660 | #endif |
| 1661 | |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1662 | static int virtnet_probe(struct virtio_device *vdev) |
| 1663 | { |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1664 | int i, err; |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1665 | struct net_device *dev; |
| 1666 | struct virtnet_info *vi; |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1667 | u16 max_queue_pairs; |
| 1668 | |
| 1669 | /* Find if host supports multiqueue virtio_net device */ |
Rusty Russell | 855e0c5 | 2013-10-14 18:11:51 +1030 | [diff] [blame] | 1670 | err = virtio_cread_feature(vdev, VIRTIO_NET_F_MQ, |
| 1671 | struct virtio_net_config, |
| 1672 | max_virtqueue_pairs, &max_queue_pairs); |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1673 | |
| 1674 | /* We need at least 2 queues */
| 1675 | if (err || max_queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN || |
| 1676 | max_queue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX || |
| 1677 | !virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) |
| 1678 | max_queue_pairs = 1; |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1679 | |
| 1680 | /* Allocate ourselves a network device with room for our info */ |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1681 | dev = alloc_etherdev_mq(sizeof(struct virtnet_info), max_queue_pairs); |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1682 | if (!dev) |
| 1683 | return -ENOMEM; |
| 1684 | |
| 1685 | /* Set up network device as normal. */ |
Jiri Pirko | f2f2c8b | 2012-06-29 05:10:06 +0000 | [diff] [blame] | 1686 | dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE; |
Stephen Hemminger | 76288b4 | 2009-01-06 10:44:22 -0800 | [diff] [blame] | 1687 | dev->netdev_ops = &virtnet_netdev; |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1688 | dev->features = NETIF_F_HIGHDMA; |
stephen hemminger | 3fa2a1d | 2011-06-15 06:36:29 +0000 | [diff] [blame] | 1689 | |
Wilfried Klaebe | 7ad24ea | 2014-05-11 00:12:32 +0000 | [diff] [blame] | 1690 | dev->ethtool_ops = &virtnet_ethtool_ops; |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1691 | SET_NETDEV_DEV(dev, &vdev->dev); |
| 1692 | |
| 1693 | /* Do we support "hardware" checksums? */ |
Michał Mirosław | 98e778c | 2011-03-31 01:01:35 +0000 | [diff] [blame] | 1694 | if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) { |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1695 | /* This opens up the world of extra features. */ |
Michał Mirosław | 98e778c | 2011-03-31 01:01:35 +0000 | [diff] [blame] | 1696 | dev->hw_features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST; |
| 1697 | if (csum) |
| 1698 | dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST; |
| 1699 | |
| 1700 | if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) { |
| 1701 | dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO |
Rusty Russell | 34a4857 | 2008-02-04 23:50:02 -0500 | [diff] [blame] | 1702 | | NETIF_F_TSO_ECN | NETIF_F_TSO6; |
| 1703 | } |
Rusty Russell | 5539ae96 | 2008-05-02 21:50:46 -0500 | [diff] [blame] | 1704 | /* Individual feature bits: what can host handle? */ |
Michał Mirosław | 98e778c | 2011-03-31 01:01:35 +0000 | [diff] [blame] | 1705 | if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4)) |
| 1706 | dev->hw_features |= NETIF_F_TSO; |
| 1707 | if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6)) |
| 1708 | dev->hw_features |= NETIF_F_TSO6; |
| 1709 | if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN)) |
| 1710 | dev->hw_features |= NETIF_F_TSO_ECN; |
| 1711 | if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO)) |
| 1712 | dev->hw_features |= NETIF_F_UFO; |
| 1713 | |
| 1714 | if (gso) |
| 1715 | dev->features |= dev->hw_features & (NETIF_F_ALL_TSO|NETIF_F_UFO); |
| 1716 | /* (!csum && gso) case will be fixed by register_netdev() */ |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1717 | } |
Thomas Huth | 4f49129 | 2013-08-27 17:09:02 +0200 | [diff] [blame] | 1718 | if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM)) |
| 1719 | dev->features |= NETIF_F_RXCSUM; |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1720 | |
Jason Wang | 4fda830 | 2013-04-10 23:32:21 +0000 | [diff] [blame] | 1721 | dev->vlan_features = dev->features; |
| 1722 | |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1723 | /* Configuration may specify what MAC to use. Otherwise random. */ |
Rusty Russell | 855e0c5 | 2013-10-14 18:11:51 +1030 | [diff] [blame] | 1724 | if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) |
| 1725 | virtio_cread_bytes(vdev, |
| 1726 | offsetof(struct virtio_net_config, mac), |
| 1727 | dev->dev_addr, dev->addr_len); |
| 1728 | else |
Danny Kukawka | f2cedb6 | 2012-02-15 06:45:39 +0000 | [diff] [blame] | 1729 | eth_hw_addr_random(dev); |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1730 | |
| 1731 | /* Set up our device-specific information */ |
| 1732 | vi = netdev_priv(dev); |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1733 | vi->dev = dev; |
| 1734 | vi->vdev = vdev; |
Christian Borntraeger | d9d5dcc | 2008-02-18 10:02:51 +0100 | [diff] [blame] | 1735 | vdev->priv = vi; |
stephen hemminger | 3fa2a1d | 2011-06-15 06:36:29 +0000 | [diff] [blame] | 1736 | vi->stats = alloc_percpu(struct virtnet_stats); |
| 1737 | err = -ENOMEM; |
| 1738 | if (vi->stats == NULL) |
| 1739 | goto free; |
| 1740 | |
John Stultz | 827da44 | 2013-10-07 15:51:58 -0700 | [diff] [blame] | 1741 | for_each_possible_cpu(i) { |
| 1742 | struct virtnet_stats *virtnet_stats; |
| 1743 | virtnet_stats = per_cpu_ptr(vi->stats, i); |
| 1744 | u64_stats_init(&virtnet_stats->tx_syncp); |
| 1745 | u64_stats_init(&virtnet_stats->rx_syncp); |
| 1746 | } |
| 1747 | |
Jason Wang | 586d17c | 2012-04-11 20:43:52 +0000 | [diff] [blame] | 1748 | INIT_WORK(&vi->config_work, virtnet_config_changed_work); |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1749 | |
Herbert Xu | 97402b9 | 2008-04-18 11:24:27 +0800 | [diff] [blame] | 1750 | /* If we can receive ANY GSO packets, we must allocate large ones. */ |
Joe Perches | 8e95a20 | 2009-12-03 07:58:21 +0000 | [diff] [blame] | 1751 | if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) || |
| 1752 | virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) || |
Jason Wang | 0e7ede8 | 2014-02-21 13:08:04 +0800 | [diff] [blame] | 1753 | virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN) || |
| 1754 | virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UFO)) |
Herbert Xu | 97402b9 | 2008-04-18 11:24:27 +0800 | [diff] [blame] | 1755 | vi->big_packets = true; |
| 1756 | |
Mark McLoughlin | 3f2c31d | 2008-11-16 22:41:34 -0800 | [diff] [blame] | 1757 | if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) |
| 1758 | vi->mergeable_rx_bufs = true; |
| 1759 | |
Michael S. Tsirkin | e7428e9 | 2013-07-25 10:20:23 +0930 | [diff] [blame] | 1760 | if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT)) |
| 1761 | vi->any_header_sg = true; |
| 1762 | |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1763 | if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) |
| 1764 | vi->has_cvq = true; |
| 1765 | |
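| | /* Ask the stack to reserve headroom for the virtio header so the
| | * can_push path in xmit_skb() usually succeeds. */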
Zhangjie (HZ) | 6ebbc1a | 2014-04-29 18:43:22 +0800 | [diff] [blame] | 1766 | if (vi->any_header_sg) {
| 1767 | if (vi->mergeable_rx_bufs) |
| 1768 | dev->needed_headroom = sizeof(struct virtio_net_hdr_mrg_rxbuf); |
| 1769 | else |
| 1770 | dev->needed_headroom = sizeof(struct virtio_net_hdr); |
| 1771 | } |
| 1772 | |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1773 | /* Use single tx/rx queue pair as default */ |
| 1774 | vi->curr_queue_pairs = 1; |
| 1775 | vi->max_queue_pairs = max_queue_pairs; |
| 1776 | |
| 1777 | /* Allocate/initialize the rx/tx queues, and invoke find_vqs */ |
Amit Shah | 3f9c10b | 2011-12-22 16:58:31 +0530 | [diff] [blame] | 1778 | err = init_vqs(vi); |
Michael S. Tsirkin | d2a7ddd | 2009-06-12 22:16:36 -0600 | [diff] [blame] | 1779 | if (err) |
Jason Wang | 9bb8ca8 | 2013-11-05 18:19:45 +0800 | [diff] [blame] | 1780 | goto free_stats; |
Michael S. Tsirkin | d2a7ddd | 2009-06-12 22:16:36 -0600 | [diff] [blame] | 1781 | |
#ifdef CONFIG_SYSFS
	if (vi->mergeable_rx_bufs)
		dev->sysfs_rx_queue_group = &virtio_net_mrg_rx_group;
#endif
	netif_set_real_num_tx_queues(dev, vi->curr_queue_pairs);
	netif_set_real_num_rx_queues(dev, vi->curr_queue_pairs);

	err = register_netdev(dev);
	if (err) {
		pr_debug("virtio_net: registering device failed\n");
		goto free_vqs;
	}

	/* Last of all, set up some receive buffers. */
	for (i = 0; i < vi->curr_queue_pairs; i++) {
		try_fill_recv(&vi->rq[i], GFP_KERNEL);

		/* If we didn't even get one input buffer, we're useless. */
		if (vi->rq[i].vq->num_free ==
		    virtqueue_get_vring_size(vi->rq[i].vq)) {
			free_unused_bufs(vi);
			err = -ENOMEM;
			goto free_recv_bufs;
		}
	}

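	/* Track CPU hotplug so virtqueue-to-CPU affinity can be rebalanced
	 * as CPUs come and go (virtnet_cpu_callback resets the affinity).
	 */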
	vi->nb.notifier_call = &virtnet_cpu_callback;
	err = register_hotcpu_notifier(&vi->nb);
	if (err) {
		pr_debug("virtio_net: registering cpu notifier failed\n");
		goto free_recv_bufs;
	}

	/* Assume link up if device can't report link status,
	 * otherwise get link status from config.
	 */
	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
		netif_carrier_off(dev);
		schedule_work(&vi->config_work);
	} else {
		vi->status = VIRTIO_NET_S_LINK_UP;
		netif_carrier_on(dev);
	}

	pr_debug("virtnet: registered device %s with %d RX and TX vqs\n",
		 dev->name, max_queue_pairs);

	return 0;

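/* Error unwind: each label releases what was set up after the one below
 * it, in reverse order of the allocations above.
 */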
free_recv_bufs:
	free_receive_bufs(vi);
	unregister_netdev(dev);
free_vqs:
	cancel_delayed_work_sync(&vi->refill);
	free_receive_page_frags(vi);
	virtnet_del_vqs(vi);
free_stats:
	free_percpu(vi->stats);
free:
	free_netdev(dev);
	return err;
}

static void remove_vq_common(struct virtnet_info *vi)
{
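	/* Reset the device first so the host stops touching the rings
	 * before we start reclaiming the buffers still sitting in them.
	 */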
	vi->vdev->config->reset(vi->vdev);

	/* Free unused buffers in both send and recv, if any. */
	free_unused_bufs(vi);

	free_receive_bufs(vi);

	free_receive_page_frags(vi);

	virtnet_del_vqs(vi);
}

static void virtnet_remove(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;

	unregister_hotcpu_notifier(&vi->nb);

	/* Make sure no work handler is accessing the device. */
	flush_work(&vi->config_work);

	unregister_netdev(vi->dev);

	remove_vq_common(vi);

	free_percpu(vi->stats);
	free_netdev(vi->dev);
}

#ifdef CONFIG_PM_SLEEP
static int virtnet_freeze(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;
	int i;

	unregister_hotcpu_notifier(&vi->nb);

	/* Make sure no work handler is accessing the device. */
	flush_work(&vi->config_work);

	netif_device_detach(vi->dev);
	cancel_delayed_work_sync(&vi->refill);

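	/* Tear the NAPI instances down completely: napi_hash_del() unhooks
	 * them from busy polling before netif_napi_del() removes them;
	 * restore rebuilds them from scratch via init_vqs().
	 */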
	if (netif_running(vi->dev)) {
		for (i = 0; i < vi->max_queue_pairs; i++) {
			napi_disable(&vi->rq[i].napi);
			napi_hash_del(&vi->rq[i].napi);
			netif_napi_del(&vi->rq[i].napi);
		}
	}

	remove_vq_common(vi);

	return 0;
}

static int virtnet_restore(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;
	int err, i;

	err = init_vqs(vi);
	if (err)
		return err;

	if (netif_running(vi->dev)) {
		for (i = 0; i < vi->curr_queue_pairs; i++)
			if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
				schedule_delayed_work(&vi->refill, 0);

		for (i = 0; i < vi->max_queue_pairs; i++)
			virtnet_napi_enable(&vi->rq[i]);
	}

	netif_device_attach(vi->dev);

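	/* virtnet_set_queues() expects RTNL, which its other callers (e.g.
	 * the ethtool set_channels path) already hold, so take it here.
	 */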
	rtnl_lock();
	virtnet_set_queues(vi, vi->curr_queue_pairs);
	rtnl_unlock();

	err = register_hotcpu_notifier(&vi->nb);
	if (err)
		return err;

	return 0;
}
#endif

static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

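/* Feature bits this driver can accept; the virtio core negotiates the
 * subset that both this table and the host device support.
 */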
static unsigned int features[] = {
	VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM,
	VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC,
	VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6,
	VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6,
	VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO,
	VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ,
	VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN,
	VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ,
	VIRTIO_NET_F_CTRL_MAC_ADDR,
	VIRTIO_F_ANY_LAYOUT,
};

static struct virtio_driver virtio_net_driver = {
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.id_table = id_table,
	.probe = virtnet_probe,
	.remove = virtnet_remove,
	.config_changed = virtnet_config_changed,
#ifdef CONFIG_PM_SLEEP
	.freeze = virtnet_freeze,
	.restore = virtnet_restore,
#endif
};

module_virtio_driver(virtio_net_driver);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio network driver");
MODULE_LICENSE("GPL");