/* A simple network driver using virtio.
 *
 * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
//#define DEBUG
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_net.h>
#include <linux/scatterlist.h>
#include <linux/if_vlan.h>

static int napi_weight = 128;
module_param(napi_weight, int, 0444);

static int csum = 1, gso = 1;
module_param(csum, bool, 0444);
module_param(gso, bool, 0444);

/* FIXME: MTU in config. */
#define MAX_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
#define GOOD_COPY_LEN	128

#define VIRTNET_SEND_COMMAND_SG_MAX	1

struct virtnet_info
{
	struct virtio_device *vdev;
	struct virtqueue *rvq, *svq, *cvq;
	struct net_device *dev;
	struct napi_struct napi;
	unsigned int status;

	/* The skb we couldn't send because buffers were full. */
	struct sk_buff *last_xmit_skb;

	/* If we need to free in a timer, this is it. */
	struct timer_list xmit_free_timer;

	/* Number of input buffers, and max we've ever had. */
	unsigned int num, max;

	/* For cleaning up after transmission. */
	struct tasklet_struct tasklet;
	bool free_in_tasklet;

	/* I like... big packets and I cannot lie! */
	bool big_packets;

	/* Host will merge rx buffers for big packets (shake it! shake it!) */
	bool mergeable_rx_bufs;

	/* Receive & send queues. */
	struct sk_buff_head recv;
	struct sk_buff_head send;

	/* Chain pages by the private ptr. */
	struct page *pages;
};

static inline void *skb_vnet_hdr(struct sk_buff *skb)
{
	return (struct virtio_net_hdr *)skb->cb;
}

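/* Return a page to the driver's pool, chained through page->private. */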
static void give_a_page(struct virtnet_info *vi, struct page *page)
{
	page->private = (unsigned long)vi->pages;
	vi->pages = page;
}

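/* Detach all fragment pages from an skb and put them back in the pool. */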
static void trim_pages(struct virtnet_info *vi, struct sk_buff *skb)
{
	unsigned int i;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		give_a_page(vi, skb_shinfo(skb)->frags[i].page);
	skb_shinfo(skb)->nr_frags = 0;
	skb->data_len = 0;
}

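/* Grab a page from the pool, or allocate a fresh one if the pool is empty. */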
static struct page *get_a_page(struct virtnet_info *vi, gfp_t gfp_mask)
{
	struct page *p = vi->pages;

	if (p)
		vi->pages = (struct page *)p->private;
	else
		p = alloc_page(gfp_mask);
	return p;
}

static void skb_xmit_done(struct virtqueue *svq)
{
	struct virtnet_info *vi = svq->vdev->priv;

	/* Suppress further interrupts. */
	svq->vq_ops->disable_cb(svq);

	/* We were probably waiting for more output buffers. */
	netif_wake_queue(vi->dev);

	/* Make sure we re-xmit last_xmit_skb: if there are no more packets
	 * queued, start_xmit won't be called. */
	tasklet_schedule(&vi->tasklet);
}

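/* Turn one received buffer (plus any merged companion buffers) into a
 * complete skb and hand it to the network stack. */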
static void receive_skb(struct net_device *dev, struct sk_buff *skb,
			unsigned len)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct virtio_net_hdr *hdr = skb_vnet_hdr(skb);
	int err;
	int i;

	if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
		pr_debug("%s: short packet %i\n", dev->name, len);
		dev->stats.rx_length_errors++;
		goto drop;
	}

	if (vi->mergeable_rx_bufs) {
		struct virtio_net_hdr_mrg_rxbuf *mhdr = skb_vnet_hdr(skb);
		unsigned int copy;
		char *p = page_address(skb_shinfo(skb)->frags[0].page);

		if (len > PAGE_SIZE)
			len = PAGE_SIZE;
		len -= sizeof(struct virtio_net_hdr_mrg_rxbuf);

		memcpy(hdr, p, sizeof(*mhdr));
		p += sizeof(*mhdr);

		copy = len;
		if (copy > skb_tailroom(skb))
			copy = skb_tailroom(skb);

		memcpy(skb_put(skb, copy), p, copy);

		len -= copy;

		if (!len) {
			give_a_page(vi, skb_shinfo(skb)->frags[0].page);
			skb_shinfo(skb)->nr_frags--;
		} else {
			skb_shinfo(skb)->frags[0].page_offset +=
				sizeof(*mhdr) + copy;
			skb_shinfo(skb)->frags[0].size = len;
			skb->data_len += len;
			skb->len += len;
		}

		while (--mhdr->num_buffers) {
			struct sk_buff *nskb;

			i = skb_shinfo(skb)->nr_frags;
			if (i >= MAX_SKB_FRAGS) {
				pr_debug("%s: packet too long %d\n", dev->name,
					 len);
				dev->stats.rx_length_errors++;
				goto drop;
			}

			nskb = vi->rvq->vq_ops->get_buf(vi->rvq, &len);
			if (!nskb) {
				pr_debug("%s: rx error: %d buffers missing\n",
					 dev->name, mhdr->num_buffers);
				dev->stats.rx_length_errors++;
				goto drop;
			}

			__skb_unlink(nskb, &vi->recv);
			vi->num--;

			skb_shinfo(skb)->frags[i] = skb_shinfo(nskb)->frags[0];
			skb_shinfo(nskb)->nr_frags = 0;
			kfree_skb(nskb);

			if (len > PAGE_SIZE)
				len = PAGE_SIZE;

			skb_shinfo(skb)->frags[i].size = len;
			skb_shinfo(skb)->nr_frags++;
			skb->data_len += len;
			skb->len += len;
		}
	} else {
		len -= sizeof(struct virtio_net_hdr);

		if (len <= MAX_PACKET_LEN)
			trim_pages(vi, skb);

		err = pskb_trim(skb, len);
		if (err) {
			pr_debug("%s: pskb_trim failed %i %d\n", dev->name,
				 len, err);
			dev->stats.rx_dropped++;
			goto drop;
		}
	}

	skb->truesize += skb->data_len;
	dev->stats.rx_bytes += skb->len;
	dev->stats.rx_packets++;

	if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
		pr_debug("Needs csum!\n");
		if (!skb_partial_csum_set(skb, hdr->csum_start, hdr->csum_offset))
			goto frame_err;
	}

	skb->protocol = eth_type_trans(skb, dev);
	pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
		 ntohs(skb->protocol), skb->len, skb->pkt_type);

	if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
		pr_debug("GSO!\n");
		switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
		case VIRTIO_NET_HDR_GSO_TCPV4:
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
			break;
		case VIRTIO_NET_HDR_GSO_UDP:
			skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
			break;
		case VIRTIO_NET_HDR_GSO_TCPV6:
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
			break;
		default:
			if (net_ratelimit())
				printk(KERN_WARNING "%s: bad gso type %u.\n",
				       dev->name, hdr->gso_type);
			goto frame_err;
		}

		if (hdr->gso_type & VIRTIO_NET_HDR_GSO_ECN)
			skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;

		skb_shinfo(skb)->gso_size = hdr->gso_size;
		if (skb_shinfo(skb)->gso_size == 0) {
			if (net_ratelimit())
				printk(KERN_WARNING "%s: zero gso size.\n",
				       dev->name);
			goto frame_err;
		}

		/* Header must be checked, and gso_segs computed. */
		skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
		skb_shinfo(skb)->gso_segs = 0;
	}

	netif_receive_skb(skb);
	return;

frame_err:
	dev->stats.rx_frame_errors++;
drop:
	dev_kfree_skb(skb);
}

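/* Post full-sized receive buffers (header + linear data, plus frag pages
 * when big packets are possible) until the ring is full or allocation
 * fails. */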
static void try_fill_recv_maxbufs(struct virtnet_info *vi)
{
	struct sk_buff *skb;
	struct scatterlist sg[2+MAX_SKB_FRAGS];
	int num, err, i;

	sg_init_table(sg, 2+MAX_SKB_FRAGS);
	for (;;) {
		struct virtio_net_hdr *hdr;

		skb = netdev_alloc_skb(vi->dev, MAX_PACKET_LEN);
		if (unlikely(!skb))
			break;

		skb_put(skb, MAX_PACKET_LEN);

		hdr = skb_vnet_hdr(skb);
		sg_set_buf(sg, hdr, sizeof(*hdr));

		if (vi->big_packets) {
			for (i = 0; i < MAX_SKB_FRAGS; i++) {
				skb_frag_t *f = &skb_shinfo(skb)->frags[i];
				f->page = get_a_page(vi, GFP_ATOMIC);
				if (!f->page)
					break;

				f->page_offset = 0;
				f->size = PAGE_SIZE;

				skb->data_len += PAGE_SIZE;
				skb->len += PAGE_SIZE;

				skb_shinfo(skb)->nr_frags++;
			}
		}

		num = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;
		skb_queue_head(&vi->recv, skb);

		err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, num, skb);
		if (err) {
			skb_unlink(skb, &vi->recv);
			trim_pages(vi, skb);
			kfree_skb(skb);
			break;
		}
		vi->num++;
	}
	if (unlikely(vi->num > vi->max))
		vi->max = vi->num;
	vi->rvq->vq_ops->kick(vi->rvq);
}

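/* With mergeable rx buffers, post one page per descriptor instead; the
 * host fills in num_buffers so receive_skb() can reassemble. */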
static void try_fill_recv(struct virtnet_info *vi)
{
	struct sk_buff *skb;
	struct scatterlist sg[1];
	int err;

	if (!vi->mergeable_rx_bufs) {
		try_fill_recv_maxbufs(vi);
		return;
	}

	for (;;) {
		skb_frag_t *f;

		skb = netdev_alloc_skb(vi->dev, GOOD_COPY_LEN + NET_IP_ALIGN);
		if (unlikely(!skb))
			break;

		skb_reserve(skb, NET_IP_ALIGN);

		f = &skb_shinfo(skb)->frags[0];
		f->page = get_a_page(vi, GFP_ATOMIC);
		if (!f->page) {
			kfree_skb(skb);
			break;
		}

		f->page_offset = 0;
		f->size = PAGE_SIZE;

		skb_shinfo(skb)->nr_frags++;

		sg_init_one(sg, page_address(f->page), PAGE_SIZE);
		skb_queue_head(&vi->recv, skb);

		err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, 1, skb);
		if (err) {
			skb_unlink(skb, &vi->recv);
			kfree_skb(skb);
			break;
		}
		vi->num++;
	}
	if (unlikely(vi->num > vi->max))
		vi->max = vi->num;
	vi->rvq->vq_ops->kick(vi->rvq);
}

static void skb_recv_done(struct virtqueue *rvq)
{
	struct virtnet_info *vi = rvq->vdev->priv;
	/* Schedule NAPI; suppress further interrupts if successful. */
	if (napi_schedule_prep(&vi->napi)) {
		rvq->vq_ops->disable_cb(rvq);
		__napi_schedule(&vi->napi);
	}
}

static int virtnet_poll(struct napi_struct *napi, int budget)
{
	struct virtnet_info *vi = container_of(napi, struct virtnet_info, napi);
	struct sk_buff *skb = NULL;
	unsigned int len, received = 0;

again:
	while (received < budget &&
	       (skb = vi->rvq->vq_ops->get_buf(vi->rvq, &len)) != NULL) {
		__skb_unlink(skb, &vi->recv);
		receive_skb(vi->dev, skb, len);
		vi->num--;
		received++;
	}

	/* FIXME: If we oom and completely run out of inbufs, we need
	 * to start a timer trying to fill more. */
	if (vi->num < vi->max / 2)
		try_fill_recv(vi);

	/* Out of packets? */
	if (received < budget) {
		napi_complete(napi);
		if (unlikely(!vi->rvq->vq_ops->enable_cb(vi->rvq))
		    && napi_schedule_prep(napi)) {
			vi->rvq->vq_ops->disable_cb(vi->rvq);
			__napi_schedule(napi);
			goto again;
		}
	}

	return received;
}

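/* Reclaim skbs the host has finished with and update tx statistics. */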
static void free_old_xmit_skbs(struct virtnet_info *vi)
{
	struct sk_buff *skb;
	unsigned int len;

	while ((skb = vi->svq->vq_ops->get_buf(vi->svq, &len)) != NULL) {
		pr_debug("Sent skb %p\n", skb);
		__skb_unlink(skb, &vi->send);
		vi->dev->stats.tx_bytes += skb->len;
		vi->dev->stats.tx_packets++;
		kfree_skb(skb);
	}
}

/* If the virtio transport doesn't always notify us when all in-flight packets
 * are consumed, we fall back to using this function on a timer to free them. */
static void xmit_free(unsigned long data)
{
	struct virtnet_info *vi = (void *)data;

	netif_tx_lock(vi->dev);

	free_old_xmit_skbs(vi);

	if (!skb_queue_empty(&vi->send))
		mod_timer(&vi->xmit_free_timer, jiffies + (HZ/10));

	netif_tx_unlock(vi->dev);
}

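/* Fill in the virtio_net header from skb metadata and add the skb to the
 * send virtqueue; returns nonzero if the ring is full. */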
static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
{
	int num, err;
	struct scatterlist sg[2+MAX_SKB_FRAGS];
	struct virtio_net_hdr_mrg_rxbuf *mhdr = skb_vnet_hdr(skb);
	struct virtio_net_hdr *hdr = skb_vnet_hdr(skb);
	const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;

	sg_init_table(sg, 2+MAX_SKB_FRAGS);

	pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
		hdr->csum_start = skb->csum_start - skb_headroom(skb);
		hdr->csum_offset = skb->csum_offset;
	} else {
		hdr->flags = 0;
		hdr->csum_offset = hdr->csum_start = 0;
	}

	if (skb_is_gso(skb)) {
		hdr->hdr_len = skb_transport_header(skb) - skb->data;
		hdr->gso_size = skb_shinfo(skb)->gso_size;
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
			hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
		else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
			hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
		else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
			hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
		else
			BUG();
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN)
			hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN;
	} else {
		hdr->gso_type = VIRTIO_NET_HDR_GSO_NONE;
		hdr->gso_size = hdr->hdr_len = 0;
	}

	mhdr->num_buffers = 0;

	/* Encode metadata header at front. */
	if (vi->mergeable_rx_bufs)
		sg_set_buf(sg, mhdr, sizeof(*mhdr));
	else
		sg_set_buf(sg, hdr, sizeof(*hdr));

	num = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;

	err = vi->svq->vq_ops->add_buf(vi->svq, sg, num, 0, skb);
	if (!err && !vi->free_in_tasklet)
		mod_timer(&vi->xmit_free_timer, jiffies + (HZ/10));

	return err;
}

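/* Bottom half: retries the deferred last_xmit_skb, since skb_xmit_done
 * may fire when start_xmit won't be called again. */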
static void xmit_tasklet(unsigned long data)
{
	struct virtnet_info *vi = (void *)data;

	netif_tx_lock_bh(vi->dev);
	if (vi->last_xmit_skb && xmit_skb(vi, vi->last_xmit_skb) == 0) {
		vi->svq->vq_ops->kick(vi->svq);
		vi->last_xmit_skb = NULL;
	}
	if (vi->free_in_tasklet)
		free_old_xmit_skbs(vi);
	netif_tx_unlock_bh(vi->dev);
}

static int start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);

again:
	/* Free up any pending old buffers before queueing new ones. */
	free_old_xmit_skbs(vi);

	/* If we have a buffer left over from last time, send it now. */
	if (unlikely(vi->last_xmit_skb) &&
	    xmit_skb(vi, vi->last_xmit_skb) != 0)
		goto stop_queue;

	vi->last_xmit_skb = NULL;

	/* Put new one in send queue and do transmit */
	if (likely(skb)) {
		__skb_queue_head(&vi->send, skb);
		if (xmit_skb(vi, skb) != 0) {
			vi->last_xmit_skb = skb;
			skb = NULL;
			goto stop_queue;
		}
	}
done:
	vi->svq->vq_ops->kick(vi->svq);
	return NETDEV_TX_OK;

stop_queue:
	pr_debug("%s: virtio not prepared to send\n", dev->name);
	netif_stop_queue(dev);

	/* Activate callback for using skbs: if this returns false it
	 * means some were used in the meantime. */
	if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) {
		vi->svq->vq_ops->disable_cb(vi->svq);
		netif_start_queue(dev);
		goto again;
	}
	if (skb) {
		/* Drop this skb: we only queue one. */
		vi->dev->stats.tx_dropped++;
		kfree_skb(skb);
	}
	goto done;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void virtnet_netpoll(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);

	napi_schedule(&vi->napi);
}
#endif

static int virtnet_open(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);

	napi_enable(&vi->napi);

	/* If all buffers were filled by other side before we napi_enabled, we
	 * won't get another interrupt, so process any outstanding packets
	 * now.  virtnet_poll wants to re-enable the queue, so we disable it
	 * here.  We synchronize against interrupts via NAPI_STATE_SCHED. */
	if (napi_schedule_prep(&vi->napi)) {
		vi->rvq->vq_ops->disable_cb(vi->rvq);
		__napi_schedule(&vi->napi);
	}
	return 0;
}

/*
 * Send command via the control virtqueue and check status.  Commands
 * supported by the hypervisor, as indicated by feature bits, should
 * never fail unless improperly formatted.
 */
static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
				 struct scatterlist *data, int out, int in)
{
	struct scatterlist sg[VIRTNET_SEND_COMMAND_SG_MAX + 2];
	struct virtio_net_ctrl_hdr ctrl;
	virtio_net_ctrl_ack status = ~0;
	unsigned int tmp;

	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)) {
		BUG();  /* Caller should know better */
		return false;
	}

	BUG_ON(out + in > VIRTNET_SEND_COMMAND_SG_MAX);

	out++; /* Add header */
	in++; /* Add return status */

	ctrl.class = class;
	ctrl.cmd = cmd;

	sg_init_table(sg, out + in);

	sg_set_buf(&sg[0], &ctrl, sizeof(ctrl));
	memcpy(&sg[1], data, sizeof(struct scatterlist) * (out + in - 2));
	sg_set_buf(&sg[out + in - 1], &status, sizeof(status));

	if (vi->cvq->vq_ops->add_buf(vi->cvq, sg, out, in, vi) != 0)
		BUG();

	vi->cvq->vq_ops->kick(vi->cvq);

	/*
	 * Spin for a response, the kick causes an ioport write, trapping
	 * into the hypervisor, so the request should be handled immediately.
	 */
	while (!vi->cvq->vq_ops->get_buf(vi->cvq, &tmp))
		cpu_relax();

	return status == VIRTIO_NET_OK;
}

static int virtnet_close(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);

	napi_disable(&vi->napi);

	return 0;
}

static int virtnet_set_tx_csum(struct net_device *dev, u32 data)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct virtio_device *vdev = vi->vdev;

	if (data && !virtio_has_feature(vdev, VIRTIO_NET_F_CSUM))
		return -ENOSYS;

	return ethtool_op_set_tx_hw_csum(dev, data);
}

static void virtnet_set_rx_mode(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct scatterlist sg;
	u8 promisc, allmulti;

	/* We can't dynamically set ndo_set_rx_mode, so return gracefully */
	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
		return;

	promisc = ((dev->flags & IFF_PROMISC) != 0 || dev->uc_count > 0);
	allmulti = ((dev->flags & IFF_ALLMULTI) != 0 || dev->mc_count > 0);

	sg_set_buf(&sg, &promisc, sizeof(promisc));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
				  VIRTIO_NET_CTRL_RX_PROMISC,
				  &sg, 1, 0))
		dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
			 promisc ? "en" : "dis");

	sg_set_buf(&sg, &allmulti, sizeof(allmulti));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
				  VIRTIO_NET_CTRL_RX_ALLMULTI,
				  &sg, 1, 0))
		dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
			 allmulti ? "en" : "dis");
}

static struct ethtool_ops virtnet_ethtool_ops = {
	.set_tx_csum = virtnet_set_tx_csum,
	.set_sg = ethtool_op_set_sg,
	.set_tso = ethtool_op_set_tso,
	.get_link = ethtool_op_get_link,
};

#define MIN_MTU 68
#define MAX_MTU 65535

static int virtnet_change_mtu(struct net_device *dev, int new_mtu)
{
	if (new_mtu < MIN_MTU || new_mtu > MAX_MTU)
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}

static const struct net_device_ops virtnet_netdev = {
	.ndo_open            = virtnet_open,
	.ndo_stop            = virtnet_close,
	.ndo_start_xmit      = start_xmit,
	.ndo_validate_addr   = eth_validate_addr,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_set_rx_mode     = virtnet_set_rx_mode,
	.ndo_change_mtu      = virtnet_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = virtnet_netpoll,
#endif
};

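/* Read link status from config space and bring the carrier (and tx queue)
 * up or down to match. */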
static void virtnet_update_status(struct virtnet_info *vi)
{
	u16 v;

	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS))
		return;

	vi->vdev->config->get(vi->vdev,
			      offsetof(struct virtio_net_config, status),
			      &v, sizeof(v));

	/* Ignore unknown (future) status bits */
	v &= VIRTIO_NET_S_LINK_UP;

	if (vi->status == v)
		return;

	vi->status = v;

	if (vi->status & VIRTIO_NET_S_LINK_UP) {
		netif_carrier_on(vi->dev);
		netif_wake_queue(vi->dev);
	} else {
		netif_carrier_off(vi->dev);
		netif_stop_queue(vi->dev);
	}
}

static void virtnet_config_changed(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;

	virtnet_update_status(vi);
}

static int virtnet_probe(struct virtio_device *vdev)
{
	int err;
	struct net_device *dev;
	struct virtnet_info *vi;

	/* Allocate ourselves a network device with room for our info */
	dev = alloc_etherdev(sizeof(struct virtnet_info));
	if (!dev)
		return -ENOMEM;

	/* Set up network device as normal. */
	dev->netdev_ops = &virtnet_netdev;
	dev->features = NETIF_F_HIGHDMA;
	SET_ETHTOOL_OPS(dev, &virtnet_ethtool_ops);
	SET_NETDEV_DEV(dev, &vdev->dev);

	/* Do we support "hardware" checksums? */
	if (csum && virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
		/* This opens up the world of extra features. */
		dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
		if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
			dev->features |= NETIF_F_TSO | NETIF_F_UFO
				| NETIF_F_TSO_ECN | NETIF_F_TSO6;
		}
		/* Individual feature bits: what can host handle? */
		if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4))
			dev->features |= NETIF_F_TSO;
		if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6))
			dev->features |= NETIF_F_TSO6;
		if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
			dev->features |= NETIF_F_TSO_ECN;
		if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO))
			dev->features |= NETIF_F_UFO;
	}

	/* Configuration may specify what MAC to use.  Otherwise random. */
	if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) {
		vdev->config->get(vdev,
				  offsetof(struct virtio_net_config, mac),
				  dev->dev_addr, dev->addr_len);
	} else
		random_ether_addr(dev->dev_addr);

	/* Set up our device-specific information */
	vi = netdev_priv(dev);
	netif_napi_add(dev, &vi->napi, virtnet_poll, napi_weight);
	vi->dev = dev;
	vi->vdev = vdev;
	vdev->priv = vi;
	vi->pages = NULL;

	/* If they give us a callback when all buffers are done, we don't need
	 * the timer. */
	vi->free_in_tasklet = virtio_has_feature(vdev, VIRTIO_F_NOTIFY_ON_EMPTY);

	/* If we can receive ANY GSO packets, we must allocate large ones. */
	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4)
	    || virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6)
	    || virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN))
		vi->big_packets = true;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
		vi->mergeable_rx_bufs = true;

	/* We expect two virtqueues, receive then send. */
	vi->rvq = vdev->config->find_vq(vdev, 0, skb_recv_done);
	if (IS_ERR(vi->rvq)) {
		err = PTR_ERR(vi->rvq);
		goto free;
	}

	vi->svq = vdev->config->find_vq(vdev, 1, skb_xmit_done);
	if (IS_ERR(vi->svq)) {
		err = PTR_ERR(vi->svq);
		goto free_recv;
	}

	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)) {
		vi->cvq = vdev->config->find_vq(vdev, 2, NULL);
		if (IS_ERR(vi->cvq)) {
			err = PTR_ERR(vi->cvq);
			goto free_send;
		}
	}

	/* Initialize our empty receive and send queues. */
	skb_queue_head_init(&vi->recv);
	skb_queue_head_init(&vi->send);

	tasklet_init(&vi->tasklet, xmit_tasklet, (unsigned long)vi);

	if (!vi->free_in_tasklet)
		setup_timer(&vi->xmit_free_timer, xmit_free, (unsigned long)vi);

	err = register_netdev(dev);
	if (err) {
		pr_debug("virtio_net: registering device failed\n");
		goto free_ctrl;
	}

	/* Last of all, set up some receive buffers. */
	try_fill_recv(vi);

	/* If we didn't even get one input buffer, we're useless. */
	if (vi->num == 0) {
		err = -ENOMEM;
		goto unregister;
	}

	vi->status = VIRTIO_NET_S_LINK_UP;
	virtnet_update_status(vi);

	pr_debug("virtnet: registered device %s\n", dev->name);
	return 0;

unregister:
	unregister_netdev(dev);
free_ctrl:
	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ))
		vdev->config->del_vq(vi->cvq);
free_send:
	vdev->config->del_vq(vi->svq);
free_recv:
	vdev->config->del_vq(vi->rvq);
free:
	free_netdev(dev);
	return err;
}

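/* Tear down: reset the device, reclaim all queued skbs and pooled pages,
 * then unregister and free the netdev. */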
static void virtnet_remove(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;
	struct sk_buff *skb;

	/* Stop all the virtqueues. */
	vdev->config->reset(vdev);

	if (!vi->free_in_tasklet)
		del_timer_sync(&vi->xmit_free_timer);

	/* Free our skbs in send and recv queues, if any. */
	while ((skb = __skb_dequeue(&vi->recv)) != NULL) {
		kfree_skb(skb);
		vi->num--;
	}
	__skb_queue_purge(&vi->send);

	BUG_ON(vi->num != 0);

	vdev->config->del_vq(vi->svq);
	vdev->config->del_vq(vi->rvq);
	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ))
		vdev->config->del_vq(vi->cvq);
	unregister_netdev(vi->dev);

	while (vi->pages)
		__free_pages(get_a_page(vi, GFP_KERNEL), 0);

	free_netdev(vi->dev);
}

static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static unsigned int features[] = {
	VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM,
	VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC,
	VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6,
	VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6,
	VIRTIO_NET_F_GUEST_ECN, /* We don't yet handle UFO input. */
	VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ,
	VIRTIO_NET_F_CTRL_RX,
	VIRTIO_F_NOTIFY_ON_EMPTY,
};

static struct virtio_driver virtio_net = {
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.id_table = id_table,
	.probe = virtnet_probe,
	.remove = __devexit_p(virtnet_remove),
	.config_changed = virtnet_config_changed,
};

static int __init init(void)
{
	return register_virtio_driver(&virtio_net);
}

static void __exit fini(void)
{
	unregister_virtio_driver(&virtio_net);
}
module_init(init);
module_exit(fini);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio network driver");
MODULE_LICENSE("GPL");