#include <linux/etherdevice.h>
#include <linux/if_tap.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/nsproxy.h>
#include <linux/compat.h>
#include <linux/if_tun.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/cache.h>
#include <linux/sched/signal.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/cdev.h>
#include <linux/idr.h>
#include <linux/fs.h>
#include <linux/uio.h>

#include <net/net_namespace.h>
#include <net/rtnetlink.h>
#include <net/sock.h>
#include <linux/virtio_net.h>
#include <linux/skb_array.h>

#define TAP_IFFEATURES (IFF_VNET_HDR | IFF_MULTI_QUEUE)

#define TAP_VNET_LE 0x80000000
#define TAP_VNET_BE 0x40000000

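/*
 * TAP_VNET_LE/TAP_VNET_BE are private queue flags recording the byte order
 * userspace requested for virtio_net headers (TUNSETVNETLE/TUNSETVNETBE).
 * The helpers below consult them and fall back to the legacy virtio
 * endianness when neither override is set.
 */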
#ifdef CONFIG_TUN_VNET_CROSS_LE
static inline bool tap_legacy_is_little_endian(struct tap_queue *q)
{
	return q->flags & TAP_VNET_BE ? false :
		virtio_legacy_is_little_endian();
}

static long tap_get_vnet_be(struct tap_queue *q, int __user *sp)
{
	int s = !!(q->flags & TAP_VNET_BE);

	if (put_user(s, sp))
		return -EFAULT;

	return 0;
}

static long tap_set_vnet_be(struct tap_queue *q, int __user *sp)
{
	int s;

	if (get_user(s, sp))
		return -EFAULT;

	if (s)
		q->flags |= TAP_VNET_BE;
	else
		q->flags &= ~TAP_VNET_BE;

	return 0;
}
#else
static inline bool tap_legacy_is_little_endian(struct tap_queue *q)
{
	return virtio_legacy_is_little_endian();
}

static long tap_get_vnet_be(struct tap_queue *q, int __user *argp)
{
	return -EINVAL;
}

static long tap_set_vnet_be(struct tap_queue *q, int __user *argp)
{
	return -EINVAL;
}
#endif /* CONFIG_TUN_VNET_CROSS_LE */

static inline bool tap_is_little_endian(struct tap_queue *q)
{
	return q->flags & TAP_VNET_LE ||
		tap_legacy_is_little_endian(q);
}

static inline u16 tap16_to_cpu(struct tap_queue *q, __virtio16 val)
{
	return __virtio16_to_cpu(tap_is_little_endian(q), val);
}

static inline __virtio16 cpu_to_tap16(struct tap_queue *q, u16 val)
{
	return __cpu_to_virtio16(tap_is_little_endian(q), val);
}

static struct proto tap_proto = {
	.name = "tap",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct tap_queue),
};

#define TAP_NUM_DEVS (1U << MINORBITS)

static LIST_HEAD(major_list);

struct major_info {
	struct rcu_head rcu;
	dev_t major;
	struct idr minor_idr;
	spinlock_t minor_lock;
	const char *device_name;
	struct list_head next;
};

#define GOODCOPY_LEN 128

static const struct proto_ops tap_socket_ops;

#define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO)
#define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG | NETIF_F_FRAGLIST)

static struct tap_dev *tap_dev_get_rcu(const struct net_device *dev)
{
	return rcu_dereference(dev->rx_handler_data);
}

/*
 * RCU usage:
 * The tap_queue and the macvlan_dev are loosely coupled, the
 * pointers from one to the other can only be read while rcu_read_lock
 * or rtnl is held.
 *
 * Both the file and the macvlan_dev hold a reference on the tap_queue
 * through sock_hold(&q->sk). When the macvlan_dev goes away first,
 * q->tap becomes inaccessible. When the file gets closed,
 * tap_get_queue() fails.
 *
 * There may still be references to the struct sock inside of the
 * queue from outbound SKBs, but these never reference back to the
 * file or the dev. The data structure is freed through __sk_free
 * when both our references and any pending SKBs are gone.
 */

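/*
 * Re-enable a queue that was previously detached with IFF_DETACH_QUEUE:
 * put it back into the taps[] array so it can receive traffic again.
 * Requires RTNL.
 */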
static int tap_enable_queue(struct tap_dev *tap, struct file *file,
			    struct tap_queue *q)
{
	int err = -EINVAL;

	ASSERT_RTNL();

	if (q->enabled)
		goto out;

	err = 0;
	rcu_assign_pointer(tap->taps[tap->numvtaps], q);
	q->queue_index = tap->numvtaps;
	q->enabled = true;

	tap->numvtaps++;
out:
	return err;
}

/* Requires RTNL */
static int tap_set_queue(struct tap_dev *tap, struct file *file,
			 struct tap_queue *q)
{
	if (tap->numqueues == MAX_TAP_QUEUES)
		return -EBUSY;

	rcu_assign_pointer(q->tap, tap);
	rcu_assign_pointer(tap->taps[tap->numvtaps], q);
	sock_hold(&q->sk);

	q->file = file;
	q->queue_index = tap->numvtaps;
	q->enabled = true;
	file->private_data = q;
	list_add_tail(&q->next, &tap->queue_list);

	tap->numvtaps++;
	tap->numqueues++;

	return 0;
}

static int tap_disable_queue(struct tap_queue *q)
{
	struct tap_dev *tap;
	struct tap_queue *nq;

	ASSERT_RTNL();
	if (!q->enabled)
		return -EINVAL;

	tap = rtnl_dereference(q->tap);

	if (tap) {
		int index = q->queue_index;
		BUG_ON(index >= tap->numvtaps);
		nq = rtnl_dereference(tap->taps[tap->numvtaps - 1]);
		nq->queue_index = index;

		rcu_assign_pointer(tap->taps[index], nq);
		RCU_INIT_POINTER(tap->taps[tap->numvtaps - 1], NULL);
		q->enabled = false;

		tap->numvtaps--;
	}

	return 0;
}

/*
 * The file owning the queue got closed, give up both
 * the reference that the file holds as well as the
 * one from the macvlan_dev if that still exists.
 *
 * Using the spinlock makes sure that we don't get
 * to the queue again after destroying it.
 */
static void tap_put_queue(struct tap_queue *q)
{
	struct tap_dev *tap;

	rtnl_lock();
	tap = rtnl_dereference(q->tap);

	if (tap) {
		if (q->enabled)
			BUG_ON(tap_disable_queue(q));

		tap->numqueues--;
		RCU_INIT_POINTER(q->tap, NULL);
		sock_put(&q->sk);
		list_del_init(&q->next);
	}

	rtnl_unlock();

	synchronize_rcu();
	sock_put(&q->sk);
}

/*
 * Select a queue based on the rxq of the device on which this packet
 * arrived. If the incoming device is not mq, calculate a flow hash
 * to select a queue. If all else fails, find the first available queue.
 * Cache tap->numvtaps since it can become zero during the execution
 * of this function.
 */
static struct tap_queue *tap_get_queue(struct tap_dev *tap,
				       struct sk_buff *skb)
{
	struct tap_queue *queue = NULL;
	/* Access to taps array is protected by rcu, but access to numvtaps
	 * isn't. Below we use it to lookup a queue, but treat it as a hint
	 * and validate that the result isn't NULL - in case we are
	 * racing against queue removal.
	 */
	int numvtaps = READ_ONCE(tap->numvtaps);
	__u32 rxq;

	if (!numvtaps)
		goto out;

	if (numvtaps == 1)
		goto single;

	/* Check if we can use flow to select a queue */
	rxq = skb_get_hash(skb);
	if (rxq) {
		queue = rcu_dereference(tap->taps[rxq % numvtaps]);
		goto out;
	}

	if (likely(skb_rx_queue_recorded(skb))) {
		rxq = skb_get_rx_queue(skb);

		while (unlikely(rxq >= numvtaps))
			rxq -= numvtaps;

		queue = rcu_dereference(tap->taps[rxq]);
		goto out;
	}

single:
	queue = rcu_dereference(tap->taps[0]);
out:
	return queue;
}

/*
 * The net_device is going away, give up the reference
 * that it holds on all queues and safely set the pointer
 * from the queues to NULL.
 */
void tap_del_queues(struct tap_dev *tap)
{
	struct tap_queue *q, *tmp;

	ASSERT_RTNL();
	list_for_each_entry_safe(q, tmp, &tap->queue_list, next) {
		list_del_init(&q->next);
		RCU_INIT_POINTER(q->tap, NULL);
		if (q->enabled)
			tap->numvtaps--;
		tap->numqueues--;
		sock_put(&q->sk);
	}
	BUG_ON(tap->numvtaps);
	BUG_ON(tap->numqueues);
	/* guarantee that any future tap_set_queue will fail */
	tap->numvtaps = MAX_TAP_QUEUES;
}
EXPORT_SYMBOL_GPL(tap_del_queues);

rx_handler_result_t tap_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct net_device *dev = skb->dev;
	struct tap_dev *tap;
	struct tap_queue *q;
	netdev_features_t features = TAP_FEATURES;

	tap = tap_dev_get_rcu(dev);
	if (!tap)
		return RX_HANDLER_PASS;

	q = tap_get_queue(tap, skb);
	if (!q)
		return RX_HANDLER_PASS;

	skb_push(skb, ETH_HLEN);

	/* Apply the forward feature mask so that we perform segmentation
	 * according to the user's wishes. This only works if VNET_HDR is
	 * enabled.
	 */
	if (q->flags & IFF_VNET_HDR)
		features |= tap->tap_features;
	if (netif_needs_gso(skb, features)) {
		struct sk_buff *segs = __skb_gso_segment(skb, features, false);

		if (IS_ERR(segs))
			goto drop;

		if (!segs) {
			if (ptr_ring_produce(&q->ring, skb))
				goto drop;
			goto wake_up;
		}

		consume_skb(skb);
		while (segs) {
			struct sk_buff *nskb = segs->next;

			segs->next = NULL;
			if (ptr_ring_produce(&q->ring, segs)) {
				kfree_skb(segs);
				kfree_skb_list(nskb);
				break;
			}
			segs = nskb;
		}
	} else {
		/* If we receive a partial checksum and the tap side
		 * doesn't support checksum offload, compute the checksum.
		 * Note: it doesn't matter which checksum feature to
		 * check, we either support them all or none.
		 */
		if (skb->ip_summed == CHECKSUM_PARTIAL &&
		    !(features & NETIF_F_CSUM_MASK) &&
		    skb_checksum_help(skb))
			goto drop;
		if (ptr_ring_produce(&q->ring, skb))
			goto drop;
	}

wake_up:
	wake_up_interruptible_poll(sk_sleep(&q->sk), EPOLLIN | EPOLLRDNORM | EPOLLRDBAND);
	return RX_HANDLER_CONSUMED;

drop:
	/* Count errors/drops only here, thus don't care about args. */
	if (tap->count_rx_dropped)
		tap->count_rx_dropped(tap);
	kfree_skb(skb);
	return RX_HANDLER_CONSUMED;
}
EXPORT_SYMBOL_GPL(tap_handle_frame);

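/* Look up the major_info entry for a character device major under RCU. */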
static struct major_info *tap_get_major(int major)
{
	struct major_info *tap_major;

	list_for_each_entry_rcu(tap_major, &major_list, next) {
		if (tap_major->major == major)
			return tap_major;
	}

	return NULL;
}

int tap_get_minor(dev_t major, struct tap_dev *tap)
{
	int retval = -ENOMEM;
	struct major_info *tap_major;

	rcu_read_lock();
	tap_major = tap_get_major(MAJOR(major));
	if (!tap_major) {
		retval = -EINVAL;
		goto unlock;
	}

	spin_lock(&tap_major->minor_lock);
	retval = idr_alloc(&tap_major->minor_idr, tap, 1, TAP_NUM_DEVS, GFP_ATOMIC);
	if (retval >= 0) {
		tap->minor = retval;
	} else if (retval == -ENOSPC) {
		netdev_err(tap->dev, "Too many tap devices\n");
		retval = -EINVAL;
	}
	spin_unlock(&tap_major->minor_lock);

unlock:
	rcu_read_unlock();
	return retval < 0 ? retval : 0;
}
EXPORT_SYMBOL_GPL(tap_get_minor);

void tap_free_minor(dev_t major, struct tap_dev *tap)
{
	struct major_info *tap_major;

	rcu_read_lock();
	tap_major = tap_get_major(MAJOR(major));
	if (!tap_major) {
		goto unlock;
	}

	spin_lock(&tap_major->minor_lock);
	if (tap->minor) {
		idr_remove(&tap_major->minor_idr, tap->minor);
		tap->minor = 0;
	}
	spin_unlock(&tap_major->minor_lock);

unlock:
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(tap_free_minor);

static struct tap_dev *dev_get_by_tap_file(int major, int minor)
{
	struct net_device *dev = NULL;
	struct tap_dev *tap;
	struct major_info *tap_major;

	rcu_read_lock();
	tap_major = tap_get_major(major);
	if (!tap_major) {
		tap = NULL;
		goto unlock;
	}

	spin_lock(&tap_major->minor_lock);
	tap = idr_find(&tap_major->minor_idr, minor);
	if (tap) {
		dev = tap->dev;
		dev_hold(dev);
	}
	spin_unlock(&tap_major->minor_lock);

unlock:
	rcu_read_unlock();
	return tap;
}

static void tap_sock_write_space(struct sock *sk)
{
	wait_queue_head_t *wqueue;

	if (!sock_writeable(sk) ||
	    !test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags))
		return;

	wqueue = sk_sleep(sk);
	if (wqueue && waitqueue_active(wqueue))
		wake_up_interruptible_poll(wqueue, EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND);
}

static void tap_sock_destruct(struct sock *sk)
{
	struct tap_queue *q = container_of(sk, struct tap_queue, sk);

	ptr_ring_cleanup(&q->ring, __skb_array_destroy_skb);
}

static int tap_open(struct inode *inode, struct file *file)
{
	struct net *net = current->nsproxy->net_ns;
	struct tap_dev *tap;
	struct tap_queue *q;
	int err = -ENODEV;

	rtnl_lock();
	tap = dev_get_by_tap_file(imajor(inode), iminor(inode));
	if (!tap)
		goto err;

	err = -ENOMEM;
	q = (struct tap_queue *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
					 &tap_proto, 0);
	if (!q)
		goto err;
	if (ptr_ring_init(&q->ring, tap->dev->tx_queue_len, GFP_KERNEL)) {
		sk_free(&q->sk);
		goto err;
	}

	RCU_INIT_POINTER(q->sock.wq, &q->wq);
	init_waitqueue_head(&q->wq.wait);
	q->sock.type = SOCK_RAW;
	q->sock.state = SS_CONNECTED;
	q->sock.file = file;
	q->sock.ops = &tap_socket_ops;
	sock_init_data(&q->sock, &q->sk);
	q->sk.sk_write_space = tap_sock_write_space;
	q->sk.sk_destruct = tap_sock_destruct;
	q->flags = IFF_VNET_HDR | IFF_NO_PI | IFF_TAP;
	q->vnet_hdr_sz = sizeof(struct virtio_net_hdr);

	/*
	 * so far only KVM virtio_net uses tap, enable zero copy between
	 * guest kernel and host kernel when lower device supports zerocopy
	 *
	 * The macvlan supports zerocopy iff the lower device supports zero
	 * copy so we don't have to look at the lower device directly.
	 */
	if ((tap->dev->features & NETIF_F_HIGHDMA) && (tap->dev->features & NETIF_F_SG))
		sock_set_flag(&q->sk, SOCK_ZEROCOPY);

	err = tap_set_queue(tap, file, q);
	if (err) {
		/* tap_sock_destruct() will take care of freeing ptr_ring */
		goto err_put;
	}

	dev_put(tap->dev);

	rtnl_unlock();
	return err;

err_put:
	sock_put(&q->sk);
err:
	if (tap)
		dev_put(tap->dev);

	rtnl_unlock();
	return err;
}

static int tap_release(struct inode *inode, struct file *file)
{
	struct tap_queue *q = file->private_data;
	tap_put_queue(q);
	return 0;
}

static __poll_t tap_poll(struct file *file, poll_table *wait)
{
	struct tap_queue *q = file->private_data;
	__poll_t mask = EPOLLERR;

	if (!q)
		goto out;

	mask = 0;
	poll_wait(file, &q->wq.wait, wait);

	if (!ptr_ring_empty(&q->ring))
		mask |= EPOLLIN | EPOLLRDNORM;

	if (sock_writeable(&q->sk) ||
	    (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &q->sock.flags) &&
	     sock_writeable(&q->sk)))
		mask |= EPOLLOUT | EPOLLWRNORM;

out:
	return mask;
}

static inline struct sk_buff *tap_alloc_skb(struct sock *sk, size_t prepad,
					    size_t len, size_t linear,
					    int noblock, int *err)
{
	struct sk_buff *skb;

	/* Under a page? Don't bother with paged skb. */
	if (prepad + len < PAGE_SIZE || !linear)
		linear = len;

	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
				   err, 0);
	if (!skb)
		return NULL;

	skb_reserve(skb, prepad);
	skb_put(skb, linear);
	skb->data_len = len - linear;
	skb->len += len - linear;

	return skb;
}

/* Neighbour code has some assumptions on HH_DATA_MOD alignment */
#define TAP_RESERVE HH_DATA_OFF(ETH_HLEN)

/* Get packet from user space buffer */
static ssize_t tap_get_user(struct tap_queue *q, void *msg_control,
			    struct iov_iter *from, int noblock)
{
	int good_linear = SKB_MAX_HEAD(TAP_RESERVE);
	struct sk_buff *skb;
	struct tap_dev *tap;
	unsigned long total_len = iov_iter_count(from);
	unsigned long len = total_len;
	int err;
	struct virtio_net_hdr vnet_hdr = { 0 };
	int vnet_hdr_len = 0;
	int copylen = 0;
	int depth;
	bool zerocopy = false;
	size_t linear;

	if (q->flags & IFF_VNET_HDR) {
		vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);

		err = -EINVAL;
		if (len < vnet_hdr_len)
			goto err;
		len -= vnet_hdr_len;

		err = -EFAULT;
		if (!copy_from_iter_full(&vnet_hdr, sizeof(vnet_hdr), from))
			goto err;
		iov_iter_advance(from, vnet_hdr_len - sizeof(vnet_hdr));
		if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
		     tap16_to_cpu(q, vnet_hdr.csum_start) +
		     tap16_to_cpu(q, vnet_hdr.csum_offset) + 2 >
			     tap16_to_cpu(q, vnet_hdr.hdr_len))
			vnet_hdr.hdr_len = cpu_to_tap16(q,
				 tap16_to_cpu(q, vnet_hdr.csum_start) +
				 tap16_to_cpu(q, vnet_hdr.csum_offset) + 2);
		err = -EINVAL;
		if (tap16_to_cpu(q, vnet_hdr.hdr_len) > len)
			goto err;
	}

	err = -EINVAL;
	if (unlikely(len < ETH_HLEN))
		goto err;

	if (msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY)) {
		struct iov_iter i;

		copylen = vnet_hdr.hdr_len ?
			tap16_to_cpu(q, vnet_hdr.hdr_len) : GOODCOPY_LEN;
		if (copylen > good_linear)
			copylen = good_linear;
		else if (copylen < ETH_HLEN)
			copylen = ETH_HLEN;
		linear = copylen;
		i = *from;
		iov_iter_advance(&i, copylen);
		if (iov_iter_npages(&i, INT_MAX) <= MAX_SKB_FRAGS)
			zerocopy = true;
	}

	if (!zerocopy) {
		copylen = len;
		linear = tap16_to_cpu(q, vnet_hdr.hdr_len);
		if (linear > good_linear)
			linear = good_linear;
		else if (linear < ETH_HLEN)
			linear = ETH_HLEN;
	}

	skb = tap_alloc_skb(&q->sk, TAP_RESERVE, copylen,
			    linear, noblock, &err);
	if (!skb)
		goto err;

	if (zerocopy)
		err = zerocopy_sg_from_iter(skb, from);
	else
		err = skb_copy_datagram_from_iter(skb, 0, from, len);

	if (err)
		goto err_kfree;

	skb_set_network_header(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	skb->protocol = eth_hdr(skb)->h_proto;

	if (vnet_hdr_len) {
		err = virtio_net_hdr_to_skb(skb, &vnet_hdr,
					    tap_is_little_endian(q));
		if (err)
			goto err_kfree;
	}

	skb_probe_transport_header(skb);

	/* Move network header to the right position for VLAN tagged packets */
	if ((skb->protocol == htons(ETH_P_8021Q) ||
	     skb->protocol == htons(ETH_P_8021AD)) &&
	    __vlan_get_protocol(skb, skb->protocol, &depth) != 0)
		skb_set_network_header(skb, depth);

	rcu_read_lock();
	tap = rcu_dereference(q->tap);
	/* copy skb_ubuf_info for callback when skb has no error */
	if (zerocopy) {
		skb_shinfo(skb)->destructor_arg = msg_control;
		skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
		skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
	} else if (msg_control) {
		struct ubuf_info *uarg = msg_control;
		uarg->callback(uarg, false);
	}

	if (tap) {
		skb->dev = tap->dev;
		dev_queue_xmit(skb);
	} else {
		kfree_skb(skb);
	}
	rcu_read_unlock();

	return total_len;

err_kfree:
	kfree_skb(skb);

err:
	rcu_read_lock();
	tap = rcu_dereference(q->tap);
	if (tap && tap->count_tx_dropped)
		tap->count_tx_dropped(tap);
	rcu_read_unlock();

	return err;
}

static ssize_t tap_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct tap_queue *q = file->private_data;

	return tap_get_user(q, NULL, from, file->f_flags & O_NONBLOCK);
}

/* Put packet to the user space buffer */
static ssize_t tap_put_user(struct tap_queue *q,
			    const struct sk_buff *skb,
			    struct iov_iter *iter)
{
	int ret;
	int vnet_hdr_len = 0;
	int vlan_offset = 0;
	int total;

	if (q->flags & IFF_VNET_HDR) {
		int vlan_hlen = skb_vlan_tag_present(skb) ? VLAN_HLEN : 0;
		struct virtio_net_hdr vnet_hdr;

		vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);
		if (iov_iter_count(iter) < vnet_hdr_len)
			return -EINVAL;

		if (virtio_net_hdr_from_skb(skb, &vnet_hdr,
					    tap_is_little_endian(q), true,
					    vlan_hlen))
			BUG();

		if (copy_to_iter(&vnet_hdr, sizeof(vnet_hdr), iter) !=
		    sizeof(vnet_hdr))
			return -EFAULT;

		iov_iter_advance(iter, vnet_hdr_len - sizeof(vnet_hdr));
	}
	total = vnet_hdr_len;
	total += skb->len;

	if (skb_vlan_tag_present(skb)) {
		struct {
			__be16 h_vlan_proto;
			__be16 h_vlan_TCI;
		} veth;
		veth.h_vlan_proto = skb->vlan_proto;
		veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb));

		vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
		total += VLAN_HLEN;

		ret = skb_copy_datagram_iter(skb, 0, iter, vlan_offset);
		if (ret || !iov_iter_count(iter))
			goto done;

		ret = copy_to_iter(&veth, sizeof(veth), iter);
		if (ret != sizeof(veth) || !iov_iter_count(iter))
			goto done;
	}

	ret = skb_copy_datagram_iter(skb, vlan_offset, iter,
				     skb->len - vlan_offset);

done:
	return ret ? ret : total;
}

static ssize_t tap_do_read(struct tap_queue *q,
			   struct iov_iter *to,
			   int noblock, struct sk_buff *skb)
{
	DEFINE_WAIT(wait);
	ssize_t ret = 0;

	if (!iov_iter_count(to)) {
		kfree_skb(skb);
		return 0;
	}

	if (skb)
		goto put;

	while (1) {
		if (!noblock)
			prepare_to_wait(sk_sleep(&q->sk), &wait,
					TASK_INTERRUPTIBLE);

		/* Read frames from the queue */
		skb = ptr_ring_consume(&q->ring);
		if (skb)
			break;
		if (noblock) {
			ret = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		/* Nothing to read, let's sleep */
		schedule();
	}
	if (!noblock)
		finish_wait(sk_sleep(&q->sk), &wait);

put:
	if (skb) {
		ret = tap_put_user(q, skb, to);
		if (unlikely(ret < 0))
			kfree_skb(skb);
		else
			consume_skb(skb);
	}
	return ret;
}

static ssize_t tap_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct tap_queue *q = file->private_data;
	ssize_t len = iov_iter_count(to), ret;

	ret = tap_do_read(q, to, file->f_flags & O_NONBLOCK, NULL);
	ret = min_t(ssize_t, ret, len);
	if (ret > 0)
		iocb->ki_pos = ret;
	return ret;
}

static struct tap_dev *tap_get_tap_dev(struct tap_queue *q)
{
	struct tap_dev *tap;

	ASSERT_RTNL();
	tap = rtnl_dereference(q->tap);
	if (tap)
		dev_hold(tap->dev);

	return tap;
}

static void tap_put_tap_dev(struct tap_dev *tap)
{
	dev_put(tap->dev);
}

static int tap_ioctl_set_queue(struct file *file, unsigned int flags)
{
	struct tap_queue *q = file->private_data;
	struct tap_dev *tap;
	int ret;

	tap = tap_get_tap_dev(q);
	if (!tap)
		return -EINVAL;

	if (flags & IFF_ATTACH_QUEUE)
		ret = tap_enable_queue(tap, file, q);
	else if (flags & IFF_DETACH_QUEUE)
		ret = tap_disable_queue(q);
	else
		ret = -EINVAL;

	tap_put_tap_dev(tap);
	return ret;
}

static int set_offload(struct tap_queue *q, unsigned long arg)
{
	struct tap_dev *tap;
	netdev_features_t features;
	netdev_features_t feature_mask = 0;

	tap = rtnl_dereference(q->tap);
	if (!tap)
		return -ENOLINK;

	features = tap->dev->features;

	if (arg & TUN_F_CSUM) {
		feature_mask = NETIF_F_HW_CSUM;

		if (arg & (TUN_F_TSO4 | TUN_F_TSO6)) {
			if (arg & TUN_F_TSO_ECN)
				feature_mask |= NETIF_F_TSO_ECN;
			if (arg & TUN_F_TSO4)
				feature_mask |= NETIF_F_TSO;
			if (arg & TUN_F_TSO6)
				feature_mask |= NETIF_F_TSO6;
		}
	}

	/* tun/tap driver inverts the usage for TSO offloads, where
	 * setting the TSO bit means that the userspace wants to
	 * accept TSO frames and turning it off means that user space
	 * does not support TSO.
	 * For tap, we have to invert it to mean the same thing.
	 * When user space turns off TSO, we turn off GSO/LRO so that
	 * user-space will not receive TSO frames.
	 */
	if (feature_mask & (NETIF_F_TSO | NETIF_F_TSO6))
		features |= RX_OFFLOADS;
	else
		features &= ~RX_OFFLOADS;

	/* tap_features are the same as features on tun/tap and
	 * reflect user expectations.
	 */
	tap->tap_features = feature_mask;
	if (tap->update_features)
		tap->update_features(tap, features);

	return 0;
}

Arnd Bergmann20d29d72010-01-30 12:24:26 +0000973/*
974 * provide compatibility with generic tun/tap interface
975 */
Sainath Grandhi635b8c82017-02-10 16:03:47 -0800976static long tap_ioctl(struct file *file, unsigned int cmd,
977 unsigned long arg)
Arnd Bergmann20d29d72010-01-30 12:24:26 +0000978{
Sainath Grandhi635b8c82017-02-10 16:03:47 -0800979 struct tap_queue *q = file->private_data;
Sainath Grandhi6fe3faf2017-02-10 16:03:49 -0800980 struct tap_dev *tap;
Arnd Bergmann20d29d72010-01-30 12:24:26 +0000981 void __user *argp = (void __user *)arg;
982 struct ifreq __user *ifr = argp;
983 unsigned int __user *up = argp;
Michael S. Tsirkin39ec7de2014-12-16 15:04:56 +0200984 unsigned short u;
Michael S. Tsirkin55afbd02010-04-29 13:50:48 +0300985 int __user *sp = argp;
Justin Cormack7f460d32015-05-13 19:19:02 +0100986 struct sockaddr sa;
Michael S. Tsirkin55afbd02010-04-29 13:50:48 +0300987 int s;
Arnd Bergmann02df55d2010-02-18 05:45:36 +0000988 int ret;
Arnd Bergmann20d29d72010-01-30 12:24:26 +0000989
990 switch (cmd) {
991 case TUNSETIFF:
992 /* ignore the name, just look at flags */
993 if (get_user(u, &ifr->ifr_flags))
994 return -EFAULT;
Arnd Bergmannb9fb9ee2010-02-18 05:48:17 +0000995
996 ret = 0;
Sainath Grandhi635b8c82017-02-10 16:03:47 -0800997 if ((u & ~TAP_IFFEATURES) != (IFF_NO_PI | IFF_TAP))
Arnd Bergmannb9fb9ee2010-02-18 05:48:17 +0000998 ret = -EINVAL;
999 else
Sainath Grandhi635b8c82017-02-10 16:03:47 -08001000 q->flags = (q->flags & ~TAP_IFFEATURES) | u;
Arnd Bergmannb9fb9ee2010-02-18 05:48:17 +00001001
1002 return ret;
Arnd Bergmann20d29d72010-01-30 12:24:26 +00001003
1004 case TUNGETIFF:
Vlad Yasevich441ac0f2013-06-25 16:04:19 -04001005 rtnl_lock();
Sainath Grandhi6fe3faf2017-02-10 16:03:49 -08001006 tap = tap_get_tap_dev(q);
1007 if (!tap) {
Vlad Yasevich441ac0f2013-06-25 16:04:19 -04001008 rtnl_unlock();
			return -ENOLINK;
		}

		ret = 0;
		u = q->flags;
		if (copy_to_user(&ifr->ifr_name, tap->dev->name, IFNAMSIZ) ||
		    put_user(u, &ifr->ifr_flags))
			ret = -EFAULT;
		tap_put_tap_dev(tap);
		rtnl_unlock();
		return ret;

	case TUNSETQUEUE:
		if (get_user(u, &ifr->ifr_flags))
			return -EFAULT;
		rtnl_lock();
		ret = tap_ioctl_set_queue(file, u);
		rtnl_unlock();
		return ret;

	case TUNGETFEATURES:
		if (put_user(IFF_TAP | IFF_NO_PI | TAP_IFFEATURES, up))
			return -EFAULT;
		return 0;

	case TUNSETSNDBUF:
		if (get_user(s, sp))
			return -EFAULT;
		if (s <= 0)
			return -EINVAL;

		q->sk.sk_sndbuf = s;
		return 0;

	case TUNGETVNETHDRSZ:
		s = q->vnet_hdr_sz;
		if (put_user(s, sp))
			return -EFAULT;
		return 0;

	case TUNSETVNETHDRSZ:
		if (get_user(s, sp))
			return -EFAULT;
		if (s < (int)sizeof(struct virtio_net_hdr))
			return -EINVAL;

		q->vnet_hdr_sz = s;
		return 0;

	case TUNGETVNETLE:
		s = !!(q->flags & TAP_VNET_LE);
		if (put_user(s, sp))
			return -EFAULT;
		return 0;

	case TUNSETVNETLE:
		if (get_user(s, sp))
			return -EFAULT;
		if (s)
			q->flags |= TAP_VNET_LE;
		else
			q->flags &= ~TAP_VNET_LE;
		return 0;

	case TUNGETVNETBE:
		return tap_get_vnet_be(q, sp);

	case TUNSETVNETBE:
		return tap_set_vnet_be(q, sp);

	case TUNSETOFFLOAD:
		/* let the user check for future flags */
		if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 |
			    TUN_F_TSO_ECN | TUN_F_UFO))
			return -EINVAL;

		rtnl_lock();
		ret = set_offload(q, arg);
		rtnl_unlock();
		return ret;

	case SIOCGIFHWADDR:
		rtnl_lock();
		tap = tap_get_tap_dev(q);
		if (!tap) {
			rtnl_unlock();
			return -ENOLINK;
		}
		ret = 0;
		u = tap->dev->type;
		if (copy_to_user(&ifr->ifr_name, tap->dev->name, IFNAMSIZ) ||
		    copy_to_user(&ifr->ifr_hwaddr.sa_data, tap->dev->dev_addr, ETH_ALEN) ||
		    put_user(u, &ifr->ifr_hwaddr.sa_family))
			ret = -EFAULT;
		tap_put_tap_dev(tap);
		rtnl_unlock();
		return ret;

	case SIOCSIFHWADDR:
		if (copy_from_user(&sa, &ifr->ifr_hwaddr, sizeof(sa)))
			return -EFAULT;
		rtnl_lock();
		tap = tap_get_tap_dev(q);
		if (!tap) {
			rtnl_unlock();
			return -ENOLINK;
		}
		ret = dev_set_mac_address(tap->dev, &sa, NULL);
		tap_put_tap_dev(tap);
		rtnl_unlock();
		return ret;

	default:
		return -EINVAL;
	}
}
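/*
 * Illustrative sketch (not part of the driver): userspace typically drives
 * the ioctls handled above on an already-created tap character device node.
 * The device path, header size choice, and offload mask below are assumptions
 * made only for this example.
 *
 *	int fd = open("/dev/tap11", O_RDWR);
 *	unsigned int features;
 *	int hdr_sz = sizeof(struct virtio_net_hdr_mrg_rxbuf);
 *
 *	if (ioctl(fd, TUNGETFEATURES, &features) == 0 &&
 *	    (features & IFF_VNET_HDR)) {
 *		ioctl(fd, TUNSETVNETHDRSZ, &hdr_sz);
 *		ioctl(fd, TUNSETOFFLOAD,
 *		      TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6);
 *	}
 */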

#ifdef CONFIG_COMPAT
static long tap_compat_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg)
{
	return tap_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
}
#endif

static const struct file_operations tap_fops = {
	.owner		= THIS_MODULE,
	.open		= tap_open,
	.release	= tap_release,
	.read_iter	= tap_read_iter,
	.write_iter	= tap_write_iter,
	.poll		= tap_poll,
	.llseek		= no_llseek,
	.unlocked_ioctl	= tap_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= tap_compat_ioctl,
#endif
};

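/*
 * Added note: tap_get_user_xdp() below wraps one caller-provided XDP buffer
 * in an sk_buff and transmits it through the attached tap device.  The buffer
 * is expected to start with a struct tun_xdp_hdr (buffer length plus virtio
 * GSO header), which is the layout vhost_net prepares for the TUN_MSG_PTR
 * batch path handled in tap_sendmsg().
 */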
static int tap_get_user_xdp(struct tap_queue *q, struct xdp_buff *xdp)
{
	struct tun_xdp_hdr *hdr = xdp->data_hard_start;
	struct virtio_net_hdr *gso = &hdr->gso;
	int buflen = hdr->buflen;
	int vnet_hdr_len = 0;
	struct tap_dev *tap;
	struct sk_buff *skb;
	int err, depth;

	if (q->flags & IFF_VNET_HDR)
		vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);

	skb = build_skb(xdp->data_hard_start, buflen);
	if (!skb) {
		err = -ENOMEM;
		goto err;
	}

	skb_reserve(skb, xdp->data - xdp->data_hard_start);
	skb_put(skb, xdp->data_end - xdp->data);

	skb_set_network_header(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	skb->protocol = eth_hdr(skb)->h_proto;

	if (vnet_hdr_len) {
		err = virtio_net_hdr_to_skb(skb, gso, tap_is_little_endian(q));
		if (err)
			goto err_kfree;
	}

	/* Move network header to the right position for VLAN tagged packets */
	if ((skb->protocol == htons(ETH_P_8021Q) ||
	     skb->protocol == htons(ETH_P_8021AD)) &&
	    __vlan_get_protocol(skb, skb->protocol, &depth) != 0)
		skb_set_network_header(skb, depth);

	rcu_read_lock();
	tap = rcu_dereference(q->tap);
	if (tap) {
		skb->dev = tap->dev;
		skb_probe_transport_header(skb);
		dev_queue_xmit(skb);
	} else {
		kfree_skb(skb);
	}
	rcu_read_unlock();

	return 0;

err_kfree:
	kfree_skb(skb);
err:
	rcu_read_lock();
	tap = rcu_dereference(q->tap);
	if (tap && tap->count_tx_dropped)
		tap->count_tx_dropped(tap);
	rcu_read_unlock();
	return err;
}

static int tap_sendmsg(struct socket *sock, struct msghdr *m,
		       size_t total_len)
{
	struct tap_queue *q = container_of(sock, struct tap_queue, sock);
	struct tun_msg_ctl *ctl = m->msg_control;
	struct xdp_buff *xdp;
	int i;

	if (ctl && (ctl->type == TUN_MSG_PTR)) {
		for (i = 0; i < ctl->num; i++) {
			xdp = &((struct xdp_buff *)ctl->ptr)[i];
			tap_get_user_xdp(q, xdp);
		}
		return 0;
	}

	return tap_get_user(q, ctl ? ctl->ptr : NULL, &m->msg_iter,
			    m->msg_flags & MSG_DONTWAIT);
}
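/*
 * Illustrative sketch (assumption, abbreviated): an in-kernel consumer such
 * as vhost_net submits a batch of prepared XDP buffers through the socket's
 * sendmsg path roughly like this; names and error handling are simplified.
 *
 *	struct xdp_buff bufs[64];		// filled in by the caller
 *	struct tun_msg_ctl ctl = {
 *		.type = TUN_MSG_PTR,
 *		.num  = nr_bufs,
 *		.ptr  = bufs,
 *	};
 *	struct msghdr msg = { .msg_control = &ctl };
 *
 *	err = sock->ops->sendmsg(sock, &msg, 0);
 */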

static int tap_recvmsg(struct socket *sock, struct msghdr *m,
		       size_t total_len, int flags)
{
	struct tap_queue *q = container_of(sock, struct tap_queue, sock);
	struct sk_buff *skb = m->msg_control;
	int ret;
	if (flags & ~(MSG_DONTWAIT|MSG_TRUNC)) {
		kfree_skb(skb);
		return -EINVAL;
	}
	ret = tap_do_read(q, &m->msg_iter, flags & MSG_DONTWAIT, skb);
	if (ret > total_len) {
		m->msg_flags |= MSG_TRUNC;
		ret = flags & MSG_TRUNC ? ret : total_len;
	}
	return ret;
}

static int tap_peek_len(struct socket *sock)
{
	struct tap_queue *q = container_of(sock, struct tap_queue,
					   sock);
	return PTR_RING_PEEK_CALL(&q->ring, __skb_array_len_with_tag);
}

/* Ops structure to mimic raw sockets with tun */
static const struct proto_ops tap_socket_ops = {
	.sendmsg = tap_sendmsg,
	.recvmsg = tap_recvmsg,
	.peek_len = tap_peek_len,
};

/* Get an underlying socket object from tun file. Returns error unless file is
 * attached to a device. The returned object works like a packet socket, it
 * can be used for sock_sendmsg/sock_recvmsg. The caller is responsible for
 * holding a reference to the file for as long as the socket is in use. */
struct socket *tap_get_socket(struct file *file)
{
	struct tap_queue *q;
	if (file->f_op != &tap_fops)
		return ERR_PTR(-EINVAL);
	q = file->private_data;
	if (!q)
		return ERR_PTR(-EBADFD);
	return &q->sock;
}
EXPORT_SYMBOL_GPL(tap_get_socket);
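/*
 * Illustrative sketch (assumption): resolving a tap file descriptor handed in
 * from userspace, as the comment above describes; vhost_net's backend setup
 * does something along these lines.
 *
 *	struct file *file = fget(fd);
 *	struct socket *sock;
 *
 *	if (!file)
 *		return ERR_PTR(-EBADF);
 *	sock = tap_get_socket(file);
 *	if (IS_ERR(sock))
 *		fput(file);	// not a tap queue, drop the reference
 *	return sock;		// otherwise keep the file pinned while in use
 */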

struct ptr_ring *tap_get_ptr_ring(struct file *file)
{
	struct tap_queue *q;

	if (file->f_op != &tap_fops)
		return ERR_PTR(-EINVAL);
	q = file->private_data;
	if (!q)
		return ERR_PTR(-EBADFD);
	return &q->ring;
}
EXPORT_SYMBOL_GPL(tap_get_ptr_ring);

int tap_queue_resize(struct tap_dev *tap)
{
	struct net_device *dev = tap->dev;
	struct tap_queue *q;
	struct ptr_ring **rings;
	int n = tap->numqueues;
	int ret, i = 0;

	rings = kmalloc_array(n, sizeof(*rings), GFP_KERNEL);
	if (!rings)
		return -ENOMEM;

	list_for_each_entry(q, &tap->queue_list, next)
		rings[i++] = &q->ring;

	ret = ptr_ring_resize_multiple(rings, n,
				       dev->tx_queue_len, GFP_KERNEL,
				       __skb_array_destroy_skb);

	kfree(rings);
	return ret;
}
EXPORT_SYMBOL_GPL(tap_queue_resize);
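/*
 * Illustrative sketch (assumption): a tap_dev front end is expected to call
 * tap_queue_resize() when the underlying netdevice's tx_queue_len changes,
 * typically from its netdevice notifier; macvtap/ipvtap follow roughly this
 * pattern.  "mytap" below is a hypothetical driver-private structure that
 * embeds a struct tap_dev.
 *
 *	case NETDEV_CHANGE_TX_QUEUE_LEN:
 *		if (tap_queue_resize(&mytap->tap))
 *			return NOTIFY_BAD;
 *		break;
 */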

static int tap_list_add(dev_t major, const char *device_name)
{
	struct major_info *tap_major;

	tap_major = kzalloc(sizeof(*tap_major), GFP_ATOMIC);
	if (!tap_major)
		return -ENOMEM;

	tap_major->major = MAJOR(major);

	idr_init(&tap_major->minor_idr);
	spin_lock_init(&tap_major->minor_lock);

	tap_major->device_name = device_name;

	list_add_tail_rcu(&tap_major->next, &major_list);
	return 0;
}

int tap_create_cdev(struct cdev *tap_cdev, dev_t *tap_major,
		    const char *device_name, struct module *module)
{
	int err;

	err = alloc_chrdev_region(tap_major, 0, TAP_NUM_DEVS, device_name);
	if (err)
		goto out1;

	cdev_init(tap_cdev, &tap_fops);
	tap_cdev->owner = module;
	err = cdev_add(tap_cdev, *tap_major, TAP_NUM_DEVS);
	if (err)
		goto out2;

	err = tap_list_add(*tap_major, device_name);
	if (err)
		goto out3;

	return 0;

out3:
	cdev_del(tap_cdev);
out2:
	unregister_chrdev_region(*tap_major, TAP_NUM_DEVS);
out1:
	return err;
}
EXPORT_SYMBOL_GPL(tap_create_cdev);
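/*
 * Illustrative sketch (assumption): a front-end driver pairs tap_create_cdev()
 * and tap_destroy_cdev() in its module init/exit paths, along the lines of
 * macvtap/ipvtap.  The identifiers below are hypothetical.
 *
 *	static struct cdev mytap_cdev;
 *	static dev_t mytap_major;
 *
 *	err = tap_create_cdev(&mytap_cdev, &mytap_major, "mytap", THIS_MODULE);
 *	if (err)
 *		goto fail;
 *	...
 *	tap_destroy_cdev(mytap_major, &mytap_cdev);	// module exit / unwind
 */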

void tap_destroy_cdev(dev_t major, struct cdev *tap_cdev)
{
	struct major_info *tap_major, *tmp;

	cdev_del(tap_cdev);
	unregister_chrdev_region(major, TAP_NUM_DEVS);
	list_for_each_entry_safe(tap_major, tmp, &major_list, next) {
		if (tap_major->major == MAJOR(major)) {
			idr_destroy(&tap_major->minor_idr);
			list_del_rcu(&tap_major->next);
			kfree_rcu(tap_major, rcu);
		}
	}
}
EXPORT_SYMBOL_GPL(tap_destroy_cdev);

MODULE_AUTHOR("Arnd Bergmann <arnd@arndb.de>");
MODULE_AUTHOR("Sainath Grandhi <sainath.grandhi@intel.com>");
MODULE_LICENSE("GPL");