#include <linux/etherdevice.h>
#include <linux/if_macvlan.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/nsproxy.h>
#include <linux/compat.h>
#include <linux/if_tun.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/cache.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/cdev.h>
#include <linux/idr.h>
#include <linux/fs.h>
#include <linux/uio.h>

#include <net/net_namespace.h>
#include <net/rtnetlink.h>
#include <net/sock.h>
#include <linux/virtio_net.h>
#include <linux/skb_array.h>

/*
 * A macvtap queue is the central object of this driver; it connects
 * an open character device to a macvlan interface. There can be
 * multiple queues on one interface, which map back to queues
 * implemented in hardware on the underlying device.
 *
 * macvtap_proto is used to allocate queues through the sock allocation
 * mechanism.
 */
struct macvtap_queue {
	struct sock sk;
	struct socket sock;
	struct socket_wq wq;
	int vnet_hdr_sz;
	struct macvlan_dev __rcu *vlan;
	struct file *file;
	unsigned int flags;
	u16 queue_index;
	bool enabled;
	struct list_head next;
	struct skb_array skb_array;
};

#define MACVTAP_FEATURES (IFF_VNET_HDR | IFF_MULTI_QUEUE)

#define MACVTAP_VNET_LE 0x80000000
#define MACVTAP_VNET_BE 0x40000000

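/*
 * The MACVTAP_VNET_LE/BE flags control the byte order of the virtio_net
 * header fields exchanged with user space; the helpers below resolve the
 * effective endianness for a queue.
 */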
#ifdef CONFIG_TUN_VNET_CROSS_LE
static inline bool macvtap_legacy_is_little_endian(struct macvtap_queue *q)
{
	return q->flags & MACVTAP_VNET_BE ? false :
		virtio_legacy_is_little_endian();
}

static long macvtap_get_vnet_be(struct macvtap_queue *q, int __user *sp)
{
	int s = !!(q->flags & MACVTAP_VNET_BE);

	if (put_user(s, sp))
		return -EFAULT;

	return 0;
}

static long macvtap_set_vnet_be(struct macvtap_queue *q, int __user *sp)
{
	int s;

	if (get_user(s, sp))
		return -EFAULT;

	if (s)
		q->flags |= MACVTAP_VNET_BE;
	else
		q->flags &= ~MACVTAP_VNET_BE;

	return 0;
}
#else
static inline bool macvtap_legacy_is_little_endian(struct macvtap_queue *q)
{
	return virtio_legacy_is_little_endian();
}

static long macvtap_get_vnet_be(struct macvtap_queue *q, int __user *argp)
{
	return -EINVAL;
}

static long macvtap_set_vnet_be(struct macvtap_queue *q, int __user *argp)
{
	return -EINVAL;
}
#endif /* CONFIG_TUN_VNET_CROSS_LE */

static inline bool macvtap_is_little_endian(struct macvtap_queue *q)
{
	return q->flags & MACVTAP_VNET_LE ||
	       macvtap_legacy_is_little_endian(q);
}

static inline u16 macvtap16_to_cpu(struct macvtap_queue *q, __virtio16 val)
{
	return __virtio16_to_cpu(macvtap_is_little_endian(q), val);
}

static inline __virtio16 cpu_to_macvtap16(struct macvtap_queue *q, u16 val)
{
	return __cpu_to_virtio16(macvtap_is_little_endian(q), val);
}

static struct proto macvtap_proto = {
	.name = "macvtap",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct macvtap_queue),
};

#define MACVTAP_NUM_DEVS (1U << MINORBITS)
static DEFINE_MUTEX(minor_lock);
DEFINE_IDR(minor_idr);

#define GOODCOPY_LEN 128

static const struct proto_ops macvtap_socket_ops;

#define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO)
#define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG | NETIF_F_FRAGLIST)

static struct macvlan_dev *macvtap_get_vlan_rcu(const struct net_device *dev)
{
	return rcu_dereference(dev->rx_handler_data);
}

/*
 * RCU usage:
 * The macvtap_queue and the macvlan_dev are loosely coupled; the
 * pointers from one to the other can only be read while rcu_read_lock
 * or rtnl is held.
 *
 * Both the file and the macvlan_dev hold a reference on the macvtap_queue
 * through sock_hold(&q->sk). When the macvlan_dev goes away first,
 * q->vlan becomes inaccessible. When the file gets closed,
 * macvtap_get_queue() fails.
 *
 * There may still be references to the struct sock inside of the
 * queue from outbound SKBs, but these never reference back to the
 * file or the dev. The data structure is freed through __sk_free
 * when both our references and any pending SKBs are gone.
 */

static int macvtap_enable_queue(struct net_device *dev, struct file *file,
				struct macvtap_queue *q)
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	int err = -EINVAL;

	ASSERT_RTNL();

	if (q->enabled)
		goto out;

	err = 0;
	rcu_assign_pointer(vlan->taps[vlan->numvtaps], q);
	q->queue_index = vlan->numvtaps;
	q->enabled = true;

	vlan->numvtaps++;
out:
	return err;
}

/* Requires RTNL */
static int macvtap_set_queue(struct net_device *dev, struct file *file,
			     struct macvtap_queue *q)
{
	struct macvlan_dev *vlan = netdev_priv(dev);

	if (vlan->numqueues == MAX_MACVTAP_QUEUES)
		return -EBUSY;

	rcu_assign_pointer(q->vlan, vlan);
	rcu_assign_pointer(vlan->taps[vlan->numvtaps], q);
	sock_hold(&q->sk);

	q->file = file;
	q->queue_index = vlan->numvtaps;
	q->enabled = true;
	file->private_data = q;
	list_add_tail(&q->next, &vlan->queue_list);

	vlan->numvtaps++;
	vlan->numqueues++;

	return 0;
}

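/*
 * Take a queue out of the set of active taps.  The last enabled queue is
 * moved into the freed slot so that the enabled part of the taps[] array
 * stays contiguous.
 */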
static int macvtap_disable_queue(struct macvtap_queue *q)
{
	struct macvlan_dev *vlan;
	struct macvtap_queue *nq;

	ASSERT_RTNL();
	if (!q->enabled)
		return -EINVAL;

	vlan = rtnl_dereference(q->vlan);

	if (vlan) {
		int index = q->queue_index;
		BUG_ON(index >= vlan->numvtaps);
		nq = rtnl_dereference(vlan->taps[vlan->numvtaps - 1]);
		nq->queue_index = index;

		rcu_assign_pointer(vlan->taps[index], nq);
		RCU_INIT_POINTER(vlan->taps[vlan->numvtaps - 1], NULL);
		q->enabled = false;

		vlan->numvtaps--;
	}

	return 0;
}

/*
 * The file owning the queue got closed; give up both
 * the reference that the file holds as well as the
 * one from the macvlan_dev if that still exists.
 *
 * Holding the rtnl lock makes sure that we don't get
 * to the queue again after destroying it.
 */
static void macvtap_put_queue(struct macvtap_queue *q)
{
	struct macvlan_dev *vlan;

	rtnl_lock();
	vlan = rtnl_dereference(q->vlan);

	if (vlan) {
		if (q->enabled)
			BUG_ON(macvtap_disable_queue(q));

		vlan->numqueues--;
		RCU_INIT_POINTER(q->vlan, NULL);
		sock_put(&q->sk);
		list_del_init(&q->next);
	}

	rtnl_unlock();

	synchronize_rcu();
	sock_put(&q->sk);
}

/*
 * Select a queue based on the rxq of the device on which this packet
 * arrived. If the incoming device is not mq, calculate a flow hash
 * to select a queue. If all else fails, find the first available queue.
 * Cache vlan->numvtaps since it can become zero during the execution
 * of this function.
 */
static struct macvtap_queue *macvtap_get_queue(struct net_device *dev,
					       struct sk_buff *skb)
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	struct macvtap_queue *tap = NULL;
	/* Access to taps array is protected by rcu, but access to numvtaps
	 * isn't. Below we use it to lookup a queue, but treat it as a hint
	 * and validate that the result isn't NULL - in case we are
	 * racing against queue removal.
	 */
	int numvtaps = ACCESS_ONCE(vlan->numvtaps);
	__u32 rxq;

	if (!numvtaps)
		goto out;

	if (numvtaps == 1)
		goto single;

	/* Check if we can use flow to select a queue */
	rxq = skb_get_hash(skb);
	if (rxq) {
		tap = rcu_dereference(vlan->taps[rxq % numvtaps]);
		goto out;
	}

	if (likely(skb_rx_queue_recorded(skb))) {
		rxq = skb_get_rx_queue(skb);

		while (unlikely(rxq >= numvtaps))
			rxq -= numvtaps;

		tap = rcu_dereference(vlan->taps[rxq]);
		goto out;
	}

single:
	tap = rcu_dereference(vlan->taps[0]);
out:
	return tap;
}

/*
 * The net_device is going away; give up the reference
 * that it holds on all queues and safely set the pointer
 * from the queues to NULL.
 */
void macvtap_del_queues(struct net_device *dev)
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	struct macvtap_queue *q, *tmp;

	ASSERT_RTNL();
	list_for_each_entry_safe(q, tmp, &vlan->queue_list, next) {
		list_del_init(&q->next);
		RCU_INIT_POINTER(q->vlan, NULL);
		if (q->enabled)
			vlan->numvtaps--;
		vlan->numqueues--;
		sock_put(&q->sk);
	}
	BUG_ON(vlan->numvtaps);
	BUG_ON(vlan->numqueues);
	/* guarantee that any future macvtap_set_queue will fail */
	vlan->numvtaps = MAX_MACVTAP_QUEUES;
}

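/*
 * rx_handler for packets arriving on the lower macvlan device: pick a
 * receive queue, segment or checksum the skb if the reader cannot handle
 * it as-is, and place it on that queue's skb_array for the reader.
 */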
rx_handler_result_t macvtap_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct net_device *dev = skb->dev;
	struct macvlan_dev *vlan;
	struct macvtap_queue *q;
	netdev_features_t features = TAP_FEATURES;

	vlan = macvtap_get_vlan_rcu(dev);
	if (!vlan)
		return RX_HANDLER_PASS;

	q = macvtap_get_queue(dev, skb);
	if (!q)
		return RX_HANDLER_PASS;

	if (__skb_array_full(&q->skb_array))
		goto drop;

	skb_push(skb, ETH_HLEN);

	/* Apply the forward feature mask so that we perform segmentation
	 * according to the user's wishes. This only works if VNET_HDR is
	 * enabled.
	 */
	if (q->flags & IFF_VNET_HDR)
		features |= vlan->tap_features;
	if (netif_needs_gso(skb, features)) {
		struct sk_buff *segs = __skb_gso_segment(skb, features, false);

		if (IS_ERR(segs))
			goto drop;

		if (!segs) {
			if (skb_array_produce(&q->skb_array, skb))
				goto drop;
			goto wake_up;
		}

		consume_skb(skb);
		while (segs) {
			struct sk_buff *nskb = segs->next;

			segs->next = NULL;
			if (skb_array_produce(&q->skb_array, segs)) {
				kfree_skb(segs);
				kfree_skb_list(nskb);
				break;
			}
			segs = nskb;
		}
	} else {
		/* If we receive a partial checksum and the tap side
		 * doesn't support checksum offload, compute the checksum.
		 * Note: it doesn't matter which checksum feature to
		 * check, we either support them all or none.
		 */
		if (skb->ip_summed == CHECKSUM_PARTIAL &&
		    !(features & NETIF_F_CSUM_MASK) &&
		    skb_checksum_help(skb))
			goto drop;
		if (skb_array_produce(&q->skb_array, skb))
			goto drop;
	}

wake_up:
	wake_up_interruptible_poll(sk_sleep(&q->sk), POLLIN | POLLRDNORM | POLLRDBAND);
	return RX_HANDLER_CONSUMED;

drop:
	/* Count errors/drops only here, thus don't care about args. */
	macvlan_count_rx(vlan, 0, 0, 0);
	kfree_skb(skb);
	return RX_HANDLER_CONSUMED;
}

int macvtap_get_minor(struct macvlan_dev *vlan)
{
	int retval = -ENOMEM;

	mutex_lock(&minor_lock);
	retval = idr_alloc(&minor_idr, vlan, 1, MACVTAP_NUM_DEVS, GFP_KERNEL);
	if (retval >= 0) {
		vlan->minor = retval;
	} else if (retval == -ENOSPC) {
		netdev_err(vlan->dev, "Too many macvtap devices\n");
		retval = -EINVAL;
	}
	mutex_unlock(&minor_lock);
	return retval < 0 ? retval : 0;
}

void macvtap_free_minor(struct macvlan_dev *vlan)
{
	mutex_lock(&minor_lock);
	if (vlan->minor) {
		idr_remove(&minor_idr, vlan->minor);
		vlan->minor = 0;
	}
	mutex_unlock(&minor_lock);
}

static struct net_device *dev_get_by_macvtap_minor(int minor)
{
	struct net_device *dev = NULL;
	struct macvlan_dev *vlan;

	mutex_lock(&minor_lock);
	vlan = idr_find(&minor_idr, minor);
	if (vlan) {
		dev = vlan->dev;
		dev_hold(dev);
	}
	mutex_unlock(&minor_lock);
	return dev;
}

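/* Signal POLLOUT to waiters once the socket has send buffer space again. */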
static void macvtap_sock_write_space(struct sock *sk)
{
	wait_queue_head_t *wqueue;

	if (!sock_writeable(sk) ||
	    !test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags))
		return;

	wqueue = sk_sleep(sk);
	if (wqueue && waitqueue_active(wqueue))
		wake_up_interruptible_poll(wqueue, POLLOUT | POLLWRNORM | POLLWRBAND);
}

static void macvtap_sock_destruct(struct sock *sk)
{
	struct macvtap_queue *q = container_of(sk, struct macvtap_queue, sk);

	skb_array_cleanup(&q->skb_array);
}

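/*
 * Character device open: look up the macvlan device by chardev minor,
 * allocate a queue backed by a socket and an skb_array, and attach it
 * to the device.
 */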
static int macvtap_open(struct inode *inode, struct file *file)
{
	struct net *net = current->nsproxy->net_ns;
	struct net_device *dev;
	struct macvtap_queue *q;
	int err = -ENODEV;

	rtnl_lock();
	dev = dev_get_by_macvtap_minor(iminor(inode));
	if (!dev)
		goto err;

	err = -ENOMEM;
	q = (struct macvtap_queue *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
					     &macvtap_proto, 0);
	if (!q)
		goto err;

	RCU_INIT_POINTER(q->sock.wq, &q->wq);
	init_waitqueue_head(&q->wq.wait);
	q->sock.type = SOCK_RAW;
	q->sock.state = SS_CONNECTED;
	q->sock.file = file;
	q->sock.ops = &macvtap_socket_ops;
	sock_init_data(&q->sock, &q->sk);
	q->sk.sk_write_space = macvtap_sock_write_space;
	q->sk.sk_destruct = macvtap_sock_destruct;
	q->flags = IFF_VNET_HDR | IFF_NO_PI | IFF_TAP;
	q->vnet_hdr_sz = sizeof(struct virtio_net_hdr);

	/*
	 * So far only KVM virtio_net uses macvtap; enable zero copy between
	 * the guest kernel and the host kernel when the lower device supports
	 * zerocopy.
	 *
	 * The macvlan supports zerocopy iff the lower device supports zero
	 * copy, so we don't have to look at the lower device directly.
	 */
	if ((dev->features & NETIF_F_HIGHDMA) && (dev->features & NETIF_F_SG))
		sock_set_flag(&q->sk, SOCK_ZEROCOPY);

	err = -ENOMEM;
	if (skb_array_init(&q->skb_array, dev->tx_queue_len, GFP_KERNEL))
		goto err_array;

	err = macvtap_set_queue(dev, file, q);
	if (err)
		goto err_queue;

	dev_put(dev);

	rtnl_unlock();
	return err;

err_queue:
	skb_array_cleanup(&q->skb_array);
err_array:
	sock_put(&q->sk);
err:
	if (dev)
		dev_put(dev);

	rtnl_unlock();
	return err;
}

static int macvtap_release(struct inode *inode, struct file *file)
{
	struct macvtap_queue *q = file->private_data;
	macvtap_put_queue(q);
	return 0;
}

static unsigned int macvtap_poll(struct file *file, poll_table *wait)
{
	struct macvtap_queue *q = file->private_data;
	unsigned int mask = POLLERR;

	if (!q)
		goto out;

	mask = 0;
	poll_wait(file, &q->wq.wait, wait);

	if (!skb_array_empty(&q->skb_array))
		mask |= POLLIN | POLLRDNORM;

	if (sock_writeable(&q->sk) ||
	    (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &q->sock.flags) &&
	     sock_writeable(&q->sk)))
		mask |= POLLOUT | POLLWRNORM;

out:
	return mask;
}

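/*
 * Allocate an skb with @prepad bytes of headroom, @linear bytes in the
 * linear area and the remainder of @len as paged data.
 */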
static inline struct sk_buff *macvtap_alloc_skb(struct sock *sk, size_t prepad,
						size_t len, size_t linear,
						int noblock, int *err)
{
	struct sk_buff *skb;

	/* Under a page? Don't bother with paged skb. */
	if (prepad + len < PAGE_SIZE || !linear)
		linear = len;

	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
				   err, 0);
	if (!skb)
		return NULL;

	skb_reserve(skb, prepad);
	skb_put(skb, linear);
	skb->data_len = len - linear;
	skb->len += len - linear;

	return skb;
}

/* Neighbour code has some assumptions on HH_DATA_MOD alignment */
#define MACVTAP_RESERVE HH_DATA_OFF(ETH_HLEN)

/* Get packet from user space buffer */
static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
				struct iov_iter *from, int noblock)
{
	int good_linear = SKB_MAX_HEAD(MACVTAP_RESERVE);
	struct sk_buff *skb;
	struct macvlan_dev *vlan;
	unsigned long total_len = iov_iter_count(from);
	unsigned long len = total_len;
	int err;
	struct virtio_net_hdr vnet_hdr = { 0 };
	int vnet_hdr_len = 0;
	int copylen = 0;
	int depth;
	bool zerocopy = false;
	size_t linear;

	if (q->flags & IFF_VNET_HDR) {
		vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);

		err = -EINVAL;
		if (len < vnet_hdr_len)
			goto err;
		len -= vnet_hdr_len;

		err = -EFAULT;
		if (!copy_from_iter_full(&vnet_hdr, sizeof(vnet_hdr), from))
			goto err;
		iov_iter_advance(from, vnet_hdr_len - sizeof(vnet_hdr));
		if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
		    macvtap16_to_cpu(q, vnet_hdr.csum_start) +
		    macvtap16_to_cpu(q, vnet_hdr.csum_offset) + 2 >
		     macvtap16_to_cpu(q, vnet_hdr.hdr_len))
			vnet_hdr.hdr_len = cpu_to_macvtap16(q,
				 macvtap16_to_cpu(q, vnet_hdr.csum_start) +
				 macvtap16_to_cpu(q, vnet_hdr.csum_offset) + 2);
		err = -EINVAL;
		if (macvtap16_to_cpu(q, vnet_hdr.hdr_len) > len)
			goto err;
	}

	err = -EINVAL;
	if (unlikely(len < ETH_HLEN))
		goto err;

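	/*
	 * With SOCK_ZEROCOPY and a caller-supplied msg_control, only the
	 * packet header is copied; the rest of the payload is mapped from
	 * the user pages, provided it fits into MAX_SKB_FRAGS fragments.
	 */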
	if (m && m->msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY)) {
		struct iov_iter i;

		copylen = vnet_hdr.hdr_len ?
			macvtap16_to_cpu(q, vnet_hdr.hdr_len) : GOODCOPY_LEN;
		if (copylen > good_linear)
			copylen = good_linear;
		else if (copylen < ETH_HLEN)
			copylen = ETH_HLEN;
		linear = copylen;
		i = *from;
		iov_iter_advance(&i, copylen);
		if (iov_iter_npages(&i, INT_MAX) <= MAX_SKB_FRAGS)
			zerocopy = true;
	}

	if (!zerocopy) {
		copylen = len;
		linear = macvtap16_to_cpu(q, vnet_hdr.hdr_len);
		if (linear > good_linear)
			linear = good_linear;
		else if (linear < ETH_HLEN)
			linear = ETH_HLEN;
	}

	skb = macvtap_alloc_skb(&q->sk, MACVTAP_RESERVE, copylen,
				linear, noblock, &err);
	if (!skb)
		goto err;

	if (zerocopy)
		err = zerocopy_sg_from_iter(skb, from);
	else
		err = skb_copy_datagram_from_iter(skb, 0, from, len);

	if (err)
		goto err_kfree;

	skb_set_network_header(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	skb->protocol = eth_hdr(skb)->h_proto;

	if (vnet_hdr_len) {
		err = virtio_net_hdr_to_skb(skb, &vnet_hdr,
					    macvtap_is_little_endian(q));
		if (err)
			goto err_kfree;
	}

	skb_probe_transport_header(skb, ETH_HLEN);

	/* Move network header to the right position for VLAN tagged packets */
	if ((skb->protocol == htons(ETH_P_8021Q) ||
	     skb->protocol == htons(ETH_P_8021AD)) &&
	    __vlan_get_protocol(skb, skb->protocol, &depth) != 0)
		skb_set_network_header(skb, depth);

	rcu_read_lock();
	vlan = rcu_dereference(q->vlan);
	/* copy skb_ubuf_info for callback when skb has no error */
	if (zerocopy) {
		skb_shinfo(skb)->destructor_arg = m->msg_control;
		skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
		skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
	} else if (m && m->msg_control) {
		struct ubuf_info *uarg = m->msg_control;
		uarg->callback(uarg, false);
	}

	if (vlan) {
		skb->dev = vlan->dev;
		dev_queue_xmit(skb);
	} else {
		kfree_skb(skb);
	}
	rcu_read_unlock();

	return total_len;

err_kfree:
	kfree_skb(skb);

err:
	rcu_read_lock();
	vlan = rcu_dereference(q->vlan);
	if (vlan)
		this_cpu_inc(vlan->pcpu_stats->tx_dropped);
	rcu_read_unlock();

	return err;
}

static ssize_t macvtap_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct macvtap_queue *q = file->private_data;

	return macvtap_get_user(q, NULL, from, file->f_flags & O_NONBLOCK);
}

/* Put packet to the user space buffer */
static ssize_t macvtap_put_user(struct macvtap_queue *q,
				const struct sk_buff *skb,
				struct iov_iter *iter)
{
	int ret;
	int vnet_hdr_len = 0;
	int vlan_offset = 0;
	int total;

	if (q->flags & IFF_VNET_HDR) {
		struct virtio_net_hdr vnet_hdr;
		vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);
		if (iov_iter_count(iter) < vnet_hdr_len)
			return -EINVAL;

		if (virtio_net_hdr_from_skb(skb, &vnet_hdr,
					    macvtap_is_little_endian(q), true))
			BUG();

		if (copy_to_iter(&vnet_hdr, sizeof(vnet_hdr), iter) !=
		    sizeof(vnet_hdr))
			return -EFAULT;

		iov_iter_advance(iter, vnet_hdr_len - sizeof(vnet_hdr));
	}
	total = vnet_hdr_len;
	total += skb->len;

	if (skb_vlan_tag_present(skb)) {
		struct {
			__be16 h_vlan_proto;
			__be16 h_vlan_TCI;
		} veth;
		veth.h_vlan_proto = skb->vlan_proto;
		veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb));

		vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
		total += VLAN_HLEN;

		ret = skb_copy_datagram_iter(skb, 0, iter, vlan_offset);
		if (ret || !iov_iter_count(iter))
			goto done;

		ret = copy_to_iter(&veth, sizeof(veth), iter);
		if (ret != sizeof(veth) || !iov_iter_count(iter))
			goto done;
	}

	ret = skb_copy_datagram_iter(skb, vlan_offset, iter,
				     skb->len - vlan_offset);

done:
	return ret ? ret : total;
}

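/*
 * Pop one skb from the queue's skb_array, sleeping if allowed, and hand
 * it to macvtap_put_user() to copy into the reader's buffer.
 */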
static ssize_t macvtap_do_read(struct macvtap_queue *q,
			       struct iov_iter *to,
			       int noblock)
{
	DEFINE_WAIT(wait);
	struct sk_buff *skb;
	ssize_t ret = 0;

	if (!iov_iter_count(to))
		return 0;

	while (1) {
		if (!noblock)
			prepare_to_wait(sk_sleep(&q->sk), &wait,
					TASK_INTERRUPTIBLE);

		/* Read frames from the queue */
		skb = skb_array_consume(&q->skb_array);
		if (skb)
			break;
		if (noblock) {
			ret = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		/* Nothing to read, let's sleep */
		schedule();
	}
	if (!noblock)
		finish_wait(sk_sleep(&q->sk), &wait);

	if (skb) {
		ret = macvtap_put_user(q, skb, to);
		if (unlikely(ret < 0))
			kfree_skb(skb);
		else
			consume_skb(skb);
	}
	return ret;
}

static ssize_t macvtap_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct macvtap_queue *q = file->private_data;
	ssize_t len = iov_iter_count(to), ret;

	ret = macvtap_do_read(q, to, file->f_flags & O_NONBLOCK);
	ret = min_t(ssize_t, ret, len);
	if (ret > 0)
		iocb->ki_pos = ret;
	return ret;
}

Jason Wang | 8f475a3 | 2013-06-05 23:54:36 +0000 | [diff] [blame] | 852 | static struct macvlan_dev *macvtap_get_vlan(struct macvtap_queue *q) |
| 853 | { |
| 854 | struct macvlan_dev *vlan; |
| 855 | |
Vlad Yasevich | 441ac0f | 2013-06-25 16:04:19 -0400 | [diff] [blame] | 856 | ASSERT_RTNL(); |
| 857 | vlan = rtnl_dereference(q->vlan); |
Jason Wang | 8f475a3 | 2013-06-05 23:54:36 +0000 | [diff] [blame] | 858 | if (vlan) |
| 859 | dev_hold(vlan->dev); |
Jason Wang | 8f475a3 | 2013-06-05 23:54:36 +0000 | [diff] [blame] | 860 | |
| 861 | return vlan; |
| 862 | } |
| 863 | |
| 864 | static void macvtap_put_vlan(struct macvlan_dev *vlan) |
| 865 | { |
| 866 | dev_put(vlan->dev); |
| 867 | } |
| 868 | |
Jason Wang | 815f236 | 2013-06-05 23:54:39 +0000 | [diff] [blame] | 869 | static int macvtap_ioctl_set_queue(struct file *file, unsigned int flags) |
| 870 | { |
| 871 | struct macvtap_queue *q = file->private_data; |
| 872 | struct macvlan_dev *vlan; |
| 873 | int ret; |
| 874 | |
| 875 | vlan = macvtap_get_vlan(q); |
| 876 | if (!vlan) |
| 877 | return -EINVAL; |
| 878 | |
| 879 | if (flags & IFF_ATTACH_QUEUE) |
| 880 | ret = macvtap_enable_queue(vlan->dev, file, q); |
| 881 | else if (flags & IFF_DETACH_QUEUE) |
| 882 | ret = macvtap_disable_queue(q); |
Jason Wang | f57855a | 2013-06-13 14:23:36 +0800 | [diff] [blame] | 883 | else |
| 884 | ret = -EINVAL; |
Jason Wang | 815f236 | 2013-06-05 23:54:39 +0000 | [diff] [blame] | 885 | |
| 886 | macvtap_put_vlan(vlan); |
| 887 | return ret; |
| 888 | } |
| 889 | |
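/*
 * Editor's sketch of the user-space side of TUNSETQUEUE handled above; the
 * helper name is made up. The fd is an already-open macvtap queue fd;
 * attaching/detaching toggles whether this queue receives traffic without
 * having to close and reopen the file descriptor.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/if.h>
#include <linux/if_tun.h>

static int toggle_queue(int fd, int enable)
{
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	ifr.ifr_flags = enable ? IFF_ATTACH_QUEUE : IFF_DETACH_QUEUE;
	return ioctl(fd, TUNSETQUEUE, &ifr);	/* ends up in macvtap_ioctl_set_queue() */
}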
Vlad Yasevich | 2be5c76 | 2013-06-25 16:04:21 -0400 | [diff] [blame] | 890 | static int set_offload(struct macvtap_queue *q, unsigned long arg) |
| 891 | { |
| 892 | struct macvlan_dev *vlan; |
| 893 | netdev_features_t features; |
| 894 | netdev_features_t feature_mask = 0; |
| 895 | |
| 896 | vlan = rtnl_dereference(q->vlan); |
| 897 | if (!vlan) |
| 898 | return -ENOLINK; |
| 899 | |
| 900 | features = vlan->dev->features; |
| 901 | |
| 902 | if (arg & TUN_F_CSUM) { |
| 903 | feature_mask = NETIF_F_HW_CSUM; |
| 904 | |
| 905 | if (arg & (TUN_F_TSO4 | TUN_F_TSO6)) { |
| 906 | if (arg & TUN_F_TSO_ECN) |
| 907 | feature_mask |= NETIF_F_TSO_ECN; |
| 908 | if (arg & TUN_F_TSO4) |
| 909 | feature_mask |= NETIF_F_TSO; |
| 910 | if (arg & TUN_F_TSO6) |
| 911 | feature_mask |= NETIF_F_TSO6; |
| 912 | } |
Vlad Yasevich | e3e3c42 | 2015-02-03 16:36:17 -0500 | [diff] [blame] | 913 | |
| 914 | if (arg & TUN_F_UFO) |
| 915 | feature_mask |= NETIF_F_UFO; |
Vlad Yasevich | 2be5c76 | 2013-06-25 16:04:21 -0400 | [diff] [blame] | 916 | } |
| 917 | |
| 918 | /* The tun/tap driver inverts the sense of the TSO offload flags: |
| 919 | * setting a TSO bit means that user space is willing to |
| 920 | * receive TSO frames, and clearing it means that user space |
| 921 | * does not support TSO. |
| 922 | * Macvtap follows the same convention, so when user space |
| 923 | * turns TSO off we also turn off GSO/LRO on the lower device, |
| 924 | * ensuring user space never receives TSO frames. |
| 925 | */ |
Vlad Yasevich | e3e3c42 | 2015-02-03 16:36:17 -0500 | [diff] [blame] | 926 | if (feature_mask & (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_UFO)) |
Vlad Yasevich | 2be5c76 | 2013-06-25 16:04:21 -0400 | [diff] [blame] | 927 | features |= RX_OFFLOADS; |
| 928 | else |
| 929 | features &= ~RX_OFFLOADS; |
| 930 | |
| 931 | /* tap_features stores the offload flags exactly as user space |
| 932 | * requested them, mirroring tun/tap semantics. |
| 933 | */ |
Vlad Yasevich | a567dd6 | 2013-08-16 15:25:00 -0400 | [diff] [blame] | 934 | vlan->tap_features = feature_mask; |
Vlad Yasevich | 2be5c76 | 2013-06-25 16:04:21 -0400 | [diff] [blame] | 935 | vlan->set_features = features; |
| 936 | netdev_update_features(vlan->dev); |
| 937 | |
| 938 | return 0; |
| 939 | } |
| 940 | |
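/*
 * Editor's sketch (helper name hypothetical): the user-space side of
 * TUNSETOFFLOAD that lands in set_offload() above. A virtio-net backend that
 * can accept checksum-offloaded and TSO frames would advertise that as
 * follows; per the comment above, clearing the TSO bits later makes macvtap
 * drop GSO/LRO on the lower device so user space never sees TSO frames.
 */
#include <sys/ioctl.h>
#include <linux/if_tun.h>

static int enable_rx_offloads(int fd)
{
	/* TUNSETOFFLOAD passes the flag word as the ioctl argument itself. */
	unsigned long offloads = TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6;

	return ioctl(fd, TUNSETOFFLOAD, offloads);
}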
Arnd Bergmann | 20d29d7 | 2010-01-30 12:24:26 +0000 | [diff] [blame] | 941 | /* |
| 942 | * provide compatibility with generic tun/tap interface |
| 943 | */ |
| 944 | static long macvtap_ioctl(struct file *file, unsigned int cmd, |
| 945 | unsigned long arg) |
| 946 | { |
Arnd Bergmann | 02df55d | 2010-02-18 05:45:36 +0000 | [diff] [blame] | 947 | struct macvtap_queue *q = file->private_data; |
| 948 | struct macvlan_dev *vlan; |
Arnd Bergmann | 20d29d7 | 2010-01-30 12:24:26 +0000 | [diff] [blame] | 949 | void __user *argp = (void __user *)arg; |
| 950 | struct ifreq __user *ifr = argp; |
| 951 | unsigned int __user *up = argp; |
Michael S. Tsirkin | 39ec7de | 2014-12-16 15:04:56 +0200 | [diff] [blame] | 952 | unsigned short u; |
Michael S. Tsirkin | 55afbd0 | 2010-04-29 13:50:48 +0300 | [diff] [blame] | 953 | int __user *sp = argp; |
Justin Cormack | 7f460d3 | 2015-05-13 19:19:02 +0100 | [diff] [blame] | 954 | struct sockaddr sa; |
Michael S. Tsirkin | 55afbd0 | 2010-04-29 13:50:48 +0300 | [diff] [blame] | 955 | int s; |
Arnd Bergmann | 02df55d | 2010-02-18 05:45:36 +0000 | [diff] [blame] | 956 | int ret; |
Arnd Bergmann | 20d29d7 | 2010-01-30 12:24:26 +0000 | [diff] [blame] | 957 | |
| 958 | switch (cmd) { |
| 959 | case TUNSETIFF: |
| 960 | /* ignore the name, just look at flags */ |
| 961 | if (get_user(u, &ifr->ifr_flags)) |
| 962 | return -EFAULT; |
Arnd Bergmann | b9fb9ee | 2010-02-18 05:48:17 +0000 | [diff] [blame] | 963 | |
| 964 | ret = 0; |
Michael S. Tsirkin | 6ae7feb | 2014-11-23 17:24:15 +0200 | [diff] [blame] | 965 | if ((u & ~MACVTAP_FEATURES) != (IFF_NO_PI | IFF_TAP)) |
Arnd Bergmann | b9fb9ee | 2010-02-18 05:48:17 +0000 | [diff] [blame] | 966 | ret = -EINVAL; |
| 967 | else |
Michael S. Tsirkin | 39ec7de | 2014-12-16 15:04:56 +0200 | [diff] [blame] | 968 | q->flags = (q->flags & ~MACVTAP_FEATURES) | u; |
Arnd Bergmann | b9fb9ee | 2010-02-18 05:48:17 +0000 | [diff] [blame] | 969 | |
| 970 | return ret; |
Arnd Bergmann | 20d29d7 | 2010-01-30 12:24:26 +0000 | [diff] [blame] | 971 | |
| 972 | case TUNGETIFF: |
Vlad Yasevich | 441ac0f | 2013-06-25 16:04:19 -0400 | [diff] [blame] | 973 | rtnl_lock(); |
Jason Wang | 8f475a3 | 2013-06-05 23:54:36 +0000 | [diff] [blame] | 974 | vlan = macvtap_get_vlan(q); |
Vlad Yasevich | 441ac0f | 2013-06-25 16:04:19 -0400 | [diff] [blame] | 975 | if (!vlan) { |
| 976 | rtnl_unlock(); |
Arnd Bergmann | 02df55d | 2010-02-18 05:45:36 +0000 | [diff] [blame] | 977 | return -ENOLINK; |
Vlad Yasevich | 441ac0f | 2013-06-25 16:04:19 -0400 | [diff] [blame] | 978 | } |
Arnd Bergmann | 02df55d | 2010-02-18 05:45:36 +0000 | [diff] [blame] | 979 | |
| 980 | ret = 0; |
Michael S. Tsirkin | 39ec7de | 2014-12-16 15:04:56 +0200 | [diff] [blame] | 981 | u = q->flags; |
Eric Dumazet | 13707f9 | 2011-01-26 19:28:23 +0000 | [diff] [blame] | 982 | if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) || |
Michael S. Tsirkin | 39ec7de | 2014-12-16 15:04:56 +0200 | [diff] [blame] | 983 | put_user(u, &ifr->ifr_flags)) |
Arnd Bergmann | 02df55d | 2010-02-18 05:45:36 +0000 | [diff] [blame] | 984 | ret = -EFAULT; |
Jason Wang | 8f475a3 | 2013-06-05 23:54:36 +0000 | [diff] [blame] | 985 | macvtap_put_vlan(vlan); |
Vlad Yasevich | 441ac0f | 2013-06-25 16:04:19 -0400 | [diff] [blame] | 986 | rtnl_unlock(); |
Arnd Bergmann | 02df55d | 2010-02-18 05:45:36 +0000 | [diff] [blame] | 987 | return ret; |
Arnd Bergmann | 20d29d7 | 2010-01-30 12:24:26 +0000 | [diff] [blame] | 988 | |
Jason Wang | 815f236 | 2013-06-05 23:54:39 +0000 | [diff] [blame] | 989 | case TUNSETQUEUE: |
| 990 | if (get_user(u, &ifr->ifr_flags)) |
| 991 | return -EFAULT; |
Vlad Yasevich | 441ac0f | 2013-06-25 16:04:19 -0400 | [diff] [blame] | 992 | rtnl_lock(); |
| 993 | ret = macvtap_ioctl_set_queue(file, u); |
| 994 | rtnl_unlock(); |
Jason Wang | 82a19eb | 2013-07-16 13:36:33 +0800 | [diff] [blame] | 995 | return ret; |
Jason Wang | 815f236 | 2013-06-05 23:54:39 +0000 | [diff] [blame] | 996 | |
Arnd Bergmann | 20d29d7 | 2010-01-30 12:24:26 +0000 | [diff] [blame] | 997 | case TUNGETFEATURES: |
Michael S. Tsirkin | 6ae7feb | 2014-11-23 17:24:15 +0200 | [diff] [blame] | 998 | if (put_user(IFF_TAP | IFF_NO_PI | MACVTAP_FEATURES, up)) |
Arnd Bergmann | 20d29d7 | 2010-01-30 12:24:26 +0000 | [diff] [blame] | 999 | return -EFAULT; |
| 1000 | return 0; |
| 1001 | |
| 1002 | case TUNSETSNDBUF: |
Michael S. Tsirkin | 3ea7924 | 2015-09-18 13:41:09 +0300 | [diff] [blame] | 1003 | if (get_user(s, sp)) |
Arnd Bergmann | 20d29d7 | 2010-01-30 12:24:26 +0000 | [diff] [blame] | 1004 | return -EFAULT; |
| 1005 | |
Michael S. Tsirkin | 3ea7924 | 2015-09-18 13:41:09 +0300 | [diff] [blame] | 1006 | q->sk.sk_sndbuf = s; |
Arnd Bergmann | 20d29d7 | 2010-01-30 12:24:26 +0000 | [diff] [blame] | 1007 | return 0; |
| 1008 | |
Michael S. Tsirkin | 55afbd0 | 2010-04-29 13:50:48 +0300 | [diff] [blame] | 1009 | case TUNGETVNETHDRSZ: |
| 1010 | s = q->vnet_hdr_sz; |
| 1011 | if (put_user(s, sp)) |
| 1012 | return -EFAULT; |
| 1013 | return 0; |
| 1014 | |
| 1015 | case TUNSETVNETHDRSZ: |
| 1016 | if (get_user(s, sp)) |
| 1017 | return -EFAULT; |
| 1018 | if (s < (int)sizeof(struct virtio_net_hdr)) |
| 1019 | return -EINVAL; |
| 1020 | |
| 1021 | q->vnet_hdr_sz = s; |
| 1022 | return 0; |
| 1023 | |
Michael S. Tsirkin | 01b07fb | 2014-12-16 15:05:10 +0200 | [diff] [blame] | 1024 | case TUNGETVNETLE: |
| 1025 | s = !!(q->flags & MACVTAP_VNET_LE); |
| 1026 | if (put_user(s, sp)) |
| 1027 | return -EFAULT; |
| 1028 | return 0; |
| 1029 | |
| 1030 | case TUNSETVNETLE: |
| 1031 | if (get_user(s, sp)) |
| 1032 | return -EFAULT; |
| 1033 | if (s) |
| 1034 | q->flags |= MACVTAP_VNET_LE; |
| 1035 | else |
| 1036 | q->flags &= ~MACVTAP_VNET_LE; |
| 1037 | return 0; |
| 1038 | |
Greg Kurz | 8b8e658 | 2015-04-24 14:50:36 +0200 | [diff] [blame] | 1039 | case TUNGETVNETBE: |
| 1040 | return macvtap_get_vnet_be(q, sp); |
| 1041 | |
| 1042 | case TUNSETVNETBE: |
| 1043 | return macvtap_set_vnet_be(q, sp); |
| 1044 | |
Arnd Bergmann | 20d29d7 | 2010-01-30 12:24:26 +0000 | [diff] [blame] | 1045 | case TUNSETOFFLOAD: |
| 1046 | /* let the user check for future flags */ |
| 1047 | if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 | |
Vlad Yasevich | e3e3c42 | 2015-02-03 16:36:17 -0500 | [diff] [blame] | 1048 | TUN_F_TSO_ECN | TUN_F_UFO)) |
Arnd Bergmann | 20d29d7 | 2010-01-30 12:24:26 +0000 | [diff] [blame] | 1049 | return -EINVAL; |
| 1050 | |
Vlad Yasevich | 2be5c76 | 2013-06-25 16:04:21 -0400 | [diff] [blame] | 1051 | rtnl_lock(); |
| 1052 | ret = set_offload(q, arg); |
| 1053 | rtnl_unlock(); |
| 1054 | return ret; |
Arnd Bergmann | 20d29d7 | 2010-01-30 12:24:26 +0000 | [diff] [blame] | 1055 | |
Justin Cormack | b508208 | 2015-05-11 20:00:10 +0100 | [diff] [blame] | 1056 | case SIOCGIFHWADDR: |
| 1057 | rtnl_lock(); |
| 1058 | vlan = macvtap_get_vlan(q); |
| 1059 | if (!vlan) { |
| 1060 | rtnl_unlock(); |
| 1061 | return -ENOLINK; |
| 1062 | } |
| 1063 | ret = 0; |
| 1064 | u = vlan->dev->type; |
| 1065 | if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) || |
| 1066 | copy_to_user(&ifr->ifr_hwaddr.sa_data, vlan->dev->dev_addr, ETH_ALEN) || |
| 1067 | put_user(u, &ifr->ifr_hwaddr.sa_family)) |
| 1068 | ret = -EFAULT; |
| 1069 | macvtap_put_vlan(vlan); |
| 1070 | rtnl_unlock(); |
| 1071 | return ret; |
| 1072 | |
| 1073 | case SIOCSIFHWADDR: |
Justin Cormack | 7f460d3 | 2015-05-13 19:19:02 +0100 | [diff] [blame] | 1074 | if (copy_from_user(&sa, &ifr->ifr_hwaddr, sizeof(sa))) |
| 1075 | return -EFAULT; |
Justin Cormack | b508208 | 2015-05-11 20:00:10 +0100 | [diff] [blame] | 1076 | rtnl_lock(); |
| 1077 | vlan = macvtap_get_vlan(q); |
| 1078 | if (!vlan) { |
| 1079 | rtnl_unlock(); |
| 1080 | return -ENOLINK; |
| 1081 | } |
Justin Cormack | 7f460d3 | 2015-05-13 19:19:02 +0100 | [diff] [blame] | 1082 | ret = dev_set_mac_address(vlan->dev, &sa); |
Justin Cormack | b508208 | 2015-05-11 20:00:10 +0100 | [diff] [blame] | 1083 | macvtap_put_vlan(vlan); |
| 1084 | rtnl_unlock(); |
| 1085 | return ret; |
| 1086 | |
Arnd Bergmann | 20d29d7 | 2010-01-30 12:24:26 +0000 | [diff] [blame] | 1087 | default: |
| 1088 | return -EINVAL; |
| 1089 | } |
| 1090 | } |
| 1091 | |
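/*
 * Editor's sketch of a typical setup sequence against a freshly opened
 * macvtap fd, exercising the ioctls implemented above (the helper name and
 * exact flag choice are illustrative). TUNSETIFF ignores the interface name
 * here and only validates the flags; TUNSETVNETHDRSZ must be at least
 * sizeof(struct virtio_net_hdr).
 */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/if.h>
#include <linux/if_tun.h>
#include <linux/virtio_net.h>

static int setup_macvtap_fd(int fd)
{
	struct ifreq ifr;
	unsigned int features;
	int hdr_sz = sizeof(struct virtio_net_hdr);

	/* probe which IFF_* bits the driver supports */
	if (ioctl(fd, TUNGETFEATURES, &features) < 0)
		return -1;

	memset(&ifr, 0, sizeof(ifr));
	ifr.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_VNET_HDR;
	if (ioctl(fd, TUNSETIFF, &ifr) < 0)
		return -1;

	return ioctl(fd, TUNSETVNETHDRSZ, &hdr_sz);
}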
| 1092 | #ifdef CONFIG_COMPAT |
| 1093 | static long macvtap_compat_ioctl(struct file *file, unsigned int cmd, |
| 1094 | unsigned long arg) |
| 1095 | { |
| 1096 | return macvtap_ioctl(file, cmd, (unsigned long)compat_ptr(arg)); |
| 1097 | } |
| 1098 | #endif |
| 1099 | |
Sainath Grandhi | a8e0469 | 2017-02-10 16:03:46 -0800 | [diff] [blame^] | 1100 | const struct file_operations macvtap_fops = { |
Arnd Bergmann | 20d29d7 | 2010-01-30 12:24:26 +0000 | [diff] [blame] | 1101 | .owner = THIS_MODULE, |
| 1102 | .open = macvtap_open, |
| 1103 | .release = macvtap_release, |
Al Viro | 3af0bfe | 2014-11-07 14:13:53 -0500 | [diff] [blame] | 1104 | .read_iter = macvtap_read_iter, |
Al Viro | f5ff53b | 2014-06-19 15:36:49 -0400 | [diff] [blame] | 1105 | .write_iter = macvtap_write_iter, |
Arnd Bergmann | 20d29d7 | 2010-01-30 12:24:26 +0000 | [diff] [blame] | 1106 | .poll = macvtap_poll, |
| 1107 | .llseek = no_llseek, |
| 1108 | .unlocked_ioctl = macvtap_ioctl, |
| 1109 | #ifdef CONFIG_COMPAT |
| 1110 | .compat_ioctl = macvtap_compat_ioctl, |
| 1111 | #endif |
| 1112 | }; |
| 1113 | |
Ying Xue | 1b78414 | 2015-03-02 15:37:48 +0800 | [diff] [blame] | 1114 | static int macvtap_sendmsg(struct socket *sock, struct msghdr *m, |
| 1115 | size_t total_len) |
Arnd Bergmann | 501c774 | 2010-02-18 05:46:50 +0000 | [diff] [blame] | 1116 | { |
| 1117 | struct macvtap_queue *q = container_of(sock, struct macvtap_queue, sock); |
Al Viro | c0371da | 2014-11-24 10:42:55 -0500 | [diff] [blame] | 1118 | return macvtap_get_user(q, m, &m->msg_iter, m->msg_flags & MSG_DONTWAIT); |
Arnd Bergmann | 501c774 | 2010-02-18 05:46:50 +0000 | [diff] [blame] | 1119 | } |
| 1120 | |
Ying Xue | 1b78414 | 2015-03-02 15:37:48 +0800 | [diff] [blame] | 1121 | static int macvtap_recvmsg(struct socket *sock, struct msghdr *m, |
| 1122 | size_t total_len, int flags) |
Arnd Bergmann | 501c774 | 2010-02-18 05:46:50 +0000 | [diff] [blame] | 1123 | { |
| 1124 | struct macvtap_queue *q = container_of(sock, struct macvtap_queue, sock); |
| 1125 | int ret; |
| 1126 | if (flags & ~(MSG_DONTWAIT|MSG_TRUNC)) |
| 1127 | return -EINVAL; |
Al Viro | c0371da | 2014-11-24 10:42:55 -0500 | [diff] [blame] | 1128 | ret = macvtap_do_read(q, &m->msg_iter, flags & MSG_DONTWAIT); |
David S. Miller | de2aa47 | 2013-12-10 22:06:18 -0500 | [diff] [blame] | 1129 | if (ret > total_len) { |
| 1130 | m->msg_flags |= MSG_TRUNC; |
| 1131 | ret = flags & MSG_TRUNC ? ret : total_len; |
| 1132 | } |
Arnd Bergmann | 501c774 | 2010-02-18 05:46:50 +0000 | [diff] [blame] | 1133 | return ret; |
| 1134 | } |
| 1135 | |
Jason Wang | 362899b | 2016-07-15 03:46:31 -0400 | [diff] [blame] | 1136 | static int macvtap_peek_len(struct socket *sock) |
| 1137 | { |
| 1138 | struct macvtap_queue *q = container_of(sock, struct macvtap_queue, |
| 1139 | sock); |
| 1140 | return skb_array_peek_len(&q->skb_array); |
| 1141 | } |
| 1142 | |
Arnd Bergmann | 501c774 | 2010-02-18 05:46:50 +0000 | [diff] [blame] | 1143 | /* Ops structure to mimic raw sockets with tun */ |
| 1144 | static const struct proto_ops macvtap_socket_ops = { |
| 1145 | .sendmsg = macvtap_sendmsg, |
| 1146 | .recvmsg = macvtap_recvmsg, |
Jason Wang | 362899b | 2016-07-15 03:46:31 -0400 | [diff] [blame] | 1147 | .peek_len = macvtap_peek_len, |
Arnd Bergmann | 501c774 | 2010-02-18 05:46:50 +0000 | [diff] [blame] | 1148 | }; |
| 1149 | |
| 1150 | /* Get the underlying socket object from a macvtap file. Returns an error |
| 1151 | * unless the file is attached to a device. The returned object works like a |
| 1152 | * packet socket; it can be used with sock_sendmsg/sock_recvmsg. The caller is |
| 1153 | * responsible for holding a reference to the file for as long as the socket is in use. */ |
| 1154 | struct socket *macvtap_get_socket(struct file *file) |
| 1155 | { |
| 1156 | struct macvtap_queue *q; |
| 1157 | if (file->f_op != &macvtap_fops) |
| 1158 | return ERR_PTR(-EINVAL); |
| 1159 | q = file->private_data; |
| 1160 | if (!q) |
| 1161 | return ERR_PTR(-EBADFD); |
| 1162 | return &q->sock; |
| 1163 | } |
| 1164 | EXPORT_SYMBOL_GPL(macvtap_get_socket); |
| 1165 | |
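/*
 * Editor's sketch of how an in-kernel consumer (vhost-net is the intended
 * user) is expected to use the export above; the helper name is made up.
 * The file reference taken here must be held for as long as the returned
 * socket is used, exactly as the comment before macvtap_get_socket() states.
 */
#include <linux/err.h>
#include <linux/file.h>
#include <linux/if_macvlan.h>

static struct socket *get_macvtap_sock(int fd, struct file **filp)
{
	struct file *file = fget(fd);
	struct socket *sock;

	if (!file)
		return ERR_PTR(-EBADF);

	sock = macvtap_get_socket(file);
	if (IS_ERR(sock)) {
		fput(file);
		return sock;
	}

	*filp = file;	/* drop with fput() only after the last sock_sendmsg/sock_recvmsg */
	return sock;
}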
Sainath Grandhi | a8e0469 | 2017-02-10 16:03:46 -0800 | [diff] [blame^] | 1166 | int macvtap_queue_resize(struct macvlan_dev *vlan) |
Jason Wang | 362899b | 2016-07-15 03:46:31 -0400 | [diff] [blame] | 1167 | { |
| 1168 | struct net_device *dev = vlan->dev; |
| 1169 | struct macvtap_queue *q; |
| 1170 | struct skb_array **arrays; |
| 1171 | int n = vlan->numqueues; |
| 1172 | int ret, i = 0; |
| 1173 | |
| 1174 | arrays = kmalloc_array(n, sizeof(*arrays), GFP_KERNEL); |
| 1175 | if (!arrays) |
| 1176 | return -ENOMEM; |
| 1177 | |
| 1178 | list_for_each_entry(q, &vlan->queue_list, next) |
| 1179 | arrays[i++] = &q->skb_array; |
| 1180 | |
| 1181 | ret = skb_array_resize_multiple(arrays, n, |
| 1182 | dev->tx_queue_len, GFP_KERNEL); |
| 1183 | |
| 1184 | kfree(arrays); |
| 1185 | return ret; |
| 1186 | } |
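/*
 * Editor's note: macvtap_queue_resize() is expected to be invoked from the
 * macvtap netdevice event handling when the lower device's tx_queue_len
 * changes (e.g. "ip link set <dev> txqueuelen 2000"), resizing every queue's
 * skb_array in one go via skb_array_resize_multiple().
 */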