#include <linux/etherdevice.h>
#include <linux/if_macvlan.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/nsproxy.h>
#include <linux/compat.h>
#include <linux/if_tun.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/cache.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/wait.h>
#include <linux/cdev.h>
#include <linux/idr.h>
#include <linux/fs.h>

#include <net/net_namespace.h>
#include <net/rtnetlink.h>
#include <net/sock.h>
#include <linux/virtio_net.h>

/*
 * A macvtap queue is the central object of this driver; it connects
 * an open character device to a macvlan interface. There can be
 * multiple queues on one interface, which map back to queues
 * implemented in hardware on the underlying device.
 *
 * macvtap_proto is used to allocate queues through the sock allocation
 * mechanism.
 */
struct macvtap_queue {
	struct sock sk;
	struct socket sock;
	struct socket_wq wq;
	int vnet_hdr_sz;
	struct macvlan_dev __rcu *vlan;
	struct file *file;
	unsigned int flags;
	u16 queue_index;
	bool enabled;
	struct list_head next;
};

static struct proto macvtap_proto = {
	.name = "macvtap",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct macvtap_queue),
};

/*
 * Variables for dealing with macvtap device numbers.
 */
static dev_t macvtap_major;
#define MACVTAP_NUM_DEVS (1U << MINORBITS)
static DEFINE_MUTEX(minor_lock);
static DEFINE_IDR(minor_idr);

#define GOODCOPY_LEN 128
static struct class *macvtap_class;
static struct cdev macvtap_cdev;

static const struct proto_ops macvtap_socket_ops;

#define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \
		      NETIF_F_TSO6 | NETIF_F_UFO)
#define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO)
#define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG)

/*
 * RCU usage:
 * The macvtap_queue and the macvlan_dev are loosely coupled; the
 * pointers from one to the other can only be read while rcu_read_lock()
 * or the rtnl lock is held.
 *
 * Both the file and the macvlan_dev hold a reference on the macvtap_queue
 * through sock_hold(&q->sk). When the macvlan_dev goes away first,
 * q->vlan becomes inaccessible. When the file gets closed,
 * macvtap_get_queue() fails.
 *
 * There may still be references to the struct sock inside of the
 * queue from outbound SKBs, but these never reference back to the
 * file or the dev. The data structure is freed through __sk_free
 * when both our references and any pending SKBs are gone.
 */

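/*
 * Make an already-created queue visible to the RX path by publishing it
 * in vlan->taps[]. Runs under the rtnl lock; used by the
 * IFF_ATTACH_QUEUE path in macvtap_ioctl_set_queue() below.
 */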
static int macvtap_enable_queue(struct net_device *dev, struct file *file,
				struct macvtap_queue *q)
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	int err = -EINVAL;

	ASSERT_RTNL();

	if (q->enabled)
		goto out;

	err = 0;
	rcu_assign_pointer(vlan->taps[vlan->numvtaps], q);
	q->queue_index = vlan->numvtaps;
	q->enabled = true;

	vlan->numvtaps++;
out:
	return err;
}

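/*
 * Bind a freshly opened file to the macvlan device: take a reference on
 * the queue's socket, publish the queue in vlan->taps[] and add it to
 * the per-device queue list. Called from macvtap_open().
 */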
static int macvtap_set_queue(struct net_device *dev, struct file *file,
			     struct macvtap_queue *q)
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	int err = -EBUSY;

	rtnl_lock();
	if (vlan->numqueues == MAX_MACVTAP_QUEUES)
		goto out;

	err = 0;
	rcu_assign_pointer(q->vlan, vlan);
	rcu_assign_pointer(vlan->taps[vlan->numvtaps], q);
	sock_hold(&q->sk);

	q->file = file;
	q->queue_index = vlan->numvtaps;
	q->enabled = true;
	file->private_data = q;
	list_add_tail(&q->next, &vlan->queue_list);

	vlan->numvtaps++;
	vlan->numqueues++;

out:
	rtnl_unlock();
	return err;
}

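/*
 * Remove a queue from the RX path without tearing it down: the last
 * enabled queue is moved into the freed slot so that vlan->taps[]
 * stays dense. The caller must hold the rtnl lock.
 */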
static int macvtap_disable_queue(struct macvtap_queue *q)
{
	struct macvlan_dev *vlan;
	struct macvtap_queue *nq;

	ASSERT_RTNL();
	if (!q->enabled)
		return -EINVAL;

	vlan = rtnl_dereference(q->vlan);

	if (vlan) {
		int index = q->queue_index;
		BUG_ON(index >= vlan->numvtaps);
		nq = rtnl_dereference(vlan->taps[vlan->numvtaps - 1]);
		nq->queue_index = index;

		rcu_assign_pointer(vlan->taps[index], nq);
		RCU_INIT_POINTER(vlan->taps[vlan->numvtaps - 1], NULL);
		q->enabled = false;

		vlan->numvtaps--;
	}

	return 0;
}

/*
 * The file owning the queue got closed; give up both
 * the reference that the file holds as well as the
 * one from the macvlan_dev if that still exists.
 *
 * Holding the rtnl lock makes sure that we don't get
 * to the queue again after destroying it.
 */
static void macvtap_put_queue(struct macvtap_queue *q)
{
	struct macvlan_dev *vlan;

	rtnl_lock();
	vlan = rtnl_dereference(q->vlan);

	if (vlan) {
		if (q->enabled)
			BUG_ON(macvtap_disable_queue(q));

		vlan->numqueues--;
		RCU_INIT_POINTER(q->vlan, NULL);
		sock_put(&q->sk);
		list_del_init(&q->next);
	}

	rtnl_unlock();

	synchronize_rcu();
	sock_put(&q->sk);
}

/*
 * Select a queue based on the skb's flow hash. If there is no hash,
 * fall back to the rx queue the packet arrived on. If all fails,
 * use the first available queue.
 * Cache vlan->numvtaps since it can become zero during the execution
 * of this function.
 */
static struct macvtap_queue *macvtap_get_queue(struct net_device *dev,
					       struct sk_buff *skb)
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	struct macvtap_queue *tap = NULL;
	/* Access to taps array is protected by rcu, but access to numvtaps
	 * isn't. Below we use it to lookup a queue, but treat it as a hint
	 * and validate that the result isn't NULL - in case we are
	 * racing against queue removal.
	 */
	int numvtaps = ACCESS_ONCE(vlan->numvtaps);
	__u32 rxq;

	if (!numvtaps)
		goto out;

	/* Check if we can use flow to select a queue */
	rxq = skb_get_rxhash(skb);
	if (rxq) {
		tap = rcu_dereference(vlan->taps[rxq % numvtaps]);
		goto out;
	}

	if (likely(skb_rx_queue_recorded(skb))) {
		rxq = skb_get_rx_queue(skb);

		while (unlikely(rxq >= numvtaps))
			rxq -= numvtaps;

		tap = rcu_dereference(vlan->taps[rxq]);
		goto out;
	}

	tap = rcu_dereference(vlan->taps[0]);
out:
	return tap;
}

/*
 * The net_device is going away; give up the reference
 * that it holds on all queues and safely set the pointer
 * from the queues to NULL.
 */
static void macvtap_del_queues(struct net_device *dev)
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	struct macvtap_queue *q, *tmp, *qlist[MAX_MACVTAP_QUEUES];
	int i, j = 0;

	ASSERT_RTNL();
	list_for_each_entry_safe(q, tmp, &vlan->queue_list, next) {
		list_del_init(&q->next);
		qlist[j++] = q;
		RCU_INIT_POINTER(q->vlan, NULL);
		if (q->enabled)
			vlan->numvtaps--;
		vlan->numqueues--;
	}
	for (i = 0; i < vlan->numvtaps; i++)
		RCU_INIT_POINTER(vlan->taps[i], NULL);
	BUG_ON(vlan->numvtaps);
	BUG_ON(vlan->numqueues);
	/* guarantee that any future macvtap_set_queue will fail */
	vlan->numvtaps = MAX_MACVTAP_QUEUES;

	for (--j; j >= 0; j--)
		sock_put(&qlist[j]->sk);
}

/*
 * Forward happens for data that gets sent from one macvlan
 * endpoint to another one in bridge mode. We just take
 * the skb and put it into the receive queue.
 */
static int macvtap_forward(struct net_device *dev, struct sk_buff *skb)
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	struct macvtap_queue *q = macvtap_get_queue(dev, skb);
	netdev_features_t features = TAP_FEATURES;

	if (!q)
		goto drop;

	if (skb_queue_len(&q->sk.sk_receive_queue) >= dev->tx_queue_len)
		goto drop;

	skb->dev = dev;
	/* Apply the forward feature mask so that we perform segmentation
	 * according to the user's wishes. This only works if VNET_HDR is
	 * enabled.
	 */
	if (q->flags & IFF_VNET_HDR)
		features |= vlan->tap_features;
	if (netif_needs_gso(skb, features)) {
		struct sk_buff *segs = __skb_gso_segment(skb, features, false);

		if (IS_ERR(segs))
			goto drop;

		if (!segs) {
			skb_queue_tail(&q->sk.sk_receive_queue, skb);
			goto wake_up;
		}

		kfree_skb(skb);
		while (segs) {
			struct sk_buff *nskb = segs->next;

			segs->next = NULL;
			skb_queue_tail(&q->sk.sk_receive_queue, segs);
			segs = nskb;
		}
	} else {
		skb_queue_tail(&q->sk.sk_receive_queue, skb);
	}

wake_up:
	wake_up_interruptible_poll(sk_sleep(&q->sk), POLLIN | POLLRDNORM | POLLRDBAND);
	return NET_RX_SUCCESS;

drop:
	kfree_skb(skb);
	return NET_RX_DROP;
}

/*
 * Receive is for data coming in from the external interface (lowerdev).
 * In the macvtap case we can treat it the same way as forwarded
 * traffic, which plain macvlan cannot.
 */
static int macvtap_receive(struct sk_buff *skb)
{
	skb_push(skb, ETH_HLEN);
	return macvtap_forward(skb->dev, skb);
}

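/*
 * Character device minor numbers are handed out from minor_idr and map
 * a macvtap device node (typically /dev/tapN) back to its macvlan_dev.
 * All idr accesses are serialized by minor_lock.
 */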
static int macvtap_get_minor(struct macvlan_dev *vlan)
{
	int retval = -ENOMEM;

	mutex_lock(&minor_lock);
	retval = idr_alloc(&minor_idr, vlan, 1, MACVTAP_NUM_DEVS, GFP_KERNEL);
	if (retval >= 0) {
		vlan->minor = retval;
	} else if (retval == -ENOSPC) {
		printk(KERN_ERR "too many macvtap devices\n");
		retval = -EINVAL;
	}
	mutex_unlock(&minor_lock);
	return retval < 0 ? retval : 0;
}

static void macvtap_free_minor(struct macvlan_dev *vlan)
{
	mutex_lock(&minor_lock);
	if (vlan->minor) {
		idr_remove(&minor_idr, vlan->minor);
		vlan->minor = 0;
	}
	mutex_unlock(&minor_lock);
}

static struct net_device *dev_get_by_macvtap_minor(int minor)
{
	struct net_device *dev = NULL;
	struct macvlan_dev *vlan;

	mutex_lock(&minor_lock);
	vlan = idr_find(&minor_idr, minor);
	if (vlan) {
		dev = vlan->dev;
		dev_hold(dev);
	}
	mutex_unlock(&minor_lock);
	return dev;
}

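/*
 * rtnl_link_ops hooks: a macvtap link is a macvlan link with an extra
 * character device on top, so link creation and teardown mostly
 * delegate to the macvlan helpers.
 */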
static int macvtap_newlink(struct net *src_net,
			   struct net_device *dev,
			   struct nlattr *tb[],
			   struct nlattr *data[])
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	INIT_LIST_HEAD(&vlan->queue_list);

	/* Since macvlan supports all offloads by default, make
	 * tap support all offloads also.
	 */
	vlan->tap_features = TUN_OFFLOADS;

	/* Don't put anything that may fail after macvlan_common_newlink
	 * because we can't undo what it does.
	 */
	return macvlan_common_newlink(src_net, dev, tb, data,
				      macvtap_receive, macvtap_forward);
}

static void macvtap_dellink(struct net_device *dev,
			    struct list_head *head)
{
	macvtap_del_queues(dev);
	macvlan_dellink(dev, head);
}

static void macvtap_setup(struct net_device *dev)
{
	macvlan_common_setup(dev);
	dev->tx_queue_len = TUN_READQ_SIZE;
}

static struct rtnl_link_ops macvtap_link_ops __read_mostly = {
	.kind		= "macvtap",
	.setup		= macvtap_setup,
	.newlink	= macvtap_newlink,
	.dellink	= macvtap_dellink,
};


static void macvtap_sock_write_space(struct sock *sk)
{
	wait_queue_head_t *wqueue;

	if (!sock_writeable(sk) ||
	    !test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags))
		return;

	wqueue = sk_sleep(sk);
	if (wqueue && waitqueue_active(wqueue))
		wake_up_interruptible_poll(wqueue, POLLOUT | POLLWRNORM | POLLWRBAND);
}

static void macvtap_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_receive_queue);
}

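/*
 * open() on the character device: look up the macvlan device that owns
 * this minor, allocate a queue backed by a socket and attach it to the
 * device. Zerocopy transmit is only offered when the lower device can
 * handle SG and high-DMA buffers.
 */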
static int macvtap_open(struct inode *inode, struct file *file)
{
	struct net *net = current->nsproxy->net_ns;
	struct net_device *dev = dev_get_by_macvtap_minor(iminor(inode));
	struct macvtap_queue *q;
	int err;

	err = -ENODEV;
	if (!dev)
		goto out;

	err = -ENOMEM;
	q = (struct macvtap_queue *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
					     &macvtap_proto);
	if (!q)
		goto out;

	RCU_INIT_POINTER(q->sock.wq, &q->wq);
	init_waitqueue_head(&q->wq.wait);
	q->sock.type = SOCK_RAW;
	q->sock.state = SS_CONNECTED;
	q->sock.file = file;
	q->sock.ops = &macvtap_socket_ops;
	sock_init_data(&q->sock, &q->sk);
	q->sk.sk_write_space = macvtap_sock_write_space;
	q->sk.sk_destruct = macvtap_sock_destruct;
	q->flags = IFF_VNET_HDR | IFF_NO_PI | IFF_TAP;
	q->vnet_hdr_sz = sizeof(struct virtio_net_hdr);

	/*
	 * So far only KVM virtio_net uses macvtap; enable zero copy between
	 * guest kernel and host kernel when the lower device supports
	 * zerocopy.
	 *
	 * macvlan supports zerocopy iff the lower device supports zero
	 * copy, so we don't have to look at the lower device directly.
	 */
	if ((dev->features & NETIF_F_HIGHDMA) && (dev->features & NETIF_F_SG))
		sock_set_flag(&q->sk, SOCK_ZEROCOPY);

	err = macvtap_set_queue(dev, file, q);
	if (err)
		sock_put(&q->sk);

out:
	if (dev)
		dev_put(dev);

	return err;
}

static int macvtap_release(struct inode *inode, struct file *file)
{
	struct macvtap_queue *q = file->private_data;
	macvtap_put_queue(q);
	return 0;
}

static unsigned int macvtap_poll(struct file *file, poll_table *wait)
{
	struct macvtap_queue *q = file->private_data;
	unsigned int mask = POLLERR;

	if (!q)
		goto out;

	mask = 0;
	poll_wait(file, &q->wq.wait, wait);

	if (!skb_queue_empty(&q->sk.sk_receive_queue))
		mask |= POLLIN | POLLRDNORM;

	if (sock_writeable(&q->sk) ||
	    (!test_and_set_bit(SOCK_ASYNC_NOSPACE, &q->sock.flags) &&
	     sock_writeable(&q->sk)))
		mask |= POLLOUT | POLLWRNORM;

out:
	return mask;
}

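/*
 * Allocate an skb for a packet written from user space: "linear" bytes
 * go into the linear area, the rest is left for paged data. Small
 * packets are made fully linear.
 */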
static inline struct sk_buff *macvtap_alloc_skb(struct sock *sk, size_t prepad,
						size_t len, size_t linear,
						int noblock, int *err)
{
	struct sk_buff *skb;

	/* Under a page? Don't bother with paged skb. */
	if (prepad + len < PAGE_SIZE || !linear)
		linear = len;

	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
				   err);
	if (!skb)
		return NULL;

	skb_reserve(skb, prepad);
	skb_put(skb, linear);
	skb->data_len = len - linear;
	skb->len += len - linear;

	return skb;
}

/* Set skb frags from an iovec; this could move to core network code for reuse. */
static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
				  int offset, size_t count)
{
	int len = iov_length(from, count) - offset;
	int copy = skb_headlen(skb);
	int size, offset1 = 0;
	int i = 0;

	/* Skip over from offset */
	while (count && (offset >= from->iov_len)) {
		offset -= from->iov_len;
		++from;
		--count;
	}

	/* copy up to skb headlen */
	while (count && (copy > 0)) {
		size = min_t(unsigned int, copy, from->iov_len - offset);
		if (copy_from_user(skb->data + offset1, from->iov_base + offset,
				   size))
			return -EFAULT;
		if (copy > size) {
			++from;
			--count;
			offset = 0;
		} else
			offset += size;
		copy -= size;
		offset1 += size;
	}

	if (len == offset1)
		return 0;

	while (count--) {
		struct page *page[MAX_SKB_FRAGS];
		int num_pages;
		unsigned long base;
		unsigned long truesize;

		len = from->iov_len - offset;
		if (!len) {
			offset = 0;
			++from;
			continue;
		}
		base = (unsigned long)from->iov_base + offset;
		size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
		if (i + size > MAX_SKB_FRAGS)
			return -EMSGSIZE;
		num_pages = get_user_pages_fast(base, size, 0, &page[i]);
		if (num_pages != size) {
			int j;

			for (j = 0; j < num_pages; j++)
				put_page(page[i + j]);
			return -EFAULT;
		}
		truesize = size * PAGE_SIZE;
		skb->data_len += len;
		skb->len += len;
		skb->truesize += truesize;
		atomic_add(truesize, &skb->sk->sk_wmem_alloc);
		while (len) {
			int off = base & ~PAGE_MASK;
			int size = min_t(int, len, PAGE_SIZE - off);
			__skb_fill_page_desc(skb, i, page[i], off, size);
			skb_shinfo(skb)->nr_frags++;
			/* increase sk_wmem_alloc */
			base += size;
			len -= size;
			i++;
		}
		offset = 0;
		++from;
	}
	return 0;
}

/*
 * macvtap_skb_from_vnet_hdr and macvtap_skb_to_vnet_hdr should
 * be shared with the tun/tap driver.
 */
static int macvtap_skb_from_vnet_hdr(struct sk_buff *skb,
				     struct virtio_net_hdr *vnet_hdr)
{
	unsigned short gso_type = 0;
	if (vnet_hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
		switch (vnet_hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
		case VIRTIO_NET_HDR_GSO_TCPV4:
			gso_type = SKB_GSO_TCPV4;
			break;
		case VIRTIO_NET_HDR_GSO_TCPV6:
			gso_type = SKB_GSO_TCPV6;
			break;
		case VIRTIO_NET_HDR_GSO_UDP:
			gso_type = SKB_GSO_UDP;
			break;
		default:
			return -EINVAL;
		}

		if (vnet_hdr->gso_type & VIRTIO_NET_HDR_GSO_ECN)
			gso_type |= SKB_GSO_TCP_ECN;

		if (vnet_hdr->gso_size == 0)
			return -EINVAL;
	}

	if (vnet_hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
		if (!skb_partial_csum_set(skb, vnet_hdr->csum_start,
					  vnet_hdr->csum_offset))
			return -EINVAL;
	}

	if (vnet_hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
		skb_shinfo(skb)->gso_size = vnet_hdr->gso_size;
		skb_shinfo(skb)->gso_type = gso_type;

		/* Header must be checked, and gso_segs computed. */
		skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
		skb_shinfo(skb)->gso_segs = 0;
	}
	return 0;
}

static int macvtap_skb_to_vnet_hdr(const struct sk_buff *skb,
				   struct virtio_net_hdr *vnet_hdr)
{
	memset(vnet_hdr, 0, sizeof(*vnet_hdr));

	if (skb_is_gso(skb)) {
		struct skb_shared_info *sinfo = skb_shinfo(skb);

		/* This is a hint as to how much should be linear. */
		vnet_hdr->hdr_len = skb_headlen(skb);
		vnet_hdr->gso_size = sinfo->gso_size;
		if (sinfo->gso_type & SKB_GSO_TCPV4)
			vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
		else if (sinfo->gso_type & SKB_GSO_TCPV6)
			vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
		else if (sinfo->gso_type & SKB_GSO_UDP)
			vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
		else
			BUG();
		if (sinfo->gso_type & SKB_GSO_TCP_ECN)
			vnet_hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN;
	} else
		vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_NONE;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		vnet_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
		vnet_hdr->csum_start = skb_checksum_start_offset(skb);
		vnet_hdr->csum_offset = skb->csum_offset;
	} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		vnet_hdr->flags = VIRTIO_NET_HDR_F_DATA_VALID;
	} /* else everything is zero */

	return 0;
}

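/*
 * Worst-case number of pages spanned by an iovec starting at @offset;
 * used to decide whether a zerocopy write still fits in MAX_SKB_FRAGS.
 */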
static unsigned long iov_pages(const struct iovec *iv, int offset,
			       unsigned long nr_segs)
{
	unsigned long seg, base;
	int pages = 0, len, size;

	while (nr_segs && (offset >= iv->iov_len)) {
		offset -= iv->iov_len;
		++iv;
		--nr_segs;
	}

	for (seg = 0; seg < nr_segs; seg++) {
		base = (unsigned long)iv[seg].iov_base + offset;
		len = iv[seg].iov_len - offset;
		size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
		pages += size;
		offset = 0;
	}

	return pages;
}

/* Get packet from user space buffer */
static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
				const struct iovec *iv, unsigned long total_len,
				size_t count, int noblock)
{
	struct sk_buff *skb;
	struct macvlan_dev *vlan;
	unsigned long len = total_len;
	int err;
	struct virtio_net_hdr vnet_hdr = { 0 };
	int vnet_hdr_len = 0;
	int copylen = 0;
	bool zerocopy = false;
	size_t linear;

	if (q->flags & IFF_VNET_HDR) {
		vnet_hdr_len = q->vnet_hdr_sz;

		err = -EINVAL;
		if (len < vnet_hdr_len)
			goto err;
		len -= vnet_hdr_len;

		err = memcpy_fromiovecend((void *)&vnet_hdr, iv, 0,
					  sizeof(vnet_hdr));
		if (err < 0)
			goto err;
		if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
		    vnet_hdr.csum_start + vnet_hdr.csum_offset + 2 >
							vnet_hdr.hdr_len)
			vnet_hdr.hdr_len = vnet_hdr.csum_start +
						vnet_hdr.csum_offset + 2;
		err = -EINVAL;
		if (vnet_hdr.hdr_len > len)
			goto err;
	}

	err = -EINVAL;
	if (unlikely(len < ETH_HLEN))
		goto err;

	err = -EMSGSIZE;
	if (unlikely(count > UIO_MAXIOV))
		goto err;

	if (m && m->msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY)) {
		copylen = vnet_hdr.hdr_len ? vnet_hdr.hdr_len : GOODCOPY_LEN;
		linear = copylen;
		if (iov_pages(iv, vnet_hdr_len + copylen, count)
		    <= MAX_SKB_FRAGS)
			zerocopy = true;
	}

	if (!zerocopy) {
		copylen = len;
		linear = vnet_hdr.hdr_len;
	}

	skb = macvtap_alloc_skb(&q->sk, NET_IP_ALIGN, copylen,
				linear, noblock, &err);
	if (!skb)
		goto err;

	if (zerocopy)
		err = zerocopy_sg_from_iovec(skb, iv, vnet_hdr_len, count);
	else {
		err = skb_copy_datagram_from_iovec(skb, 0, iv, vnet_hdr_len,
						   len);
		if (!err && m && m->msg_control) {
			struct ubuf_info *uarg = m->msg_control;
			uarg->callback(uarg, false);
		}
	}

	if (err)
		goto err_kfree;

	skb_set_network_header(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	skb->protocol = eth_hdr(skb)->h_proto;

	if (vnet_hdr_len) {
		err = macvtap_skb_from_vnet_hdr(skb, &vnet_hdr);
		if (err)
			goto err_kfree;
	}

	skb_probe_transport_header(skb, ETH_HLEN);

	rcu_read_lock();
	vlan = rcu_dereference(q->vlan);
	/* copy skb_ubuf_info for callback when skb has no error */
	if (zerocopy) {
		skb_shinfo(skb)->destructor_arg = m->msg_control;
		skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
		skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
	}
	if (vlan) {
		local_bh_disable();
		macvlan_start_xmit(skb, vlan->dev);
		local_bh_enable();
	} else {
		kfree_skb(skb);
	}
	rcu_read_unlock();

	return total_len;

err_kfree:
	kfree_skb(skb);

err:
	rcu_read_lock();
	vlan = rcu_dereference(q->vlan);
	if (vlan)
		vlan->dev->stats.tx_dropped++;
	rcu_read_unlock();

	return err;
}

static ssize_t macvtap_aio_write(struct kiocb *iocb, const struct iovec *iv,
				 unsigned long count, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	ssize_t result = -ENOLINK;
	struct macvtap_queue *q = file->private_data;

	result = macvtap_get_user(q, NULL, iv, iov_length(iv, count), count,
				  file->f_flags & O_NONBLOCK);
	return result;
}

/* Put packet to the user space buffer */
static ssize_t macvtap_put_user(struct macvtap_queue *q,
				const struct sk_buff *skb,
				const struct iovec *iv, int len)
{
	struct macvlan_dev *vlan;
	int ret;
	int vnet_hdr_len = 0;
	int vlan_offset = 0;
	int copied;

	if (q->flags & IFF_VNET_HDR) {
		struct virtio_net_hdr vnet_hdr;
		vnet_hdr_len = q->vnet_hdr_sz;
		if ((len -= vnet_hdr_len) < 0)
			return -EINVAL;

		ret = macvtap_skb_to_vnet_hdr(skb, &vnet_hdr);
		if (ret)
			return ret;

		if (memcpy_toiovecend(iv, (void *)&vnet_hdr, 0, sizeof(vnet_hdr)))
			return -EFAULT;
	}
	copied = vnet_hdr_len;

	if (!vlan_tx_tag_present(skb))
		len = min_t(int, skb->len, len);
	else {
		int copy;
		struct {
			__be16 h_vlan_proto;
			__be16 h_vlan_TCI;
		} veth;
		veth.h_vlan_proto = skb->vlan_proto;
		veth.h_vlan_TCI = htons(vlan_tx_tag_get(skb));

		vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
		len = min_t(int, skb->len + VLAN_HLEN, len);

		copy = min_t(int, vlan_offset, len);
		ret = skb_copy_datagram_const_iovec(skb, 0, iv, copied, copy);
		len -= copy;
		copied += copy;
		if (ret || !len)
			goto done;

		copy = min_t(int, sizeof(veth), len);
		ret = memcpy_toiovecend(iv, (void *)&veth, copied, copy);
		len -= copy;
		copied += copy;
		if (ret || !len)
			goto done;
	}

	ret = skb_copy_datagram_const_iovec(skb, vlan_offset, iv, copied, len);
	copied += len;

done:
	rcu_read_lock();
	vlan = rcu_dereference(q->vlan);
	if (vlan) {
		preempt_disable();
		macvlan_count_rx(vlan, copied - vnet_hdr_len, ret == 0, 0);
		preempt_enable();
	}
	rcu_read_unlock();

	return ret ? ret : copied;
}

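/*
 * Common receive path: dequeue one skb from the queue's receive queue,
 * copy it to the iovec and free it. Blocks on an empty queue unless
 * noblock is set.
 */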
static ssize_t macvtap_do_read(struct macvtap_queue *q, struct kiocb *iocb,
			       const struct iovec *iv, unsigned long len,
			       int noblock)
{
	DEFINE_WAIT(wait);
	struct sk_buff *skb;
	ssize_t ret = 0;

	while (len) {
		if (!noblock)
			prepare_to_wait(sk_sleep(&q->sk), &wait,
					TASK_INTERRUPTIBLE);

		/* Read frames from the queue */
		skb = skb_dequeue(&q->sk.sk_receive_queue);
		if (!skb) {
			if (noblock) {
				ret = -EAGAIN;
				break;
			}
			if (signal_pending(current)) {
				ret = -ERESTARTSYS;
				break;
			}
			/* Nothing to read, let's sleep */
			schedule();
			continue;
		}
		ret = macvtap_put_user(q, skb, iv, len);
		kfree_skb(skb);
		break;
	}

	if (!noblock)
		finish_wait(sk_sleep(&q->sk), &wait);
	return ret;
}

static ssize_t macvtap_aio_read(struct kiocb *iocb, const struct iovec *iv,
				unsigned long count, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct macvtap_queue *q = file->private_data;
	ssize_t len, ret = 0;

	len = iov_length(iv, count);
	if (len < 0) {
		ret = -EINVAL;
		goto out;
	}

	ret = macvtap_do_read(q, iocb, iv, len, file->f_flags & O_NONBLOCK);
	ret = min_t(ssize_t, ret, len); /* XXX copied from tun.c. Why? */
out:
	return ret;
}

static struct macvlan_dev *macvtap_get_vlan(struct macvtap_queue *q)
{
	struct macvlan_dev *vlan;

	ASSERT_RTNL();
	vlan = rtnl_dereference(q->vlan);
	if (vlan)
		dev_hold(vlan->dev);

	return vlan;
}

static void macvtap_put_vlan(struct macvlan_dev *vlan)
{
	dev_put(vlan->dev);
}

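/*
 * Attach or detach a queue on request from user space (the multiqueue
 * ioctl): IFF_ATTACH_QUEUE re-enables a disabled queue, IFF_DETACH_QUEUE
 * takes it out of the RX path while keeping the file open.
 */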
static int macvtap_ioctl_set_queue(struct file *file, unsigned int flags)
{
	struct macvtap_queue *q = file->private_data;
	struct macvlan_dev *vlan;
	int ret;

	vlan = macvtap_get_vlan(q);
	if (!vlan)
		return -EINVAL;

	if (flags & IFF_ATTACH_QUEUE)
		ret = macvtap_enable_queue(vlan->dev, file, q);
	else if (flags & IFF_DETACH_QUEUE)
		ret = macvtap_disable_queue(q);
	else
		ret = -EINVAL;

	macvtap_put_vlan(vlan);
	return ret;
}

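/*
 * Backend for the TUNSETOFFLOAD-style ioctl: translate the TUN_F_* bits
 * requested by user space into netdev feature bits. See the comment
 * below on how the meaning of the TSO bits is inverted relative to
 * tun/tap.
 */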
static int set_offload(struct macvtap_queue *q, unsigned long arg)
{
	struct macvlan_dev *vlan;
	netdev_features_t features;
	netdev_features_t feature_mask = 0;

	vlan = rtnl_dereference(q->vlan);
	if (!vlan)
		return -ENOLINK;

	features = vlan->dev->features;

	if (arg & TUN_F_CSUM) {
		feature_mask = NETIF_F_HW_CSUM;

		if (arg & (TUN_F_TSO4 | TUN_F_TSO6)) {
			if (arg & TUN_F_TSO_ECN)
				feature_mask |= NETIF_F_TSO_ECN;
			if (arg & TUN_F_TSO4)
				feature_mask |= NETIF_F_TSO;
			if (arg & TUN_F_TSO6)
				feature_mask |= NETIF_F_TSO6;
		}

		if (arg & TUN_F_UFO)
			feature_mask |= NETIF_F_UFO;
	}

| 1056 | /* The tun/tap driver interprets the TSO offload bits from the
| 1057 |  * receiver's point of view: a set TSO bit means user space is
| 1058 |  * willing to accept TSO frames, a cleared bit means user space
| 1059 |  * cannot handle them.
| 1060 |  * macvtap follows the same inverted convention.
| 1061 |  * When user space turns off TSO, we also turn off GSO/LRO on the
| 1062 |  * lower device so that user space will not receive TSO frames.
| 1063 |  */
| 1064 | if (feature_mask & (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_UFO)) |
| 1065 | features |= RX_OFFLOADS; |
| 1066 | else |
| 1067 | features &= ~RX_OFFLOADS; |
| 1068 | |
| 1069 | /* tap_features are the same as features on tun/tap and |
| 1070 | * reflect user expectations. |
| 1071 | */ |
Vlad Yasevich | a567dd6 | 2013-08-16 15:25:00 -0400 | [diff] [blame] | 1072 | vlan->tap_features = feature_mask; |
Vlad Yasevich | 2be5c76 | 2013-06-25 16:04:21 -0400 | [diff] [blame] | 1073 | vlan->set_features = features; |
| 1074 | netdev_update_features(vlan->dev); |
| 1075 | |
| 1076 | return 0; |
| 1077 | } |
| 1078 | |
Arnd Bergmann | 20d29d7 | 2010-01-30 12:24:26 +0000 | [diff] [blame] | 1079 | /* |
| 1080 | * provide compatibility with generic tun/tap interface |
| 1081 | */ |
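/* Illustrative sketch of how user space typically drives these ioctls
 * (the device node name and lack of error handling below are assumptions
 * made for the example, not something defined in this file):
 *
 *	int fd = open("/dev/tap5", O_RDWR);	// node created for ifindex 5
 *	struct ifreq ifr = { 0 };
 *
 *	ifr.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_VNET_HDR;
 *	ioctl(fd, TUNSETIFF, &ifr);
 *
 *	int hdrsz = sizeof(struct virtio_net_hdr);
 *	ioctl(fd, TUNSETVNETHDRSZ, &hdrsz);
 *	ioctl(fd, TUNSETOFFLOAD, TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6);
 */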
| 1082 | static long macvtap_ioctl(struct file *file, unsigned int cmd, |
| 1083 | unsigned long arg) |
| 1084 | { |
Arnd Bergmann | 02df55d | 2010-02-18 05:45:36 +0000 | [diff] [blame] | 1085 | struct macvtap_queue *q = file->private_data; |
| 1086 | struct macvlan_dev *vlan; |
Arnd Bergmann | 20d29d7 | 2010-01-30 12:24:26 +0000 | [diff] [blame] | 1087 | void __user *argp = (void __user *)arg; |
| 1088 | struct ifreq __user *ifr = argp; |
| 1089 | unsigned int __user *up = argp; |
| 1090 | unsigned int u; |
Michael S. Tsirkin | 55afbd0 | 2010-04-29 13:50:48 +0300 | [diff] [blame] | 1091 | int __user *sp = argp; |
| 1092 | int s; |
Arnd Bergmann | 02df55d | 2010-02-18 05:45:36 +0000 | [diff] [blame] | 1093 | int ret; |
Arnd Bergmann | 20d29d7 | 2010-01-30 12:24:26 +0000 | [diff] [blame] | 1094 | |
| 1095 | switch (cmd) { |
| 1096 | case TUNSETIFF: |
| 1097 | /* ignore the name, just look at flags */ |
| 1098 | if (get_user(u, &ifr->ifr_flags)) |
| 1099 | return -EFAULT; |
Arnd Bergmann | b9fb9ee | 2010-02-18 05:48:17 +0000 | [diff] [blame] | 1100 | |
| 1101 | ret = 0; |
Jason Wang | 815f236 | 2013-06-05 23:54:39 +0000 | [diff] [blame] | 1102 | if ((u & ~(IFF_VNET_HDR | IFF_MULTI_QUEUE)) != |
| 1103 | (IFF_NO_PI | IFF_TAP)) |
Arnd Bergmann | b9fb9ee | 2010-02-18 05:48:17 +0000 | [diff] [blame] | 1104 | ret = -EINVAL; |
| 1105 | else |
| 1106 | q->flags = u; |
| 1107 | |
| 1108 | return ret; |
Arnd Bergmann | 20d29d7 | 2010-01-30 12:24:26 +0000 | [diff] [blame] | 1109 | |
| 1110 | case TUNGETIFF: |
Vlad Yasevich | 441ac0f | 2013-06-25 16:04:19 -0400 | [diff] [blame] | 1111 | rtnl_lock(); |
Jason Wang | 8f475a3 | 2013-06-05 23:54:36 +0000 | [diff] [blame] | 1112 | vlan = macvtap_get_vlan(q); |
Vlad Yasevich | 441ac0f | 2013-06-25 16:04:19 -0400 | [diff] [blame] | 1113 | if (!vlan) { |
| 1114 | rtnl_unlock(); |
Arnd Bergmann | 02df55d | 2010-02-18 05:45:36 +0000 | [diff] [blame] | 1115 | return -ENOLINK; |
Vlad Yasevich | 441ac0f | 2013-06-25 16:04:19 -0400 | [diff] [blame] | 1116 | } |
Arnd Bergmann | 02df55d | 2010-02-18 05:45:36 +0000 | [diff] [blame] | 1117 | |
| 1118 | ret = 0; |
Eric Dumazet | 13707f9 | 2011-01-26 19:28:23 +0000 | [diff] [blame] | 1119 | if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) || |
Arnd Bergmann | b9fb9ee | 2010-02-18 05:48:17 +0000 | [diff] [blame] | 1120 | put_user(q->flags, &ifr->ifr_flags)) |
Arnd Bergmann | 02df55d | 2010-02-18 05:45:36 +0000 | [diff] [blame] | 1121 | ret = -EFAULT; |
Jason Wang | 8f475a3 | 2013-06-05 23:54:36 +0000 | [diff] [blame] | 1122 | macvtap_put_vlan(vlan); |
Vlad Yasevich | 441ac0f | 2013-06-25 16:04:19 -0400 | [diff] [blame] | 1123 | rtnl_unlock(); |
Arnd Bergmann | 02df55d | 2010-02-18 05:45:36 +0000 | [diff] [blame] | 1124 | return ret; |
Arnd Bergmann | 20d29d7 | 2010-01-30 12:24:26 +0000 | [diff] [blame] | 1125 | |
Jason Wang | 815f236 | 2013-06-05 23:54:39 +0000 | [diff] [blame] | 1126 | case TUNSETQUEUE: |
| 1127 | if (get_user(u, &ifr->ifr_flags)) |
| 1128 | return -EFAULT; |
Vlad Yasevich | 441ac0f | 2013-06-25 16:04:19 -0400 | [diff] [blame] | 1129 | rtnl_lock(); |
| 1130 | ret = macvtap_ioctl_set_queue(file, u); |
| 1131 | rtnl_unlock(); |
Jason Wang | 82a19eb | 2013-07-16 13:36:33 +0800 | [diff] [blame] | 1132 | return ret; |
Jason Wang | 815f236 | 2013-06-05 23:54:39 +0000 | [diff] [blame] | 1133 | |
Arnd Bergmann | 20d29d7 | 2010-01-30 12:24:26 +0000 | [diff] [blame] | 1134 | case TUNGETFEATURES: |
Jason Wang | df09b36 | 2013-06-05 23:54:40 +0000 | [diff] [blame] | 1135 | if (put_user(IFF_TAP | IFF_NO_PI | IFF_VNET_HDR | |
| 1136 | IFF_MULTI_QUEUE, up)) |
Arnd Bergmann | 20d29d7 | 2010-01-30 12:24:26 +0000 | [diff] [blame] | 1137 | return -EFAULT; |
| 1138 | return 0; |
| 1139 | |
| 1140 | case TUNSETSNDBUF: |
| 1141 | if (get_user(u, up)) |
| 1142 | return -EFAULT; |
| 1143 | |
Arnd Bergmann | 20d29d7 | 2010-01-30 12:24:26 +0000 | [diff] [blame] | 1144 | q->sk.sk_sndbuf = u; |
Arnd Bergmann | 20d29d7 | 2010-01-30 12:24:26 +0000 | [diff] [blame] | 1145 | return 0; |
| 1146 | |
Michael S. Tsirkin | 55afbd0 | 2010-04-29 13:50:48 +0300 | [diff] [blame] | 1147 | case TUNGETVNETHDRSZ: |
| 1148 | s = q->vnet_hdr_sz; |
| 1149 | if (put_user(s, sp)) |
| 1150 | return -EFAULT; |
| 1151 | return 0; |
| 1152 | |
| 1153 | case TUNSETVNETHDRSZ: |
| 1154 | if (get_user(s, sp)) |
| 1155 | return -EFAULT; |
| 1156 | if (s < (int)sizeof(struct virtio_net_hdr)) |
| 1157 | return -EINVAL; |
| 1158 | |
| 1159 | q->vnet_hdr_sz = s; |
| 1160 | return 0; |
| 1161 | |
Arnd Bergmann | 20d29d7 | 2010-01-30 12:24:26 +0000 | [diff] [blame] | 1162 | case TUNSETOFFLOAD: |
| 1163 | /* let the user check for future flags */ |
| 1164 | if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 | |
Arnd Bergmann | b9fb9ee | 2010-02-18 05:48:17 +0000 | [diff] [blame] | 1165 | TUN_F_TSO_ECN | TUN_F_UFO)) |
Arnd Bergmann | 20d29d7 | 2010-01-30 12:24:26 +0000 | [diff] [blame] | 1166 | return -EINVAL; |
| 1167 | |
Vlad Yasevich | 2be5c76 | 2013-06-25 16:04:21 -0400 | [diff] [blame] | 1168 | rtnl_lock(); |
| 1169 | ret = set_offload(q, arg); |
| 1170 | rtnl_unlock(); |
| 1171 | return ret; |
Arnd Bergmann | 20d29d7 | 2010-01-30 12:24:26 +0000 | [diff] [blame] | 1172 | |
| 1173 | default: |
| 1174 | return -EINVAL; |
| 1175 | } |
| 1176 | } |
| 1177 | |
| 1178 | #ifdef CONFIG_COMPAT |
| 1179 | static long macvtap_compat_ioctl(struct file *file, unsigned int cmd, |
| 1180 | unsigned long arg) |
| 1181 | { |
| 1182 | return macvtap_ioctl(file, cmd, (unsigned long)compat_ptr(arg)); |
| 1183 | } |
| 1184 | #endif |
| 1185 | |
| 1186 | static const struct file_operations macvtap_fops = { |
| 1187 | .owner = THIS_MODULE, |
| 1188 | .open = macvtap_open, |
| 1189 | .release = macvtap_release, |
| 1190 | .aio_read = macvtap_aio_read, |
| 1191 | .aio_write = macvtap_aio_write, |
| 1192 | .poll = macvtap_poll, |
| 1193 | .llseek = no_llseek, |
| 1194 | .unlocked_ioctl = macvtap_ioctl, |
| 1195 | #ifdef CONFIG_COMPAT |
| 1196 | .compat_ioctl = macvtap_compat_ioctl, |
| 1197 | #endif |
| 1198 | }; |
| 1199 | |
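/* Socket-level send/receive, used by in-kernel consumers such as vhost-net
 * through the socket returned by macvtap_get_socket(); they are thin
 * wrappers around macvtap_get_user() and macvtap_do_read().
 */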
Arnd Bergmann | 501c774 | 2010-02-18 05:46:50 +0000 | [diff] [blame] | 1200 | static int macvtap_sendmsg(struct kiocb *iocb, struct socket *sock, |
| 1201 | struct msghdr *m, size_t total_len) |
| 1202 | { |
| 1203 | struct macvtap_queue *q = container_of(sock, struct macvtap_queue, sock); |
Shirley Ma | 97bc363 | 2011-07-06 12:26:11 +0000 | [diff] [blame] | 1204 | return macvtap_get_user(q, m, m->msg_iov, total_len, m->msg_iovlen, |
Arnd Bergmann | 501c774 | 2010-02-18 05:46:50 +0000 | [diff] [blame] | 1205 | m->msg_flags & MSG_DONTWAIT); |
| 1206 | } |
| 1207 | |
| 1208 | static int macvtap_recvmsg(struct kiocb *iocb, struct socket *sock, |
| 1209 | struct msghdr *m, size_t total_len, |
| 1210 | int flags) |
| 1211 | { |
| 1212 | struct macvtap_queue *q = container_of(sock, struct macvtap_queue, sock); |
| 1213 | int ret; |
| 1214 | if (flags & ~(MSG_DONTWAIT|MSG_TRUNC)) |
| 1215 | return -EINVAL; |
| 1216 | ret = macvtap_do_read(q, iocb, m->msg_iov, total_len, |
| 1217 | flags & MSG_DONTWAIT); |
| 1218 | if (ret > total_len) { |
| 1219 | m->msg_flags |= MSG_TRUNC; |
| 1220 | ret = flags & MSG_TRUNC ? ret : total_len; |
| 1221 | } |
| 1222 | return ret; |
| 1223 | } |
| 1224 | |
| 1225 | /* Ops structure to mimic raw sockets with tun */ |
| 1226 | static const struct proto_ops macvtap_socket_ops = { |
| 1227 | .sendmsg = macvtap_sendmsg, |
| 1228 | .recvmsg = macvtap_recvmsg, |
| 1229 | }; |
| 1230 | |
| 1231 | /* Get the underlying socket object from a macvtap file. Returns an error
| 1232 |  * pointer unless the file is attached to a device. The returned object works
| 1233 |  * like a packet socket; it can be used for sock_sendmsg/sock_recvmsg. The
| 1234 |  * caller must hold a reference to the file for as long as the socket is in use. */
| 1235 | struct socket *macvtap_get_socket(struct file *file) |
| 1236 | { |
| 1237 | struct macvtap_queue *q; |
| 1238 | if (file->f_op != &macvtap_fops) |
| 1239 | return ERR_PTR(-EINVAL); |
| 1240 | q = file->private_data; |
| 1241 | if (!q) |
| 1242 | return ERR_PTR(-EBADFD); |
| 1243 | return &q->sock; |
| 1244 | } |
| 1245 | EXPORT_SYMBOL_GPL(macvtap_get_socket); |
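/* Rough sketch of the expected in-kernel usage (illustration only, with
 * error handling trimmed):
 *
 *	struct file *file = fget(fd);
 *	struct socket *sock = macvtap_get_socket(file);
 *
 *	if (IS_ERR(sock)) {
 *		fput(file);
 *		return PTR_ERR(sock);
 *	}
 *	// use sock_sendmsg()/sock_recvmsg() on sock,
 *	// then fput(file) once the socket is no longer needed
 */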
| 1246 | |
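/* Netdevice notifier: when a macvtap link is registered, allocate a minor
 * number and create the matching "tap<ifindex>" character device node under
 * the macvtap class; destroy the node and release the minor again on
 * unregister.
 */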
Eric W. Biederman | 9bf1907 | 2011-10-20 04:28:46 +0000 | [diff] [blame] | 1247 | static int macvtap_device_event(struct notifier_block *unused, |
| 1248 | unsigned long event, void *ptr) |
| 1249 | { |
Jiri Pirko | 351638e | 2013-05-28 01:30:21 +0000 | [diff] [blame] | 1250 | struct net_device *dev = netdev_notifier_info_to_dev(ptr); |
Eric W. Biederman | e09eff7 | 2011-10-20 04:29:24 +0000 | [diff] [blame] | 1251 | struct macvlan_dev *vlan; |
Eric W. Biederman | 9bf1907 | 2011-10-20 04:28:46 +0000 | [diff] [blame] | 1252 | struct device *classdev; |
| 1253 | dev_t devt; |
Eric W. Biederman | e09eff7 | 2011-10-20 04:29:24 +0000 | [diff] [blame] | 1254 | int err; |
Eric W. Biederman | 9bf1907 | 2011-10-20 04:28:46 +0000 | [diff] [blame] | 1255 | |
| 1256 | if (dev->rtnl_link_ops != &macvtap_link_ops) |
| 1257 | return NOTIFY_DONE; |
| 1258 | |
Eric W. Biederman | e09eff7 | 2011-10-20 04:29:24 +0000 | [diff] [blame] | 1259 | vlan = netdev_priv(dev); |
Eric W. Biederman | 9bf1907 | 2011-10-20 04:28:46 +0000 | [diff] [blame] | 1260 | |
| 1261 | switch (event) { |
| 1262 | case NETDEV_REGISTER: |
| 1263 | /* Create the device node here after the network device has |
| 1264 | * been registered but before register_netdevice has |
| 1265 | * finished running. |
| 1266 | */ |
Eric W. Biederman | e09eff7 | 2011-10-20 04:29:24 +0000 | [diff] [blame] | 1267 | err = macvtap_get_minor(vlan); |
| 1268 | if (err) |
| 1269 | return notifier_from_errno(err); |
| 1270 | |
| 1271 | devt = MKDEV(MAJOR(macvtap_major), vlan->minor); |
Eric W. Biederman | 9bf1907 | 2011-10-20 04:28:46 +0000 | [diff] [blame] | 1272 | classdev = device_create(macvtap_class, &dev->dev, devt, |
| 1273 | dev, "tap%d", dev->ifindex); |
Eric W. Biederman | e09eff7 | 2011-10-20 04:29:24 +0000 | [diff] [blame] | 1274 | if (IS_ERR(classdev)) { |
| 1275 | macvtap_free_minor(vlan); |
Eric W. Biederman | 9bf1907 | 2011-10-20 04:28:46 +0000 | [diff] [blame] | 1276 | return notifier_from_errno(PTR_ERR(classdev)); |
Eric W. Biederman | e09eff7 | 2011-10-20 04:29:24 +0000 | [diff] [blame] | 1277 | } |
Eric W. Biederman | 9bf1907 | 2011-10-20 04:28:46 +0000 | [diff] [blame] | 1278 | break; |
| 1279 | case NETDEV_UNREGISTER: |
Eric W. Biederman | e09eff7 | 2011-10-20 04:29:24 +0000 | [diff] [blame] | 1280 | devt = MKDEV(MAJOR(macvtap_major), vlan->minor); |
Eric W. Biederman | 9bf1907 | 2011-10-20 04:28:46 +0000 | [diff] [blame] | 1281 | device_destroy(macvtap_class, devt); |
Eric W. Biederman | e09eff7 | 2011-10-20 04:29:24 +0000 | [diff] [blame] | 1282 | macvtap_free_minor(vlan); |
Eric W. Biederman | 9bf1907 | 2011-10-20 04:28:46 +0000 | [diff] [blame] | 1283 | break; |
| 1284 | } |
| 1285 | |
| 1286 | return NOTIFY_DONE; |
| 1287 | } |
| 1288 | |
| 1289 | static struct notifier_block macvtap_notifier_block __read_mostly = { |
| 1290 | .notifier_call = macvtap_device_event, |
| 1291 | }; |
| 1292 | |
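/* Module init: allocate the character device region, add the cdev, create
 * the "macvtap" class, register the netdevice notifier and finally the
 * macvtap rtnl link ops; each error path unwinds the previous steps in
 * reverse order.
 */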
Arnd Bergmann | 20d29d7 | 2010-01-30 12:24:26 +0000 | [diff] [blame] | 1293 | static int macvtap_init(void) |
| 1294 | { |
| 1295 | int err; |
| 1296 | |
| 1297 | err = alloc_chrdev_region(&macvtap_major, 0, |
| 1298 | MACVTAP_NUM_DEVS, "macvtap"); |
| 1299 | if (err) |
| 1300 | goto out1; |
| 1301 | |
| 1302 | cdev_init(&macvtap_cdev, &macvtap_fops); |
| 1303 | err = cdev_add(&macvtap_cdev, macvtap_major, MACVTAP_NUM_DEVS); |
| 1304 | if (err) |
| 1305 | goto out2; |
| 1306 | |
| 1307 | macvtap_class = class_create(THIS_MODULE, "macvtap"); |
| 1308 | if (IS_ERR(macvtap_class)) { |
| 1309 | err = PTR_ERR(macvtap_class); |
| 1310 | goto out3; |
| 1311 | } |
| 1312 | |
Eric W. Biederman | 9bf1907 | 2011-10-20 04:28:46 +0000 | [diff] [blame] | 1313 | err = register_netdevice_notifier(&macvtap_notifier_block); |
Arnd Bergmann | 20d29d7 | 2010-01-30 12:24:26 +0000 | [diff] [blame] | 1314 | if (err) |
| 1315 | goto out4; |
| 1316 | |
Eric W. Biederman | 9bf1907 | 2011-10-20 04:28:46 +0000 | [diff] [blame] | 1317 | err = macvlan_link_register(&macvtap_link_ops); |
| 1318 | if (err) |
| 1319 | goto out5; |
| 1320 | |
Arnd Bergmann | 20d29d7 | 2010-01-30 12:24:26 +0000 | [diff] [blame] | 1321 | return 0; |
| 1322 | |
Eric W. Biederman | 9bf1907 | 2011-10-20 04:28:46 +0000 | [diff] [blame] | 1323 | out5: |
| 1324 | unregister_netdevice_notifier(&macvtap_notifier_block); |
Arnd Bergmann | 20d29d7 | 2010-01-30 12:24:26 +0000 | [diff] [blame] | 1325 | out4: |
| 1326 | class_unregister(macvtap_class); |
| 1327 | out3: |
| 1328 | cdev_del(&macvtap_cdev); |
| 1329 | out2: |
| 1330 | unregister_chrdev_region(macvtap_major, MACVTAP_NUM_DEVS); |
| 1331 | out1: |
| 1332 | return err; |
| 1333 | } |
| 1334 | module_init(macvtap_init); |
| 1335 | |
| 1336 | static void macvtap_exit(void) |
| 1337 | { |
| 1338 | rtnl_link_unregister(&macvtap_link_ops); |
Eric W. Biederman | 9bf1907 | 2011-10-20 04:28:46 +0000 | [diff] [blame] | 1339 | unregister_netdevice_notifier(&macvtap_notifier_block); |
Arnd Bergmann | 20d29d7 | 2010-01-30 12:24:26 +0000 | [diff] [blame] | 1340 | class_unregister(macvtap_class); |
| 1341 | cdev_del(&macvtap_cdev); |
| 1342 | unregister_chrdev_region(macvtap_major, MACVTAP_NUM_DEVS); |
| 1343 | } |
| 1344 | module_exit(macvtap_exit); |
| 1345 | |
| 1346 | MODULE_ALIAS_RTNL_LINK("macvtap"); |
| 1347 | MODULE_AUTHOR("Arnd Bergmann <arnd@arndb.de>"); |
| 1348 | MODULE_LICENSE("GPL"); |