// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>
#include <linux/vmalloc.h>
#include <linux/rtnetlink.h>
#include <linux/prefetch.h>

#include <asm/sync_bitops.h>
#include <asm/mshyperv.h>

#include "hyperv_net.h"
#include "netvsc_trace.h"

/*
 * Switch the data path from the synthetic interface to the VF
 * interface.
 */
int netvsc_switch_datapath(struct net_device *ndev, bool vf)
{
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	struct hv_device *dev = net_device_ctx->device_ctx;
	struct netvsc_device *nv_dev = rtnl_dereference(net_device_ctx->nvdev);
	struct nvsp_message *init_pkt = &nv_dev->channel_init_pkt;
	int ret, retry = 0;

	/* Block sending traffic to VF if it's about to be gone */
	if (!vf)
		net_device_ctx->data_path_is_vf = vf;

	memset(init_pkt, 0, sizeof(struct nvsp_message));
	init_pkt->hdr.msg_type = NVSP_MSG4_TYPE_SWITCH_DATA_PATH;
	if (vf)
		init_pkt->msg.v4_msg.active_dp.active_datapath =
			NVSP_DATAPATH_VF;
	else
		init_pkt->msg.v4_msg.active_dp.active_datapath =
			NVSP_DATAPATH_SYNTHETIC;

again:
	trace_nvsp_send(ndev, init_pkt);

	ret = vmbus_sendpacket(dev->channel, init_pkt,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_pkt, VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);

	/* If failed to switch to/from VF, let data_path_is_vf stay false,
	 * so we use synthetic path to send data.
	 */
	if (ret) {
		if (ret != -EAGAIN) {
			netdev_err(ndev,
				   "Unable to send sw datapath msg, err: %d\n",
				   ret);
			return ret;
		}

		if (retry++ < RETRY_MAX) {
			usleep_range(RETRY_US_LO, RETRY_US_HI);
			goto again;
		} else {
			netdev_err(ndev,
				   "Retry failed to send sw datapath msg, err: %d\n",
				   ret);
			return ret;
		}
	}

	wait_for_completion(&nv_dev->channel_init_wait);
	net_device_ctx->data_path_is_vf = vf;

	return 0;
}

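/* Note: the wait_for_completion() in netvsc_switch_datapath() above
 * is satisfied by netvsc_send_completion() later in this file, which
 * completes channel_init_wait when the host acknowledges the
 * NVSP_MSG4_TYPE_SWITCH_DATA_PATH request with a zero-length VMBus
 * completion.
 */
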
/* Worker to setup sub channels on initial setup
 * Initial hotplug event occurs in softirq context
 * and can't wait for channels.
 */
static void netvsc_subchan_work(struct work_struct *w)
{
	struct netvsc_device *nvdev =
		container_of(w, struct netvsc_device, subchan_work);
	struct rndis_device *rdev;
	int i, ret;

	/* Avoid deadlock with device removal already under RTNL */
	if (!rtnl_trylock()) {
		schedule_work(w);
		return;
	}

	rdev = nvdev->extension;
	if (rdev) {
		ret = rndis_set_subchannel(rdev->ndev, nvdev, NULL);
		if (ret == 0) {
			netif_device_attach(rdev->ndev);
		} else {
			/* fallback to only primary channel */
			for (i = 1; i < nvdev->num_chn; i++)
				netif_napi_del(&nvdev->chan_table[i].napi);

			nvdev->max_chn = 1;
			nvdev->num_chn = 1;
		}
	}

	rtnl_unlock();
}

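/* Allocate and initialize a netvsc_device. Note that transmit starts
 * out disabled (tx_disable = true); it is re-enabled later in the
 * setup path, outside this section.
 */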
static struct netvsc_device *alloc_net_device(void)
{
	struct netvsc_device *net_device;

	net_device = kzalloc(sizeof(struct netvsc_device), GFP_KERNEL);
	if (!net_device)
		return NULL;

	init_waitqueue_head(&net_device->wait_drain);
	net_device->destroy = false;
	net_device->tx_disable = true;

	net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
	net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;

	init_completion(&net_device->channel_init_wait);
	init_waitqueue_head(&net_device->subchan_open);
	INIT_WORK(&net_device->subchan_work, netvsc_subchan_work);

	return net_device;
}

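/* RCU callback that releases all buffers owned by a netvsc_device.
 * Freeing is deferred via call_rcu() (see free_netvsc_device_rcu()
 * below) so that datapath readers still holding rcu_read_lock()
 * drain before the memory disappears.
 */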
static void free_netvsc_device(struct rcu_head *head)
{
	struct netvsc_device *nvdev
		= container_of(head, struct netvsc_device, rcu);
	int i;

	kfree(nvdev->extension);
	vfree(nvdev->recv_buf);
	vfree(nvdev->send_buf);
	kfree(nvdev->send_section_map);

	for (i = 0; i < VRSS_CHANNEL_MAX; i++) {
		xdp_rxq_info_unreg(&nvdev->chan_table[i].xdp_rxq);
		kfree(nvdev->chan_table[i].recv_buf);
		vfree(nvdev->chan_table[i].mrc.slots);
	}

	kfree(nvdev);
}

static void free_netvsc_device_rcu(struct netvsc_device *nvdev)
{
	call_rcu(&nvdev->rcu, free_netvsc_device);
}

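/* Ask the host to stop using the receive buffer, if one was granted.
 * Safe to call more than once: recv_section_cnt is cleared after a
 * successful revoke, and a rescinded channel is treated as success
 * since nothing can be sent on it anyway.
 */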
static void netvsc_revoke_recv_buf(struct hv_device *device,
				   struct netvsc_device *net_device,
				   struct net_device *ndev)
{
	struct nvsp_message *revoke_packet;
	int ret;

	/*
	 * If we got a section count, it means we received a
	 * SendReceiveBufferComplete msg (ie sent
	 * NvspMessage1TypeSendReceiveBuffer msg) therefore, we need
	 * to send a revoke msg here
	 */
	if (net_device->recv_section_cnt) {
		/* Send the revoke receive buffer */
		revoke_packet = &net_device->revoke_packet;
		memset(revoke_packet, 0, sizeof(struct nvsp_message));

		revoke_packet->hdr.msg_type =
			NVSP_MSG1_TYPE_REVOKE_RECV_BUF;
		revoke_packet->msg.v1_msg.
		revoke_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;

		trace_nvsp_send(ndev, revoke_packet);

		ret = vmbus_sendpacket(device->channel,
				       revoke_packet,
				       sizeof(struct nvsp_message),
				       VMBUS_RQST_ID_NO_RESPONSE,
				       VM_PKT_DATA_INBAND, 0);
		/* If the failure is because the channel is rescinded;
		 * ignore the failure since we cannot send on a rescinded
		 * channel. This would allow us to properly cleanup
		 * even when the channel is rescinded.
		 */
		if (device->channel->rescind)
			ret = 0;
		/*
		 * If we failed here, we might as well return and
		 * have a leak rather than continue and a bugchk
		 */
		if (ret != 0) {
			netdev_err(ndev, "unable to send "
				   "revoke receive buffer to netvsp\n");
			return;
		}
		net_device->recv_section_cnt = 0;
	}
}

static void netvsc_revoke_send_buf(struct hv_device *device,
				   struct netvsc_device *net_device,
				   struct net_device *ndev)
{
	struct nvsp_message *revoke_packet;
	int ret;

	/* Deal with the send buffer we may have setup.
	 * If we got a send section size, it means we received a
	 * NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE msg (ie sent
	 * NVSP_MSG1_TYPE_SEND_SEND_BUF msg) therefore, we need
	 * to send a revoke msg here
	 */
	if (net_device->send_section_cnt) {
		/* Send the revoke receive buffer */
		revoke_packet = &net_device->revoke_packet;
		memset(revoke_packet, 0, sizeof(struct nvsp_message));

		revoke_packet->hdr.msg_type =
			NVSP_MSG1_TYPE_REVOKE_SEND_BUF;
		revoke_packet->msg.v1_msg.revoke_send_buf.id =
			NETVSC_SEND_BUFFER_ID;

		trace_nvsp_send(ndev, revoke_packet);

		ret = vmbus_sendpacket(device->channel,
				       revoke_packet,
				       sizeof(struct nvsp_message),
				       VMBUS_RQST_ID_NO_RESPONSE,
				       VM_PKT_DATA_INBAND, 0);

		/* If the failure is because the channel is rescinded;
		 * ignore the failure since we cannot send on a rescinded
		 * channel. This would allow us to properly cleanup
		 * even when the channel is rescinded.
		 */
		if (device->channel->rescind)
			ret = 0;

		/* If we failed here, we might as well return and
		 * have a leak rather than continue and a bugchk
		 */
		if (ret != 0) {
			netdev_err(ndev, "unable to send "
				   "revoke send buffer to netvsp\n");
			return;
		}
		net_device->send_section_cnt = 0;
	}
}

static void netvsc_teardown_recv_gpadl(struct hv_device *device,
				       struct netvsc_device *net_device,
				       struct net_device *ndev)
{
	int ret;

	if (net_device->recv_buf_gpadl_handle) {
		ret = vmbus_teardown_gpadl(device->channel,
					   net_device->recv_buf_gpadl_handle);

		/* If we failed here, we might as well return and have a leak
		 * rather than continue and a bugchk
		 */
		if (ret != 0) {
			netdev_err(ndev,
				   "unable to teardown receive buffer's gpadl\n");
			return;
		}
		net_device->recv_buf_gpadl_handle = 0;
	}
}

static void netvsc_teardown_send_gpadl(struct hv_device *device,
				       struct netvsc_device *net_device,
				       struct net_device *ndev)
{
	int ret;

	if (net_device->send_buf_gpadl_handle) {
		ret = vmbus_teardown_gpadl(device->channel,
					   net_device->send_buf_gpadl_handle);

		/* If we failed here, we might as well return and have a leak
		 * rather than continue and a bugchk
		 */
		if (ret != 0) {
			netdev_err(ndev,
				   "unable to teardown send buffer's gpadl\n");
			return;
		}
		net_device->send_buf_gpadl_handle = 0;
	}
}

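/* Allocate the receive-completion ring for one queue, sized from
 * recv_completion_cnt. Memory local to the channel's target CPU is
 * preferred; any node is accepted as a fallback.
 */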
int netvsc_alloc_recv_comp_ring(struct netvsc_device *net_device, u32 q_idx)
{
	struct netvsc_channel *nvchan = &net_device->chan_table[q_idx];
	int node = cpu_to_node(nvchan->channel->target_cpu);
	size_t size;

	size = net_device->recv_completion_cnt * sizeof(struct recv_comp_data);
	nvchan->mrc.slots = vzalloc_node(size, node);
	if (!nvchan->mrc.slots)
		nvchan->mrc.slots = vzalloc(size);

	return nvchan->mrc.slots ? 0 : -ENOMEM;
}

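/* Allocate the receive and send buffers, establish a GPADL handle
 * for each with the host, and parse the host's completion responses.
 * On failure, everything is unwound through the revoke/teardown
 * helpers above.
 */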
static int netvsc_init_buf(struct hv_device *device,
			   struct netvsc_device *net_device,
			   const struct netvsc_device_info *device_info)
{
	struct nvsp_1_message_send_receive_buffer_complete *resp;
	struct net_device *ndev = hv_get_drvdata(device);
	struct nvsp_message *init_packet;
	unsigned int buf_size;
	size_t map_words;
	int i, ret = 0;

	/* Get receive buffer area. */
	buf_size = device_info->recv_sections * device_info->recv_section_size;
	buf_size = roundup(buf_size, PAGE_SIZE);

	/* Legacy hosts only allow smaller receive buffer */
	if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_2)
		buf_size = min_t(unsigned int, buf_size,
				 NETVSC_RECEIVE_BUFFER_SIZE_LEGACY);

	net_device->recv_buf = vzalloc(buf_size);
	if (!net_device->recv_buf) {
		netdev_err(ndev,
			   "unable to allocate receive buffer of size %u\n",
			   buf_size);
		ret = -ENOMEM;
		goto cleanup;
	}

	net_device->recv_buf_size = buf_size;

	/*
	 * Establish the gpadl handle for this buffer on this
	 * channel. Note: This call uses the vmbus connection rather
	 * than the channel to establish the gpadl handle.
	 */
	ret = vmbus_establish_gpadl(device->channel, net_device->recv_buf,
				    buf_size,
				    &net_device->recv_buf_gpadl_handle);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to establish receive buffer's gpadl\n");
		goto cleanup;
	}

	/* Notify the NetVsp of the gpadl handle */
	init_packet = &net_device->channel_init_pkt;
	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_RECV_BUF;
	init_packet->msg.v1_msg.send_recv_buf.
		gpadl_handle = net_device->recv_buf_gpadl_handle;
	init_packet->msg.v1_msg.
		send_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;

	trace_nvsp_send(ndev, init_packet);

	/* Send the gpadl notification request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to send receive buffer's gpadl to netvsp\n");
		goto cleanup;
	}

	wait_for_completion(&net_device->channel_init_wait);

	/* Check the response */
	resp = &init_packet->msg.v1_msg.send_recv_buf_complete;
	if (resp->status != NVSP_STAT_SUCCESS) {
		netdev_err(ndev,
			   "Unable to complete receive buffer initialization with NetVsp - status %d\n",
			   resp->status);
		ret = -EINVAL;
		goto cleanup;
	}

	/* Parse the response */
	netdev_dbg(ndev, "Receive sections: %u sub_allocs: size %u count: %u\n",
		   resp->num_sections, resp->sections[0].sub_alloc_size,
		   resp->sections[0].num_sub_allocs);

	/* There should only be one section for the entire receive buffer */
	if (resp->num_sections != 1 || resp->sections[0].offset != 0) {
		ret = -EINVAL;
		goto cleanup;
	}

	net_device->recv_section_size = resp->sections[0].sub_alloc_size;
	net_device->recv_section_cnt = resp->sections[0].num_sub_allocs;

	/* Ensure buffer will not overflow */
	if (net_device->recv_section_size < NETVSC_MTU_MIN || (u64)net_device->recv_section_size *
	    (u64)net_device->recv_section_cnt > (u64)buf_size) {
		netdev_err(ndev, "invalid recv_section_size %u\n",
			   net_device->recv_section_size);
		ret = -EINVAL;
		goto cleanup;
	}

	for (i = 0; i < VRSS_CHANNEL_MAX; i++) {
		struct netvsc_channel *nvchan = &net_device->chan_table[i];

		nvchan->recv_buf = kzalloc(net_device->recv_section_size, GFP_KERNEL);
		if (nvchan->recv_buf == NULL) {
			ret = -ENOMEM;
			goto cleanup;
		}
	}

	/* Setup receive completion ring.
	 * Add 1 to the recv_section_cnt because at least one entry in a
	 * ring buffer has to be empty.
	 */
	net_device->recv_completion_cnt = net_device->recv_section_cnt + 1;
	ret = netvsc_alloc_recv_comp_ring(net_device, 0);
	if (ret)
		goto cleanup;

	/* Now setup the send buffer. */
	buf_size = device_info->send_sections * device_info->send_section_size;
	buf_size = round_up(buf_size, PAGE_SIZE);

	net_device->send_buf = vzalloc(buf_size);
	if (!net_device->send_buf) {
		netdev_err(ndev, "unable to allocate send buffer of size %u\n",
			   buf_size);
		ret = -ENOMEM;
		goto cleanup;
	}

	/* Establish the gpadl handle for this buffer on this
	 * channel. Note: This call uses the vmbus connection rather
	 * than the channel to establish the gpadl handle.
	 */
	ret = vmbus_establish_gpadl(device->channel, net_device->send_buf,
				    buf_size,
				    &net_device->send_buf_gpadl_handle);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to establish send buffer's gpadl\n");
		goto cleanup;
	}

	/* Notify the NetVsp of the gpadl handle */
	init_packet = &net_device->channel_init_pkt;
	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_SEND_BUF;
	init_packet->msg.v1_msg.send_send_buf.gpadl_handle =
		net_device->send_buf_gpadl_handle;
	init_packet->msg.v1_msg.send_send_buf.id = NETVSC_SEND_BUFFER_ID;

	trace_nvsp_send(ndev, init_packet);

	/* Send the gpadl notification request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to send send buffer's gpadl to netvsp\n");
		goto cleanup;
	}

	wait_for_completion(&net_device->channel_init_wait);

	/* Check the response */
	if (init_packet->msg.v1_msg.
	    send_send_buf_complete.status != NVSP_STAT_SUCCESS) {
		netdev_err(ndev, "Unable to complete send buffer "
			   "initialization with NetVsp - status %d\n",
			   init_packet->msg.v1_msg.
			   send_send_buf_complete.status);
		ret = -EINVAL;
		goto cleanup;
	}

	/* Parse the response */
	net_device->send_section_size = init_packet->msg.
		v1_msg.send_send_buf_complete.section_size;
	if (net_device->send_section_size < NETVSC_MTU_MIN) {
		netdev_err(ndev, "invalid send_section_size %u\n",
			   net_device->send_section_size);
		ret = -EINVAL;
		goto cleanup;
	}

	/* Section count is simply the size divided by the section size. */
	net_device->send_section_cnt = buf_size / net_device->send_section_size;

	netdev_dbg(ndev, "Send section size: %d, Section count:%d\n",
		   net_device->send_section_size, net_device->send_section_cnt);

	/* Setup state for managing the send buffer. */
	map_words = DIV_ROUND_UP(net_device->send_section_cnt, BITS_PER_LONG);

	net_device->send_section_map = kcalloc(map_words, sizeof(ulong), GFP_KERNEL);
	if (net_device->send_section_map == NULL) {
		ret = -ENOMEM;
		goto cleanup;
	}

	goto exit;

cleanup:
	netvsc_revoke_recv_buf(device, net_device, ndev);
	netvsc_revoke_send_buf(device, net_device, ndev);
	netvsc_teardown_recv_gpadl(device, net_device, ndev);
	netvsc_teardown_send_gpadl(device, net_device, ndev);

exit:
	return ret;
}

/* Negotiate NVSP protocol version */
static int negotiate_nvsp_ver(struct hv_device *device,
			      struct netvsc_device *net_device,
			      struct nvsp_message *init_packet,
			      u32 nvsp_ver)
{
	struct net_device *ndev = hv_get_drvdata(device);
	int ret;

	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG_TYPE_INIT;
	init_packet->msg.init_msg.init.min_protocol_ver = nvsp_ver;
	init_packet->msg.init_msg.init.max_protocol_ver = nvsp_ver;
	trace_nvsp_send(ndev, init_packet);

	/* Send the init request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);

	if (ret != 0)
		return ret;

	wait_for_completion(&net_device->channel_init_wait);

	if (init_packet->msg.init_msg.init_complete.status !=
	    NVSP_STAT_SUCCESS)
		return -EINVAL;

	if (nvsp_ver == NVSP_PROTOCOL_VERSION_1)
		return 0;

	/* NVSPv2 or later: Send NDIS config */
	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG2_TYPE_SEND_NDIS_CONFIG;
	init_packet->msg.v2_msg.send_ndis_config.mtu = ndev->mtu + ETH_HLEN;
	init_packet->msg.v2_msg.send_ndis_config.capability.ieee8021q = 1;

	if (nvsp_ver >= NVSP_PROTOCOL_VERSION_5) {
		if (hv_is_isolation_supported())
			netdev_info(ndev, "SR-IOV not advertised by guests on the host supporting isolation\n");
		else
			init_packet->msg.v2_msg.send_ndis_config.capability.sriov = 1;

		/* Teaming bit is needed to receive link speed updates */
		init_packet->msg.v2_msg.send_ndis_config.capability.teaming = 1;
	}

	if (nvsp_ver >= NVSP_PROTOCOL_VERSION_61)
		init_packet->msg.v2_msg.send_ndis_config.capability.rsc = 1;

	trace_nvsp_send(ndev, init_packet);

	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       VMBUS_RQST_ID_NO_RESPONSE,
			       VM_PKT_DATA_INBAND, 0);

	return ret;
}

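/* Connect to the NetVSP on the host: negotiate the highest NVSP
 * protocol version both ends support (walking ver_list from newest
 * to oldest), report the NDIS version in use, then set up the
 * receive and send buffers via netvsc_init_buf().
 */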
static int netvsc_connect_vsp(struct hv_device *device,
			      struct netvsc_device *net_device,
			      const struct netvsc_device_info *device_info)
{
	struct net_device *ndev = hv_get_drvdata(device);
	static const u32 ver_list[] = {
		NVSP_PROTOCOL_VERSION_1, NVSP_PROTOCOL_VERSION_2,
		NVSP_PROTOCOL_VERSION_4, NVSP_PROTOCOL_VERSION_5,
		NVSP_PROTOCOL_VERSION_6, NVSP_PROTOCOL_VERSION_61
	};
	struct nvsp_message *init_packet;
	int ndis_version, i, ret;

	init_packet = &net_device->channel_init_pkt;

	/* Negotiate the latest NVSP protocol supported */
	for (i = ARRAY_SIZE(ver_list) - 1; i >= 0; i--)
		if (negotiate_nvsp_ver(device, net_device, init_packet,
				       ver_list[i]) == 0) {
			net_device->nvsp_version = ver_list[i];
			break;
		}

	if (i < 0) {
		ret = -EPROTO;
		goto cleanup;
	}

	if (hv_is_isolation_supported() && net_device->nvsp_version < NVSP_PROTOCOL_VERSION_61) {
		netdev_err(ndev, "Invalid NVSP version 0x%x (expected >= 0x%x) from the host supporting isolation\n",
			   net_device->nvsp_version, NVSP_PROTOCOL_VERSION_61);
		ret = -EPROTO;
		goto cleanup;
	}

	pr_debug("Negotiated NVSP version:%x\n", net_device->nvsp_version);

	/* Send the ndis version */
	memset(init_packet, 0, sizeof(struct nvsp_message));

	if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_4)
		ndis_version = 0x00060001;
	else
		ndis_version = 0x0006001e;

	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_NDIS_VER;
	init_packet->msg.v1_msg.
		send_ndis_ver.ndis_major_ver =
			(ndis_version & 0xFFFF0000) >> 16;
	init_packet->msg.v1_msg.
		send_ndis_ver.ndis_minor_ver =
			ndis_version & 0xFFFF;

	trace_nvsp_send(ndev, init_packet);

	/* Send the init request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       VMBUS_RQST_ID_NO_RESPONSE,
			       VM_PKT_DATA_INBAND, 0);
	if (ret != 0)
		goto cleanup;

	ret = netvsc_init_buf(device, net_device, device_info);

cleanup:
	return ret;
}

/*
 * netvsc_device_remove - Callback when the root bus device is removed
 */
void netvsc_device_remove(struct hv_device *device)
{
	struct net_device *ndev = hv_get_drvdata(device);
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	struct netvsc_device *net_device
		= rtnl_dereference(net_device_ctx->nvdev);
	int i;

	/*
	 * Revoke receive buffer. If host is pre-Win2016 then tear down
	 * receive buffer GPADL. Do the same for send buffer.
	 */
	netvsc_revoke_recv_buf(device, net_device, ndev);
	if (vmbus_proto_version < VERSION_WIN10)
		netvsc_teardown_recv_gpadl(device, net_device, ndev);

	netvsc_revoke_send_buf(device, net_device, ndev);
	if (vmbus_proto_version < VERSION_WIN10)
		netvsc_teardown_send_gpadl(device, net_device, ndev);

	RCU_INIT_POINTER(net_device_ctx->nvdev, NULL);

	/* Disable NAPI and disassociate its context from the device. */
	for (i = 0; i < net_device->num_chn; i++) {
		/* See also vmbus_reset_channel_cb(). */
		napi_disable(&net_device->chan_table[i].napi);
		netif_napi_del(&net_device->chan_table[i].napi);
	}

	/*
	 * At this point, no one should be accessing net_device
	 * except in here
	 */
	netdev_dbg(ndev, "net device safe to remove\n");

	/* Now, we can close the channel safely */
	vmbus_close(device->channel);

	/*
	 * If host is Win2016 or higher then we do the GPADL tear down
	 * here after VMBus is closed.
	 */
	if (vmbus_proto_version >= VERSION_WIN10) {
		netvsc_teardown_recv_gpadl(device, net_device, ndev);
		netvsc_teardown_send_gpadl(device, net_device, ndev);
	}

	/* Release all resources */
	free_netvsc_device_rcu(net_device);
}

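/* Hysteresis thresholds, as a percentage of free space in the
 * outbound VMBus ring, for flow control on the transmit queue.
 * netvsc_send_tx_complete() below wakes the queue once free space
 * rises above HIWATER (e.g. more than ~3.2 KiB of a 16 KiB ring);
 * LOWATER is used by the transmit path, outside this section, to
 * stop the queue.
 */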
#define RING_AVAIL_PERCENT_HIWATER 20
#define RING_AVAIL_PERCENT_LOWATER 10

static inline void netvsc_free_send_slot(struct netvsc_device *net_device,
					 u32 index)
{
	sync_change_bit(index, net_device->send_section_map);
}

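/* Handle a transmit completion: recover the skb from the VMBus
 * requestor by transaction id, release its send-buffer slot, account
 * the packet and byte counts, and wake the transmit queue if enough
 * ring space has been freed.
 */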
static void netvsc_send_tx_complete(struct net_device *ndev,
				    struct netvsc_device *net_device,
				    struct vmbus_channel *channel,
				    const struct vmpacket_descriptor *desc,
				    int budget)
{
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	struct sk_buff *skb;
	u16 q_idx = 0;
	int queue_sends;
	u64 cmd_rqst;

	cmd_rqst = vmbus_request_addr(&channel->requestor, (u64)desc->trans_id);
	if (cmd_rqst == VMBUS_RQST_ERROR) {
		netdev_err(ndev, "Incorrect transaction id\n");
		return;
	}

	skb = (struct sk_buff *)(unsigned long)cmd_rqst;

	/* Notify the layer above us */
	if (likely(skb)) {
		const struct hv_netvsc_packet *packet
			= (struct hv_netvsc_packet *)skb->cb;
		u32 send_index = packet->send_buf_index;
		struct netvsc_stats *tx_stats;

		if (send_index != NETVSC_INVALID_INDEX)
			netvsc_free_send_slot(net_device, send_index);
		q_idx = packet->q_idx;

		tx_stats = &net_device->chan_table[q_idx].tx_stats;

		u64_stats_update_begin(&tx_stats->syncp);
		tx_stats->packets += packet->total_packets;
		tx_stats->bytes += packet->total_bytes;
		u64_stats_update_end(&tx_stats->syncp);

		napi_consume_skb(skb, budget);
	}

	queue_sends =
		atomic_dec_return(&net_device->chan_table[q_idx].queue_sends);

	if (unlikely(net_device->destroy)) {
		if (queue_sends == 0)
			wake_up(&net_device->wait_drain);
	} else {
		struct netdev_queue *txq = netdev_get_tx_queue(ndev, q_idx);

		if (netif_tx_queue_stopped(txq) && !net_device->tx_disable &&
		    (hv_get_avail_to_write_percent(&channel->outbound) >
		     RING_AVAIL_PERCENT_HIWATER || queue_sends < 1)) {
			netif_tx_wake_queue(txq);
			ndev_ctx->eth_stats.wake_queue++;
		}
	}
}

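/* Dispatch a completion packet from the host. A zero-length payload
 * is a bare VMBus completion (e.g. for a SWITCH_DATA_PATH request);
 * anything else is validated against the minimum length for its
 * message type before being processed further.
 */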
Stephen Hemminger | c347b92 | 2018-04-26 14:34:25 -0700 | [diff] [blame] | 807 | static void netvsc_send_completion(struct net_device *ndev, |
| 808 | struct netvsc_device *net_device, |
KY Srinivasan | 25b85ee | 2015-12-01 16:43:05 -0800 | [diff] [blame] | 809 | struct vmbus_channel *incoming_channel, |
stephen hemminger | f964543 | 2017-04-07 14:41:19 -0400 | [diff] [blame] | 810 | const struct vmpacket_descriptor *desc, |
| 811 | int budget) |
Hank Janssen | fceaf24 | 2009-07-13 15:34:54 -0700 | [diff] [blame] | 812 | { |
Long Li | 8b31f8c | 2021-01-08 16:53:42 -0800 | [diff] [blame] | 813 | const struct nvsp_message *nvsp_packet; |
Andres Beltran | 4414418 | 2020-09-16 11:47:27 +0200 | [diff] [blame] | 814 | u32 msglen = hv_pkt_datalen(desc); |
Long Li | 8b31f8c | 2021-01-08 16:53:42 -0800 | [diff] [blame] | 815 | struct nvsp_message *pkt_rqst; |
| 816 | u64 cmd_rqst; |
| 817 | |
| 818 | /* First check if this is a VMBUS completion without data payload */ |
| 819 | if (!msglen) { |
| 820 | cmd_rqst = vmbus_request_addr(&incoming_channel->requestor, |
| 821 | (u64)desc->trans_id); |
| 822 | if (cmd_rqst == VMBUS_RQST_ERROR) { |
| 823 | netdev_err(ndev, "Invalid transaction id\n"); |
| 824 | return; |
| 825 | } |
| 826 | |
| 827 | pkt_rqst = (struct nvsp_message *)(uintptr_t)cmd_rqst; |
| 828 | switch (pkt_rqst->hdr.msg_type) { |
| 829 | case NVSP_MSG4_TYPE_SWITCH_DATA_PATH: |
| 830 | complete(&net_device->channel_init_wait); |
| 831 | break; |
| 832 | |
| 833 | default: |
|  834 | 			netdev_err(ndev, "Unexpected VMBUS completion\n");
| 835 | } |
| 836 | return; |
| 837 | } |
Andres Beltran | 4414418 | 2020-09-16 11:47:27 +0200 | [diff] [blame] | 838 | |
| 839 | /* Ensure packet is big enough to read header fields */ |
| 840 | if (msglen < sizeof(struct nvsp_message_header)) { |
| 841 | netdev_err(ndev, "nvsp_message length too small: %u\n", msglen); |
| 842 | return; |
| 843 | } |
Hank Janssen | fceaf24 | 2009-07-13 15:34:54 -0700 | [diff] [blame] | 844 | |
Long Li | 8b31f8c | 2021-01-08 16:53:42 -0800 | [diff] [blame] | 845 | nvsp_packet = hv_pkt_data(desc); |
Stephen Hemminger | bc304dd | 2016-08-23 12:17:53 -0700 | [diff] [blame] | 846 | switch (nvsp_packet->hdr.msg_type) { |
| 847 | case NVSP_MSG_TYPE_INIT_COMPLETE: |
Andres Beltran | 4414418 | 2020-09-16 11:47:27 +0200 | [diff] [blame] | 848 | if (msglen < sizeof(struct nvsp_message_header) + |
| 849 | sizeof(struct nvsp_message_init_complete)) { |
| 850 | netdev_err(ndev, "nvsp_msg length too small: %u\n", |
| 851 | msglen); |
| 852 | return; |
| 853 | } |
| 854 | fallthrough; |
| 855 | |
Stephen Hemminger | bc304dd | 2016-08-23 12:17:53 -0700 | [diff] [blame] | 856 | case NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE: |
Andres Beltran | 4414418 | 2020-09-16 11:47:27 +0200 | [diff] [blame] | 857 | if (msglen < sizeof(struct nvsp_message_header) + |
| 858 | sizeof(struct nvsp_1_message_send_receive_buffer_complete)) { |
| 859 | netdev_err(ndev, "nvsp_msg1 length too small: %u\n", |
| 860 | msglen); |
| 861 | return; |
| 862 | } |
| 863 | fallthrough; |
| 864 | |
Stephen Hemminger | bc304dd | 2016-08-23 12:17:53 -0700 | [diff] [blame] | 865 | case NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE: |
Andres Beltran | 4414418 | 2020-09-16 11:47:27 +0200 | [diff] [blame] | 866 | if (msglen < sizeof(struct nvsp_message_header) + |
| 867 | sizeof(struct nvsp_1_message_send_send_buffer_complete)) { |
| 868 | netdev_err(ndev, "nvsp_msg1 length too small: %u\n", |
| 869 | msglen); |
| 870 | return; |
| 871 | } |
| 872 | fallthrough; |
| 873 | |
Stephen Hemminger | bc304dd | 2016-08-23 12:17:53 -0700 | [diff] [blame] | 874 | case NVSP_MSG5_TYPE_SUBCHANNEL: |
Andres Beltran | 4414418 | 2020-09-16 11:47:27 +0200 | [diff] [blame] | 875 | if (msglen < sizeof(struct nvsp_message_header) + |
| 876 | sizeof(struct nvsp_5_subchannel_complete)) { |
| 877 | netdev_err(ndev, "nvsp_msg5 length too small: %u\n", |
| 878 | msglen); |
| 879 | return; |
| 880 | } |
Bill Pemberton | 454f18a | 2009-07-27 16:47:24 -0400 | [diff] [blame] | 881 | /* Copy the response back */ |
Haiyang Zhang | 53d21fd | 2010-12-10 12:03:59 -0800 | [diff] [blame] | 882 | memcpy(&net_device->channel_init_pkt, nvsp_packet, |
Greg Kroah-Hartman | 21a80820 | 2009-09-02 10:33:05 -0700 | [diff] [blame] | 883 | sizeof(struct nvsp_message)); |
K. Y. Srinivasan | 35abb21 | 2011-05-10 07:55:41 -0700 | [diff] [blame] | 884 | complete(&net_device->channel_init_wait); |
Stephen Hemminger | bc304dd | 2016-08-23 12:17:53 -0700 | [diff] [blame] | 885 | break; |
Haiyang Zhang | 33be96e | 2012-03-27 13:20:45 +0000 | [diff] [blame] | 886 | |
Stephen Hemminger | bc304dd | 2016-08-23 12:17:53 -0700 | [diff] [blame] | 887 | case NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE: |
Stephen Hemminger | c347b92 | 2018-04-26 14:34:25 -0700 | [diff] [blame] | 888 | netvsc_send_tx_complete(ndev, net_device, incoming_channel, |
| 889 | desc, budget); |
Stephen Hemminger | bc304dd | 2016-08-23 12:17:53 -0700 | [diff] [blame] | 890 | break; |
Hank Janssen | fceaf24 | 2009-07-13 15:34:54 -0700 | [diff] [blame] | 891 | |
Stephen Hemminger | bc304dd | 2016-08-23 12:17:53 -0700 | [diff] [blame] | 892 | default: |
| 893 | netdev_err(ndev, |
|  894 | 			   "Unknown send completion type %d received\n",
| 895 | nvsp_packet->hdr.msg_type); |
Hank Janssen | fceaf24 | 2009-07-13 15:34:54 -0700 | [diff] [blame] | 896 | } |
Hank Janssen | fceaf24 | 2009-07-13 15:34:54 -0700 | [diff] [blame] | 897 | } |
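
/*
 * Editor's sketch (hypothetical helper, not in the driver): every branch of
 * netvsc_send_completion() repeats the same "header plus type-specific body"
 * length check before falling through to the shared copy-and-complete path.
 * The pattern condenses to:
 */
static bool nvsp_pkt_len_ok(u32 msglen, size_t body_size)
{
	return msglen >= sizeof(struct nvsp_message_header) + body_size;
}
/* e.g. nvsp_pkt_len_ok(msglen, sizeof(struct nvsp_message_init_complete)) */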
| 898 | |
KY Srinivasan | c25aaf8 | 2014-04-30 10:14:31 -0700 | [diff] [blame] | 899 | static u32 netvsc_get_next_send_section(struct netvsc_device *net_device) |
| 900 | { |
stephen hemminger | b58a185 | 2017-01-24 13:06:14 -0800 | [diff] [blame] | 901 | unsigned long *map_addr = net_device->send_section_map; |
| 902 | unsigned int i; |
KY Srinivasan | c25aaf8 | 2014-04-30 10:14:31 -0700 | [diff] [blame] | 903 | |
stephen hemminger | fdfb70d | 2017-04-24 18:33:38 -0700 | [diff] [blame] | 904 | for_each_clear_bit(i, map_addr, net_device->send_section_cnt) { |
stephen hemminger | b58a185 | 2017-01-24 13:06:14 -0800 | [diff] [blame] | 905 | if (sync_test_and_set_bit(i, map_addr) == 0) |
| 906 | return i; |
KY Srinivasan | c25aaf8 | 2014-04-30 10:14:31 -0700 | [diff] [blame] | 907 | } |
stephen hemminger | b58a185 | 2017-01-24 13:06:14 -0800 | [diff] [blame] | 908 | |
| 909 | return NETVSC_INVALID_INDEX; |
KY Srinivasan | c25aaf8 | 2014-04-30 10:14:31 -0700 | [diff] [blame] | 910 | } |
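
/*
 * Editor's sketch (user-space analog, not driver code): send-buffer slots
 * are claimed lock-free. for_each_clear_bit() only hints at a free slot;
 * sync_test_and_set_bit() is what actually claims it, so two CPUs racing
 * for the same bit cannot both win - the loser simply keeps scanning.
 * A minimal analog using GCC atomics:
 */
static int claim_slot(unsigned long *map, unsigned int nbits)
{
	const unsigned int bpw = 8 * sizeof(unsigned long);
	unsigned int i;

	for (i = 0; i < nbits; i++) {
		unsigned long mask = 1UL << (i % bpw);
		unsigned long *word = &map[i / bpw];

		if (!(__atomic_load_n(word, __ATOMIC_RELAXED) & mask) &&
		    !(__atomic_fetch_or(word, mask, __ATOMIC_ACQUIRE) & mask))
			return i;	/* claimed slot i */
	}
	return -1;			/* map full; caller must fall back */
}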
| 911 | |
Stephen Hemminger | 26a1126 | 2017-12-12 16:48:35 -0800 | [diff] [blame] | 912 | static void netvsc_copy_to_send_buf(struct netvsc_device *net_device, |
| 913 | unsigned int section_index, |
| 914 | u32 pend_size, |
| 915 | struct hv_netvsc_packet *packet, |
| 916 | struct rndis_message *rndis_msg, |
| 917 | struct hv_page_buffer *pb, |
Stephen Hemminger | cfd8afd | 2017-12-12 16:48:40 -0800 | [diff] [blame] | 918 | bool xmit_more) |
KY Srinivasan | c25aaf8 | 2014-04-30 10:14:31 -0700 | [diff] [blame] | 919 | { |
| 920 | char *start = net_device->send_buf; |
Haiyang Zhang | 7c3877f | 2015-03-26 09:03:37 -0700 | [diff] [blame] | 921 | char *dest = start + (section_index * net_device->send_section_size) |
| 922 | + pend_size; |
KY Srinivasan | c25aaf8 | 2014-04-30 10:14:31 -0700 | [diff] [blame] | 923 | int i; |
Haiyang Zhang | 7c3877f | 2015-03-26 09:03:37 -0700 | [diff] [blame] | 924 | u32 padding = 0; |
Haiyang Zhang | aa0a34b | 2015-04-13 16:34:35 -0700 | [diff] [blame] | 925 | u32 page_count = packet->cp_partial ? packet->rmsg_pgcnt : |
| 926 | packet->page_buf_cnt; |
Stephen Hemminger | b85e06f | 2017-12-01 11:01:46 -0800 | [diff] [blame] | 927 | u32 remain; |
Haiyang Zhang | 7c3877f | 2015-03-26 09:03:37 -0700 | [diff] [blame] | 928 | |
| 929 | /* Add padding */ |
Stephen Hemminger | b85e06f | 2017-12-01 11:01:46 -0800 | [diff] [blame] | 930 | remain = packet->total_data_buflen & (net_device->pkt_align - 1); |
Stephen Hemminger | cfd8afd | 2017-12-12 16:48:40 -0800 | [diff] [blame] | 931 | if (xmit_more && remain) { |
Haiyang Zhang | 7c3877f | 2015-03-26 09:03:37 -0700 | [diff] [blame] | 932 | padding = net_device->pkt_align - remain; |
KY Srinivasan | 2447676 | 2015-12-01 16:43:06 -0800 | [diff] [blame] | 933 | rndis_msg->msg_len += padding; |
Haiyang Zhang | 7c3877f | 2015-03-26 09:03:37 -0700 | [diff] [blame] | 934 | packet->total_data_buflen += padding; |
| 935 | } |
KY Srinivasan | c25aaf8 | 2014-04-30 10:14:31 -0700 | [diff] [blame] | 936 | |
Haiyang Zhang | aa0a34b | 2015-04-13 16:34:35 -0700 | [diff] [blame] | 937 | for (i = 0; i < page_count; i++) { |
Boqun Feng | 11d8620 | 2020-09-16 11:48:13 +0800 | [diff] [blame] | 938 | char *src = phys_to_virt(pb[i].pfn << HV_HYP_PAGE_SHIFT); |
stephen hemminger | 02b6de0 | 2017-07-28 08:59:44 -0700 | [diff] [blame] | 939 | u32 offset = pb[i].offset; |
| 940 | u32 len = pb[i].len; |
KY Srinivasan | c25aaf8 | 2014-04-30 10:14:31 -0700 | [diff] [blame] | 941 | |
| 942 | memcpy(dest, (src + offset), len); |
KY Srinivasan | c25aaf8 | 2014-04-30 10:14:31 -0700 | [diff] [blame] | 943 | dest += len; |
| 944 | } |
Haiyang Zhang | 7c3877f | 2015-03-26 09:03:37 -0700 | [diff] [blame] | 945 | |
Stephen Hemminger | 26a1126 | 2017-12-12 16:48:35 -0800 | [diff] [blame] | 946 | if (padding) |
Haiyang Zhang | 7c3877f | 2015-03-26 09:03:37 -0700 | [diff] [blame] | 947 | memset(dest, 0, padding); |
KY Srinivasan | c25aaf8 | 2014-04-30 10:14:31 -0700 | [diff] [blame] | 948 | } |
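
/*
 * Editor's sketch: the padding above rounds the RNDIS message up to the
 * device's pkt_align (a power of two) so that batched packets start aligned
 * within the send-buffer section. Stand-alone form of the arithmetic:
 */
static u32 rndis_pad_len(u32 len, u32 align)
{
	u32 remain = len & (align - 1);	/* align must be a power of two */

	return remain ? align - remain : 0;
}
/* e.g. assuming align = 8, rndis_pad_len(1514, 8) == 6, growing a 1514-byte
 * message to 1520 bytes; the pad bytes themselves are zeroed by the memset
 * above.
 */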
| 949 | |
Stephen Hemminger | 3a8963a | 2016-09-09 12:45:24 -0700 | [diff] [blame] | 950 | static inline int netvsc_send_pkt( |
Vitaly Kuznetsov | 0a1275c | 2016-05-13 13:55:23 +0200 | [diff] [blame] | 951 | struct hv_device *device, |
Haiyang Zhang | 7c3877f | 2015-03-26 09:03:37 -0700 | [diff] [blame] | 952 | struct hv_netvsc_packet *packet, |
KY Srinivasan | a9f2e2d | 2015-12-01 16:43:13 -0800 | [diff] [blame] | 953 | struct netvsc_device *net_device, |
stephen hemminger | 02b6de0 | 2017-07-28 08:59:44 -0700 | [diff] [blame] | 954 | struct hv_page_buffer *pb, |
KY Srinivasan | 3a3d9a0 | 2015-12-01 16:43:14 -0800 | [diff] [blame] | 955 | struct sk_buff *skb) |
Hank Janssen | fceaf24 | 2009-07-13 15:34:54 -0700 | [diff] [blame] | 956 | { |
Haiyang Zhang | 7c3877f | 2015-03-26 09:03:37 -0700 | [diff] [blame] | 957 | struct nvsp_message nvmsg; |
Stephen Hemminger | ec96638 | 2018-03-16 15:44:28 -0700 | [diff] [blame] | 958 | struct nvsp_1_message_send_rndis_packet *rpkt = |
Joe Perches | 956a25c | 2017-07-31 10:30:54 -0700 | [diff] [blame] | 959 | &nvmsg.msg.v1_msg.send_rndis_pkt; |
| 960 | struct netvsc_channel * const nvchan = |
| 961 | &net_device->chan_table[packet->q_idx]; |
stephen hemminger | b8b835a | 2017-01-24 13:06:07 -0800 | [diff] [blame] | 962 | struct vmbus_channel *out_channel = nvchan->channel; |
Vitaly Kuznetsov | 0a1275c | 2016-05-13 13:55:23 +0200 | [diff] [blame] | 963 | struct net_device *ndev = hv_get_drvdata(device); |
Simon Xiao | 09af87d | 2017-09-29 11:39:46 -0700 | [diff] [blame] | 964 | struct net_device_context *ndev_ctx = netdev_priv(ndev); |
stephen hemminger | b8b835a | 2017-01-24 13:06:07 -0800 | [diff] [blame] | 965 | struct netdev_queue *txq = netdev_get_tx_queue(ndev, packet->q_idx); |
Haiyang Zhang | 7c3877f | 2015-03-26 09:03:37 -0700 | [diff] [blame] | 966 | u64 req_id; |
| 967 | int ret; |
Long Li | 6b1f837 | 2018-03-27 17:48:39 -0700 | [diff] [blame] | 968 | u32 ring_avail = hv_get_avail_to_write_percent(&out_channel->outbound); |
KY Srinivasan | c25aaf8 | 2014-04-30 10:14:31 -0700 | [diff] [blame] | 969 | |
Andrea Parri (Microsoft) | 505e3f0 | 2021-01-14 21:26:28 +0100 | [diff] [blame] | 970 | memset(&nvmsg, 0, sizeof(struct nvsp_message)); |
Haiyang Zhang | 7c3877f | 2015-03-26 09:03:37 -0700 | [diff] [blame] | 971 | nvmsg.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT; |
Joe Perches | 956a25c | 2017-07-31 10:30:54 -0700 | [diff] [blame] | 972 | if (skb) |
| 973 | rpkt->channel_type = 0; /* 0 is RMC_DATA */ |
Haiyang Zhang | 7c3877f | 2015-03-26 09:03:37 -0700 | [diff] [blame] | 974 | else |
Joe Perches | 956a25c | 2017-07-31 10:30:54 -0700 | [diff] [blame] | 975 | rpkt->channel_type = 1; /* 1 is RMC_CONTROL */ |
| 976 | |
| 977 | rpkt->send_buf_section_index = packet->send_buf_index; |
| 978 | if (packet->send_buf_index == NETVSC_INVALID_INDEX) |
| 979 | rpkt->send_buf_section_size = 0; |
| 980 | else |
| 981 | rpkt->send_buf_section_size = packet->total_data_buflen; |
Hank Janssen | fceaf24 | 2009-07-13 15:34:54 -0700 | [diff] [blame] | 982 | |
KY Srinivasan | 3a3d9a0 | 2015-12-01 16:43:14 -0800 | [diff] [blame] | 983 | req_id = (ulong)skb; |
Haiyang Zhang | f1ea3cd | 2013-04-05 11:44:40 +0000 | [diff] [blame] | 984 | |
Haiyang Zhang | c3582a2 | 2014-12-01 13:28:39 -0800 | [diff] [blame] | 985 | if (out_channel->rescind) |
| 986 | return -ENODEV; |
| 987 | |
Stephen Hemminger | ec96638 | 2018-03-16 15:44:28 -0700 | [diff] [blame] | 988 | trace_nvsp_send_pkt(ndev, out_channel, rpkt); |
| 989 | |
Haiyang Zhang | 72a2f5b | 2010-12-10 12:03:58 -0800 | [diff] [blame] | 990 | if (packet->page_buf_cnt) { |
stephen hemminger | 02b6de0 | 2017-07-28 08:59:44 -0700 | [diff] [blame] | 991 | if (packet->cp_partial) |
| 992 | pb += packet->rmsg_pgcnt; |
| 993 | |
stephen hemminger | 5a668d8 | 2017-08-16 08:56:25 -0700 | [diff] [blame] | 994 | ret = vmbus_sendpacket_pagebuffer(out_channel, |
| 995 | pb, packet->page_buf_cnt, |
| 996 | &nvmsg, sizeof(nvmsg), |
| 997 | req_id); |
Greg Kroah-Hartman | 21a80820 | 2009-09-02 10:33:05 -0700 | [diff] [blame] | 998 | } else { |
stephen hemminger | 5dd0fb9 | 2017-08-16 08:56:26 -0700 | [diff] [blame] | 999 | ret = vmbus_sendpacket(out_channel, |
| 1000 | &nvmsg, sizeof(nvmsg), |
| 1001 | req_id, VM_PKT_DATA_INBAND, |
| 1002 | VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); |
Hank Janssen | fceaf24 | 2009-07-13 15:34:54 -0700 | [diff] [blame] | 1003 | } |
| 1004 | |
Haiyang Zhang | 1d06825 | 2011-12-02 11:56:25 -0800 | [diff] [blame] | 1005 | if (ret == 0) { |
stephen hemminger | b8b835a | 2017-01-24 13:06:07 -0800 | [diff] [blame] | 1006 | atomic_inc_return(&nvchan->queue_sends); |
Haiyang Zhang | 5b54dac | 2014-04-21 10:20:28 -0700 | [diff] [blame] | 1007 | |
Simon Xiao | 09af87d | 2017-09-29 11:39:46 -0700 | [diff] [blame] | 1008 | if (ring_avail < RING_AVAIL_PERCENT_LOWATER) { |
stephen hemminger | b8b835a | 2017-01-24 13:06:07 -0800 | [diff] [blame] | 1009 | netif_tx_stop_queue(txq); |
Simon Xiao | 09af87d | 2017-09-29 11:39:46 -0700 | [diff] [blame] | 1010 | ndev_ctx->eth_stats.stop_queue++; |
| 1011 | } |
Haiyang Zhang | 1d06825 | 2011-12-02 11:56:25 -0800 | [diff] [blame] | 1012 | } else if (ret == -EAGAIN) { |
stephen hemminger | b8b835a | 2017-01-24 13:06:07 -0800 | [diff] [blame] | 1013 | netif_tx_stop_queue(txq); |
Simon Xiao | 09af87d | 2017-09-29 11:39:46 -0700 | [diff] [blame] | 1014 | ndev_ctx->eth_stats.stop_queue++; |
Haiyang Zhang | 1d06825 | 2011-12-02 11:56:25 -0800 | [diff] [blame] | 1015 | } else { |
stephen hemminger | 4a2176c | 2017-07-28 08:59:43 -0700 | [diff] [blame] | 1016 | netdev_err(ndev, |
| 1017 | "Unable to send packet pages %u len %u, ret %d\n", |
| 1018 | packet->page_buf_cnt, packet->total_data_buflen, |
| 1019 | ret); |
Haiyang Zhang | 1d06825 | 2011-12-02 11:56:25 -0800 | [diff] [blame] | 1020 | } |
Hank Janssen | fceaf24 | 2009-07-13 15:34:54 -0700 | [diff] [blame] | 1021 | |
Haiyang Zhang | 93aa479 | 2019-04-30 19:29:07 +0000 | [diff] [blame] | 1022 | if (netif_tx_queue_stopped(txq) && |
| 1023 | atomic_read(&nvchan->queue_sends) < 1 && |
| 1024 | !net_device->tx_disable) { |
| 1025 | netif_tx_wake_queue(txq); |
| 1026 | ndev_ctx->eth_stats.wake_queue++; |
| 1027 | if (ret == -EAGAIN) |
| 1028 | ret = -ENOSPC; |
| 1029 | } |
| 1030 | |
Haiyang Zhang | 7c3877f | 2015-03-26 09:03:37 -0700 | [diff] [blame] | 1031 | return ret; |
| 1032 | } |
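
/*
 * Editor's note (illustrative): req_id carries the skb pointer as the VMBus
 * transaction id. When the host completes the send, netvsc_send_tx_complete()
 * above recovers the skb via vmbus_request_addr() using the same id:
 *
 *   send:      req_id = (ulong)skb;  vmbus_sendpacket(..., req_id, ...);
 *   complete:  cmd_rqst = vmbus_request_addr(&channel->requestor, trans_id);
 *              skb = (struct sk_buff *)(unsigned long)cmd_rqst;
 *
 * A NULL skb (control messages) is legal and simply skips the skb teardown.
 */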
| 1033 | |
Haiyang Zhang | c85e492 | 2016-01-25 09:49:31 -0800 | [diff] [blame] | 1034 | /* Move packet out of multi send data (msd), and clear msd */ |
| 1035 | static inline void move_pkt_msd(struct hv_netvsc_packet **msd_send, |
| 1036 | struct sk_buff **msd_skb, |
| 1037 | struct multi_send_data *msdp) |
| 1038 | { |
| 1039 | *msd_skb = msdp->skb; |
| 1040 | *msd_send = msdp->pkt; |
| 1041 | msdp->skb = NULL; |
| 1042 | msdp->pkt = NULL; |
| 1043 | msdp->count = 0; |
| 1044 | } |
| 1045 | |
stephen hemminger | 2a926f7 | 2017-07-19 11:53:17 -0700 | [diff] [blame] | 1046 | /* RCU already held by caller */ |
Shachar Raindel | bd49fea | 2021-03-12 15:45:27 -0800 | [diff] [blame] | 1047 | /* The batching/bouncing logic below attempts to optimize performance.
| 1049 | * |
| 1050 | * For small, non-LSO packets we copy the packet to a send buffer |
| 1051 | * which is pre-registered with the Hyper-V side. This enables the |
| 1052 | * hypervisor to avoid remapping the aperture to access the packet |
| 1053 | * descriptor and data. |
| 1054 | * |
| 1055 | * If we already started using a buffer and the netdev is transmitting |
| 1056 | * a burst of packets, keep on copying into the buffer until it is |
| 1057 | * full or we are done collecting a burst. If there is an existing |
| 1058 | * buffer with space for the RNDIS descriptor but not the packet, copy |
| 1059 | * the RNDIS descriptor to the buffer, keeping the packet in place. |
| 1060 | * |
| 1061 | * If we do batching and send more than one packet using a single |
| 1062 | * NetVSC message, free the SKBs of the packets copied, except for the |
| 1063 | * last packet. This is done to streamline the handling of the case |
| 1064 | * where the last packet only had the RNDIS descriptor copied to the |
| 1065 | * send buffer, with the data pointers included in the NetVSC message. |
| 1066 | */ |
Stephen Hemminger | cfd8afd | 2017-12-12 16:48:40 -0800 | [diff] [blame] | 1067 | int netvsc_send(struct net_device *ndev, |
KY Srinivasan | 2447676 | 2015-12-01 16:43:06 -0800 | [diff] [blame] | 1068 | struct hv_netvsc_packet *packet, |
KY Srinivasan | a9f2e2d | 2015-12-01 16:43:13 -0800 | [diff] [blame] | 1069 | struct rndis_message *rndis_msg, |
stephen hemminger | 02b6de0 | 2017-07-28 08:59:44 -0700 | [diff] [blame] | 1070 | struct hv_page_buffer *pb, |
Haiyang Zhang | 351e158 | 2020-01-23 13:52:34 -0800 | [diff] [blame] | 1071 | struct sk_buff *skb, |
| 1072 | bool xdp_tx) |
Haiyang Zhang | 7c3877f | 2015-03-26 09:03:37 -0700 | [diff] [blame] | 1073 | { |
Stephen Hemminger | cfd8afd | 2017-12-12 16:48:40 -0800 | [diff] [blame] | 1074 | struct net_device_context *ndev_ctx = netdev_priv(ndev); |
stephen hemminger | 3962981 | 2017-07-19 11:53:19 -0700 | [diff] [blame] | 1075 | struct netvsc_device *net_device |
stephen hemminger | 867047c | 2017-07-28 08:59:42 -0700 | [diff] [blame] | 1076 | = rcu_dereference_bh(ndev_ctx->nvdev); |
stephen hemminger | 2a926f7 | 2017-07-19 11:53:17 -0700 | [diff] [blame] | 1077 | struct hv_device *device = ndev_ctx->device_ctx; |
Stephen Hemminger | 6c4c137 | 2016-08-23 12:17:55 -0700 | [diff] [blame] | 1078 | int ret = 0; |
stephen hemminger | b8b835a | 2017-01-24 13:06:07 -0800 | [diff] [blame] | 1079 | struct netvsc_channel *nvchan; |
Haiyang Zhang | 7c3877f | 2015-03-26 09:03:37 -0700 | [diff] [blame] | 1080 | u32 pktlen = packet->total_data_buflen, msd_len = 0; |
| 1081 | unsigned int section_index = NETVSC_INVALID_INDEX; |
Haiyang Zhang | 7c3877f | 2015-03-26 09:03:37 -0700 | [diff] [blame] | 1082 | struct multi_send_data *msdp; |
| 1083 | struct hv_netvsc_packet *msd_send = NULL, *cur_send = NULL; |
Haiyang Zhang | c85e492 | 2016-01-25 09:49:31 -0800 | [diff] [blame] | 1084 | struct sk_buff *msd_skb = NULL; |
Stephen Hemminger | cfd8afd | 2017-12-12 16:48:40 -0800 | [diff] [blame] | 1085 | bool try_batch, xmit_more; |
Haiyang Zhang | 7c3877f | 2015-03-26 09:03:37 -0700 | [diff] [blame] | 1086 | |
stephen hemminger | 592b4fe | 2017-06-08 16:21:23 -0700 | [diff] [blame] | 1087 | 	/* If the device is rescinded, return an error; the packet will be dropped. */
stephen hemminger | 2a926f7 | 2017-07-19 11:53:17 -0700 | [diff] [blame] | 1088 | if (unlikely(!net_device || net_device->destroy)) |
Haiyang Zhang | 7c3877f | 2015-03-26 09:03:37 -0700 | [diff] [blame] | 1089 | return -ENODEV; |
| 1090 | |
stephen hemminger | b8b835a | 2017-01-24 13:06:07 -0800 | [diff] [blame] | 1091 | nvchan = &net_device->chan_table[packet->q_idx]; |
Haiyang Zhang | 7c3877f | 2015-03-26 09:03:37 -0700 | [diff] [blame] | 1092 | packet->send_buf_index = NETVSC_INVALID_INDEX; |
Haiyang Zhang | aa0a34b | 2015-04-13 16:34:35 -0700 | [diff] [blame] | 1093 | packet->cp_partial = false; |
Haiyang Zhang | 7c3877f | 2015-03-26 09:03:37 -0700 | [diff] [blame] | 1094 | |
Haiyang Zhang | 351e158 | 2020-01-23 13:52:34 -0800 | [diff] [blame] | 1095 | 	/* Send a control message or XDP packet directly, without touching the
| 1096 | 	 * msd (Multi-Send Data) field, which may be changed during data packet
| 1097 | 	 * processing.
Haiyang Zhang | cf8190e | 2015-12-10 12:19:35 -0800 | [diff] [blame] | 1098 | */ |
Haiyang Zhang | 351e158 | 2020-01-23 13:52:34 -0800 | [diff] [blame] | 1099 | if (!skb || xdp_tx) |
Stephen Hemminger | 12f6966 | 2018-03-02 13:49:01 -0800 | [diff] [blame] | 1100 | return netvsc_send_pkt(device, packet, net_device, pb, skb); |
Haiyang Zhang | cf8190e | 2015-12-10 12:19:35 -0800 | [diff] [blame] | 1101 | |
Haiyang Zhang | 7c3877f | 2015-03-26 09:03:37 -0700 | [diff] [blame] | 1102 | /* batch packets in send buffer if possible */ |
stephen hemminger | b8b835a | 2017-01-24 13:06:07 -0800 | [diff] [blame] | 1103 | msdp = &nvchan->msd; |
Haiyang Zhang | 7c3877f | 2015-03-26 09:03:37 -0700 | [diff] [blame] | 1104 | if (msdp->pkt) |
| 1105 | msd_len = msdp->pkt->total_data_buflen; |
| 1106 | |
stephen hemminger | ebc1dcf | 2017-03-22 14:51:04 -0700 | [diff] [blame] | 1107 | try_batch = msd_len > 0 && msdp->count < net_device->max_pkt; |
Haiyang Zhang | aa0a34b | 2015-04-13 16:34:35 -0700 | [diff] [blame] | 1108 | if (try_batch && msd_len + pktlen + net_device->pkt_align < |
Haiyang Zhang | 7c3877f | 2015-03-26 09:03:37 -0700 | [diff] [blame] | 1109 | net_device->send_section_size) { |
| 1110 | section_index = msdp->pkt->send_buf_index; |
| 1111 | |
Haiyang Zhang | aa0a34b | 2015-04-13 16:34:35 -0700 | [diff] [blame] | 1112 | } else if (try_batch && msd_len + packet->rmsg_size < |
| 1113 | net_device->send_section_size) { |
| 1114 | section_index = msdp->pkt->send_buf_index; |
| 1115 | packet->cp_partial = true; |
| 1116 | |
stephen hemminger | ebc1dcf | 2017-03-22 14:51:04 -0700 | [diff] [blame] | 1117 | } else if (pktlen + net_device->pkt_align < |
Haiyang Zhang | 7c3877f | 2015-03-26 09:03:37 -0700 | [diff] [blame] | 1118 | net_device->send_section_size) { |
| 1119 | section_index = netvsc_get_next_send_section(net_device); |
stephen hemminger | cad5c19 | 2017-08-09 17:46:12 -0700 | [diff] [blame] | 1120 | if (unlikely(section_index == NETVSC_INVALID_INDEX)) { |
| 1121 | ++ndev_ctx->eth_stats.tx_send_full; |
| 1122 | } else { |
Haiyang Zhang | c85e492 | 2016-01-25 09:49:31 -0800 | [diff] [blame] | 1123 | move_pkt_msd(&msd_send, &msd_skb, msdp); |
| 1124 | msd_len = 0; |
Haiyang Zhang | 7c3877f | 2015-03-26 09:03:37 -0700 | [diff] [blame] | 1125 | } |
| 1126 | } |
| 1127 | |
Stephen Hemminger | cfd8afd | 2017-12-12 16:48:40 -0800 | [diff] [blame] | 1128 | 	/* Keep aggregating only if the stack says more data is coming, we are not
| 1129 | 	 * doing a mixed-mode (partial copy) send, and the queue is not flow blocked.
| 1130 | 	 */
Florian Westphal | 6b16f9e | 2019-04-01 16:42:14 +0200 | [diff] [blame] | 1131 | xmit_more = netdev_xmit_more() && |
Stephen Hemminger | cfd8afd | 2017-12-12 16:48:40 -0800 | [diff] [blame] | 1132 | !packet->cp_partial && |
| 1133 | !netif_xmit_stopped(netdev_get_tx_queue(ndev, packet->q_idx)); |
| 1134 | |
Haiyang Zhang | 7c3877f | 2015-03-26 09:03:37 -0700 | [diff] [blame] | 1135 | if (section_index != NETVSC_INVALID_INDEX) { |
| 1136 | netvsc_copy_to_send_buf(net_device, |
| 1137 | section_index, msd_len, |
Stephen Hemminger | cfd8afd | 2017-12-12 16:48:40 -0800 | [diff] [blame] | 1138 | packet, rndis_msg, pb, xmit_more); |
KY Srinivasan | b08cc79 | 2015-03-29 21:08:42 -0700 | [diff] [blame] | 1139 | |
Haiyang Zhang | 7c3877f | 2015-03-26 09:03:37 -0700 | [diff] [blame] | 1140 | packet->send_buf_index = section_index; |
Haiyang Zhang | aa0a34b | 2015-04-13 16:34:35 -0700 | [diff] [blame] | 1141 | |
| 1142 | if (packet->cp_partial) { |
| 1143 | packet->page_buf_cnt -= packet->rmsg_pgcnt; |
| 1144 | packet->total_data_buflen = msd_len + packet->rmsg_size; |
| 1145 | } else { |
| 1146 | packet->page_buf_cnt = 0; |
| 1147 | packet->total_data_buflen += msd_len; |
Haiyang Zhang | aa0a34b | 2015-04-13 16:34:35 -0700 | [diff] [blame] | 1148 | } |
Haiyang Zhang | 7c3877f | 2015-03-26 09:03:37 -0700 | [diff] [blame] | 1149 | |
stephen hemminger | 793e395 | 2017-01-24 13:06:12 -0800 | [diff] [blame] | 1150 | if (msdp->pkt) { |
| 1151 | packet->total_packets += msdp->pkt->total_packets; |
| 1152 | packet->total_bytes += msdp->pkt->total_bytes; |
| 1153 | } |
| 1154 | |
Haiyang Zhang | c85e492 | 2016-01-25 09:49:31 -0800 | [diff] [blame] | 1155 | if (msdp->skb) |
Stephen Hemminger | 17db4bc | 2016-09-22 16:56:29 -0700 | [diff] [blame] | 1156 | dev_consume_skb_any(msdp->skb); |
Haiyang Zhang | ee90b81 | 2015-04-06 15:22:54 -0700 | [diff] [blame] | 1157 | |
Stephen Hemminger | cfd8afd | 2017-12-12 16:48:40 -0800 | [diff] [blame] | 1158 | if (xmit_more) { |
Haiyang Zhang | c85e492 | 2016-01-25 09:49:31 -0800 | [diff] [blame] | 1159 | msdp->skb = skb; |
Haiyang Zhang | 7c3877f | 2015-03-26 09:03:37 -0700 | [diff] [blame] | 1160 | msdp->pkt = packet; |
| 1161 | msdp->count++; |
| 1162 | } else { |
| 1163 | cur_send = packet; |
Haiyang Zhang | c85e492 | 2016-01-25 09:49:31 -0800 | [diff] [blame] | 1164 | msdp->skb = NULL; |
Haiyang Zhang | 7c3877f | 2015-03-26 09:03:37 -0700 | [diff] [blame] | 1165 | msdp->pkt = NULL; |
| 1166 | msdp->count = 0; |
| 1167 | } |
| 1168 | } else { |
Haiyang Zhang | c85e492 | 2016-01-25 09:49:31 -0800 | [diff] [blame] | 1169 | move_pkt_msd(&msd_send, &msd_skb, msdp); |
Haiyang Zhang | 7c3877f | 2015-03-26 09:03:37 -0700 | [diff] [blame] | 1170 | cur_send = packet; |
| 1171 | } |
| 1172 | |
Haiyang Zhang | 7c3877f | 2015-03-26 09:03:37 -0700 | [diff] [blame] | 1173 | if (msd_send) { |
Stephen Hemminger | 6c4c137 | 2016-08-23 12:17:55 -0700 | [diff] [blame] | 1174 | int m_ret = netvsc_send_pkt(device, msd_send, net_device, |
| 1175 | NULL, msd_skb); |
Haiyang Zhang | 7c3877f | 2015-03-26 09:03:37 -0700 | [diff] [blame] | 1176 | |
| 1177 | if (m_ret != 0) { |
| 1178 | netvsc_free_send_slot(net_device, |
| 1179 | msd_send->send_buf_index); |
Haiyang Zhang | c85e492 | 2016-01-25 09:49:31 -0800 | [diff] [blame] | 1180 | dev_kfree_skb_any(msd_skb); |
Haiyang Zhang | 7c3877f | 2015-03-26 09:03:37 -0700 | [diff] [blame] | 1181 | } |
| 1182 | } |
| 1183 | |
| 1184 | if (cur_send) |
Vitaly Kuznetsov | 0a1275c | 2016-05-13 13:55:23 +0200 | [diff] [blame] | 1185 | ret = netvsc_send_pkt(device, cur_send, net_device, pb, skb); |
Haiyang Zhang | 7c3877f | 2015-03-26 09:03:37 -0700 | [diff] [blame] | 1186 | |
Jerry Snitselaar | 7aab515 | 2015-05-04 10:57:16 -0700 | [diff] [blame] | 1187 | if (ret != 0 && section_index != NETVSC_INVALID_INDEX) |
| 1188 | netvsc_free_send_slot(net_device, section_index); |
Haiyang Zhang | d953ca4 | 2015-01-29 12:34:49 -0800 | [diff] [blame] | 1189 | |
Hank Janssen | fceaf24 | 2009-07-13 15:34:54 -0700 | [diff] [blame] | 1190 | return ret; |
| 1191 | } |
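
/*
 * Editor's summary (illustrative) of the section-index decision in
 * netvsc_send() above. try_batch requires an existing batch (msd_len > 0)
 * with room for more packets; msd_len is the data already batched in the
 * current send-buffer section:
 *
 *   try_batch && msd_len + pktlen + pkt_align < section_size
 *       -> append a full copy of this packet to the current batch
 *   try_batch && msd_len + rmsg_size < section_size
 *       -> copy only the RNDIS descriptor (cp_partial); data stays in place
 *   pktlen + pkt_align < section_size
 *       -> claim a fresh section and start a new batch
 *   otherwise
 *       -> NETVSC_INVALID_INDEX: send via the GPA page list with no copy
 */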
| 1192 | |
stephen hemminger | 7426b1a | 2017-07-28 08:59:45 -0700 | [diff] [blame] | 1193 | /* Send pending recv completions */ |
stephen hemminger | cad5c19 | 2017-08-09 17:46:12 -0700 | [diff] [blame] | 1194 | static int send_recv_completions(struct net_device *ndev, |
| 1195 | struct netvsc_device *nvdev, |
| 1196 | struct netvsc_channel *nvchan) |
Haiyang Zhang | 5fa9d3c | 2011-04-21 12:30:42 -0700 | [diff] [blame] | 1197 | { |
stephen hemminger | 7426b1a | 2017-07-28 08:59:45 -0700 | [diff] [blame] | 1198 | struct multi_recv_comp *mrc = &nvchan->mrc; |
| 1199 | struct recv_comp_msg { |
| 1200 | struct nvsp_message_header hdr; |
| 1201 | u32 status; |
| 1202 | } __packed; |
| 1203 | struct recv_comp_msg msg = { |
| 1204 | .hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE, |
| 1205 | }; |
Haiyang Zhang | 5fa9d3c | 2011-04-21 12:30:42 -0700 | [diff] [blame] | 1206 | int ret; |
| 1207 | |
stephen hemminger | 7426b1a | 2017-07-28 08:59:45 -0700 | [diff] [blame] | 1208 | while (mrc->first != mrc->next) { |
| 1209 | const struct recv_comp_data *rcd |
| 1210 | = mrc->slots + mrc->first; |
Haiyang Zhang | 5fa9d3c | 2011-04-21 12:30:42 -0700 | [diff] [blame] | 1211 | |
stephen hemminger | 7426b1a | 2017-07-28 08:59:45 -0700 | [diff] [blame] | 1212 | msg.status = rcd->status; |
| 1213 | ret = vmbus_sendpacket(nvchan->channel, &msg, sizeof(msg), |
| 1214 | rcd->tid, VM_PKT_COMP, 0); |
stephen hemminger | cad5c19 | 2017-08-09 17:46:12 -0700 | [diff] [blame] | 1215 | if (unlikely(ret)) { |
| 1216 | struct net_device_context *ndev_ctx = netdev_priv(ndev); |
| 1217 | |
| 1218 | ++ndev_ctx->eth_stats.rx_comp_busy; |
stephen hemminger | 7426b1a | 2017-07-28 08:59:45 -0700 | [diff] [blame] | 1219 | return ret; |
stephen hemminger | cad5c19 | 2017-08-09 17:46:12 -0700 | [diff] [blame] | 1220 | } |
Haiyang Zhang | 5fa9d3c | 2011-04-21 12:30:42 -0700 | [diff] [blame] | 1221 | |
stephen hemminger | 7426b1a | 2017-07-28 08:59:45 -0700 | [diff] [blame] | 1222 | if (++mrc->first == nvdev->recv_completion_cnt) |
| 1223 | mrc->first = 0; |
| 1224 | } |
Haiyang Zhang | 5fa9d3c | 2011-04-21 12:30:42 -0700 | [diff] [blame] | 1225 | |
stephen hemminger | 7426b1a | 2017-07-28 08:59:45 -0700 | [diff] [blame] | 1226 | /* receive completion ring has been emptied */ |
| 1227 | if (unlikely(nvdev->destroy)) |
| 1228 | wake_up(&nvdev->wait_drain); |
| 1229 | |
| 1230 | return 0; |
Haiyang Zhang | c0b558e | 2016-08-19 14:47:09 -0700 | [diff] [blame] | 1231 | } |
| 1232 | |
stephen hemminger | 7426b1a | 2017-07-28 08:59:45 -0700 | [diff] [blame] | 1233 | /* Count how many receive completion slots are filled and how many remain */
| 1234 | static void recv_comp_slot_avail(const struct netvsc_device *nvdev, |
| 1235 | const struct multi_recv_comp *mrc, |
| 1236 | u32 *filled, u32 *avail) |
Haiyang Zhang | c0b558e | 2016-08-19 14:47:09 -0700 | [diff] [blame] | 1237 | { |
stephen hemminger | 7426b1a | 2017-07-28 08:59:45 -0700 | [diff] [blame] | 1238 | u32 count = nvdev->recv_completion_cnt; |
Haiyang Zhang | c0b558e | 2016-08-19 14:47:09 -0700 | [diff] [blame] | 1239 | |
stephen hemminger | 7426b1a | 2017-07-28 08:59:45 -0700 | [diff] [blame] | 1240 | if (mrc->next >= mrc->first) |
| 1241 | *filled = mrc->next - mrc->first; |
| 1242 | else |
| 1243 | *filled = (count - mrc->first) + mrc->next; |
Haiyang Zhang | c0b558e | 2016-08-19 14:47:09 -0700 | [diff] [blame] | 1244 | |
stephen hemminger | 7426b1a | 2017-07-28 08:59:45 -0700 | [diff] [blame] | 1245 | *avail = count - *filled - 1; |
Haiyang Zhang | c0b558e | 2016-08-19 14:47:09 -0700 | [diff] [blame] | 1246 | } |
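
/*
 * Worked example (editor's): with recv_completion_cnt = 8, first = 6 and
 * next = 2 the ring has wrapped, so filled = (8 - 6) + 2 = 4 and
 * avail = 8 - 4 - 1 = 3. One slot is always sacrificed so that
 * first == next unambiguously means "empty" rather than "full".
 */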
| 1247 | |
stephen hemminger | 7426b1a | 2017-07-28 08:59:45 -0700 | [diff] [blame] | 1248 | /* Add a receive completion to the ring, to be sent to the host. */
| 1249 | static void enq_receive_complete(struct net_device *ndev, |
| 1250 | struct netvsc_device *nvdev, u16 q_idx, |
| 1251 | u64 tid, u32 status) |
Haiyang Zhang | c0b558e | 2016-08-19 14:47:09 -0700 | [diff] [blame] | 1252 | { |
stephen hemminger | 7426b1a | 2017-07-28 08:59:45 -0700 | [diff] [blame] | 1253 | struct netvsc_channel *nvchan = &nvdev->chan_table[q_idx]; |
| 1254 | struct multi_recv_comp *mrc = &nvchan->mrc; |
| 1255 | struct recv_comp_data *rcd; |
Haiyang Zhang | c0b558e | 2016-08-19 14:47:09 -0700 | [diff] [blame] | 1256 | u32 filled, avail; |
| 1257 | |
stephen hemminger | 7426b1a | 2017-07-28 08:59:45 -0700 | [diff] [blame] | 1258 | recv_comp_slot_avail(nvdev, mrc, &filled, &avail); |
Haiyang Zhang | c0b558e | 2016-08-19 14:47:09 -0700 | [diff] [blame] | 1259 | |
stephen hemminger | 7426b1a | 2017-07-28 08:59:45 -0700 | [diff] [blame] | 1260 | if (unlikely(filled > NAPI_POLL_WEIGHT)) { |
stephen hemminger | cad5c19 | 2017-08-09 17:46:12 -0700 | [diff] [blame] | 1261 | send_recv_completions(ndev, nvdev, nvchan); |
stephen hemminger | 7426b1a | 2017-07-28 08:59:45 -0700 | [diff] [blame] | 1262 | recv_comp_slot_avail(nvdev, mrc, &filled, &avail); |
Haiyang Zhang | 5fa9d3c | 2011-04-21 12:30:42 -0700 | [diff] [blame] | 1263 | } |
Haiyang Zhang | 5fa9d3c | 2011-04-21 12:30:42 -0700 | [diff] [blame] | 1264 | |
stephen hemminger | 7426b1a | 2017-07-28 08:59:45 -0700 | [diff] [blame] | 1265 | if (unlikely(!avail)) { |
| 1266 | 		netdev_err(ndev, "Recv_comp full buf q:%hu, tid:%llx\n",
| 1267 | q_idx, tid); |
| 1268 | return; |
| 1269 | } |
Haiyang Zhang | c0b558e | 2016-08-19 14:47:09 -0700 | [diff] [blame] | 1270 | |
stephen hemminger | 7426b1a | 2017-07-28 08:59:45 -0700 | [diff] [blame] | 1271 | rcd = mrc->slots + mrc->next; |
| 1272 | rcd->tid = tid; |
| 1273 | rcd->status = status; |
Haiyang Zhang | c0b558e | 2016-08-19 14:47:09 -0700 | [diff] [blame] | 1274 | |
stephen hemminger | 7426b1a | 2017-07-28 08:59:45 -0700 | [diff] [blame] | 1275 | if (++mrc->next == nvdev->recv_completion_cnt) |
| 1276 | mrc->next = 0; |
Haiyang Zhang | c0b558e | 2016-08-19 14:47:09 -0700 | [diff] [blame] | 1277 | } |
| 1278 | |
stephen hemminger | 15a863b | 2017-02-27 10:26:49 -0800 | [diff] [blame] | 1279 | static int netvsc_receive(struct net_device *ndev, |
stephen hemminger | 7426b1a | 2017-07-28 08:59:45 -0700 | [diff] [blame] | 1280 | struct netvsc_device *net_device, |
Haiyang Zhang | c8e4eff | 2018-09-21 18:20:35 +0000 | [diff] [blame] | 1281 | struct netvsc_channel *nvchan, |
Andres Beltran | 4414418 | 2020-09-16 11:47:27 +0200 | [diff] [blame] | 1282 | const struct vmpacket_descriptor *desc) |
Hank Janssen | fceaf24 | 2009-07-13 15:34:54 -0700 | [diff] [blame] | 1283 | { |
Stephen Hemminger | c347b92 | 2018-04-26 14:34:25 -0700 | [diff] [blame] | 1284 | struct net_device_context *net_device_ctx = netdev_priv(ndev); |
Haiyang Zhang | c8e4eff | 2018-09-21 18:20:35 +0000 | [diff] [blame] | 1285 | struct vmbus_channel *channel = nvchan->channel; |
stephen hemminger | f3dd3f4 | 2017-02-27 10:26:48 -0800 | [diff] [blame] | 1286 | const struct vmtransfer_page_packet_header *vmxferpage_packet |
| 1287 | = container_of(desc, const struct vmtransfer_page_packet_header, d); |
Andres Beltran | 4414418 | 2020-09-16 11:47:27 +0200 | [diff] [blame] | 1288 | const struct nvsp_message *nvsp = hv_pkt_data(desc); |
| 1289 | u32 msglen = hv_pkt_datalen(desc); |
stephen hemminger | 15a863b | 2017-02-27 10:26:49 -0800 | [diff] [blame] | 1290 | u16 q_idx = channel->offermsg.offer.sub_channel_index; |
stephen hemminger | dc54a08 | 2017-01-24 13:06:08 -0800 | [diff] [blame] | 1291 | char *recv_buf = net_device->recv_buf; |
Haiyang Zhang | 4baab26 | 2014-04-21 14:54:43 -0700 | [diff] [blame] | 1292 | u32 status = NVSP_STAT_SUCCESS; |
Haiyang Zhang | 4532634 | 2011-12-15 13:45:15 -0800 | [diff] [blame] | 1293 | int i; |
| 1294 | int count = 0; |
K. Y. Srinivasan | 779b4d1 | 2011-04-26 09:20:22 -0700 | [diff] [blame] | 1295 | |
Andres Beltran | 4414418 | 2020-09-16 11:47:27 +0200 | [diff] [blame] | 1296 | /* Ensure packet is big enough to read header fields */ |
| 1297 | if (msglen < sizeof(struct nvsp_message_header)) { |
| 1298 | netif_err(net_device_ctx, rx_err, ndev, |
| 1299 | "invalid nvsp header, length too small: %u\n", |
| 1300 | msglen); |
| 1301 | return 0; |
| 1302 | } |
| 1303 | |
Bill Pemberton | 454f18a | 2009-07-27 16:47:24 -0400 | [diff] [blame] | 1304 | /* Make sure this is a valid nvsp packet */ |
stephen hemminger | dc54a08 | 2017-01-24 13:06:08 -0800 | [diff] [blame] | 1305 | if (unlikely(nvsp->hdr.msg_type != NVSP_MSG1_TYPE_SEND_RNDIS_PKT)) { |
| 1306 | netif_err(net_device_ctx, rx_err, ndev, |
| 1307 | "Unknown nvsp packet type received %u\n", |
| 1308 | nvsp->hdr.msg_type); |
stephen hemminger | 15a863b | 2017-02-27 10:26:49 -0800 | [diff] [blame] | 1309 | return 0; |
Hank Janssen | fceaf24 | 2009-07-13 15:34:54 -0700 | [diff] [blame] | 1310 | } |
| 1311 | |
Andres Beltran | 4414418 | 2020-09-16 11:47:27 +0200 | [diff] [blame] | 1312 | /* Validate xfer page pkt header */ |
| 1313 | if ((desc->offset8 << 3) < sizeof(struct vmtransfer_page_packet_header)) { |
| 1314 | netif_err(net_device_ctx, rx_err, ndev, |
| 1315 | "Invalid xfer page pkt, offset too small: %u\n", |
| 1316 | desc->offset8 << 3); |
| 1317 | return 0; |
| 1318 | } |
| 1319 | |
stephen hemminger | dc54a08 | 2017-01-24 13:06:08 -0800 | [diff] [blame] | 1320 | if (unlikely(vmxferpage_packet->xfer_pageset_id != NETVSC_RECEIVE_BUFFER_ID)) { |
| 1321 | netif_err(net_device_ctx, rx_err, ndev, |
| 1322 | "Invalid xfer page set id - expecting %x got %x\n", |
| 1323 | NETVSC_RECEIVE_BUFFER_ID, |
| 1324 | vmxferpage_packet->xfer_pageset_id); |
stephen hemminger | 15a863b | 2017-02-27 10:26:49 -0800 | [diff] [blame] | 1325 | return 0; |
Hank Janssen | fceaf24 | 2009-07-13 15:34:54 -0700 | [diff] [blame] | 1326 | } |
| 1327 | |
Haiyang Zhang | 4baab26 | 2014-04-21 14:54:43 -0700 | [diff] [blame] | 1328 | count = vmxferpage_packet->range_cnt; |
Hank Janssen | fceaf24 | 2009-07-13 15:34:54 -0700 | [diff] [blame] | 1329 | |
Andres Beltran | 4414418 | 2020-09-16 11:47:27 +0200 | [diff] [blame] | 1330 | /* Check count for a valid value */ |
| 1331 | if (NETVSC_XFER_HEADER_SIZE(count) > desc->offset8 << 3) { |
| 1332 | netif_err(net_device_ctx, rx_err, ndev, |
| 1333 | "Range count is not valid: %d\n", |
| 1334 | count); |
| 1335 | return 0; |
| 1336 | } |
| 1337 | |
Bill Pemberton | 454f18a | 2009-07-27 16:47:24 -0400 | [diff] [blame] | 1338 | 	/* Each range represents one RNDIS packet that contains one Ethernet frame */
Haiyang Zhang | 4baab26 | 2014-04-21 14:54:43 -0700 | [diff] [blame] | 1339 | for (i = 0; i < count; i++) { |
Haiyang Zhang | c5d24bd | 2018-03-22 12:01:14 -0700 | [diff] [blame] | 1340 | u32 offset = vmxferpage_packet->ranges[i].byte_offset; |
stephen hemminger | dc54a08 | 2017-01-24 13:06:08 -0800 | [diff] [blame] | 1341 | u32 buflen = vmxferpage_packet->ranges[i].byte_count; |
Haiyang Zhang | c5d24bd | 2018-03-22 12:01:14 -0700 | [diff] [blame] | 1342 | void *data; |
Haiyang Zhang | 5c71dad | 2018-03-22 12:01:13 -0700 | [diff] [blame] | 1343 | int ret; |
Hank Janssen | fceaf24 | 2009-07-13 15:34:54 -0700 | [diff] [blame] | 1344 | |
Andres Beltran | 4414418 | 2020-09-16 11:47:27 +0200 | [diff] [blame] | 1345 | if (unlikely(offset > net_device->recv_buf_size || |
| 1346 | buflen > net_device->recv_buf_size - offset)) { |
Haiyang Zhang | c8e4eff | 2018-09-21 18:20:35 +0000 | [diff] [blame] | 1347 | nvchan->rsc.cnt = 0; |
Haiyang Zhang | c5d24bd | 2018-03-22 12:01:14 -0700 | [diff] [blame] | 1348 | status = NVSP_STAT_FAIL; |
| 1349 | netif_err(net_device_ctx, rx_err, ndev, |
| 1350 | "Packet offset:%u + len:%u too big\n", |
| 1351 | offset, buflen); |
| 1352 | |
| 1353 | continue; |
| 1354 | } |
| 1355 | |
Andrea Parri (Microsoft) | 0ba35fe | 2021-01-26 17:29:07 +0100 | [diff] [blame] | 1356 | /* We're going to copy (sections of) the packet into nvchan->recv_buf; |
| 1357 | * make sure that nvchan->recv_buf is large enough to hold the packet. |
| 1358 | */ |
| 1359 | if (unlikely(buflen > net_device->recv_section_size)) { |
| 1360 | nvchan->rsc.cnt = 0; |
| 1361 | status = NVSP_STAT_FAIL; |
| 1362 | netif_err(net_device_ctx, rx_err, ndev, |
| 1363 | "Packet too big: buflen=%u recv_section_size=%u\n", |
| 1364 | buflen, net_device->recv_section_size); |
| 1365 | |
| 1366 | continue; |
| 1367 | } |
| 1368 | |
Haiyang Zhang | c5d24bd | 2018-03-22 12:01:14 -0700 | [diff] [blame] | 1369 | data = recv_buf + offset; |
| 1370 | |
Haiyang Zhang | c8e4eff | 2018-09-21 18:20:35 +0000 | [diff] [blame] | 1371 | nvchan->rsc.is_last = (i == count - 1); |
| 1372 | |
Stephen Hemminger | ec96638 | 2018-03-16 15:44:28 -0700 | [diff] [blame] | 1373 | trace_rndis_recv(ndev, q_idx, data); |
| 1374 | |
Bill Pemberton | 454f18a | 2009-07-27 16:47:24 -0400 | [diff] [blame] | 1375 | /* Pass it to the upper layer */ |
Haiyang Zhang | 5c71dad | 2018-03-22 12:01:13 -0700 | [diff] [blame] | 1376 | ret = rndis_filter_receive(ndev, net_device, |
Haiyang Zhang | c8e4eff | 2018-09-21 18:20:35 +0000 | [diff] [blame] | 1377 | nvchan, data, buflen); |
Haiyang Zhang | 5c71dad | 2018-03-22 12:01:13 -0700 | [diff] [blame] | 1378 | |
Andrea Parri (Microsoft) | 12bc8df | 2021-02-03 12:36:02 +0100 | [diff] [blame] | 1379 | if (unlikely(ret != NVSP_STAT_SUCCESS)) { |
| 1380 | /* Drop incomplete packet */ |
| 1381 | nvchan->rsc.cnt = 0; |
Haiyang Zhang | 5c71dad | 2018-03-22 12:01:13 -0700 | [diff] [blame] | 1382 | status = NVSP_STAT_FAIL; |
Andrea Parri (Microsoft) | 12bc8df | 2021-02-03 12:36:02 +0100 | [diff] [blame] | 1383 | } |
Hank Janssen | fceaf24 | 2009-07-13 15:34:54 -0700 | [diff] [blame] | 1384 | } |
| 1385 | |
stephen hemminger | 7426b1a | 2017-07-28 08:59:45 -0700 | [diff] [blame] | 1386 | enq_receive_complete(ndev, net_device, q_idx, |
| 1387 | vmxferpage_packet->d.trans_id, status); |
stephen hemminger | 15a863b | 2017-02-27 10:26:49 -0800 | [diff] [blame] | 1388 | |
stephen hemminger | 15a863b | 2017-02-27 10:26:49 -0800 | [diff] [blame] | 1389 | return count; |
Hank Janssen | fceaf24 | 2009-07-13 15:34:54 -0700 | [diff] [blame] | 1390 | } |
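
/*
 * Editor's note: the offset/length validation above is deliberately written
 * as "offset > size || buflen > size - offset" rather than the more obvious
 * "offset + buflen > size", so that a hostile 32-bit offset or length
 * supplied by the host cannot wrap the addition and slip past the check.
 * An equivalent stand-alone sketch:
 */
static bool recv_range_ok(u32 offset, u32 buflen, u32 buf_size)
{
	return offset <= buf_size && buflen <= buf_size - offset;
}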
| 1391 | |
Stephen Hemminger | c347b92 | 2018-04-26 14:34:25 -0700 | [diff] [blame] | 1392 | static void netvsc_send_table(struct net_device *ndev, |
Haiyang Zhang | 171c1fd | 2019-11-21 13:33:41 -0800 | [diff] [blame] | 1393 | struct netvsc_device *nvscdev, |
Haiyang Zhang | 71f2195 | 2019-11-21 13:33:40 -0800 | [diff] [blame] | 1394 | const struct nvsp_message *nvmsg, |
| 1395 | u32 msglen) |
Haiyang Zhang | 5b54dac | 2014-04-21 10:20:28 -0700 | [diff] [blame] | 1396 | { |
stephen hemminger | 7ce1012 | 2017-03-09 14:58:29 -0800 | [diff] [blame] | 1397 | struct net_device_context *net_device_ctx = netdev_priv(ndev); |
Haiyang Zhang | 71f2195 | 2019-11-21 13:33:40 -0800 | [diff] [blame] | 1398 | u32 count, offset, *tab; |
Stephen Hemminger | c347b92 | 2018-04-26 14:34:25 -0700 | [diff] [blame] | 1399 | int i; |
Haiyang Zhang | 5b54dac | 2014-04-21 10:20:28 -0700 | [diff] [blame] | 1400 | |
Andres Beltran | 4414418 | 2020-09-16 11:47:27 +0200 | [diff] [blame] | 1401 | /* Ensure packet is big enough to read send_table fields */ |
| 1402 | if (msglen < sizeof(struct nvsp_message_header) + |
| 1403 | sizeof(struct nvsp_5_send_indirect_table)) { |
| 1404 | netdev_err(ndev, "nvsp_v5_msg length too small: %u\n", msglen); |
| 1405 | return; |
| 1406 | } |
| 1407 | |
Haiyang Zhang | 5b54dac | 2014-04-21 10:20:28 -0700 | [diff] [blame] | 1408 | count = nvmsg->msg.v5_msg.send_table.count; |
Haiyang Zhang | 71f2195 | 2019-11-21 13:33:40 -0800 | [diff] [blame] | 1409 | offset = nvmsg->msg.v5_msg.send_table.offset; |
| 1410 | |
Haiyang Zhang | 5b54dac | 2014-04-21 10:20:28 -0700 | [diff] [blame] | 1411 | if (count != VRSS_SEND_TAB_SIZE) { |
| 1412 | netdev_err(ndev, "Received wrong send-table size:%u\n", count); |
| 1413 | return; |
| 1414 | } |
| 1415 | |
Haiyang Zhang | 171c1fd | 2019-11-21 13:33:41 -0800 | [diff] [blame] | 1416 | 	/* If the negotiated version is <= NVSP_PROTOCOL_VERSION_6, the offset
| 1417 | 	 * may be wrong due to a host bug, so fix it here.
| 1418 | 	 */
| 1419 | if (nvscdev->nvsp_version <= NVSP_PROTOCOL_VERSION_6 && |
| 1420 | msglen >= sizeof(struct nvsp_message_header) + |
| 1421 | sizeof(union nvsp_6_message_uber) + count * sizeof(u32)) |
| 1422 | offset = sizeof(struct nvsp_message_header) + |
| 1423 | sizeof(union nvsp_6_message_uber); |
| 1424 | |
| 1425 | /* Boundary check for all versions */ |
Andrea Parri (Microsoft) | 505e3f0 | 2021-01-14 21:26:28 +0100 | [diff] [blame] | 1426 | if (msglen < count * sizeof(u32) || offset > msglen - count * sizeof(u32)) { |
Haiyang Zhang | 71f2195 | 2019-11-21 13:33:40 -0800 | [diff] [blame] | 1427 | netdev_err(ndev, "Received send-table offset too big:%u\n", |
| 1428 | offset); |
| 1429 | return; |
| 1430 | } |
| 1431 | |
| 1432 | tab = (void *)nvmsg + offset; |
Haiyang Zhang | 5b54dac | 2014-04-21 10:20:28 -0700 | [diff] [blame] | 1433 | |
| 1434 | for (i = 0; i < count; i++) |
Haiyang Zhang | 39e91cf | 2017-10-13 12:28:04 -0700 | [diff] [blame] | 1435 | net_device_ctx->tx_table[i] = tab[i]; |
Haiyang Zhang | 5b54dac | 2014-04-21 10:20:28 -0700 | [diff] [blame] | 1436 | } |
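
/*
 * Editor's sketch (hypothetical helper): elsewhere in the driver the
 * indirection table filled in above is consumed at transmit time, with a
 * flow hash selecting a table entry and the entry naming the send queue.
 * Roughly:
 */
static u16 pick_tx_queue(const struct net_device_context *ndc, u32 flow_hash)
{
	/* tx_table has VRSS_SEND_TAB_SIZE entries, per the check above */
	return ndc->tx_table[flow_hash % VRSS_SEND_TAB_SIZE];
}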
| 1437 | |
Stephen Hemminger | c347b92 | 2018-04-26 14:34:25 -0700 | [diff] [blame] | 1438 | static void netvsc_send_vf(struct net_device *ndev, |
Andres Beltran | 4414418 | 2020-09-16 11:47:27 +0200 | [diff] [blame] | 1439 | const struct nvsp_message *nvmsg, |
| 1440 | u32 msglen) |
Haiyang Zhang | 71790a2 | 2015-07-24 10:08:40 -0700 | [diff] [blame] | 1441 | { |
Stephen Hemminger | c347b92 | 2018-04-26 14:34:25 -0700 | [diff] [blame] | 1442 | struct net_device_context *net_device_ctx = netdev_priv(ndev); |
| 1443 | |
Andres Beltran | 4414418 | 2020-09-16 11:47:27 +0200 | [diff] [blame] | 1444 | /* Ensure packet is big enough to read its fields */ |
| 1445 | if (msglen < sizeof(struct nvsp_message_header) + |
| 1446 | sizeof(struct nvsp_4_send_vf_association)) { |
| 1447 | netdev_err(ndev, "nvsp_v4_msg length too small: %u\n", msglen); |
| 1448 | return; |
| 1449 | } |
| 1450 | |
Vitaly Kuznetsov | f9a7da9 | 2016-08-15 17:48:39 +0200 | [diff] [blame] | 1451 | net_device_ctx->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated; |
| 1452 | net_device_ctx->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial; |
Stephen Hemminger | 00d7ddb | 2018-09-14 12:54:57 -0700 | [diff] [blame] | 1453 | netdev_info(ndev, "VF slot %u %s\n", |
| 1454 | net_device_ctx->vf_serial, |
| 1455 | net_device_ctx->vf_alloc ? "added" : "removed"); |
Haiyang Zhang | 71790a2 | 2015-07-24 10:08:40 -0700 | [diff] [blame] | 1456 | } |
| 1457 | |
Haiyang Zhang | 71f2195 | 2019-11-21 13:33:40 -0800 | [diff] [blame] | 1458 | static void netvsc_receive_inband(struct net_device *ndev, |
Haiyang Zhang | 171c1fd | 2019-11-21 13:33:41 -0800 | [diff] [blame] | 1459 | struct netvsc_device *nvscdev, |
Andres Beltran | 4414418 | 2020-09-16 11:47:27 +0200 | [diff] [blame] | 1460 | const struct vmpacket_descriptor *desc) |
Haiyang Zhang | 71790a2 | 2015-07-24 10:08:40 -0700 | [diff] [blame] | 1461 | { |
Andres Beltran | 4414418 | 2020-09-16 11:47:27 +0200 | [diff] [blame] | 1462 | const struct nvsp_message *nvmsg = hv_pkt_data(desc); |
| 1463 | u32 msglen = hv_pkt_datalen(desc); |
| 1464 | |
| 1465 | /* Ensure packet is big enough to read header fields */ |
| 1466 | if (msglen < sizeof(struct nvsp_message_header)) { |
| 1467 | netdev_err(ndev, "inband nvsp_message length too small: %u\n", msglen); |
| 1468 | return; |
| 1469 | } |
| 1470 | |
Haiyang Zhang | 71790a2 | 2015-07-24 10:08:40 -0700 | [diff] [blame] | 1471 | switch (nvmsg->hdr.msg_type) { |
| 1472 | case NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE: |
Haiyang Zhang | 171c1fd | 2019-11-21 13:33:41 -0800 | [diff] [blame] | 1473 | netvsc_send_table(ndev, nvscdev, nvmsg, msglen); |
Haiyang Zhang | 71790a2 | 2015-07-24 10:08:40 -0700 | [diff] [blame] | 1474 | break; |
| 1475 | |
| 1476 | case NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION: |
Andrea Parri (Microsoft) | 96854bb | 2021-02-01 15:48:14 +0100 | [diff] [blame] | 1477 | if (hv_is_isolation_supported()) |
| 1478 | 			netdev_err(ndev, "Ignoring VF_ASSOCIATION msg from a host supporting isolation\n");
| 1479 | else |
| 1480 | netvsc_send_vf(ndev, nvmsg, msglen); |
Haiyang Zhang | 71790a2 | 2015-07-24 10:08:40 -0700 | [diff] [blame] | 1481 | break; |
| 1482 | } |
| 1483 | } |
| 1484 | |
stephen hemminger | 15a863b | 2017-02-27 10:26:49 -0800 | [diff] [blame] | 1485 | static int netvsc_process_raw_pkt(struct hv_device *device, |
Haiyang Zhang | c8e4eff | 2018-09-21 18:20:35 +0000 | [diff] [blame] | 1486 | struct netvsc_channel *nvchan, |
stephen hemminger | 15a863b | 2017-02-27 10:26:49 -0800 | [diff] [blame] | 1487 | struct netvsc_device *net_device, |
| 1488 | struct net_device *ndev, |
stephen hemminger | f964543 | 2017-04-07 14:41:19 -0400 | [diff] [blame] | 1489 | const struct vmpacket_descriptor *desc, |
| 1490 | int budget) |
K. Y. Srinivasan | 99a50bb | 2016-07-05 16:52:46 -0700 | [diff] [blame] | 1491 | { |
Haiyang Zhang | c8e4eff | 2018-09-21 18:20:35 +0000 | [diff] [blame] | 1492 | struct vmbus_channel *channel = nvchan->channel; |
Stephen Hemminger | c347b92 | 2018-04-26 14:34:25 -0700 | [diff] [blame] | 1493 | const struct nvsp_message *nvmsg = hv_pkt_data(desc); |
K. Y. Srinivasan | 99a50bb | 2016-07-05 16:52:46 -0700 | [diff] [blame] | 1494 | |
Stephen Hemminger | ec96638 | 2018-03-16 15:44:28 -0700 | [diff] [blame] | 1495 | trace_nvsp_recv(ndev, channel, nvmsg); |
| 1496 | |
K. Y. Srinivasan | 99a50bb | 2016-07-05 16:52:46 -0700 | [diff] [blame] | 1497 | switch (desc->type) { |
| 1498 | case VM_PKT_COMP: |
Andres Beltran | 4414418 | 2020-09-16 11:47:27 +0200 | [diff] [blame] | 1499 | netvsc_send_completion(ndev, net_device, channel, desc, budget); |
K. Y. Srinivasan | 99a50bb | 2016-07-05 16:52:46 -0700 | [diff] [blame] | 1500 | break; |
| 1501 | |
| 1502 | case VM_PKT_DATA_USING_XFER_PAGES: |
Andres Beltran | 4414418 | 2020-09-16 11:47:27 +0200 | [diff] [blame] | 1503 | 		return netvsc_receive(ndev, net_device, nvchan, desc);
| 1505 | |
| 1506 | case VM_PKT_DATA_INBAND: |
Andres Beltran | 4414418 | 2020-09-16 11:47:27 +0200 | [diff] [blame] | 1507 | netvsc_receive_inband(ndev, net_device, desc); |
K. Y. Srinivasan | 99a50bb | 2016-07-05 16:52:46 -0700 | [diff] [blame] | 1508 | break; |
| 1509 | |
| 1510 | default: |
| 1511 | netdev_err(ndev, "unhandled packet type %d, tid %llx\n", |
stephen hemminger | f4f1c23 | 2017-03-22 14:50:57 -0700 | [diff] [blame] | 1512 | desc->type, desc->trans_id); |
K. Y. Srinivasan | 99a50bb | 2016-07-05 16:52:46 -0700 | [diff] [blame] | 1513 | break; |
| 1514 | } |
stephen hemminger | 15a863b | 2017-02-27 10:26:49 -0800 | [diff] [blame] | 1515 | |
| 1516 | return 0; |
| 1517 | } |
| 1518 | |
| 1519 | static struct hv_device *netvsc_channel_to_device(struct vmbus_channel *channel) |
| 1520 | { |
| 1521 | struct vmbus_channel *primary = channel->primary_channel; |
| 1522 | |
| 1523 | return primary ? primary->device_obj : channel->device_obj; |
| 1524 | } |
| 1525 | |
stephen hemminger | 262b7f1 | 2017-03-16 16:12:38 -0700 | [diff] [blame] | 1526 | /* Network processing softirq.
| 1527 |  * Processes data in the incoming ring buffer from the host.
| 1528 |  * Stops when the ring is empty or the budget is met or exceeded.
| 1529 | */ |
stephen hemminger | 15a863b | 2017-02-27 10:26:49 -0800 | [diff] [blame] | 1530 | int netvsc_poll(struct napi_struct *napi, int budget) |
| 1531 | { |
| 1532 | struct netvsc_channel *nvchan |
| 1533 | = container_of(napi, struct netvsc_channel, napi); |
stephen hemminger | 35fbbcc | 2017-07-19 11:53:18 -0700 | [diff] [blame] | 1534 | struct netvsc_device *net_device = nvchan->net_device; |
stephen hemminger | 15a863b | 2017-02-27 10:26:49 -0800 | [diff] [blame] | 1535 | struct vmbus_channel *channel = nvchan->channel; |
| 1536 | struct hv_device *device = netvsc_channel_to_device(channel); |
stephen hemminger | 15a863b | 2017-02-27 10:26:49 -0800 | [diff] [blame] | 1537 | struct net_device *ndev = hv_get_drvdata(device); |
stephen hemminger | 15a863b | 2017-02-27 10:26:49 -0800 | [diff] [blame] | 1538 | int work_done = 0; |
Haiyang Zhang | 6b81b19 | 2018-07-17 17:11:13 +0000 | [diff] [blame] | 1539 | int ret; |
stephen hemminger | 15a863b | 2017-02-27 10:26:49 -0800 | [diff] [blame] | 1540 | |
stephen hemminger | f4f1c23 | 2017-03-22 14:50:57 -0700 | [diff] [blame] | 1541 | 	/* If starting a new poll interval, begin at the first descriptor */
| 1542 | if (!nvchan->desc) |
| 1543 | nvchan->desc = hv_pkt_iter_first(channel); |
stephen hemminger | 15a863b | 2017-02-27 10:26:49 -0800 | [diff] [blame] | 1544 | |
stephen hemminger | f4f1c23 | 2017-03-22 14:50:57 -0700 | [diff] [blame] | 1545 | while (nvchan->desc && work_done < budget) { |
Haiyang Zhang | c8e4eff | 2018-09-21 18:20:35 +0000 | [diff] [blame] | 1546 | work_done += netvsc_process_raw_pkt(device, nvchan, net_device, |
stephen hemminger | f964543 | 2017-04-07 14:41:19 -0400 | [diff] [blame] | 1547 | ndev, nvchan->desc, budget); |
stephen hemminger | f4f1c23 | 2017-03-22 14:50:57 -0700 | [diff] [blame] | 1548 | nvchan->desc = hv_pkt_iter_next(channel, nvchan->desc); |
stephen hemminger | 15a863b | 2017-02-27 10:26:49 -0800 | [diff] [blame] | 1549 | } |
stephen hemminger | 15a863b | 2017-02-27 10:26:49 -0800 | [diff] [blame] | 1550 | |
Haiyang Zhang | 6b81b19 | 2018-07-17 17:11:13 +0000 | [diff] [blame] | 1551 | /* Send any pending receive completions */ |
| 1552 | ret = send_recv_completions(ndev, net_device, nvchan); |
| 1553 | |
| 1554 | 	/* If this poll did not exhaust the NAPI budget and we are not busy
| 1555 | 	 * polling, re-enable host interrupts, and reschedule if the ring is
| 1556 | 	 * not empty or sending a receive completion failed.
stephen hemminger | 262b7f1 | 2017-03-16 16:12:38 -0700 | [diff] [blame] | 1559 | */ |
Haiyang Zhang | 6b81b19 | 2018-07-17 17:11:13 +0000 | [diff] [blame] | 1560 | if (work_done < budget && |
stephen hemminger | 15a863b | 2017-02-27 10:26:49 -0800 | [diff] [blame] | 1561 | napi_complete_done(napi, work_done) && |
Haiyang Zhang | 6b81b19 | 2018-07-17 17:11:13 +0000 | [diff] [blame] | 1562 | (ret || hv_end_read(&channel->inbound)) && |
Stephen Hemminger | d64e38a | 2018-03-02 13:49:05 -0800 | [diff] [blame] | 1563 | napi_schedule_prep(napi)) { |
stephen hemminger | 7426b1a | 2017-07-28 08:59:45 -0700 | [diff] [blame] | 1564 | hv_begin_read(&channel->inbound); |
Stephen Hemminger | d64e38a | 2018-03-02 13:49:05 -0800 | [diff] [blame] | 1565 | __napi_schedule(napi); |
stephen hemminger | 7426b1a | 2017-07-28 08:59:45 -0700 | [diff] [blame] | 1566 | } |
stephen hemminger | f4f1c23 | 2017-03-22 14:50:57 -0700 | [diff] [blame] | 1567 | |
| 1568 | 	/* The driver may overshoot the budget since one descriptor can carry multiple packets */
| 1569 | return min(work_done, budget); |
K. Y. Srinivasan | 99a50bb | 2016-07-05 16:52:46 -0700 | [diff] [blame] | 1570 | } |
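
/*
 * Editor's note (illustrative): the completion sequence above closes the
 * classic NAPI re-arm race. napi_complete_done() only succeeds if no new
 * schedule request arrived meanwhile; hv_end_read() then re-enables the
 * host interrupt and returns non-zero if data slipped into the ring before
 * the interrupt was back on. In that case (or when a receive completion is
 * still pending), the interrupt is masked again and the poll is rescheduled
 * rather than lost.
 */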
| 1571 | |
stephen hemminger | 262b7f1 | 2017-03-16 16:12:38 -0700 | [diff] [blame] | 1572 | /* Call back when data is available in host ring buffer. |
| 1573 | * Processing is deferred until network softirq (NAPI) |
| 1574 | */ |
Haiyang Zhang | 5b54dac | 2014-04-21 10:20:28 -0700 | [diff] [blame] | 1575 | void netvsc_channel_cb(void *context) |
Hank Janssen | fceaf24 | 2009-07-13 15:34:54 -0700 | [diff] [blame] | 1576 | { |
stephen hemminger | 6de38af | 2017-03-16 16:12:37 -0700 | [diff] [blame] | 1577 | struct netvsc_channel *nvchan = context; |
stephen hemminger | 43bf99c | 2017-07-24 10:57:27 -0700 | [diff] [blame] | 1578 | struct vmbus_channel *channel = nvchan->channel; |
| 1579 | struct hv_ring_buffer_info *rbi = &channel->inbound; |
| 1580 | |
| 1581 | /* preload first vmpacket descriptor */ |
| 1582 | prefetch(hv_get_ring_buffer(rbi) + rbi->priv_read_index); |
stephen hemminger | 0b307eb | 2017-01-24 13:05:58 -0800 | [diff] [blame] | 1583 | |
stephen hemminger | f4f1c23 | 2017-03-22 14:50:57 -0700 | [diff] [blame] | 1584 | if (napi_schedule_prep(&nvchan->napi)) { |
Adrian Vladu | 52d3b49 | 2019-01-03 19:43:08 +0000 | [diff] [blame] | 1585 | /* disable interrupts from host */ |
stephen hemminger | 43bf99c | 2017-07-24 10:57:27 -0700 | [diff] [blame] | 1586 | hv_begin_read(rbi); |
stephen hemminger | 0d6dd35 | 2017-03-09 15:04:14 -0800 | [diff] [blame] | 1587 | |
Stephen Hemminger | 68633ed | 2018-03-02 13:49:06 -0800 | [diff] [blame] | 1588 | __napi_schedule_irqoff(&nvchan->napi); |
stephen hemminger | f4f1c23 | 2017-03-22 14:50:57 -0700 | [diff] [blame] | 1589 | } |
Hank Janssen | fceaf24 | 2009-07-13 15:34:54 -0700 | [diff] [blame] | 1590 | } |
Haiyang Zhang | af24ce4 | 2011-04-21 12:30:40 -0700 | [diff] [blame] | 1591 | |
| 1592 | /* |
Haiyang Zhang | b637e02 | 2011-04-21 12:30:45 -0700 | [diff] [blame] | 1593 | * netvsc_device_add - Callback when the device belonging to this |
| 1594 | * driver is added |
| 1595 | */ |
stephen hemminger | 9749fed | 2017-07-19 11:53:16 -0700 | [diff] [blame] | 1596 | struct netvsc_device *netvsc_device_add(struct hv_device *device, |
| 1597 | const struct netvsc_device_info *device_info) |
Haiyang Zhang | b637e02 | 2011-04-21 12:30:45 -0700 | [diff] [blame] | 1598 | { |
Vitaly Kuznetsov | 8809883 | 2016-05-13 13:55:25 +0200 | [diff] [blame] | 1599 | int i, ret = 0; |
Haiyang Zhang | b637e02 | 2011-04-21 12:30:45 -0700 | [diff] [blame] | 1600 | struct netvsc_device *net_device; |
Vitaly Kuznetsov | 8809883 | 2016-05-13 13:55:25 +0200 | [diff] [blame] | 1601 | struct net_device *ndev = hv_get_drvdata(device); |
| 1602 | struct net_device_context *net_device_ctx = netdev_priv(ndev); |
Haiyang Zhang | b637e02 | 2011-04-21 12:30:45 -0700 | [diff] [blame] | 1603 | |
Vitaly Kuznetsov | 8809883 | 2016-05-13 13:55:25 +0200 | [diff] [blame] | 1604 | net_device = alloc_net_device(); |
Dan Carpenter | b1c8492 | 2014-09-04 14:11:23 +0300 | [diff] [blame] | 1605 | if (!net_device) |
stephen hemminger | 9749fed | 2017-07-19 11:53:16 -0700 | [diff] [blame] | 1606 | return ERR_PTR(-ENOMEM); |
Haiyang Zhang | b637e02 | 2011-04-21 12:30:45 -0700 | [diff] [blame] | 1607 | |
Haiyang Zhang | 6b0cbe3 | 2017-10-13 12:28:05 -0700 | [diff] [blame] | 1608 | for (i = 0; i < VRSS_SEND_TAB_SIZE; i++) |
| 1609 | net_device_ctx->tx_table[i] = 0; |
| 1610 | |
stephen hemminger | 15a863b | 2017-02-27 10:26:49 -0800 | [diff] [blame] | 1611 | /* Because the device uses NAPI, all interrupt batching and
| 1612 |  * control is done by the network softirq, not by the channel callback.
| 1613 |  */
| 1614 | set_channel_read_mode(device->channel, HV_CALL_ISR); |
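| | /* HV_CALL_ISR: vmbus calls netvsc_channel_cb() directly from the ISR
| |  * instead of deferring to a batched tasklet; the callback only
| |  * schedules NAPI, so hard-interrupt time stays minimal.
| |  */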
| 1615 | |
K. Y. Srinivasan | bffb184 | 2017-04-06 14:59:21 -0700 | [diff] [blame] | 1616 | /* If we're reopening the device, we may have multiple queues; fill the
| 1617 |  * chn_table with the default channel so it can be used before the
| 1618 |  * subchannels are opened.
| 1619 |  * Initialize the channel state before opening: an interrupt can
| 1620 |  * arrive as soon as the channel is opened.
| 1621 |  */
| 1622 | |
| 1623 | for (i = 0; i < VRSS_CHANNEL_MAX; i++) { |
| 1624 | struct netvsc_channel *nvchan = &net_device->chan_table[i]; |
| 1625 | |
| 1626 | nvchan->channel = device->channel; |
stephen hemminger | 35fbbcc | 2017-07-19 11:53:18 -0700 | [diff] [blame] | 1627 | nvchan->net_device = net_device; |
Florian Fainelli | 4a0dee1 | 2017-08-01 12:11:12 -0700 | [diff] [blame] | 1628 | u64_stats_init(&nvchan->tx_stats.syncp); |
| 1629 | u64_stats_init(&nvchan->rx_stats.syncp); |
Haiyang Zhang | 351e158 | 2020-01-23 13:52:34 -0800 | [diff] [blame] | 1630 | |
Björn Töpel | b02e5a0 | 2020-11-30 19:52:01 +0100 | [diff] [blame] | 1631 | ret = xdp_rxq_info_reg(&nvchan->xdp_rxq, ndev, i, 0); |
Haiyang Zhang | 351e158 | 2020-01-23 13:52:34 -0800 | [diff] [blame] | 1632 | |
| 1633 | if (ret) { |
| 1634 | netdev_err(ndev, "xdp_rxq_info_reg fail: %d\n", ret); |
| 1635 | goto cleanup2; |
| 1636 | } |
| 1637 | |
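| | /* Receive buffers here are ordinary refcounted pages, so the default
| |  * MEM_TYPE_PAGE_SHARED model (rather than a page_pool) is registered
| |  * for returning XDP frames.
| |  */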
| 1638 | ret = xdp_rxq_info_reg_mem_model(&nvchan->xdp_rxq, |
| 1639 | MEM_TYPE_PAGE_SHARED, NULL); |
| 1640 | |
| 1641 | if (ret) { |
| 1642 | netdev_err(ndev, "xdp reg_mem_model fail: %d\n", ret); |
| 1643 | goto cleanup2; |
| 1644 | } |
K. Y. Srinivasan | bffb184 | 2017-04-06 14:59:21 -0700 | [diff] [blame] | 1645 | } |
| 1646 | |
stephen hemminger | 2be0f26 | 2017-05-03 16:59:21 -0700 | [diff] [blame] | 1647 | /* Register the NAPI handler before init callbacks; it is enabled
| |  * via napi_enable() only after vmbus_open() succeeds. */
| 1648 | netif_napi_add(ndev, &net_device->chan_table[0].napi, |
| 1649 | netvsc_poll, NAPI_POLL_WEIGHT); |
| 1650 | |
Haiyang Zhang | b637e02 | 2011-04-21 12:30:45 -0700 | [diff] [blame] | 1651 | /* Open the channel */ |
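| | /* Size the VMBus requestor first: outstanding sends get request IDs
| |  * from this table instead of raw guest pointers, so completions
| |  * echoed back by the host can be validated.
| |  */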
Andres Beltran | 4d18fcc | 2020-11-09 11:04:02 +0100 | [diff] [blame] | 1652 | device->channel->rqstor_size = netvsc_rqstor_size(netvsc_ring_bytes); |
Stephen Hemminger | a7f99d0 | 2017-12-01 11:01:47 -0800 | [diff] [blame] | 1653 | ret = vmbus_open(device->channel, netvsc_ring_bytes, |
| 1654 | netvsc_ring_bytes, NULL, 0, |
| 1655 | netvsc_channel_cb, net_device->chan_table); |
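| | /* net_device->chan_table decays to &chan_table[0]; the primary
| |  * channel's netvsc_channel is thus the callback context above.
| |  */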
Haiyang Zhang | b637e02 | 2011-04-21 12:30:45 -0700 | [diff] [blame] | 1656 | |
| 1657 | if (ret != 0) { |
Haiyang Zhang | d987115 | 2011-09-01 12:19:41 -0700 | [diff] [blame] | 1658 | netdev_err(ndev, "unable to open channel: %d\n", ret); |
Haiyang Zhang | b637e02 | 2011-04-21 12:30:45 -0700 | [diff] [blame] | 1659 | goto cleanup; |
| 1660 | } |
| 1661 | |
| 1662 | /* Channel is opened */ |
Vitaly Kuznetsov | 93ba222 | 2016-11-28 18:25:44 +0100 | [diff] [blame] | 1663 | netdev_dbg(ndev, "hv_netvsc channel opened successfully\n"); |
Haiyang Zhang | b637e02 | 2011-04-21 12:30:45 -0700 | [diff] [blame] | 1664 | |
stephen hemminger | 15a863b | 2017-02-27 10:26:49 -0800 | [diff] [blame] | 1665 | napi_enable(&net_device->chan_table[0].napi); |
Vitaly Kuznetsov | 8809883 | 2016-05-13 13:55:25 +0200 | [diff] [blame] | 1666 | |
Haiyang Zhang | b637e02 | 2011-04-21 12:30:45 -0700 | [diff] [blame] | 1667 | /* Connect with the NetVSP */
stephen hemminger | 8b53279 | 2017-08-09 17:46:11 -0700 | [diff] [blame] | 1668 | ret = netvsc_connect_vsp(device, net_device, device_info); |
Haiyang Zhang | b637e02 | 2011-04-21 12:30:45 -0700 | [diff] [blame] | 1669 | if (ret != 0) { |
Haiyang Zhang | d987115 | 2011-09-01 12:19:41 -0700 | [diff] [blame] | 1670 | netdev_err(ndev, |
Haiyang Zhang | c909ebb | 2011-09-01 12:19:40 -0700 | [diff] [blame] | 1671 | "unable to connect to NetVSP - %d\n", ret); |
Haiyang Zhang | b637e02 | 2011-04-21 12:30:45 -0700 | [diff] [blame] | 1672 | goto close; |
| 1673 | } |
| 1674 | |
Stephen Hemminger | 12f6966 | 2018-03-02 13:49:01 -0800 | [diff] [blame] | 1675 | /* Publishing the nvdev pointer unblocks netvsc_send(); rcu_assign_pointer()
| 1676 |  * orders the store so chn_table is seen as populated first.
| 1677 |  */
| 1678 | rcu_assign_pointer(net_device_ctx->nvdev, net_device); |
| 1679 | |
stephen hemminger | 9749fed | 2017-07-19 11:53:16 -0700 | [diff] [blame] | 1680 | return net_device; |
Haiyang Zhang | b637e02 | 2011-04-21 12:30:45 -0700 | [diff] [blame] | 1681 | |
| 1682 | close: |
stephen hemminger | 4939334 | 2017-07-28 08:59:46 -0700 | [diff] [blame] | 1683 | RCU_INIT_POINTER(net_device_ctx->nvdev, NULL); |
| 1684 | napi_disable(&net_device->chan_table[0].napi); |
stephen hemminger | 15a863b | 2017-02-27 10:26:49 -0800 | [diff] [blame] | 1685 | |
Haiyang Zhang | b637e02 | 2011-04-21 12:30:45 -0700 | [diff] [blame] | 1686 | /* Now, we can close the channel safely */ |
| 1687 | vmbus_close(device->channel); |
| 1688 | |
| 1689 | cleanup: |
Stephen Hemminger | fcfb4a0 | 2018-03-02 13:49:03 -0800 | [diff] [blame] | 1690 | netif_napi_del(&net_device->chan_table[0].napi); |
Haiyang Zhang | 351e158 | 2020-01-23 13:52:34 -0800 | [diff] [blame] | 1691 | |
| 1692 | cleanup2: |
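| | /* Never published (or already unpublished) via nvdev, so no RCU
| |  * readers exist; free_netvsc_device(), normally an RCU callback,
| |  * is safe to invoke directly.
| |  */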
stephen hemminger | 545a8e7 | 2017-03-22 14:51:00 -0700 | [diff] [blame] | 1693 | free_netvsc_device(&net_device->rcu); |
Haiyang Zhang | b637e02 | 2011-04-21 12:30:45 -0700 | [diff] [blame] | 1694 | |
stephen hemminger | 9749fed | 2017-07-19 11:53:16 -0700 | [diff] [blame] | 1695 | return ERR_PTR(ret); |
Haiyang Zhang | b637e02 | 2011-04-21 12:30:45 -0700 | [diff] [blame] | 1696 | } |
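| |
| | /* Hypothetical caller sketch (assumes an int-returning caller):
| |  * netvsc_device_add() returns a valid device or an ERR_PTR(), never
| |  * NULL, so the result is checked with IS_ERR():
| |  *
| |  *   struct netvsc_device *nvdev;
| |  *
| |  *   nvdev = netvsc_device_add(device, device_info);
| |  *   if (IS_ERR(nvdev))
| |  *           return PTR_ERR(nvdev);
| |  */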