// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2019, Microsoft Corporation.
 *
 * Author:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/kernel.h>
#include <net/xdp.h>

#include <linux/mutex.h>
#include <linux/rtnetlink.h>

#include "hyperv_net.h"

/* Run the attached XDP program, if any, on an incoming packet.
 *
 * The received data is copied out of the receive buffer into a freshly
 * allocated page, giving the program writable memory with NETVSC_XDP_HDRM
 * bytes of headroom, which the synthetic receive buffer does not provide.
 * Only the first fragment (rsc.data[0]) is examined.
 */
u32 netvsc_run_xdp(struct net_device *ndev, struct netvsc_channel *nvchan,
		   struct xdp_buff *xdp)
{
	void *data = nvchan->rsc.data[0];
	u32 len = nvchan->rsc.len[0];
	struct page *page = NULL;
	struct bpf_prog *prog;
	u32 act = XDP_PASS;

	xdp->data_hard_start = NULL;

	rcu_read_lock();
	prog = rcu_dereference(nvchan->bpf_prog);

	if (!prog)
		goto out;

	/* allocate page buffer for data */
	page = alloc_page(GFP_ATOMIC);
	if (!page) {
		act = XDP_DROP;
		goto out;
	}

	xdp->data_hard_start = page_address(page);
	xdp->data = xdp->data_hard_start + NETVSC_XDP_HDRM;
	xdp_set_data_meta_invalid(xdp);
	xdp->data_end = xdp->data + len;
	xdp->rxq = &nvchan->xdp_rxq;
	xdp->handle = 0;

	memcpy(xdp->data, data, len);

	act = bpf_prog_run_xdp(prog, xdp);

	switch (act) {
	case XDP_PASS:
	case XDP_TX:
	case XDP_DROP:
		break;

	case XDP_ABORTED:
		trace_xdp_exception(ndev, prog, act);
		break;

	default:
		bpf_warn_invalid_xdp_action(act);
	}

out:
	rcu_read_unlock();

	/* Keep the page only when the caller still needs the copied data
	 * (XDP_PASS to build an skb, XDP_TX to transmit it back).
	 */
	if (page && act != XDP_PASS && act != XDP_TX) {
		__free_page(page);
		xdp->data_hard_start = NULL;
	}

	return act;
}
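
/* Simplified sketch of how an RX-path caller is expected to consume the
 * verdict (illustrative only; names follow netvsc_drv.c but the details
 * are elided):
 *
 *	struct xdp_buff xdp;
 *	u32 act = netvsc_run_xdp(net, nvchan, &xdp);
 *
 *	if (act != XDP_PASS && act != XDP_TX)
 *		return NVSP_STAT_SUCCESS;	(packet dropped or aborted)
 *	...otherwise build an skb from xdp.data when data_hard_start is
 *	set, or from the original receive buffer when no program ran...
 */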

/* Worst-case buffer size for a linear data area of @len bytes, once the
 * trailing skb_shared_info needed to build an skb around it is included.
 */
unsigned int netvsc_xdp_fraglen(unsigned int len)
{
	return SKB_DATA_ALIGN(len) +
	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}
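
/* Worked example (illustrative; the exact values depend on the arch and
 * kernel config): with a 1500-byte MTU, the largest frame netvsc_xdp_set()
 * must fit is 1500 + ETH_HLEN (14) = 1514 bytes.  On x86_64 with
 * SMP_CACHE_BYTES == 64 and sizeof(struct skb_shared_info) == 320:
 *
 *	SKB_DATA_ALIGN(1514)		= 1536
 *	SKB_DATA_ALIGN(320)		=  320
 *	netvsc_xdp_fraglen(1514)	= 1856
 *	1856 + NETVSC_XDP_HDRM (256)	= 2112, which fits in a 4096-byte
 *	page, so the attach is allowed.
 */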

/* All channels share one program; channel 0's pointer is authoritative. */
struct bpf_prog *netvsc_xdp_get(struct netvsc_device *nvdev)
{
	return rtnl_dereference(nvdev->chan_table[0].bpf_prog);
}

int netvsc_xdp_set(struct net_device *dev, struct bpf_prog *prog,
		   struct netlink_ext_ack *extack,
		   struct netvsc_device *nvdev)
{
	struct bpf_prog *old_prog;
	int buf_max, i;

	old_prog = netvsc_xdp_get(nvdev);

	if (!old_prog && !prog)
		return 0;

	/* The copied frame plus headroom and skb_shared_info overhead must
	 * fit in the single page allocated by netvsc_run_xdp().
	 */
	buf_max = NETVSC_XDP_HDRM + netvsc_xdp_fraglen(dev->mtu + ETH_HLEN);
	if (prog && buf_max > PAGE_SIZE) {
		netdev_err(dev, "XDP: mtu:%u too large, buf_max:%u\n",
			   dev->mtu, buf_max);
		NL_SET_ERR_MSG_MOD(extack, "XDP: mtu too large");

		return -EOPNOTSUPP;
	}

	if (prog && (dev->features & NETIF_F_LRO)) {
		netdev_err(dev, "XDP: not supported while LRO is enabled\n");
		NL_SET_ERR_MSG_MOD(extack, "XDP: not supported while LRO is enabled");

		return -EOPNOTSUPP;
	}

	/* Take one reference per channel; each channel's reference is
	 * dropped when its pointer is later replaced.
	 */
	if (prog)
		bpf_prog_add(prog, nvdev->num_chn);

	for (i = 0; i < nvdev->num_chn; i++)
		rcu_assign_pointer(nvdev->chan_table[i].bpf_prog, prog);

	if (old_prog)
		for (i = 0; i < nvdev->num_chn; i++)
			bpf_prog_put(old_prog);

	return 0;
}
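
/* For illustration, a minimal XDP program that could be attached through
 * this path (hypothetical example, not part of the driver).  It would be
 * compiled with clang -target bpf and loaded with, e.g.:
 *
 *	ip link set dev eth0 xdp obj xdp_drop.o sec xdp
 *
 *	SEC("xdp")
 *	int xdp_drop_all(struct xdp_md *ctx)
 *	{
 *		return XDP_DROP;
 *	}
 */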

/* Mirror the synthetic device's XDP program onto the slaved VF netdev, if
 * present, so traffic arriving on the accelerated (SR-IOV) path sees the
 * same program.
 */
int netvsc_vf_setxdp(struct net_device *vf_netdev, struct bpf_prog *prog)
{
	struct netdev_bpf xdp;
	bpf_op_t ndo_bpf;

	ASSERT_RTNL();

	if (!vf_netdev)
		return 0;

	ndo_bpf = vf_netdev->netdev_ops->ndo_bpf;
	if (!ndo_bpf)
		return 0;

	memset(&xdp, 0, sizeof(xdp));

	xdp.command = XDP_SETUP_PROG;
	xdp.prog = prog;

	return ndo_bpf(vf_netdev, &xdp);
}
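
/* Sketch of an assumed caller (for illustration): when a VF is hot-added,
 * the driver can re-apply the currently attached program so the VF data
 * path stays consistent with the synthetic one:
 *
 *	prog = netvsc_xdp_get(nvdev);
 *	netvsc_vf_setxdp(vf_netdev, prog);
 */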

static u32 netvsc_xdp_query(struct netvsc_device *nvdev)
{
	struct bpf_prog *prog = netvsc_xdp_get(nvdev);

	if (prog)
		return prog->aux->id;

	return 0;
}

/* ndo_bpf handler for the netvsc device: attach, detach, and query XDP
 * programs.  A program set here is also propagated to the slaved VF, and
 * the synthetic attach is rolled back if the VF rejects it.
 */
int netvsc_bpf(struct net_device *dev, struct netdev_bpf *bpf)
{
	struct net_device_context *ndevctx = netdev_priv(dev);
	struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
	struct net_device *vf_netdev = rtnl_dereference(ndevctx->vf_netdev);
	struct netlink_ext_ack *extack = bpf->extack;
	int ret;

	if (!nvdev || nvdev->destroy) {
		if (bpf->command == XDP_QUERY_PROG) {
			bpf->prog_id = 0;
			return 0; /* Query must always succeed */
		} else {
			return -ENODEV;
		}
	}

	switch (bpf->command) {
	case XDP_SETUP_PROG:
		ret = netvsc_xdp_set(dev, bpf->prog, extack, nvdev);

		if (ret)
			return ret;

		ret = netvsc_vf_setxdp(vf_netdev, bpf->prog);

		if (ret) {
			netdev_err(dev, "vf_setxdp failed:%d\n", ret);
			NL_SET_ERR_MSG_MOD(extack, "vf_setxdp failed");

			/* Detach from the synthetic device too, keeping
			 * both data paths in sync.
			 */
			netvsc_xdp_set(dev, NULL, extack, nvdev);
		}

		return ret;

	case XDP_QUERY_PROG:
		bpf->prog_id = netvsc_xdp_query(nvdev);
		return 0;

	default:
		return -EINVAL;
	}
}
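
/* Assumed wiring (sketch, for illustration): netvsc_bpf() is meant to be
 * installed as the device's ndo_bpf callback, e.g. in netvsc_drv.c:
 *
 *	static const struct net_device_ops device_ops = {
 *		...
 *		.ndo_bpf	= netvsc_bpf,
 *	};
 */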