// SPDX-License-Identifier: GPL-2.0
/* XDP user-space packet buffer
 * Copyright(c) 2018 Intel Corporation.
 */

#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/idr.h>
#include <linux/vmalloc.h>

#include "xdp_umem.h"
#include "xsk_queue.h"

#define XDP_UMEM_MIN_CHUNK_SIZE 2048

static DEFINE_IDA(umem_ida);

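/* Add a socket with a TX ring to the umem's socket list. Sockets
 * without a TX ring are never looked up on this list, so they are
 * skipped. Writers serialize on xsk_list_lock; readers traverse the
 * list under RCU.
 */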
void xdp_add_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs)
{
	unsigned long flags;

	if (!xs->tx)
		return;

	spin_lock_irqsave(&umem->xsk_list_lock, flags);
	list_add_rcu(&xs->list, &umem->xsk_list);
	spin_unlock_irqrestore(&umem->xsk_list_lock, flags);
}

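/* Remove a socket from the umem's socket list. The RCU removal pairs
 * with the RCU-protected readers, mirroring xdp_add_sk_umem().
 */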
void xdp_del_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs)
{
	unsigned long flags;

	if (!xs->tx)
		return;

	spin_lock_irqsave(&umem->xsk_list_lock, flags);
	list_del_rcu(&xs->list);
	spin_unlock_irqrestore(&umem->xsk_list_lock, flags);
}

/* The umem is stored both in the _rx struct and the _tx struct as we do
 * not know if the device has more tx queues than rx, or the opposite.
 * This might also change during run time.
 */
static int xdp_reg_umem_at_qid(struct net_device *dev, struct xdp_umem *umem,
			       u16 queue_id)
{
	if (queue_id >= max_t(unsigned int,
			      dev->real_num_rx_queues,
			      dev->real_num_tx_queues))
		return -EINVAL;

	if (queue_id < dev->real_num_rx_queues)
		dev->_rx[queue_id].umem = umem;
	if (queue_id < dev->real_num_tx_queues)
		dev->_tx[queue_id].umem = umem;

	return 0;
}

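/* Look up the umem bound at @queue_id, if any. Checking the RX side
 * first is sufficient: xdp_reg_umem_at_qid() stores the same pointer
 * in both _rx and _tx for queue ids valid on both sides. Exported so
 * that drivers can query the bound umem at setup time.
 */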
struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev,
				       u16 queue_id)
{
	if (queue_id < dev->real_num_rx_queues)
		return dev->_rx[queue_id].umem;
	if (queue_id < dev->real_num_tx_queues)
		return dev->_tx[queue_id].umem;

	return NULL;
}
EXPORT_SYMBOL(xdp_get_umem_from_qid);

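/* Undo xdp_reg_umem_at_qid(): clear the umem pointer on both the RX
 * and TX side for @queue_id.
 */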
static void xdp_clear_umem_at_qid(struct net_device *dev, u16 queue_id)
{
	if (queue_id < dev->real_num_rx_queues)
		dev->_rx[queue_id].umem = NULL;
	if (queue_id < dev->real_num_tx_queues)
		dev->_tx[queue_id].umem = NULL;
}

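/* Bind a umem to a device and queue id; called with the rtnl lock held.
 * @flags carries the XDP_COPY/XDP_ZEROCOPY/XDP_USE_NEED_WAKEUP bind
 * flags from userspace. For XDP_COPY we are done once the umem is
 * registered at the queue id; otherwise zero-copy support is negotiated
 * with the driver through ndo_bpf(XDP_SETUP_XSK_UMEM), falling back to
 * copy mode unless zero-copy was explicitly requested.
 */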
int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
			u16 queue_id, u16 flags)
{
	bool force_zc, force_copy;
	struct netdev_bpf bpf;
	int err = 0;

	ASSERT_RTNL();

	force_zc = flags & XDP_ZEROCOPY;
	force_copy = flags & XDP_COPY;

	if (force_zc && force_copy)
		return -EINVAL;

	if (xdp_get_umem_from_qid(dev, queue_id))
		return -EBUSY;

	err = xdp_reg_umem_at_qid(dev, umem, queue_id);
	if (err)
		return err;

	umem->dev = dev;
	umem->queue_id = queue_id;

	if (flags & XDP_USE_NEED_WAKEUP) {
		umem->flags |= XDP_UMEM_USES_NEED_WAKEUP;
		/* Tx needs to be explicitly woken up the first time.
		 * This also covers drivers that do not implement the
		 * feature; they will always have to call sendto().
		 */
		xsk_set_tx_need_wakeup(umem);
	}

	dev_hold(dev);

	if (force_copy)
		/* For copy-mode, we are done. */
		return 0;

	if (!dev->netdev_ops->ndo_bpf || !dev->netdev_ops->ndo_xsk_wakeup) {
		err = -EOPNOTSUPP;
		goto err_unreg_umem;
	}

	bpf.command = XDP_SETUP_XSK_UMEM;
	bpf.xsk.umem = umem;
	bpf.xsk.queue_id = queue_id;

	err = dev->netdev_ops->ndo_bpf(dev, &bpf);
	if (err)
		goto err_unreg_umem;

	umem->zc = true;
	return 0;

err_unreg_umem:
	if (!force_zc)
		err = 0; /* fallback to copy mode */
	if (err)
		xdp_clear_umem_at_qid(dev, queue_id);
	return err;
}

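/* Detach a umem from its device and queue id; called with the rtnl
 * lock held. For zero-copy umems, the driver is told to tear down its
 * state via ndo_bpf(XDP_SETUP_XSK_UMEM) with a NULL umem. Safe to call
 * on an already cleared umem, since umem->dev is NULL then.
 */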
void xdp_umem_clear_dev(struct xdp_umem *umem)
{
	struct netdev_bpf bpf;
	int err;

	ASSERT_RTNL();

	if (!umem->dev)
		return;

	if (umem->zc) {
		bpf.command = XDP_SETUP_XSK_UMEM;
		bpf.xsk.umem = NULL;
		bpf.xsk.queue_id = umem->queue_id;

		err = umem->dev->netdev_ops->ndo_bpf(umem->dev, &bpf);

		if (err)
			WARN(1, "failed to disable umem!\n");
	}

	xdp_clear_umem_at_qid(umem->dev, umem->queue_id);

	dev_put(umem->dev);
	umem->dev = NULL;
	umem->zc = false;
}

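/* Tear down the kernel mappings set up by xdp_umem_map_pages(). Only
 * highmem pages were mapped with vmap(); lowmem pages used their
 * linear kernel address and need no unmapping.
 */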
static void xdp_umem_unmap_pages(struct xdp_umem *umem)
{
	unsigned int i;

	for (i = 0; i < umem->npgs; i++)
		if (PageHighMem(umem->pgs[i]))
			vunmap(umem->pages[i].addr);
}

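/* Record a kernel virtual address for every pinned page. Highmem pages
 * have no linear mapping and are mapped one page at a time with vmap();
 * for lowmem pages, page_address() is enough. On failure, any vmap()ed
 * pages are unmapped again (vunmap(NULL) is a no-op for pages that were
 * never mapped, since umem->pages was zero-allocated).
 */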
static int xdp_umem_map_pages(struct xdp_umem *umem)
{
	unsigned int i;
	void *addr;

	for (i = 0; i < umem->npgs; i++) {
		if (PageHighMem(umem->pgs[i]))
			addr = vmap(&umem->pgs[i], 1, VM_MAP, PAGE_KERNEL);
		else
			addr = page_address(umem->pgs[i]);

		if (!addr) {
			xdp_umem_unmap_pages(umem);
			return -ENOMEM;
		}

		umem->pages[i].addr = addr;
	}

	return 0;
}

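/* Release the pinned user pages, marking them dirty since packet data
 * may have been written into them by the kernel or the device.
 */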
static void xdp_umem_unpin_pages(struct xdp_umem *umem)
{
	put_user_pages_dirty_lock(umem->pgs, umem->npgs, true);

	kfree(umem->pgs);
	umem->pgs = NULL;
}

static void xdp_umem_unaccount_pages(struct xdp_umem *umem)
{
	if (umem->user) {
		atomic_long_sub(umem->npgs, &umem->user->locked_vm);
		free_uid(umem->user);
	}
}

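/* Final teardown, run once the last reference to the umem is dropped:
 * detach from the device under the rtnl lock, release the id, destroy
 * the fill and completion rings, then unmap, unpin and unaccount the
 * user memory.
 */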
static void xdp_umem_release(struct xdp_umem *umem)
{
	rtnl_lock();
	xdp_umem_clear_dev(umem);
	rtnl_unlock();

	ida_simple_remove(&umem_ida, umem->id);

	if (umem->fq) {
		xskq_destroy(umem->fq);
		umem->fq = NULL;
	}

	if (umem->cq) {
		xskq_destroy(umem->cq);
		umem->cq = NULL;
	}

	xsk_reuseq_destroy(umem);

	xdp_umem_unmap_pages(umem);
	xdp_umem_unpin_pages(umem);

	kfree(umem->pages);
	umem->pages = NULL;

	xdp_umem_unaccount_pages(umem);
	kfree(umem);
}

static void xdp_umem_release_deferred(struct work_struct *work)
{
	struct xdp_umem *umem = container_of(work, struct xdp_umem, work);

	xdp_umem_release(umem);
}

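/* Grab a reference on the umem. Paired with xdp_put_umem(). */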
void xdp_get_umem(struct xdp_umem *umem)
{
	refcount_inc(&umem->users);
}

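/* Drop a reference; the last put frees the umem. The release is
 * deferred to a workqueue since it takes the rtnl lock and may sleep,
 * which the caller's context may not allow.
 */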
void xdp_put_umem(struct xdp_umem *umem)
{
	if (!umem)
		return;

	if (refcount_dec_and_test(&umem->users)) {
		INIT_WORK(&umem->work, xdp_umem_release_deferred);
		schedule_work(&umem->work);
	}
}

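/* Pin the user memory so that it cannot be moved or swapped out while
 * packets are copied or DMA'd in and out of it. FOLL_LONGTERM is used
 * since the pages stay pinned for the lifetime of the umem. On a short
 * pin, the pages that were pinned are released and -ENOMEM returned.
 */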
static int xdp_umem_pin_pages(struct xdp_umem *umem)
{
	unsigned int gup_flags = FOLL_WRITE;
	long npgs;
	int err;

	umem->pgs = kcalloc(umem->npgs, sizeof(*umem->pgs),
			    GFP_KERNEL | __GFP_NOWARN);
	if (!umem->pgs)
		return -ENOMEM;

	down_read(&current->mm->mmap_sem);
	npgs = get_user_pages(umem->address, umem->npgs,
			      gup_flags | FOLL_LONGTERM, &umem->pgs[0], NULL);
	up_read(&current->mm->mmap_sem);

	if (npgs != umem->npgs) {
		if (npgs >= 0) {
			umem->npgs = npgs;
			err = -ENOMEM;
			goto out_pin;
		}
		err = npgs;
		goto out_pgs;
	}
	return 0;

out_pin:
	xdp_umem_unpin_pages(umem);
out_pgs:
	kfree(umem->pgs);
	umem->pgs = NULL;
	return err;
}

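/* Charge the pinned pages against the user's RLIMIT_MEMLOCK, unless
 * the caller has CAP_IPC_LOCK. The cmpxchg loop makes the limit check
 * and the update atomic against concurrent chargers on the same user.
 */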
static int xdp_umem_account_pages(struct xdp_umem *umem)
{
	unsigned long lock_limit, new_npgs, old_npgs;

	if (capable(CAP_IPC_LOCK))
		return 0;

	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	umem->user = get_uid(current_user());

	do {
		old_npgs = atomic_long_read(&umem->user->locked_vm);
		new_npgs = old_npgs + umem->npgs;
		if (new_npgs > lock_limit) {
			free_uid(umem->user);
			umem->user = NULL;
			return -ENOBUFS;
		}
	} while (atomic_long_cmpxchg(&umem->user->locked_vm, old_npgs,
				     new_npgs) != old_npgs);
	return 0;
}

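/* Validate the XDP_UMEM_REG request and set up the umem accordingly:
 * account, pin and map the user pages. Chunks must fit in a page and,
 * unless XDP_UMEM_UNALIGNED_CHUNK_FLAG is set, be a power of two that
 * evenly divides the page. As a worked example (numbers hypothetical):
 * registering len = 64KiB at a page-aligned addr with chunk_size = 2048
 * and headroom = 0 gives chunks = 32, chunks_per_page = 2 and
 * npgs = 16 on a system with 4KiB pages.
 */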
static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
{
	bool unaligned_chunks = mr->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG;
	u32 chunk_size = mr->chunk_size, headroom = mr->headroom;
	unsigned int chunks, chunks_per_page;
	u64 addr = mr->addr, size = mr->len;
	int size_chk, err;

	if (chunk_size < XDP_UMEM_MIN_CHUNK_SIZE || chunk_size > PAGE_SIZE) {
		/* Strictly speaking we could support this, if:
		 * - huge pages, or
		 * - using an IOMMU, or
		 * - making sure the memory area is consecutive
		 * but for now, we simply say "computer says no".
		 */
		return -EINVAL;
	}

	if (mr->flags & ~(XDP_UMEM_UNALIGNED_CHUNK_FLAG |
			XDP_UMEM_USES_NEED_WAKEUP))
		return -EINVAL;

	if (!unaligned_chunks && !is_power_of_2(chunk_size))
		return -EINVAL;

	if (!PAGE_ALIGNED(addr)) {
		/* Memory area has to be page size aligned. This is
		 * required for simplicity and might change in the future.
		 */
		return -EINVAL;
	}

	if ((addr + size) < addr)
		return -EINVAL;

	chunks = (unsigned int)div_u64(size, chunk_size);
	if (chunks == 0)
		return -EINVAL;

	if (!unaligned_chunks) {
		chunks_per_page = PAGE_SIZE / chunk_size;
		if (chunks < chunks_per_page || chunks % chunks_per_page)
			return -EINVAL;
	}

	size_chk = chunk_size - headroom - XDP_PACKET_HEADROOM;
	if (size_chk < 0)
		return -EINVAL;

	umem->address = (unsigned long)addr;
	umem->chunk_mask = unaligned_chunks ? XSK_UNALIGNED_BUF_ADDR_MASK
					    : ~((u64)chunk_size - 1);
	umem->size = size;
	umem->headroom = headroom;
	umem->chunk_size_nohr = chunk_size - headroom;
	umem->npgs = size / PAGE_SIZE;
	umem->pgs = NULL;
	umem->user = NULL;
	umem->flags = mr->flags;
	INIT_LIST_HEAD(&umem->xsk_list);
	spin_lock_init(&umem->xsk_list_lock);

	refcount_set(&umem->users, 1);

	err = xdp_umem_account_pages(umem);
	if (err)
		return err;

	err = xdp_umem_pin_pages(umem);
	if (err)
		goto out_account;

	umem->pages = kcalloc(umem->npgs, sizeof(*umem->pages), GFP_KERNEL);
	if (!umem->pages) {
		err = -ENOMEM;
		goto out_pin;
	}

	err = xdp_umem_map_pages(umem);
	if (!err)
		return 0;

	kfree(umem->pages);

out_pin:
	xdp_umem_unpin_pages(umem);
out_account:
	xdp_umem_unaccount_pages(umem);
	return err;
}

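/* Allocate a umem, assign it an id and register the user memory
 * described by @mr. Returns the umem or an ERR_PTR() on failure.
 */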
struct xdp_umem *xdp_umem_create(struct xdp_umem_reg *mr)
{
	struct xdp_umem *umem;
	int err;

	umem = kzalloc(sizeof(*umem), GFP_KERNEL);
	if (!umem)
		return ERR_PTR(-ENOMEM);

	err = ida_simple_get(&umem_ida, 0, 0, GFP_KERNEL);
	if (err < 0) {
		kfree(umem);
		return ERR_PTR(err);
	}
	umem->id = err;

	err = xdp_umem_reg(umem, mr);
	if (err) {
		ida_simple_remove(&umem_ida, umem->id);
		kfree(umem);
		return ERR_PTR(err);
	}

	return umem;
}

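/* A umem is ready for bind only once userspace has created both the
 * fill ring and the completion ring.
 */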
bool xdp_umem_validate_queues(struct xdp_umem *umem)
{
	return umem->fq && umem->cq;
}