// SPDX-License-Identifier: GPL-2.0
/* XDP user-space packet buffer
 * Copyright(c) 2018 Intel Corporation.
 */

#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/idr.h>
#include <linux/vmalloc.h>

#include "xdp_umem.h"
#include "xsk_queue.h"

#define XDP_UMEM_MIN_CHUNK_SIZE 2048

static DEFINE_IDA(umem_ida);

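/* Release the pinned user pages backing the umem and mark them dirty,
 * since they may have been written to while pinned.
 */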
static void xdp_umem_unpin_pages(struct xdp_umem *umem)
{
	unpin_user_pages_dirty_lock(umem->pgs, umem->npgs, true);

	kfree(umem->pgs);
	umem->pgs = NULL;
}

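/* Undo the RLIMIT_MEMLOCK accounting done at registration time and
 * drop the reference on the owning user.
 */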
static void xdp_umem_unaccount_pages(struct xdp_umem *umem)
{
	if (umem->user) {
		atomic_long_sub(umem->npgs, &umem->user->locked_vm);
		free_uid(umem->user);
	}
}

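/* Tear down the kernel virtual mapping of the umem pages. */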
static void xdp_umem_addr_unmap(struct xdp_umem *umem)
{
	vunmap(umem->addrs);
	umem->addrs = NULL;
}

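/* Map the pinned pages into one contiguous kernel virtual address
 * range so the umem can be accessed linearly from the kernel.
 */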
static int xdp_umem_addr_map(struct xdp_umem *umem, struct page **pages,
			     u32 nr_pages)
{
	umem->addrs = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
	if (!umem->addrs)
		return -ENOMEM;
	return 0;
}

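/* Final teardown of a umem: unmap, unpin and unaccount its pages,
 * release its id and free the structure itself.
 */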
static void xdp_umem_release(struct xdp_umem *umem)
{
	umem->zc = false;
	ida_simple_remove(&umem_ida, umem->id);

	xdp_umem_addr_unmap(umem);
	xdp_umem_unpin_pages(umem);

	xdp_umem_unaccount_pages(umem);
	kfree(umem);
}

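/* Work item wrapper so that the release can be deferred to a workqueue. */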
static void xdp_umem_release_deferred(struct work_struct *work)
{
	struct xdp_umem *umem = container_of(work, struct xdp_umem, work);

	xdp_umem_release(umem);
}

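/* Take a reference on the umem. */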
void xdp_get_umem(struct xdp_umem *umem)
{
	refcount_inc(&umem->users);
}

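/* Drop a reference on the umem. The last reference triggers the release,
 * either immediately or via a work item when defer_cleanup is set.
 */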
void xdp_put_umem(struct xdp_umem *umem, bool defer_cleanup)
{
	if (!umem)
		return;

	if (refcount_dec_and_test(&umem->users)) {
		if (defer_cleanup) {
			INIT_WORK(&umem->work, xdp_umem_release_deferred);
			schedule_work(&umem->work);
		} else {
			xdp_umem_release(umem);
		}
	}
}

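/* Pin the user pages of the registered memory area so that they stay
 * resident for the lifetime of the umem. If only some of the pages can
 * be pinned, they are unpinned again and an error is returned.
 */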
static int xdp_umem_pin_pages(struct xdp_umem *umem, unsigned long address)
{
	unsigned int gup_flags = FOLL_WRITE;
	long npgs;
	int err;

	umem->pgs = kcalloc(umem->npgs, sizeof(*umem->pgs),
			    GFP_KERNEL | __GFP_NOWARN);
	if (!umem->pgs)
		return -ENOMEM;

	mmap_read_lock(current->mm);
	npgs = pin_user_pages(address, umem->npgs,
			      gup_flags | FOLL_LONGTERM, &umem->pgs[0], NULL);
	mmap_read_unlock(current->mm);

	if (npgs != umem->npgs) {
		if (npgs >= 0) {
			umem->npgs = npgs;
			err = -ENOMEM;
			goto out_pin;
		}
		err = npgs;
		goto out_pgs;
	}
	return 0;

out_pin:
	xdp_umem_unpin_pages(umem);
out_pgs:
	kfree(umem->pgs);
	umem->pgs = NULL;
	return err;
}

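/* Charge the umem pages against the owner's RLIMIT_MEMLOCK limit,
 * unless the task has CAP_IPC_LOCK. Returns -ENOBUFS if the limit
 * would be exceeded.
 */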
static int xdp_umem_account_pages(struct xdp_umem *umem)
{
	unsigned long lock_limit, new_npgs, old_npgs;

	if (capable(CAP_IPC_LOCK))
		return 0;

	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	umem->user = get_uid(current_user());

	do {
		old_npgs = atomic_long_read(&umem->user->locked_vm);
		new_npgs = old_npgs + umem->npgs;
		if (new_npgs > lock_limit) {
			free_uid(umem->user);
			umem->user = NULL;
			return -ENOBUFS;
		}
	} while (atomic_long_cmpxchg(&umem->user->locked_vm, old_npgs,
				     new_npgs) != old_npgs);
	return 0;
}

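/* Validate a registration request from user space, record the umem
 * geometry (size, chunk size, headroom, flags) and then account, pin
 * and map the backing pages.
 */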
static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
{
	u32 npgs_rem, chunk_size = mr->chunk_size, headroom = mr->headroom;
	bool unaligned_chunks = mr->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG;
	u64 npgs, addr = mr->addr, size = mr->len;
	unsigned int chunks, chunks_rem;
	int err;

	if (chunk_size < XDP_UMEM_MIN_CHUNK_SIZE || chunk_size > PAGE_SIZE) {
		/* Strictly speaking we could support this, if:
		 * - huge pages, or
		 * - using an IOMMU, or
		 * - making sure the memory area is consecutive
		 * but for now, we simply say "computer says no".
		 */
		return -EINVAL;
	}

	if (mr->flags & ~XDP_UMEM_UNALIGNED_CHUNK_FLAG)
		return -EINVAL;

	if (!unaligned_chunks && !is_power_of_2(chunk_size))
		return -EINVAL;

	if (!PAGE_ALIGNED(addr)) {
		/* Memory area has to be page size aligned. For
		 * simplicity, this might change.
		 */
		return -EINVAL;
	}

	if ((addr + size) < addr)
		return -EINVAL;

	npgs = div_u64_rem(size, PAGE_SIZE, &npgs_rem);
	if (npgs_rem)
		npgs++;
	if (npgs > U32_MAX)
		return -EINVAL;

	chunks = (unsigned int)div_u64_rem(size, chunk_size, &chunks_rem);
	if (chunks == 0)
		return -EINVAL;

	if (!unaligned_chunks && chunks_rem)
		return -EINVAL;

	if (headroom >= chunk_size - XDP_PACKET_HEADROOM)
		return -EINVAL;

	umem->size = size;
	umem->headroom = headroom;
	umem->chunk_size = chunk_size;
	umem->chunks = chunks;
	umem->npgs = (u32)npgs;
	umem->pgs = NULL;
	umem->user = NULL;
	umem->flags = mr->flags;

	INIT_LIST_HEAD(&umem->xsk_dma_list);
	refcount_set(&umem->users, 1);

	err = xdp_umem_account_pages(umem);
	if (err)
		return err;

	err = xdp_umem_pin_pages(umem, (unsigned long)addr);
	if (err)
		goto out_account;

	err = xdp_umem_addr_map(umem, umem->pgs, umem->npgs);
	if (err)
		goto out_unpin;

	return 0;

out_unpin:
	xdp_umem_unpin_pages(umem);
out_account:
	xdp_umem_unaccount_pages(umem);
	return err;
}

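/* Allocate a new umem, assign it an id from the IDA and register the
 * user memory described by @mr. Returns an ERR_PTR on failure.
 */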
struct xdp_umem *xdp_umem_create(struct xdp_umem_reg *mr)
{
	struct xdp_umem *umem;
	int err;

	umem = kzalloc(sizeof(*umem), GFP_KERNEL);
	if (!umem)
		return ERR_PTR(-ENOMEM);

	err = ida_simple_get(&umem_ida, 0, 0, GFP_KERNEL);
	if (err < 0) {
		kfree(umem);
		return ERR_PTR(err);
	}
	umem->id = err;

	err = xdp_umem_reg(umem, mr);
	if (err) {
		ida_simple_remove(&umem_ida, umem->id);
		kfree(umem);
		return ERR_PTR(err);
	}

	return umem;
}