// SPDX-License-Identifier: GPL-2.0
/* XDP user-space packet buffer
 * Copyright(c) 2018 Intel Corporation.
 */

#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/mm.h>

#include "xdp_umem.h"
#include "xsk_queue.h"

#define XDP_UMEM_MIN_CHUNK_SIZE 2048

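/* Undo xdp_umem_pin_pages(): mark every pinned page dirty, drop the
 * page references taken by get_user_pages() and free the page array.
 */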
static void xdp_umem_unpin_pages(struct xdp_umem *umem)
{
	unsigned int i;

	for (i = 0; i < umem->npgs; i++) {
		struct page *page = umem->pgs[i];

		set_page_dirty_lock(page);
		put_page(page);
	}

	kfree(umem->pgs);
	umem->pgs = NULL;
}

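/* Return the pages charged against the owning user's locked_vm and
 * drop the reference on the user struct taken at accounting time.
 */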
static void xdp_umem_unaccount_pages(struct xdp_umem *umem)
{
	atomic_long_sub(umem->npgs, &umem->user->locked_vm);
	free_uid(umem->user);
}

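/* Final teardown of a umem: destroy the fill and completion queues,
 * unpin the user pages and, if the owning task still has an mm,
 * unaccount the locked memory before freeing the umem itself.
 */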
static void xdp_umem_release(struct xdp_umem *umem)
{
	struct task_struct *task;
	struct mm_struct *mm;

	if (umem->fq) {
		xskq_destroy(umem->fq);
		umem->fq = NULL;
	}

	if (umem->cq) {
		xskq_destroy(umem->cq);
		umem->cq = NULL;
	}

	xdp_umem_unpin_pages(umem);

	task = get_pid_task(umem->pid, PIDTYPE_PID);
	put_pid(umem->pid);
	if (!task)
		goto out;
	mm = get_task_mm(task);
	put_task_struct(task);
	if (!mm)
		goto out;

	mmput(mm);
	xdp_umem_unaccount_pages(umem);
out:
	kfree(umem);
}

static void xdp_umem_release_deferred(struct work_struct *work)
{
	struct xdp_umem *umem = container_of(work, struct xdp_umem, work);

	xdp_umem_release(umem);
}

void xdp_get_umem(struct xdp_umem *umem)
{
	refcount_inc(&umem->users);
}

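/* Drop a reference on the umem; the final put schedules the actual
 * release on a workqueue via xdp_umem_release_deferred().
 */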
void xdp_put_umem(struct xdp_umem *umem)
{
	if (!umem)
		return;

	if (refcount_dec_and_test(&umem->users)) {
		INIT_WORK(&umem->work, xdp_umem_release_deferred);
		schedule_work(&umem->work);
	}
}

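/* Pin the user memory area with get_user_pages() so the pages stay
 * resident for the lifetime of the umem. On a partial pin the already
 * pinned pages are released and -ENOMEM is returned.
 */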
static int xdp_umem_pin_pages(struct xdp_umem *umem)
{
	unsigned int gup_flags = FOLL_WRITE;
	long npgs;
	int err;

	umem->pgs = kcalloc(umem->npgs, sizeof(*umem->pgs), GFP_KERNEL);
	if (!umem->pgs)
		return -ENOMEM;

	down_write(&current->mm->mmap_sem);
	npgs = get_user_pages(umem->address, umem->npgs,
			      gup_flags, &umem->pgs[0], NULL);
	up_write(&current->mm->mmap_sem);

	if (npgs != umem->npgs) {
		if (npgs >= 0) {
			umem->npgs = npgs;
			err = -ENOMEM;
			goto out_pin;
		}
		err = npgs;
		goto out_pgs;
	}
	return 0;

out_pin:
	xdp_umem_unpin_pages(umem);
out_pgs:
	kfree(umem->pgs);
	umem->pgs = NULL;
	return err;
}

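/* Charge the pinned pages against the user's RLIMIT_MEMLOCK, unless
 * the caller has CAP_IPC_LOCK. Fails with -ENOBUFS if the limit would
 * be exceeded.
 */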
static int xdp_umem_account_pages(struct xdp_umem *umem)
{
	unsigned long lock_limit, new_npgs, old_npgs;

	if (capable(CAP_IPC_LOCK))
		return 0;

	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	umem->user = get_uid(current_user());

	do {
		old_npgs = atomic_long_read(&umem->user->locked_vm);
		new_npgs = old_npgs + umem->npgs;
		if (new_npgs > lock_limit) {
			free_uid(umem->user);
			umem->user = NULL;
			return -ENOBUFS;
		}
	} while (atomic_long_cmpxchg(&umem->user->locked_vm, old_npgs,
				     new_npgs) != old_npgs);
	return 0;
}

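/* Validate a registration request and initialize the umem: the chunk
 * size must be a power of two between XDP_UMEM_MIN_CHUNK_SIZE and
 * PAGE_SIZE, the memory area must be page aligned, and each chunk must
 * have room for the requested headroom plus XDP_PACKET_HEADROOM. On
 * success the pages are accounted against the user and pinned.
 */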
static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
{
	u32 chunk_size = mr->chunk_size, headroom = mr->headroom;
	unsigned int chunks, chunks_per_page;
	u64 addr = mr->addr, size = mr->len;
	int size_chk, err;

	if (chunk_size < XDP_UMEM_MIN_CHUNK_SIZE || chunk_size > PAGE_SIZE) {
		/* Strictly speaking we could support this, if:
		 * - huge pages, or
		 * - using an IOMMU, or
		 * - making sure the memory area is consecutive
		 * but for now, we simply say "computer says no".
		 */
		return -EINVAL;
	}

	if (!is_power_of_2(chunk_size))
		return -EINVAL;

	if (!PAGE_ALIGNED(addr)) {
		/* Memory area has to be page size aligned. For
		 * simplicity, this might change.
		 */
		return -EINVAL;
	}

	if ((addr + size) < addr)
		return -EINVAL;

	chunks = (unsigned int)div_u64(size, chunk_size);
	if (chunks == 0)
		return -EINVAL;

	chunks_per_page = PAGE_SIZE / chunk_size;
	if (chunks < chunks_per_page || chunks % chunks_per_page)
		return -EINVAL;

	headroom = ALIGN(headroom, 64);

	size_chk = chunk_size - headroom - XDP_PACKET_HEADROOM;
	if (size_chk < 0)
		return -EINVAL;

	umem->pid = get_task_pid(current, PIDTYPE_PID);
	umem->address = (unsigned long)addr;
	umem->props.chunk_mask = ~((u64)chunk_size - 1);
	umem->props.size = size;
	umem->headroom = headroom;
	umem->chunk_size_nohr = chunk_size - headroom;
	umem->npgs = size / PAGE_SIZE;
	umem->pgs = NULL;
	umem->user = NULL;

	refcount_set(&umem->users, 1);

	err = xdp_umem_account_pages(umem);
	if (err)
		goto out;

	err = xdp_umem_pin_pages(umem);
	if (err)
		goto out_account;
	return 0;

out_account:
	xdp_umem_unaccount_pages(umem);
out:
	put_pid(umem->pid);
	return err;
}

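/* Allocate a umem and register the user memory described by @mr.
 * Returns the new umem or an ERR_PTR() on failure.
 */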
struct xdp_umem *xdp_umem_create(struct xdp_umem_reg *mr)
{
	struct xdp_umem *umem;
	int err;

	umem = kzalloc(sizeof(*umem), GFP_KERNEL);
	if (!umem)
		return ERR_PTR(-ENOMEM);

	err = xdp_umem_reg(umem, mr);
	if (err) {
		kfree(umem);
		return ERR_PTR(err);
	}

	return umem;
}

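/* A umem is ready for use once both a fill and a completion queue
 * have been created for it.
 */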
bool xdp_umem_validate_queues(struct xdp_umem *umem)
{
	return umem->fq && umem->cq;
}