/* SPDX-License-Identifier: GPL-2.0 */
/* XDP user-space packet buffer
 * Copyright(c) 2018 Intel Corporation.
 */

#ifndef XDP_UMEM_H_
#define XDP_UMEM_H_

#include <net/xdp_sock.h>

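/* Return the kernel virtual address for UMEM offset @addr: index the
 * page array by page number and add the offset within the page.
 */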
static inline char *xdp_umem_get_data(struct xdp_umem *umem, u64 addr)
{
	return umem->pages[addr >> PAGE_SHIFT].addr + (addr & (PAGE_SIZE - 1));
}

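/* Return the DMA address for UMEM offset @addr, using the same
 * page-and-offset translation as xdp_umem_get_data().
 */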
static inline dma_addr_t xdp_umem_get_dma(struct xdp_umem *umem, u64 addr)
{
	return umem->pages[addr >> PAGE_SHIFT].dma + (addr & (PAGE_SIZE - 1));
}

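/* UMEM setup and lifetime management:
 * xdp_umem_assign_dev() binds a UMEM to a specific netdev queue,
 * xdp_get_umem()/xdp_put_umem() take and drop references, and
 * xdp_add_sk_umem()/xdp_del_sk_umem() track the AF_XDP sockets
 * sharing a UMEM.
 */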
int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
			u32 queue_id, u16 flags);
bool xdp_umem_validate_queues(struct xdp_umem *umem);
void xdp_get_umem(struct xdp_umem *umem);
void xdp_put_umem(struct xdp_umem *umem);
void xdp_add_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs);
void xdp_del_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs);
struct xdp_umem *xdp_umem_create(struct xdp_umem_reg *mr);

#endif /* XDP_UMEM_H_ */