blob: c014217f5fa7d92aba92b2682416d208f8a53fdf [file] [log] [blame]
Björn Töpela36b38aa2019-01-24 19:59:39 +01001// SPDX-License-Identifier: GPL-2.0
2/* XDP sockets monitoring support
3 *
4 * Copyright(c) 2019 Intel Corporation.
5 *
6 * Author: Björn Töpel <bjorn.topel@intel.com>
7 */
8
9#include <linux/module.h>
10#include <net/xdp_sock.h>
11#include <linux/xdp_diag.h>
12#include <linux/sock_diag.h>
13
14#include "xsk_queue.h"
15#include "xsk.h"
16
17static int xsk_diag_put_info(const struct xdp_sock *xs, struct sk_buff *nlskb)
18{
19 struct xdp_diag_info di = {};
20
21 di.ifindex = xs->dev ? xs->dev->ifindex : 0;
22 di.queue_id = xs->queue_id;
23 return nla_put(nlskb, XDP_DIAG_INFO, sizeof(di), &di);
24}
25
26static int xsk_diag_put_ring(const struct xsk_queue *queue, int nl_type,
27 struct sk_buff *nlskb)
28{
29 struct xdp_diag_ring dr = {};
30
31 dr.entries = queue->nentries;
32 return nla_put(nlskb, nl_type, sizeof(dr), &dr);
33}
34
35static int xsk_diag_put_rings_cfg(const struct xdp_sock *xs,
36 struct sk_buff *nlskb)
37{
38 int err = 0;
39
40 if (xs->rx)
41 err = xsk_diag_put_ring(xs->rx, XDP_DIAG_RX_RING, nlskb);
42 if (!err && xs->tx)
43 err = xsk_diag_put_ring(xs->tx, XDP_DIAG_TX_RING, nlskb);
44 return err;
45}
46
47static int xsk_diag_put_umem(const struct xdp_sock *xs, struct sk_buff *nlskb)
48{
Magnus Karlsson7361f9c2020-08-28 10:26:18 +020049 struct xsk_buff_pool *pool = xs->pool;
Björn Töpela36b38aa2019-01-24 19:59:39 +010050 struct xdp_umem *umem = xs->umem;
51 struct xdp_diag_umem du = {};
52 int err;
53
54 if (!umem)
55 return 0;
56
57 du.id = umem->id;
58 du.size = umem->size;
59 du.num_pages = umem->npgs;
Björn Töpel2b434702020-05-20 21:20:53 +020060 du.chunk_size = umem->chunk_size;
Björn Töpela36b38aa2019-01-24 19:59:39 +010061 du.headroom = umem->headroom;
Magnus Karlsson53ea2072020-09-02 10:52:23 +020062 du.ifindex = (pool && pool->netdev) ? pool->netdev->ifindex : 0;
63 du.queue_id = pool ? pool->queue_id : 0;
Björn Töpela36b38aa2019-01-24 19:59:39 +010064 du.flags = 0;
65 if (umem->zc)
66 du.flags |= XDP_DU_F_ZEROCOPY;
67 du.refs = refcount_read(&umem->users);
68
69 err = nla_put(nlskb, XDP_DIAG_UMEM, sizeof(du), &du);
Magnus Karlsson53ea2072020-09-02 10:52:23 +020070 if (!err && pool && pool->fq)
Magnus Karlsson7361f9c2020-08-28 10:26:18 +020071 err = xsk_diag_put_ring(pool->fq,
72 XDP_DIAG_UMEM_FILL_RING, nlskb);
Magnus Karlsson53ea2072020-09-02 10:52:23 +020073 if (!err && pool && pool->cq)
74 err = xsk_diag_put_ring(pool->cq,
75 XDP_DIAG_UMEM_COMPLETION_RING, nlskb);
Björn Töpela36b38aa2019-01-24 19:59:39 +010076 return err;
77}
78
Ciara Loftus0d80cb42020-07-08 07:28:35 +000079static int xsk_diag_put_stats(const struct xdp_sock *xs, struct sk_buff *nlskb)
80{
81 struct xdp_diag_stats du = {};
82
83 du.n_rx_dropped = xs->rx_dropped;
84 du.n_rx_invalid = xskq_nb_invalid_descs(xs->rx);
85 du.n_rx_full = xs->rx_queue_full;
Magnus Karlsson7361f9c2020-08-28 10:26:18 +020086 du.n_fill_ring_empty = xs->pool ? xskq_nb_queue_empty_descs(xs->pool->fq) : 0;
Ciara Loftus0d80cb42020-07-08 07:28:35 +000087 du.n_tx_invalid = xskq_nb_invalid_descs(xs->tx);
88 du.n_tx_ring_empty = xskq_nb_queue_empty_descs(xs->tx);
89 return nla_put(nlskb, XDP_DIAG_STATS, sizeof(du), &du);
90}
91
/* Fill one netlink message describing socket @sk, including only the
 * sections requested via req->xdiag_show. The socket's mutex is held
 * across all attribute emission so the reported state is consistent.
 *
 * Returns 0 on success or -EMSGSIZE when the skb ran out of room, in
 * which case the partially written message is trimmed away.
 */
static int xsk_diag_fill(struct sock *sk, struct sk_buff *nlskb,
			 struct xdp_diag_req *req,
			 struct user_namespace *user_ns,
			 u32 portid, u32 seq, u32 flags, int sk_ino)
{
	struct xdp_sock *xs = xdp_sk(sk);
	struct xdp_diag_msg *msg;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(nlskb, portid, seq, SOCK_DIAG_BY_FAMILY, sizeof(*msg),
			flags);
	if (!nlh)
		return -EMSGSIZE;

	/* Fixed header part of the reply. */
	msg = nlmsg_data(nlh);
	memset(msg, 0, sizeof(*msg));
	msg->xdiag_family = AF_XDP;
	msg->xdiag_type = sk->sk_type;
	msg->xdiag_ino = sk_ino;
	sock_diag_save_cookie(sk, msg->xdiag_cookie);

	mutex_lock(&xs->mutex);
	if ((req->xdiag_show & XDP_SHOW_INFO) && xsk_diag_put_info(xs, nlskb))
		goto out_nlmsg_trim;

	/* The UID attribute is gated by XDP_SHOW_INFO as well, translated
	 * into the requester's user namespace.
	 */
	if ((req->xdiag_show & XDP_SHOW_INFO) &&
	    nla_put_u32(nlskb, XDP_DIAG_UID,
			from_kuid_munged(user_ns, sock_i_uid(sk))))
		goto out_nlmsg_trim;

	if ((req->xdiag_show & XDP_SHOW_RING_CFG) &&
	    xsk_diag_put_rings_cfg(xs, nlskb))
		goto out_nlmsg_trim;

	if ((req->xdiag_show & XDP_SHOW_UMEM) &&
	    xsk_diag_put_umem(xs, nlskb))
		goto out_nlmsg_trim;

	if ((req->xdiag_show & XDP_SHOW_MEMINFO) &&
	    sock_diag_put_meminfo(sk, nlskb, XDP_DIAG_MEMINFO))
		goto out_nlmsg_trim;

	if ((req->xdiag_show & XDP_SHOW_STATS) &&
	    xsk_diag_put_stats(xs, nlskb))
		goto out_nlmsg_trim;

	mutex_unlock(&xs->mutex);
	nlmsg_end(nlskb, nlh);
	return 0;

out_nlmsg_trim:
	/* Drop the half-built message; the dump loop will retry it in the
	 * next skb.
	 */
	mutex_unlock(&xs->mutex);
	nlmsg_cancel(nlskb, nlh);
	return -EMSGSIZE;
}
147
148static int xsk_diag_dump(struct sk_buff *nlskb, struct netlink_callback *cb)
149{
150 struct xdp_diag_req *req = nlmsg_data(cb->nlh);
151 struct net *net = sock_net(nlskb->sk);
152 int num = 0, s_num = cb->args[0];
153 struct sock *sk;
154
155 mutex_lock(&net->xdp.lock);
156
157 sk_for_each(sk, &net->xdp.list) {
158 if (!net_eq(sock_net(sk), net))
159 continue;
160 if (num++ < s_num)
161 continue;
162
163 if (xsk_diag_fill(sk, nlskb, req,
164 sk_user_ns(NETLINK_CB(cb->skb).sk),
165 NETLINK_CB(cb->skb).portid,
166 cb->nlh->nlmsg_seq, NLM_F_MULTI,
167 sock_i_ino(sk)) < 0) {
168 num--;
169 break;
170 }
171 }
172
173 mutex_unlock(&net->xdp.lock);
174 cb->args[0] = num;
175 return nlskb->len;
176}
177
178static int xsk_diag_handler_dump(struct sk_buff *nlskb, struct nlmsghdr *hdr)
179{
180 struct netlink_dump_control c = { .dump = xsk_diag_dump };
181 int hdrlen = sizeof(struct xdp_diag_req);
182 struct net *net = sock_net(nlskb->sk);
183
184 if (nlmsg_len(hdr) < hdrlen)
185 return -EINVAL;
186
187 if (!(hdr->nlmsg_flags & NLM_F_DUMP))
188 return -EOPNOTSUPP;
189
190 return netlink_dump_start(net->diag_nlsk, nlskb, hdr, &c);
191}
192
/* sock_diag handler registration record for the AF_XDP family. */
static const struct sock_diag_handler xsk_diag_handler = {
	.family = AF_XDP,
	.dump = xsk_diag_handler_dump,
};
197
/* Module init: hook this handler into the sock_diag dispatch table. */
static int __init xsk_diag_init(void)
{
	return sock_diag_register(&xsk_diag_handler);
}
202
/* Module exit: remove the AF_XDP handler from sock_diag. */
static void __exit xsk_diag_exit(void)
{
	sock_diag_unregister(&xsk_diag_handler);
}
207
module_init(xsk_diag_init);
module_exit(xsk_diag_exit);
MODULE_LICENSE("GPL");
/* Allow autoloading when userspace issues an AF_XDP sock_diag request. */
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, AF_XDP);