#ifndef _NET_FLOW_OFFLOAD_H
#define _NET_FLOW_OFFLOAD_H

#include <linux/kernel.h>
#include <linux/list.h>
#include <net/flow_dissector.h>

struct flow_match {
	struct flow_dissector	*dissector;
	void			*mask;
	void			*key;
};

struct flow_match_meta {
	struct flow_dissector_key_meta *key, *mask;
};

struct flow_match_basic {
	struct flow_dissector_key_basic *key, *mask;
};

struct flow_match_control {
	struct flow_dissector_key_control *key, *mask;
};

struct flow_match_eth_addrs {
	struct flow_dissector_key_eth_addrs *key, *mask;
};

struct flow_match_vlan {
	struct flow_dissector_key_vlan *key, *mask;
};

struct flow_match_ipv4_addrs {
	struct flow_dissector_key_ipv4_addrs *key, *mask;
};

struct flow_match_ipv6_addrs {
	struct flow_dissector_key_ipv6_addrs *key, *mask;
};

struct flow_match_ip {
	struct flow_dissector_key_ip *key, *mask;
};

struct flow_match_ports {
	struct flow_dissector_key_ports *key, *mask;
};

struct flow_match_icmp {
	struct flow_dissector_key_icmp *key, *mask;
};

struct flow_match_tcp {
	struct flow_dissector_key_tcp *key, *mask;
};

struct flow_match_mpls {
	struct flow_dissector_key_mpls *key, *mask;
};

struct flow_match_enc_keyid {
	struct flow_dissector_key_keyid *key, *mask;
};

struct flow_match_enc_opts {
	struct flow_dissector_key_enc_opts *key, *mask;
};

struct flow_rule;

void flow_rule_match_meta(const struct flow_rule *rule,
			  struct flow_match_meta *out);
void flow_rule_match_basic(const struct flow_rule *rule,
			   struct flow_match_basic *out);
void flow_rule_match_control(const struct flow_rule *rule,
			     struct flow_match_control *out);
void flow_rule_match_eth_addrs(const struct flow_rule *rule,
			       struct flow_match_eth_addrs *out);
void flow_rule_match_vlan(const struct flow_rule *rule,
			  struct flow_match_vlan *out);
void flow_rule_match_cvlan(const struct flow_rule *rule,
			   struct flow_match_vlan *out);
void flow_rule_match_ipv4_addrs(const struct flow_rule *rule,
				struct flow_match_ipv4_addrs *out);
void flow_rule_match_ipv6_addrs(const struct flow_rule *rule,
				struct flow_match_ipv6_addrs *out);
void flow_rule_match_ip(const struct flow_rule *rule,
			struct flow_match_ip *out);
void flow_rule_match_ports(const struct flow_rule *rule,
			   struct flow_match_ports *out);
void flow_rule_match_tcp(const struct flow_rule *rule,
			 struct flow_match_tcp *out);
void flow_rule_match_icmp(const struct flow_rule *rule,
			  struct flow_match_icmp *out);
void flow_rule_match_mpls(const struct flow_rule *rule,
			  struct flow_match_mpls *out);
void flow_rule_match_enc_control(const struct flow_rule *rule,
				 struct flow_match_control *out);
void flow_rule_match_enc_ipv4_addrs(const struct flow_rule *rule,
				    struct flow_match_ipv4_addrs *out);
void flow_rule_match_enc_ipv6_addrs(const struct flow_rule *rule,
				    struct flow_match_ipv6_addrs *out);
void flow_rule_match_enc_ip(const struct flow_rule *rule,
			    struct flow_match_ip *out);
void flow_rule_match_enc_ports(const struct flow_rule *rule,
			       struct flow_match_ports *out);
void flow_rule_match_enc_keyid(const struct flow_rule *rule,
			       struct flow_match_enc_keyid *out);
void flow_rule_match_enc_opts(const struct flow_rule *rule,
			      struct flow_match_enc_opts *out);

enum flow_action_id {
	FLOW_ACTION_ACCEPT = 0,
	FLOW_ACTION_DROP,
	FLOW_ACTION_TRAP,
	FLOW_ACTION_GOTO,
	FLOW_ACTION_REDIRECT,
	FLOW_ACTION_MIRRED,
	FLOW_ACTION_VLAN_PUSH,
	FLOW_ACTION_VLAN_POP,
	FLOW_ACTION_VLAN_MANGLE,
	FLOW_ACTION_TUNNEL_ENCAP,
	FLOW_ACTION_TUNNEL_DECAP,
	FLOW_ACTION_MANGLE,
	FLOW_ACTION_ADD,
	FLOW_ACTION_CSUM,
	FLOW_ACTION_MARK,
	FLOW_ACTION_WAKE,
	FLOW_ACTION_QUEUE,
	FLOW_ACTION_SAMPLE,
	FLOW_ACTION_POLICE,
	FLOW_ACTION_CT,
	FLOW_ACTION_MPLS_PUSH,
	FLOW_ACTION_MPLS_POP,
	FLOW_ACTION_MPLS_MANGLE,
};

/* This mirrors the enum pedit_header_type definition for easy mapping to and
 * from the tc pedit action. The legacy TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK is
 * mapped to FLOW_ACT_MANGLE_UNSPEC, which no driver supports.
 */
enum flow_action_mangle_base {
	FLOW_ACT_MANGLE_UNSPEC = 0,
	FLOW_ACT_MANGLE_HDR_TYPE_ETH,
	FLOW_ACT_MANGLE_HDR_TYPE_IP4,
	FLOW_ACT_MANGLE_HDR_TYPE_IP6,
	FLOW_ACT_MANGLE_HDR_TYPE_TCP,
	FLOW_ACT_MANGLE_HDR_TYPE_UDP,
};

struct flow_action_entry {
	enum flow_action_id		id;
	union {
		u32			chain_index;	/* FLOW_ACTION_GOTO */
		struct net_device	*dev;		/* FLOW_ACTION_REDIRECT */
		struct {				/* FLOW_ACTION_VLAN */
			u16		vid;
			__be16		proto;
			u8		prio;
		} vlan;
		struct {				/* FLOW_ACTION_PACKET_EDIT */
			enum flow_action_mangle_base htype;
			u32		offset;
			u32		mask;
			u32		val;
		} mangle;
		const struct ip_tunnel_info *tunnel;	/* FLOW_ACTION_TUNNEL_ENCAP */
		u32			csum_flags;	/* FLOW_ACTION_CSUM */
		u32			mark;		/* FLOW_ACTION_MARK */
		struct {				/* FLOW_ACTION_QUEUE */
			u32		ctx;
			u32		index;
			u8		vf;
		} queue;
		struct {				/* FLOW_ACTION_SAMPLE */
			struct psample_group	*psample_group;
			u32			rate;
			u32			trunc_size;
			bool			truncate;
		} sample;
		struct {				/* FLOW_ACTION_POLICE */
			s64			burst;
			u64			rate_bytes_ps;
		} police;
		struct {				/* FLOW_ACTION_CT */
			int		action;
			u16		zone;
		} ct;
		struct {				/* FLOW_ACTION_MPLS_PUSH */
			u32		label;
			__be16		proto;
			u8		tc;
			u8		bos;
			u8		ttl;
		} mpls_push;
		struct {				/* FLOW_ACTION_MPLS_POP */
			__be16		proto;
		} mpls_pop;
		struct {				/* FLOW_ACTION_MPLS_MANGLE */
			u32		label;
			u8		tc;
			u8		bos;
			u8		ttl;
		} mpls_mangle;
	};
};

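/* Usage sketch (illustrative only, not part of this API): dispatching on a
 * packet-edit entry. The offset/mask/val triple describes a 32-bit word in the
 * header selected by htype, following the tc pedit semantics; foo_mangle_tcp
 * is a hypothetical driver helper.
 *
 *	if (act->id == FLOW_ACTION_MANGLE &&
 *	    act->mangle.htype == FLOW_ACT_MANGLE_HDR_TYPE_TCP)
 *		return foo_mangle_tcp(priv, act->mangle.offset,
 *				      act->mangle.mask, act->mangle.val);
 */
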
struct flow_action {
	unsigned int		num_entries;
	struct flow_action_entry entries[0];
};

static inline bool flow_action_has_entries(const struct flow_action *action)
{
	return action->num_entries;
}

/**
 * flow_offload_has_one_action() - check if exactly one action is present
 * @action: tc filter flow offload action
 *
 * Returns true if exactly one action is present.
 */
static inline bool flow_offload_has_one_action(const struct flow_action *action)
{
	return action->num_entries == 1;
}

#define flow_action_for_each(__i, __act, __actions)			\
	for (__i = 0, __act = &(__actions)->entries[0]; __i < (__actions)->num_entries; __act = &(__actions)->entries[++__i])

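/* Usage sketch (illustrative only, not part of this API): a driver walking
 * the action list of an offloaded rule. The foo_* helpers are hypothetical.
 *
 *	const struct flow_action_entry *act;
 *	int i;
 *
 *	flow_action_for_each(i, act, &rule->action) {
 *		switch (act->id) {
 *		case FLOW_ACTION_DROP:
 *			foo_add_drop(priv);
 *			break;
 *		case FLOW_ACTION_VLAN_PUSH:
 *			foo_add_vlan_push(priv, act->vlan.vid, act->vlan.proto);
 *			break;
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 */
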
struct flow_rule {
	struct flow_match	match;
	struct flow_action	action;
};

struct flow_rule *flow_rule_alloc(unsigned int num_actions);

static inline bool flow_rule_match_key(const struct flow_rule *rule,
				       enum flow_dissector_key_id key)
{
	return dissector_uses_key(rule->match.dissector, key);
}

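/* Usage sketch (illustrative only): extracting a match from an offloaded
 * rule, guarded by flow_rule_match_key(). foo_set_ethertype is a hypothetical
 * driver helper.
 *
 *	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
 *		struct flow_match_basic match;
 *
 *		flow_rule_match_basic(rule, &match);
 *		if (match.mask->n_proto)
 *			foo_set_ethertype(priv, match.key->n_proto);
 *	}
 */
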
struct flow_stats {
	u64	pkts;
	u64	bytes;
	u64	lastused;
};

static inline void flow_stats_update(struct flow_stats *flow_stats,
				     u64 bytes, u64 pkts, u64 lastused)
{
	flow_stats->pkts	+= pkts;
	flow_stats->bytes	+= bytes;
	flow_stats->lastused	= max_t(u64, flow_stats->lastused, lastused);
}

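/* Usage sketch (illustrative only): a driver folding hardware counters into
 * the stats returned for a FLOW_CLS_STATS request (struct flow_cls_offload is
 * defined further below). foo_read_hw_counters is a hypothetical helper.
 *
 *	foo_read_hw_counters(priv, cls->cookie, &bytes, &pkts, &lastused);
 *	flow_stats_update(&cls->stats, bytes, pkts, lastused);
 */
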
enum flow_block_command {
	FLOW_BLOCK_BIND,
	FLOW_BLOCK_UNBIND,
};

enum flow_block_binder_type {
	FLOW_BLOCK_BINDER_TYPE_UNSPEC,
	FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS,
	FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS,
};

struct flow_block {
	struct list_head cb_list;
};

struct netlink_ext_ack;

struct flow_block_offload {
	enum flow_block_command command;
	enum flow_block_binder_type binder_type;
	bool block_shared;
	struct net *net;
	struct flow_block *block;
	struct list_head cb_list;
	struct list_head *driver_block_list;
	struct netlink_ext_ack *extack;
};

enum tc_setup_type;
typedef int flow_setup_cb_t(enum tc_setup_type type, void *type_data,
			    void *cb_priv);

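/* Usage sketch (illustrative only): a driver implementation of
 * flow_setup_cb_t, later registered via flow_block_cb_alloc() below.
 * foo_setup_flower is a hypothetical helper.
 *
 *	static int foo_setup_block_cb(enum tc_setup_type type, void *type_data,
 *				      void *cb_priv)
 *	{
 *		switch (type) {
 *		case TC_SETUP_CLSFLOWER:
 *			return foo_setup_flower(cb_priv, type_data);
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 */
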
struct flow_block_cb {
	struct list_head	driver_list;
	struct list_head	list;
	flow_setup_cb_t		*cb;
	void			*cb_ident;
	void			*cb_priv;
	void			(*release)(void *cb_priv);
	unsigned int		refcnt;
};

struct flow_block_cb *flow_block_cb_alloc(flow_setup_cb_t *cb,
					  void *cb_ident, void *cb_priv,
					  void (*release)(void *cb_priv));
void flow_block_cb_free(struct flow_block_cb *block_cb);

struct flow_block_cb *flow_block_cb_lookup(struct flow_block *block,
					   flow_setup_cb_t *cb, void *cb_ident);

void *flow_block_cb_priv(struct flow_block_cb *block_cb);
void flow_block_cb_incref(struct flow_block_cb *block_cb);
unsigned int flow_block_cb_decref(struct flow_block_cb *block_cb);

static inline void flow_block_cb_add(struct flow_block_cb *block_cb,
				     struct flow_block_offload *offload)
{
	list_add_tail(&block_cb->list, &offload->cb_list);
}

static inline void flow_block_cb_remove(struct flow_block_cb *block_cb,
					struct flow_block_offload *offload)
{
	list_move(&block_cb->list, &offload->cb_list);
}

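/* Usage sketch (illustrative only): handling FLOW_BLOCK_BIND in a driver.
 * foo_setup_block_cb, foo_priv and foo_block_cb_list are hypothetical.
 *
 *	struct flow_block_cb *block_cb;
 *
 *	block_cb = flow_block_cb_alloc(foo_setup_block_cb, foo_priv,
 *				       foo_priv, NULL);
 *	if (IS_ERR(block_cb))
 *		return PTR_ERR(block_cb);
 *	flow_block_cb_add(block_cb, f);
 *	list_add_tail(&block_cb->driver_list, &foo_block_cb_list);
 *	return 0;
 */
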
bool flow_block_cb_is_busy(flow_setup_cb_t *cb, void *cb_ident,
			   struct list_head *driver_block_list);

int flow_block_cb_setup_simple(struct flow_block_offload *f,
			       struct list_head *driver_list,
			       flow_setup_cb_t *cb,
			       void *cb_ident, void *cb_priv, bool ingress_only);

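/* Usage sketch (illustrative only): a driver that keeps no per-block state can
 * forward TC_SETUP_BLOCK straight to this helper from its ndo_setup_tc.
 * foo_block_cb_list and foo_setup_block_cb are hypothetical.
 *
 *	case TC_SETUP_BLOCK:
 *		return flow_block_cb_setup_simple(type_data,
 *						  &foo_block_cb_list,
 *						  foo_setup_block_cb,
 *						  priv, priv, true);
 */
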
enum flow_cls_command {
	FLOW_CLS_REPLACE,
	FLOW_CLS_DESTROY,
	FLOW_CLS_STATS,
	FLOW_CLS_TMPLT_CREATE,
	FLOW_CLS_TMPLT_DESTROY,
};

struct flow_cls_common_offload {
	u32 chain_index;
	__be16 protocol;
	u32 prio;
	struct netlink_ext_ack *extack;
};

struct flow_cls_offload {
	struct flow_cls_common_offload common;
	enum flow_cls_command command;
	unsigned long cookie;
	struct flow_rule *rule;
	struct flow_stats stats;
	u32 classid;
};

static inline struct flow_rule *
flow_cls_offload_flow_rule(struct flow_cls_offload *flow_cmd)
{
	return flow_cmd->rule;
}

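/* Usage sketch (illustrative only): a classifier offload callback pulling the
 * flow_rule out of a FLOW_CLS_REPLACE request. foo_add_rule is a hypothetical
 * driver helper.
 *
 *	struct flow_cls_offload *f = type_data;
 *	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
 *
 *	switch (f->command) {
 *	case FLOW_CLS_REPLACE:
 *		return foo_add_rule(priv, f->cookie, rule);
 *	default:
 *		return -EOPNOTSUPP;
 *	}
 */
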
static inline void flow_block_init(struct flow_block *flow_block)
{
	INIT_LIST_HEAD(&flow_block->cb_list);
}

#endif /* _NET_FLOW_OFFLOAD_H */