// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/core/dst.c	Protocol independent destination cache.
 *
 * Authors:		Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 */

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/types.h>
#include <net/net_namespace.h>
#include <linux/sched.h>
#include <linux/prefetch.h>
#include <net/lwtunnel.h>
#include <net/xfrm.h>

#include <net/dst.h>
#include <net/dst_metadata.h>

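/* Default packet handler installed by dst_init() and dst_dev_put():
 * silently drop the skb.  dst->output points here directly;
 * dst->input uses dst_discard() from net/dst.h, which wraps this.
 */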
int dst_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	kfree_skb(skb);
	return 0;
}
EXPORT_SYMBOL(dst_discard_out);

const struct dst_metrics dst_default_metrics = {
	/* This initializer is needed to force the linker to place this
	 * variable into the const section.  Otherwise it might end up in
	 * the bss section.  We really want to avoid false sharing on this
	 * variable, and to catch any writes to it.
	 */
	.refcnt = REFCOUNT_INIT(1),
};
EXPORT_SYMBOL(dst_default_metrics);

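/* Initialize a pre-allocated dst_entry.  @initial_ref seeds __refcnt,
 * @initial_obsolete is one of the DST_OBSOLETE_* values checked by
 * dst_ops->check(), and DST_NOCOUNT in @flags exempts the entry from
 * the per-ops accounting consulted by the garbage collector.
 */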
void dst_init(struct dst_entry *dst, struct dst_ops *ops,
	      struct net_device *dev, int initial_ref, int initial_obsolete,
	      unsigned short flags)
{
	dst->dev = dev;
	dev_hold_track(dev, &dst->dev_tracker, GFP_ATOMIC);
	dst->ops = ops;
	dst_init_metrics(dst, dst_default_metrics.metrics, true);
	dst->expires = 0UL;
#ifdef CONFIG_XFRM
	dst->xfrm = NULL;
#endif
	dst->input = dst_discard;
	dst->output = dst_discard_out;
	dst->error = 0;
	dst->obsolete = initial_obsolete;
	dst->header_len = 0;
	dst->trailer_len = 0;
#ifdef CONFIG_IP_ROUTE_CLASSID
	dst->tclassid = 0;
#endif
	dst->lwtstate = NULL;
	atomic_set(&dst->__refcnt, initial_ref);
	dst->__use = 0;
	dst->lastuse = jiffies;
	dst->flags = flags;
	if (!(flags & DST_NOCOUNT))
		dst_entries_add(ops, 1);
}
EXPORT_SYMBOL(dst_init);

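/* Allocate a dst_entry from @ops->kmem_cachep and initialize it.  If
 * the protocol provides a garbage collector and the accounted entry
 * count exceeds ops->gc_thresh, gc gets a chance to make room first;
 * the allocation fails if it cannot.
 *
 * Illustrative call only -- the ops name and argument values below are
 * a sketch of typical protocol usage, not something defined here:
 *
 *	rt = dst_alloc(&my_dst_ops, dev, 1, DST_OBSOLETE_FORCE_CHK, 0);
 */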
void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
		int initial_ref, int initial_obsolete, unsigned short flags)
{
	struct dst_entry *dst;

	if (ops->gc &&
	    !(flags & DST_NOCOUNT) &&
	    dst_entries_get_fast(ops) > ops->gc_thresh) {
		if (ops->gc(ops)) {
			pr_notice_ratelimited("Route cache is full: consider increasing sysctl net.ipv6.route.max_size.\n");
			return NULL;
		}
	}

	dst = kmem_cache_alloc(ops->kmem_cachep, GFP_ATOMIC);
	if (!dst)
		return NULL;

	dst_init(dst, ops, dev, initial_ref, initial_obsolete, flags);

	return dst;
}
EXPORT_SYMBOL(dst_alloc);

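/* Final teardown once the last reference is gone: undo the entry
 * accounting, run the protocol's ->destroy() hook, release the device
 * reference and lwtunnel state, then free the entry (metadata dsts via
 * metadata_dst_free(), everything else back to its kmem cache).  For
 * xfrm bundles the child dst is released as well.  Always returns NULL.
 */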
struct dst_entry *dst_destroy(struct dst_entry *dst)
{
	struct dst_entry *child = NULL;

	smp_rmb();

#ifdef CONFIG_XFRM
	if (dst->xfrm) {
		struct xfrm_dst *xdst = (struct xfrm_dst *) dst;

		child = xdst->child;
	}
#endif
	if (!(dst->flags & DST_NOCOUNT))
		dst_entries_add(dst->ops, -1);

	if (dst->ops->destroy)
		dst->ops->destroy(dst);
	dev_put_track(dst->dev, &dst->dev_tracker);

	lwtstate_put(dst->lwtstate);

	if (dst->flags & DST_METADATA)
		metadata_dst_free((struct metadata_dst *)dst);
	else
		kmem_cache_free(dst->ops->kmem_cachep, dst);

	dst = child;
	if (dst)
		dst_release_immediate(dst);
	return NULL;
}
EXPORT_SYMBOL(dst_destroy);

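/* RCU callback armed by dst_release(): run dst_destroy() only after a
 * grace period, so lockless readers still holding the pointer are safe.
 */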
static void dst_destroy_rcu(struct rcu_head *head)
{
	struct dst_entry *dst = container_of(head, struct dst_entry, rcu_head);

	dst = dst_destroy(dst);
}

/* Operations to mark dst as DEAD and clean up the net device referenced
 * by dst:
 * 1. put the dst under the blackhole interface and discard all tx/rx
 *    packets on this route.
 * 2. release the net_device
 * This function should be called when removing routes from the fib tree
 * in preparation for a NETDEV_DOWN/NETDEV_UNREGISTER event and also to
 * make the next dst_ops->check() fail.
 */
void dst_dev_put(struct dst_entry *dst)
{
	struct net_device *dev = dst->dev;

	dst->obsolete = DST_OBSOLETE_DEAD;
	if (dst->ops->ifdown)
		dst->ops->ifdown(dst, dev, true);
	dst->input = dst_discard;
	dst->output = dst_discard_out;
	dst->dev = blackhole_netdev;
	dev_replace_track(dev, blackhole_netdev, &dst->dev_tracker,
			  GFP_ATOMIC);
}
EXPORT_SYMBOL(dst_dev_put);

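/* Drop one reference.  When the count reaches zero the dst is freed
 * via call_rcu(), so concurrent RCU readers may keep using it until
 * the grace period ends.
 */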
void dst_release(struct dst_entry *dst)
{
	if (dst) {
		int newrefcnt;

		newrefcnt = atomic_dec_return(&dst->__refcnt);
		if (WARN_ONCE(newrefcnt < 0, "dst_release underflow"))
			net_warn_ratelimited("%s: dst:%p refcnt:%d\n",
					     __func__, dst, newrefcnt);
		if (!newrefcnt)
			call_rcu(&dst->rcu_head, dst_destroy_rcu);
	}
}
EXPORT_SYMBOL(dst_release);

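/* Like dst_release(), but destroys the dst synchronously instead of
 * deferring to RCU.  Callers must guarantee no concurrent readers can
 * still see the entry (e.g. it was never published to a lookup path).
 */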
void dst_release_immediate(struct dst_entry *dst)
{
	if (dst) {
		int newrefcnt;

		newrefcnt = atomic_dec_return(&dst->__refcnt);
		if (WARN_ONCE(newrefcnt < 0, "dst_release_immediate underflow"))
			net_warn_ratelimited("%s: dst:%p refcnt:%d\n",
					     __func__, dst, newrefcnt);
		if (!newrefcnt)
			dst_destroy(dst);
	}
}
EXPORT_SYMBOL(dst_release_immediate);

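/* Copy-on-write for dst metrics.  @old is the current dst->_metrics
 * word (a dst_metrics pointer plus DST_METRICS_* flag bits).  A
 * writable copy is installed with cmpxchg(); if we lose the race, the
 * winner's block is reused (NULL if it is read-only), and a
 * refcounted old block is freed once this was its last user.
 */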
u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old)
{
	struct dst_metrics *p = kmalloc(sizeof(*p), GFP_ATOMIC);

	if (p) {
		struct dst_metrics *old_p = (struct dst_metrics *)__DST_METRICS_PTR(old);
		unsigned long prev, new;

		refcount_set(&p->refcnt, 1);
		memcpy(p->metrics, old_p->metrics, sizeof(p->metrics));

		new = (unsigned long) p;
		prev = cmpxchg(&dst->_metrics, old, new);

		if (prev != old) {
			kfree(p);
			p = (struct dst_metrics *)__DST_METRICS_PTR(prev);
			if (prev & DST_METRICS_READ_ONLY)
				p = NULL;
		} else if (prev & DST_METRICS_REFCOUNTED) {
			if (refcount_dec_and_test(&old_p->refcnt))
				kfree(old_p);
		}
	}
	BUILD_BUG_ON(offsetof(struct dst_metrics, metrics) != 0);
	return (u32 *)p;
}
EXPORT_SYMBOL(dst_cow_metrics_generic);

/* Caller asserts that dst_metrics_read_only(dst) is false.  */
void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old)
{
	unsigned long prev, new;

	new = ((unsigned long) &dst_default_metrics) | DST_METRICS_READ_ONLY;
	prev = cmpxchg(&dst->_metrics, old, new);
	if (prev == old)
		kfree(__DST_METRICS_PTR(old));
}
EXPORT_SYMBOL(__dst_destroy_metrics_generic);

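/* No-op dst_ops implementations.  They back dst_blackhole_ops below
 * and (via the EXPORT_SYMBOL_GPLs) the per-protocol blackhole routes:
 * a blackhole dst can be held and queried safely, but never resolves
 * a neighbour, ignores PMTU and redirect updates, and falls back to
 * the device MTU.
 */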
struct dst_entry *dst_blackhole_check(struct dst_entry *dst, u32 cookie)
{
	return NULL;
}

u32 *dst_blackhole_cow_metrics(struct dst_entry *dst, unsigned long old)
{
	return NULL;
}

struct neighbour *dst_blackhole_neigh_lookup(const struct dst_entry *dst,
					     struct sk_buff *skb,
					     const void *daddr)
{
	return NULL;
}

void dst_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
			       struct sk_buff *skb, u32 mtu,
			       bool confirm_neigh)
{
}
EXPORT_SYMBOL_GPL(dst_blackhole_update_pmtu);

void dst_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
			    struct sk_buff *skb)
{
}
EXPORT_SYMBOL_GPL(dst_blackhole_redirect);

unsigned int dst_blackhole_mtu(const struct dst_entry *dst)
{
	unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);

	return mtu ? : dst->dev->mtu;
}
EXPORT_SYMBOL_GPL(dst_blackhole_mtu);

static struct dst_ops dst_blackhole_ops = {
	.family		= AF_UNSPEC,
	.neigh_lookup	= dst_blackhole_neigh_lookup,
	.check		= dst_blackhole_check,
	.cow_metrics	= dst_blackhole_cow_metrics,
	.update_pmtu	= dst_blackhole_update_pmtu,
	.redirect	= dst_blackhole_redirect,
	.mtu		= dst_blackhole_mtu,
};

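/* Metadata dsts carry per-packet tunnel/forwarding metadata rather
 * than a real route: no device, no entry accounting (DST_NOCOUNT),
 * and dst_blackhole_ops so every dst_ops method is inert.  @optslen
 * bytes of option space follow the metadata_dst and are zeroed along
 * with everything past the embedded dst_entry.
 */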
static void __metadata_dst_init(struct metadata_dst *md_dst,
				enum metadata_type type, u8 optslen)
{
	struct dst_entry *dst;

	dst = &md_dst->dst;
	dst_init(dst, &dst_blackhole_ops, NULL, 1, DST_OBSOLETE_NONE,
		 DST_METADATA | DST_NOCOUNT);
	memset(dst + 1, 0, sizeof(*md_dst) + optslen - sizeof(*dst));
	md_dst->type = type;
}

struct metadata_dst *metadata_dst_alloc(u8 optslen, enum metadata_type type,
					gfp_t flags)
{
	struct metadata_dst *md_dst;

	md_dst = kmalloc(sizeof(*md_dst) + optslen, flags);
	if (!md_dst)
		return NULL;

	__metadata_dst_init(md_dst, type, optslen);

	return md_dst;
}
EXPORT_SYMBOL_GPL(metadata_dst_alloc);

void metadata_dst_free(struct metadata_dst *md_dst)
{
#ifdef CONFIG_DST_CACHE
	if (md_dst->type == METADATA_IP_TUNNEL)
		dst_cache_destroy(&md_dst->u.tun_info.dst_cache);
#endif
	kfree(md_dst);
}
EXPORT_SYMBOL_GPL(metadata_dst_free);

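/* Per-CPU variant of metadata_dst_alloc(): one initialized
 * metadata_dst (plus @optslen option bytes) per possible CPU, for
 * callers that rewrite the metadata per packet without locking.
 *
 * Sketch of use (error handling style is the caller's):
 *
 *	md = metadata_dst_alloc_percpu(0, METADATA_IP_TUNNEL, GFP_KERNEL);
 *	if (!md)
 *		return -ENOMEM;
 */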
struct metadata_dst __percpu *
metadata_dst_alloc_percpu(u8 optslen, enum metadata_type type, gfp_t flags)
{
	int cpu;
	struct metadata_dst __percpu *md_dst;

	md_dst = __alloc_percpu_gfp(sizeof(struct metadata_dst) + optslen,
				    __alignof__(struct metadata_dst), flags);
	if (!md_dst)
		return NULL;

	for_each_possible_cpu(cpu)
		__metadata_dst_init(per_cpu_ptr(md_dst, cpu), type, optslen);

	return md_dst;
}
EXPORT_SYMBOL_GPL(metadata_dst_alloc_percpu);

void metadata_dst_free_percpu(struct metadata_dst __percpu *md_dst)
{
#ifdef CONFIG_DST_CACHE
	int cpu;

	for_each_possible_cpu(cpu) {
		struct metadata_dst *one_md_dst = per_cpu_ptr(md_dst, cpu);

		if (one_md_dst->type == METADATA_IP_TUNNEL)
			dst_cache_destroy(&one_md_dst->u.tun_info.dst_cache);
	}
#endif
	free_percpu(md_dst);
}
EXPORT_SYMBOL_GPL(metadata_dst_free_percpu);