Thomas Gleixner | 457c899 | 2019-05-19 13:08:55 +0100 | [diff] [blame] | 1 | // SPDX-License-Identifier: GPL-2.0-only |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2 | /* |
| 3 | * net/core/dst.c Protocol independent destination cache. |
| 4 | * |
| 5 | * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru> |
| 6 | * |
| 7 | */ |
| 8 | |
| 9 | #include <linux/bitops.h> |
| 10 | #include <linux/errno.h> |
| 11 | #include <linux/init.h> |
| 12 | #include <linux/kernel.h> |
Eric Dumazet | 86bba26 | 2007-09-12 14:29:01 +0200 | [diff] [blame] | 13 | #include <linux/workqueue.h> |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 14 | #include <linux/mm.h> |
| 15 | #include <linux/module.h> |
Tejun Heo | 5a0e3ad | 2010-03-24 17:04:11 +0900 | [diff] [blame] | 16 | #include <linux/slab.h> |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 17 | #include <linux/netdevice.h> |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 18 | #include <linux/skbuff.h> |
| 19 | #include <linux/string.h> |
| 20 | #include <linux/types.h> |
Eric W. Biederman | e9dc865 | 2007-09-12 13:02:17 +0200 | [diff] [blame] | 21 | #include <net/net_namespace.h> |
Eric Dumazet | 2fc1b5d | 2010-02-08 15:00:39 -0800 | [diff] [blame] | 22 | #include <linux/sched.h> |
Linus Torvalds | 268bb0c | 2011-05-20 12:50:29 -0700 | [diff] [blame] | 23 | #include <linux/prefetch.h> |
Jiri Benc | 61adedf | 2015-08-20 13:56:25 +0200 | [diff] [blame] | 24 | #include <net/lwtunnel.h> |
David Miller | b6ca8bd | 2017-11-28 15:45:44 -0500 | [diff] [blame] | 25 | #include <net/xfrm.h> |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 26 | |
| 27 | #include <net/dst.h> |
Thomas Graf | f38a9eb | 2015-07-21 10:43:56 +0200 | [diff] [blame] | 28 | #include <net/dst_metadata.h> |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 29 | |
/* Output handler that drops every packet: frees the skb and reports
 * success.  Installed by dst_init()/dst_dev_put() on routes that must
 * not transmit.
 */
int dst_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	kfree_skb(skb);
	return 0;
}
EXPORT_SYMBOL(dst_discard_out);
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 36 | |
/* Shared read-only metrics block that every freshly initialized dst
 * points at until a writer copies it (see dst_cow_metrics_generic()).
 */
const struct dst_metrics dst_default_metrics = {
	/* This initializer is needed to force linker to place this variable
	 * into const section. Otherwise it might end into bss section.
	 * We really want to avoid false sharing on this variable, and catch
	 * any writes on it.
	 */
	.refcnt = REFCOUNT_INIT(1),
};
EXPORT_SYMBOL(dst_default_metrics);
Eric Dumazet | a37e6e3 | 2012-08-07 10:55:45 +0000 | [diff] [blame] | 46 | |
/* Initialize an already-allocated dst_entry.
 *
 * @dst:              entry to initialize
 * @ops:              per-family dst operations table
 * @dev:              device the route resolves to (may be NULL); a
 *                    reference is taken when non-NULL
 * @initial_ref:      starting value for dst->__refcnt
 * @initial_obsolete: starting value for dst->obsolete
 * @flags:            DST_* flags; DST_NOCOUNT skips the per-ops entry
 *                    accounting
 */
void dst_init(struct dst_entry *dst, struct dst_ops *ops,
	      struct net_device *dev, int initial_ref, int initial_obsolete,
	      unsigned short flags)
{
	dst->dev = dev;
	if (dev)
		dev_hold(dev);
	dst->ops = ops;
	/* Start on the shared read-only metrics; writers COW a copy later. */
	dst_init_metrics(dst, dst_default_metrics.metrics, true);
	dst->expires = 0UL;
#ifdef CONFIG_XFRM
	dst->xfrm = NULL;
#endif
	/* Drop all traffic until the owner installs real handlers. */
	dst->input = dst_discard;
	dst->output = dst_discard_out;
	dst->error = 0;
	dst->obsolete = initial_obsolete;
	dst->header_len = 0;
	dst->trailer_len = 0;
#ifdef CONFIG_IP_ROUTE_CLASSID
	dst->tclassid = 0;
#endif
	dst->lwtstate = NULL;
	atomic_set(&dst->__refcnt, initial_ref);
	dst->__use = 0;
	dst->lastuse = jiffies;
	dst->flags = flags;
	if (!(flags & DST_NOCOUNT))
		dst_entries_add(ops, 1);
}
EXPORT_SYMBOL(dst_init);
| 78 | |
/* Allocate a dst_entry from ops->kmem_cachep and initialize it via
 * dst_init().
 *
 * If the ops provide a garbage collector and the (counted) number of
 * entries exceeds ops->gc_thresh, gc runs first; if gc reports failure
 * the allocation is refused.  Returns the new entry, or NULL on
 * gc/allocation failure.
 */
void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
		int initial_ref, int initial_obsolete, unsigned short flags)
{
	struct dst_entry *dst;

	if (ops->gc &&
	    !(flags & DST_NOCOUNT) &&
	    dst_entries_get_fast(ops) > ops->gc_thresh) {
		if (ops->gc(ops)) {
			pr_notice_ratelimited("Route cache is full: consider increasing sysctl net.ipv6.route.max_size.\n");
			return NULL;
		}
	}

	/* GFP_ATOMIC: dst allocation can happen in softirq context. */
	dst = kmem_cache_alloc(ops->kmem_cachep, GFP_ATOMIC);
	if (!dst)
		return NULL;

	dst_init(dst, ops, dev, initial_ref, initial_obsolete, flags);

	return dst;
}
EXPORT_SYMBOL(dst_alloc);
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 102 | |
/* Tear down a dst_entry whose last reference has been dropped.
 *
 * Drops the per-ops entry count (unless DST_NOCOUNT), calls the
 * family's ->destroy hook, releases the device and lwtstate
 * references, and frees the memory — via metadata_dst_free() for
 * DST_METADATA entries, otherwise back to ops->kmem_cachep.  For xfrm
 * bundles the child dst is released as well.  Always returns NULL.
 */
struct dst_entry *dst_destroy(struct dst_entry * dst)
{
	struct dst_entry *child = NULL;

	smp_rmb();

#ifdef CONFIG_XFRM
	if (dst->xfrm) {
		struct xfrm_dst *xdst = (struct xfrm_dst *) dst;

		child = xdst->child;
	}
#endif
	if (!(dst->flags & DST_NOCOUNT))
		dst_entries_add(dst->ops, -1);

	if (dst->ops->destroy)
		dst->ops->destroy(dst);
	if (dst->dev)
		dev_put(dst->dev);

	lwtstate_put(dst->lwtstate);

	if (dst->flags & DST_METADATA)
		metadata_dst_free((struct metadata_dst *)dst);
	else
		kmem_cache_free(dst->ops->kmem_cachep, dst);

	/* Release the xfrm child (if any) now that the parent is gone. */
	dst = child;
	if (dst)
		dst_release_immediate(dst);
	return NULL;
}
EXPORT_SYMBOL(dst_destroy);
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 137 | |
Eric Dumazet | f886497 | 2014-06-24 10:05:11 -0700 | [diff] [blame] | 138 | static void dst_destroy_rcu(struct rcu_head *head) |
| 139 | { |
| 140 | struct dst_entry *dst = container_of(head, struct dst_entry, rcu_head); |
| 141 | |
| 142 | dst = dst_destroy(dst); |
Eric Dumazet | f886497 | 2014-06-24 10:05:11 -0700 | [diff] [blame] | 143 | } |
| 144 | |
/* Operations to mark dst as DEAD and clean up the net device referenced
 * by dst:
 * 1. put the dst under blackhole interface and discard all tx/rx packets
 *    on this route.
 * 2. release the net_device
 * This function should be called when removing routes from the fib tree
 * in preparation for a NETDEV_DOWN/NETDEV_UNREGISTER event and also to
 * make the next dst_ops->check() fail.
 */
void dst_dev_put(struct dst_entry *dst)
{
	struct net_device *dev = dst->dev;

	dst->obsolete = DST_OBSOLETE_DEAD;
	if (dst->ops->ifdown)
		dst->ops->ifdown(dst, dev, true);
	/* From here on, all traffic on this route is silently dropped. */
	dst->input = dst_discard;
	dst->output = dst_discard_out;
	/* Re-point the dst at blackhole_netdev so the real device can go;
	 * hold the new device before dropping the old reference.
	 */
	dst->dev = blackhole_netdev;
	dev_hold(dst->dev);
	dev_put(dev);
}
EXPORT_SYMBOL(dst_dev_put);
| 168 | |
Ilpo Järvinen | 8d33086 | 2008-03-27 17:53:31 -0700 | [diff] [blame] | 169 | void dst_release(struct dst_entry *dst) |
| 170 | { |
| 171 | if (dst) { |
laurent chavey | 598ed93 | 2010-03-29 10:41:36 +0000 | [diff] [blame] | 172 | int newrefcnt; |
Eric Dumazet | ef711cf | 2008-11-14 00:53:54 -0800 | [diff] [blame] | 173 | |
laurent chavey | 598ed93 | 2010-03-29 10:41:36 +0000 | [diff] [blame] | 174 | newrefcnt = atomic_dec_return(&dst->__refcnt); |
Jason A. Donenfeld | adecda5 | 2019-09-24 11:09:37 +0200 | [diff] [blame] | 175 | if (WARN_ONCE(newrefcnt < 0, "dst_release underflow")) |
Konstantin Khlebnikov | 8bf4ada | 2015-07-17 14:01:11 +0300 | [diff] [blame] | 176 | net_warn_ratelimited("%s: dst:%p refcnt:%d\n", |
| 177 | __func__, dst, newrefcnt); |
Wei Wang | b2a9c0e | 2017-06-17 10:42:41 -0700 | [diff] [blame] | 178 | if (!newrefcnt) |
Eric Dumazet | f886497 | 2014-06-24 10:05:11 -0700 | [diff] [blame] | 179 | call_rcu(&dst->rcu_head, dst_destroy_rcu); |
Ilpo Järvinen | 8d33086 | 2008-03-27 17:53:31 -0700 | [diff] [blame] | 180 | } |
| 181 | } |
| 182 | EXPORT_SYMBOL(dst_release); |
| 183 | |
Wei Wang | 5f56f40 | 2017-06-17 10:42:27 -0700 | [diff] [blame] | 184 | void dst_release_immediate(struct dst_entry *dst) |
| 185 | { |
| 186 | if (dst) { |
| 187 | int newrefcnt; |
| 188 | |
| 189 | newrefcnt = atomic_dec_return(&dst->__refcnt); |
Jason A. Donenfeld | adecda5 | 2019-09-24 11:09:37 +0200 | [diff] [blame] | 190 | if (WARN_ONCE(newrefcnt < 0, "dst_release_immediate underflow")) |
Wei Wang | 5f56f40 | 2017-06-17 10:42:27 -0700 | [diff] [blame] | 191 | net_warn_ratelimited("%s: dst:%p refcnt:%d\n", |
| 192 | __func__, dst, newrefcnt); |
| 193 | if (!newrefcnt) |
| 194 | dst_destroy(dst); |
| 195 | } |
| 196 | } |
| 197 | EXPORT_SYMBOL(dst_release_immediate); |
| 198 | |
/* Copy-on-write helper for dst metrics.
 *
 * Allocates a private refcounted copy of the metrics currently stored
 * at @old and atomically installs it in dst->_metrics via cmpxchg.  If
 * another CPU raced in first, our copy is freed and the winner's
 * metrics are returned instead (NULL if the winner's are read-only).
 * Returns a pointer to writable metrics, or NULL on allocation failure
 * or a read-only race outcome.
 */
u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old)
{
	struct dst_metrics *p = kmalloc(sizeof(*p), GFP_ATOMIC);

	if (p) {
		struct dst_metrics *old_p = (struct dst_metrics *)__DST_METRICS_PTR(old);
		unsigned long prev, new;

		refcount_set(&p->refcnt, 1);
		memcpy(p->metrics, old_p->metrics, sizeof(p->metrics));

		new = (unsigned long) p;
		prev = cmpxchg(&dst->_metrics, old, new);

		if (prev != old) {
			/* Lost the race: discard our copy, use the winner's
			 * metrics if they are writable.
			 */
			kfree(p);
			p = (struct dst_metrics *)__DST_METRICS_PTR(prev);
			if (prev & DST_METRICS_READ_ONLY)
				p = NULL;
		} else if (prev & DST_METRICS_REFCOUNTED) {
			/* We displaced a refcounted block: drop its ref. */
			if (refcount_dec_and_test(&old_p->refcnt))
				kfree(old_p);
		}
	}
	/* The (u32 *) cast below relies on metrics being the first field. */
	BUILD_BUG_ON(offsetof(struct dst_metrics, metrics) != 0);
	return (u32 *)p;
}
EXPORT_SYMBOL(dst_cow_metrics_generic);
| 227 | |
/* Caller asserts that dst_metrics_read_only(dst) is false. */
void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old)
{
	unsigned long prev, new;

	/* Swing the dst back to the shared read-only default metrics. */
	new = ((unsigned long) &dst_default_metrics) | DST_METRICS_READ_ONLY;
	prev = cmpxchg(&dst->_metrics, old, new);
	/* Free the private copy only if we were the one who swapped it out
	 * (a racing CPU may have replaced it already).
	 */
	if (prev == old)
		kfree(__DST_METRICS_PTR(old));
}
EXPORT_SYMBOL(__dst_destroy_metrics_generic);
| 239 | |
/* ->check stub for blackhole dsts: always returns NULL. */
struct dst_entry *dst_blackhole_check(struct dst_entry *dst, u32 cookie)
{
	return NULL;
}
| 244 | |
/* ->cow_metrics stub for blackhole dsts: no writable metrics, ever. */
u32 *dst_blackhole_cow_metrics(struct dst_entry *dst, unsigned long old)
{
	return NULL;
}
| 249 | |
/* ->neigh_lookup stub for blackhole dsts: no neighbour is ever found. */
struct neighbour *dst_blackhole_neigh_lookup(const struct dst_entry *dst,
					     struct sk_buff *skb,
					     const void *daddr)
{
	return NULL;
}
| 256 | |
/* ->update_pmtu stub for blackhole dsts: PMTU updates are ignored. */
void dst_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
			       struct sk_buff *skb, u32 mtu,
			       bool confirm_neigh)
{
}
EXPORT_SYMBOL_GPL(dst_blackhole_update_pmtu);
| 263 | |
/* ->redirect stub for blackhole dsts: redirects are ignored. */
void dst_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
			    struct sk_buff *skb)
{
}
EXPORT_SYMBOL_GPL(dst_blackhole_redirect);
| 269 | |
| 270 | unsigned int dst_blackhole_mtu(const struct dst_entry *dst) |
| 271 | { |
| 272 | unsigned int mtu = dst_metric_raw(dst, RTAX_MTU); |
| 273 | |
| 274 | return mtu ? : dst->dev->mtu; |
| 275 | } |
| 276 | EXPORT_SYMBOL_GPL(dst_blackhole_mtu); |
| 277 | |
/* Operations table for blackhole/metadata dsts: every callback is a
 * no-op or NULL-returning stub, except ->mtu which still reports a
 * usable value.
 */
static struct dst_ops dst_blackhole_ops = {
	.family		= AF_UNSPEC,
	.neigh_lookup	= dst_blackhole_neigh_lookup,
	.check		= dst_blackhole_check,
	.cow_metrics	= dst_blackhole_cow_metrics,
	.update_pmtu	= dst_blackhole_update_pmtu,
	.redirect	= dst_blackhole_redirect,
	.mtu		= dst_blackhole_mtu,
};
| 287 | |
/* Common initializer for metadata dsts: set up the embedded dst_entry
 * as an uncounted blackhole dst with one reference, zero the metadata
 * that follows it (including @optslen trailing option bytes), and
 * record the metadata @type.
 */
static void __metadata_dst_init(struct metadata_dst *md_dst,
				enum metadata_type type, u8 optslen)
{
	struct dst_entry *dst;

	dst = &md_dst->dst;
	dst_init(dst, &dst_blackhole_ops, NULL, 1, DST_OBSOLETE_NONE,
		 DST_METADATA | DST_NOCOUNT);
	/* Zero everything after the embedded dst_entry, incl. options. */
	memset(dst + 1, 0, sizeof(*md_dst) + optslen - sizeof(*dst));
	md_dst->type = type;
}
| 299 | |
Jakub Kicinski | 3fcece1 | 2017-06-23 22:11:58 +0200 | [diff] [blame] | 300 | struct metadata_dst *metadata_dst_alloc(u8 optslen, enum metadata_type type, |
| 301 | gfp_t flags) |
Alexei Starovoitov | d3aa45c | 2015-07-30 15:36:57 -0700 | [diff] [blame] | 302 | { |
| 303 | struct metadata_dst *md_dst; |
| 304 | |
| 305 | md_dst = kmalloc(sizeof(*md_dst) + optslen, flags); |
| 306 | if (!md_dst) |
| 307 | return NULL; |
| 308 | |
Jakub Kicinski | 3fcece1 | 2017-06-23 22:11:58 +0200 | [diff] [blame] | 309 | __metadata_dst_init(md_dst, type, optslen); |
Thomas Graf | f38a9eb | 2015-07-21 10:43:56 +0200 | [diff] [blame] | 310 | |
| 311 | return md_dst; |
| 312 | } |
| 313 | EXPORT_SYMBOL_GPL(metadata_dst_alloc); |
| 314 | |
/* Free a metadata dst from metadata_dst_alloc().  IP tunnel metadata
 * owns a dst_cache that must be destroyed before the struct is freed.
 */
void metadata_dst_free(struct metadata_dst *md_dst)
{
#ifdef CONFIG_DST_CACHE
	if (md_dst->type == METADATA_IP_TUNNEL)
		dst_cache_destroy(&md_dst->u.tun_info.dst_cache);
#endif
	kfree(md_dst);
}
EXPORT_SYMBOL_GPL(metadata_dst_free);
Paolo Abeni | d71785f | 2016-02-12 15:43:57 +0100 | [diff] [blame] | 324 | |
/* Allocate and initialize one metadata_dst (plus @optslen trailing
 * option bytes) per possible CPU.  Returns NULL on allocation failure.
 * Free with metadata_dst_free_percpu().
 */
struct metadata_dst __percpu *
metadata_dst_alloc_percpu(u8 optslen, enum metadata_type type, gfp_t flags)
{
	int cpu;
	struct metadata_dst __percpu *md_dst;

	md_dst = __alloc_percpu_gfp(sizeof(struct metadata_dst) + optslen,
				    __alignof__(struct metadata_dst), flags);
	if (!md_dst)
		return NULL;

	for_each_possible_cpu(cpu)
		__metadata_dst_init(per_cpu_ptr(md_dst, cpu), type, optslen);

	return md_dst;
}
EXPORT_SYMBOL_GPL(metadata_dst_alloc_percpu);
Jakub Kicinski | d66f2b9 | 2017-10-09 10:30:14 -0700 | [diff] [blame] | 342 | |
/* Free a per-cpu metadata dst from metadata_dst_alloc_percpu(),
 * destroying each CPU's tunnel dst_cache first where applicable.
 */
void metadata_dst_free_percpu(struct metadata_dst __percpu *md_dst)
{
#ifdef CONFIG_DST_CACHE
	int cpu;

	for_each_possible_cpu(cpu) {
		struct metadata_dst *one_md_dst = per_cpu_ptr(md_dst, cpu);

		if (one_md_dst->type == METADATA_IP_TUNNEL)
			dst_cache_destroy(&one_md_dst->u.tun_info.dst_cache);
	}
#endif
	free_percpu(md_dst);
}
EXPORT_SYMBOL_GPL(metadata_dst_free_percpu);