// SPDX-License-Identifier: GPL-2.0-only
/*
 * xfrm_state.c
 *
 * Changes:
 *	Mitsuru KANDA @USAGI
 *	Kazunori MIYAZAWA @USAGI
 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *		IPv6 support
 *	YOSHIFUJI Hideaki @USAGI
 *		Split up af-specific functions
 *	Derek Atkins <derek@ihtfp.com>
 *		Add UDP Encapsulation
 *
 */

#include <linux/workqueue.h>
#include <net/xfrm.h>
#include <linux/pfkeyv2.h>
#include <linux/ipsec.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/audit.h>
#include <linux/uaccess.h>
#include <linux/ktime.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>

#include "xfrm_hash.h"

#define xfrm_state_deref_prot(table, net) \
	rcu_dereference_protected((table), lockdep_is_held(&(net)->xfrm.xfrm_state_lock))

static void xfrm_state_gc_task(struct work_struct *work);

/* Each xfrm_state may be linked to two tables:

   1. Hash table by (spi,daddr,ah/esp) to find SA by SPI. (input,ctl)
   2. Hash table by (daddr,family,reqid) to find what SAs exist for given
      destination/tunnel endpoint. (output)
 */

static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024;
static __read_mostly seqcount_t xfrm_state_hash_generation = SEQCNT_ZERO(xfrm_state_hash_generation);
static struct kmem_cache *xfrm_state_cache __ro_after_init;

static DECLARE_WORK(xfrm_state_gc_work, xfrm_state_gc_task);
static HLIST_HEAD(xfrm_state_gc_list);

static inline bool xfrm_state_hold_rcu(struct xfrm_state __rcu *x)
{
	return refcount_inc_not_zero(&x->refcnt);
}

static inline unsigned int xfrm_dst_hash(struct net *net,
					 const xfrm_address_t *daddr,
					 const xfrm_address_t *saddr,
					 u32 reqid,
					 unsigned short family)
{
	return __xfrm_dst_hash(daddr, saddr, reqid, family, net->xfrm.state_hmask);
}

static inline unsigned int xfrm_src_hash(struct net *net,
					 const xfrm_address_t *daddr,
					 const xfrm_address_t *saddr,
					 unsigned short family)
{
	return __xfrm_src_hash(daddr, saddr, family, net->xfrm.state_hmask);
}

static inline unsigned int
xfrm_spi_hash(struct net *net, const xfrm_address_t *daddr,
	      __be32 spi, u8 proto, unsigned short family)
{
	return __xfrm_spi_hash(daddr, spi, proto, family, net->xfrm.state_hmask);
}

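/* Relink every state on @list into the new bydst, bysrc and byspi
 * tables, whose size is described by @nhashmask.
 */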
static void xfrm_hash_transfer(struct hlist_head *list,
			       struct hlist_head *ndsttable,
			       struct hlist_head *nsrctable,
			       struct hlist_head *nspitable,
			       unsigned int nhashmask)
{
	struct hlist_node *tmp;
	struct xfrm_state *x;

	hlist_for_each_entry_safe(x, tmp, list, bydst) {
		unsigned int h;

		h = __xfrm_dst_hash(&x->id.daddr, &x->props.saddr,
				    x->props.reqid, x->props.family,
				    nhashmask);
		hlist_add_head_rcu(&x->bydst, ndsttable + h);

		h = __xfrm_src_hash(&x->id.daddr, &x->props.saddr,
				    x->props.family,
				    nhashmask);
		hlist_add_head_rcu(&x->bysrc, nsrctable + h);

		if (x->id.spi) {
			h = __xfrm_spi_hash(&x->id.daddr, x->id.spi,
					    x->id.proto, x->props.family,
					    nhashmask);
			hlist_add_head_rcu(&x->byspi, nspitable + h);
		}
	}
}

static unsigned long xfrm_hash_new_size(unsigned int state_hmask)
{
	return ((state_hmask + 1) << 1) * sizeof(struct hlist_head);
}

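/* Worker that grows the per-namespace state hash tables: allocate the
 * larger tables, move every state over while holding xfrm_state_lock
 * and bumping the xfrm_state_hash_generation seqcount, then free the
 * old tables once an RCU grace period has passed.
 */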
static void xfrm_hash_resize(struct work_struct *work)
{
	struct net *net = container_of(work, struct net, xfrm.state_hash_work);
	struct hlist_head *ndst, *nsrc, *nspi, *odst, *osrc, *ospi;
	unsigned long nsize, osize;
	unsigned int nhashmask, ohashmask;
	int i;

	nsize = xfrm_hash_new_size(net->xfrm.state_hmask);
	ndst = xfrm_hash_alloc(nsize);
	if (!ndst)
		return;
	nsrc = xfrm_hash_alloc(nsize);
	if (!nsrc) {
		xfrm_hash_free(ndst, nsize);
		return;
	}
	nspi = xfrm_hash_alloc(nsize);
	if (!nspi) {
		xfrm_hash_free(ndst, nsize);
		xfrm_hash_free(nsrc, nsize);
		return;
	}

	spin_lock_bh(&net->xfrm.xfrm_state_lock);
	write_seqcount_begin(&xfrm_state_hash_generation);

	nhashmask = (nsize / sizeof(struct hlist_head)) - 1U;
	odst = xfrm_state_deref_prot(net->xfrm.state_bydst, net);
	for (i = net->xfrm.state_hmask; i >= 0; i--)
		xfrm_hash_transfer(odst + i, ndst, nsrc, nspi, nhashmask);

	osrc = xfrm_state_deref_prot(net->xfrm.state_bysrc, net);
	ospi = xfrm_state_deref_prot(net->xfrm.state_byspi, net);
	ohashmask = net->xfrm.state_hmask;

	rcu_assign_pointer(net->xfrm.state_bydst, ndst);
	rcu_assign_pointer(net->xfrm.state_bysrc, nsrc);
	rcu_assign_pointer(net->xfrm.state_byspi, nspi);
	net->xfrm.state_hmask = nhashmask;

	write_seqcount_end(&xfrm_state_hash_generation);
	spin_unlock_bh(&net->xfrm.xfrm_state_lock);

	osize = (ohashmask + 1) * sizeof(struct hlist_head);

	synchronize_rcu();

	xfrm_hash_free(odst, osize);
	xfrm_hash_free(osrc, osize);
	xfrm_hash_free(ospi, osize);
}

static DEFINE_SPINLOCK(xfrm_state_afinfo_lock);
static struct xfrm_state_afinfo __rcu *xfrm_state_afinfo[NPROTO];

static DEFINE_SPINLOCK(xfrm_state_gc_lock);

int __xfrm_state_delete(struct xfrm_state *x);

int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
static bool km_is_alive(const struct km_event *c);
void km_state_expired(struct xfrm_state *x, int hard, u32 portid);

static DEFINE_SPINLOCK(xfrm_type_lock);
int xfrm_register_type(const struct xfrm_type *type, unsigned short family)
{
	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
	const struct xfrm_type **typemap;
	int err = 0;

	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;
	typemap = afinfo->type_map;
	spin_lock_bh(&xfrm_type_lock);

	if (likely(typemap[type->proto] == NULL))
		typemap[type->proto] = type;
	else
		err = -EEXIST;
	spin_unlock_bh(&xfrm_type_lock);
	rcu_read_unlock();
	return err;
}
EXPORT_SYMBOL(xfrm_register_type);

int xfrm_unregister_type(const struct xfrm_type *type, unsigned short family)
{
	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
	const struct xfrm_type **typemap;
	int err = 0;

	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;
	typemap = afinfo->type_map;
	spin_lock_bh(&xfrm_type_lock);

	if (unlikely(typemap[type->proto] != type))
		err = -ENOENT;
	else
		typemap[type->proto] = NULL;
	spin_unlock_bh(&xfrm_type_lock);
	rcu_read_unlock();
	return err;
}
EXPORT_SYMBOL(xfrm_unregister_type);

static const struct xfrm_type *xfrm_get_type(u8 proto, unsigned short family)
{
	struct xfrm_state_afinfo *afinfo;
	const struct xfrm_type **typemap;
	const struct xfrm_type *type;
	int modload_attempted = 0;

retry:
	afinfo = xfrm_state_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return NULL;
	typemap = afinfo->type_map;

	type = READ_ONCE(typemap[proto]);
	if (unlikely(type && !try_module_get(type->owner)))
		type = NULL;

	rcu_read_unlock();

	if (!type && !modload_attempted) {
		request_module("xfrm-type-%d-%d", family, proto);
		modload_attempted = 1;
		goto retry;
	}

	return type;
}

static void xfrm_put_type(const struct xfrm_type *type)
{
	module_put(type->owner);
}

static DEFINE_SPINLOCK(xfrm_type_offload_lock);
int xfrm_register_type_offload(const struct xfrm_type_offload *type,
			       unsigned short family)
{
	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
	const struct xfrm_type_offload **typemap;
	int err = 0;

	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;
	typemap = afinfo->type_offload_map;
	spin_lock_bh(&xfrm_type_offload_lock);

	if (likely(typemap[type->proto] == NULL))
		typemap[type->proto] = type;
	else
		err = -EEXIST;
	spin_unlock_bh(&xfrm_type_offload_lock);
	rcu_read_unlock();
	return err;
}
EXPORT_SYMBOL(xfrm_register_type_offload);

int xfrm_unregister_type_offload(const struct xfrm_type_offload *type,
				 unsigned short family)
{
	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
	const struct xfrm_type_offload **typemap;
	int err = 0;

	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;
	typemap = afinfo->type_offload_map;
	spin_lock_bh(&xfrm_type_offload_lock);

	if (unlikely(typemap[type->proto] != type))
		err = -ENOENT;
	else
		typemap[type->proto] = NULL;
	spin_unlock_bh(&xfrm_type_offload_lock);
	rcu_read_unlock();
	return err;
}
EXPORT_SYMBOL(xfrm_unregister_type_offload);

static const struct xfrm_type_offload *
xfrm_get_type_offload(u8 proto, unsigned short family, bool try_load)
{
	struct xfrm_state_afinfo *afinfo;
	const struct xfrm_type_offload **typemap;
	const struct xfrm_type_offload *type;

retry:
	afinfo = xfrm_state_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return NULL;
	typemap = afinfo->type_offload_map;

	type = typemap[proto];
	if ((type && !try_module_get(type->owner)))
		type = NULL;

	rcu_read_unlock();

	if (!type && try_load) {
		request_module("xfrm-offload-%d-%d", family, proto);
		try_load = false;
		goto retry;
	}

	return type;
}

static void xfrm_put_type_offload(const struct xfrm_type_offload *type)
{
	module_put(type->owner);
}

static const struct xfrm_mode xfrm4_mode_map[XFRM_MODE_MAX] = {
	[XFRM_MODE_BEET] = {
		.encap = XFRM_MODE_BEET,
		.flags = XFRM_MODE_FLAG_TUNNEL,
		.family = AF_INET,
	},
	[XFRM_MODE_TRANSPORT] = {
		.encap = XFRM_MODE_TRANSPORT,
		.family = AF_INET,
	},
	[XFRM_MODE_TUNNEL] = {
		.encap = XFRM_MODE_TUNNEL,
		.flags = XFRM_MODE_FLAG_TUNNEL,
		.family = AF_INET,
	},
};

static const struct xfrm_mode xfrm6_mode_map[XFRM_MODE_MAX] = {
	[XFRM_MODE_BEET] = {
		.encap = XFRM_MODE_BEET,
		.flags = XFRM_MODE_FLAG_TUNNEL,
		.family = AF_INET6,
	},
	[XFRM_MODE_ROUTEOPTIMIZATION] = {
		.encap = XFRM_MODE_ROUTEOPTIMIZATION,
		.family = AF_INET6,
	},
	[XFRM_MODE_TRANSPORT] = {
		.encap = XFRM_MODE_TRANSPORT,
		.family = AF_INET6,
	},
	[XFRM_MODE_TUNNEL] = {
		.encap = XFRM_MODE_TUNNEL,
		.flags = XFRM_MODE_FLAG_TUNNEL,
		.family = AF_INET6,
	},
};

static const struct xfrm_mode *xfrm_get_mode(unsigned int encap, int family)
{
	const struct xfrm_mode *mode;

	if (unlikely(encap >= XFRM_MODE_MAX))
		return NULL;

	switch (family) {
	case AF_INET:
		mode = &xfrm4_mode_map[encap];
		if (mode->family == family)
			return mode;
		break;
	case AF_INET6:
		mode = &xfrm6_mode_map[encap];
		if (mode->family == family)
			return mode;
		break;
	default:
		break;
	}

	return NULL;
}

void xfrm_state_free(struct xfrm_state *x)
{
	kmem_cache_free(xfrm_state_cache, x);
}
EXPORT_SYMBOL(xfrm_state_free);

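/* Final teardown of a dead state: stop its timers, free the algorithm,
 * encapsulation and replay data, drop the type/offload module references
 * and return the object to the state slab cache.
 */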
static void ___xfrm_state_destroy(struct xfrm_state *x)
{
	hrtimer_cancel(&x->mtimer);
	del_timer_sync(&x->rtimer);
	kfree(x->aead);
	kfree(x->aalg);
	kfree(x->ealg);
	kfree(x->calg);
	kfree(x->encap);
	kfree(x->coaddr);
	kfree(x->replay_esn);
	kfree(x->preplay_esn);
	if (x->type_offload)
		xfrm_put_type_offload(x->type_offload);
	if (x->type) {
		x->type->destructor(x);
		xfrm_put_type(x->type);
	}
	xfrm_dev_state_free(x);
	security_xfrm_state_free(x);
	xfrm_state_free(x);
}

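/* Garbage-collector work: splice the global gc list and destroy the
 * entries only after synchronize_rcu(), so that lookups still walking
 * the old hash chains have finished.
 */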
static void xfrm_state_gc_task(struct work_struct *work)
{
	struct xfrm_state *x;
	struct hlist_node *tmp;
	struct hlist_head gc_list;

	spin_lock_bh(&xfrm_state_gc_lock);
	hlist_move_list(&xfrm_state_gc_list, &gc_list);
	spin_unlock_bh(&xfrm_state_gc_lock);

	synchronize_rcu();

	hlist_for_each_entry_safe(x, tmp, &gc_list, gclist)
		___xfrm_state_destroy(x);
}

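/* Per-state lifetime timer: check the hard and soft add/use expiry
 * times, notify the key managers on soft expiry, re-arm the hrtimer for
 * the next deadline, and delete the state once a hard limit is hit.
 */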
static enum hrtimer_restart xfrm_timer_handler(struct hrtimer *me)
{
	struct xfrm_state *x = container_of(me, struct xfrm_state, mtimer);
	enum hrtimer_restart ret = HRTIMER_NORESTART;
	time64_t now = ktime_get_real_seconds();
	time64_t next = TIME64_MAX;
	int warn = 0;
	int err = 0;

	spin_lock(&x->lock);
	if (x->km.state == XFRM_STATE_DEAD)
		goto out;
	if (x->km.state == XFRM_STATE_EXPIRED)
		goto expired;
	if (x->lft.hard_add_expires_seconds) {
		long tmo = x->lft.hard_add_expires_seconds +
			x->curlft.add_time - now;
		if (tmo <= 0) {
			if (x->xflags & XFRM_SOFT_EXPIRE) {
				/* enter hard expire without soft expire first?!
				 * setting a new date could trigger this.
				 * workaround: fix x->curlft.add_time by below:
				 */
				x->curlft.add_time = now - x->saved_tmo - 1;
				tmo = x->lft.hard_add_expires_seconds - x->saved_tmo;
			} else
				goto expired;
		}
		if (tmo < next)
			next = tmo;
	}
	if (x->lft.hard_use_expires_seconds) {
		long tmo = x->lft.hard_use_expires_seconds +
			(x->curlft.use_time ? : now) - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (x->km.dying)
		goto resched;
	if (x->lft.soft_add_expires_seconds) {
		long tmo = x->lft.soft_add_expires_seconds +
			x->curlft.add_time - now;
		if (tmo <= 0) {
			warn = 1;
			x->xflags &= ~XFRM_SOFT_EXPIRE;
		} else if (tmo < next) {
			next = tmo;
			x->xflags |= XFRM_SOFT_EXPIRE;
			x->saved_tmo = tmo;
		}
	}
	if (x->lft.soft_use_expires_seconds) {
		long tmo = x->lft.soft_use_expires_seconds +
			(x->curlft.use_time ? : now) - now;
		if (tmo <= 0)
			warn = 1;
		else if (tmo < next)
			next = tmo;
	}

	x->km.dying = warn;
	if (warn)
		km_state_expired(x, 0, 0);
resched:
	if (next != TIME64_MAX) {
		hrtimer_forward_now(&x->mtimer, ktime_set(next, 0));
		ret = HRTIMER_RESTART;
	}

	goto out;

expired:
	if (x->km.state == XFRM_STATE_ACQ && x->id.spi == 0)
		x->km.state = XFRM_STATE_EXPIRED;

	err = __xfrm_state_delete(x);
	if (!err)
		km_state_expired(x, 1, 0);

	xfrm_audit_state_delete(x, err ? 0 : 1, true);

out:
	spin_unlock(&x->lock);
	return ret;
}

static void xfrm_replay_timer_handler(struct timer_list *t);

struct xfrm_state *xfrm_state_alloc(struct net *net)
{
	struct xfrm_state *x;

	x = kmem_cache_alloc(xfrm_state_cache, GFP_ATOMIC | __GFP_ZERO);

	if (x) {
		write_pnet(&x->xs_net, net);
		refcount_set(&x->refcnt, 1);
		atomic_set(&x->tunnel_users, 0);
		INIT_LIST_HEAD(&x->km.all);
		INIT_HLIST_NODE(&x->bydst);
		INIT_HLIST_NODE(&x->bysrc);
		INIT_HLIST_NODE(&x->byspi);
		hrtimer_init(&x->mtimer, CLOCK_BOOTTIME, HRTIMER_MODE_ABS_SOFT);
		x->mtimer.function = xfrm_timer_handler;
		timer_setup(&x->rtimer, xfrm_replay_timer_handler, 0);
		x->curlft.add_time = ktime_get_real_seconds();
		x->lft.soft_byte_limit = XFRM_INF;
		x->lft.soft_packet_limit = XFRM_INF;
		x->lft.hard_byte_limit = XFRM_INF;
		x->lft.hard_packet_limit = XFRM_INF;
		x->replay_maxage = 0;
		x->replay_maxdiff = 0;
		spin_lock_init(&x->lock);
	}
	return x;
}
EXPORT_SYMBOL(xfrm_state_alloc);

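/* Release a dead state: either destroy it right away after waiting for
 * an RCU grace period (sync == true), or queue it on the gc list for
 * the deferred xfrm_state_gc_task.
 */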
void __xfrm_state_destroy(struct xfrm_state *x, bool sync)
{
	WARN_ON(x->km.state != XFRM_STATE_DEAD);

	if (sync) {
		synchronize_rcu();
		___xfrm_state_destroy(x);
	} else {
		spin_lock_bh(&xfrm_state_gc_lock);
		hlist_add_head(&x->gclist, &xfrm_state_gc_list);
		spin_unlock_bh(&xfrm_state_gc_lock);
		schedule_work(&xfrm_state_gc_work);
	}
}
EXPORT_SYMBOL(__xfrm_state_destroy);

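/* Unlink a state from the per-netns list and the bydst/bysrc/byspi hash
 * tables and drop the reference that xfrm_state_alloc() created.
 * Callers are expected to hold x->lock; returns -ESRCH if the state was
 * already dead.
 */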
int __xfrm_state_delete(struct xfrm_state *x)
{
	struct net *net = xs_net(x);
	int err = -ESRCH;

	if (x->km.state != XFRM_STATE_DEAD) {
		x->km.state = XFRM_STATE_DEAD;
		spin_lock(&net->xfrm.xfrm_state_lock);
		list_del(&x->km.all);
		hlist_del_rcu(&x->bydst);
		hlist_del_rcu(&x->bysrc);
		if (x->id.spi)
			hlist_del_rcu(&x->byspi);
		net->xfrm.state_num--;
		spin_unlock(&net->xfrm.xfrm_state_lock);

		xfrm_dev_state_delete(x);

		/* All xfrm_state objects are created by xfrm_state_alloc.
		 * The xfrm_state_alloc call gives a reference, and that
		 * is what we are dropping here.
		 */
		xfrm_state_put(x);
		err = 0;
	}

	return err;
}
EXPORT_SYMBOL(__xfrm_state_delete);

int xfrm_state_delete(struct xfrm_state *x)
{
	int err;

	spin_lock_bh(&x->lock);
	err = __xfrm_state_delete(x);
	spin_unlock_bh(&x->lock);

	return err;
}
EXPORT_SYMBOL(xfrm_state_delete);

#ifdef CONFIG_SECURITY_NETWORK_XFRM
static inline int
xfrm_state_flush_secctx_check(struct net *net, u8 proto, bool task_valid)
{
	int i, err = 0;

	for (i = 0; i <= net->xfrm.state_hmask; i++) {
		struct xfrm_state *x;

		hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
			if (xfrm_id_proto_match(x->id.proto, proto) &&
			   (err = security_xfrm_state_delete(x)) != 0) {
				xfrm_audit_state_delete(x, 0, task_valid);
				return err;
			}
		}
	}

	return err;
}

static inline int
xfrm_dev_state_flush_secctx_check(struct net *net, struct net_device *dev, bool task_valid)
{
	int i, err = 0;

	for (i = 0; i <= net->xfrm.state_hmask; i++) {
		struct xfrm_state *x;
		struct xfrm_state_offload *xso;

		hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
			xso = &x->xso;

			if (xso->dev == dev &&
			   (err = security_xfrm_state_delete(x)) != 0) {
				xfrm_audit_state_delete(x, 0, task_valid);
				return err;
			}
		}
	}

	return err;
}
#else
static inline int
xfrm_state_flush_secctx_check(struct net *net, u8 proto, bool task_valid)
{
	return 0;
}

static inline int
xfrm_dev_state_flush_secctx_check(struct net *net, struct net_device *dev, bool task_valid)
{
	return 0;
}
#endif

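/* Delete every state in @net whose protocol matches @proto, restarting
 * the walk of a chain each time the state lock is dropped for the
 * actual delete; returns 0 if at least one state was removed.
 */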
int xfrm_state_flush(struct net *net, u8 proto, bool task_valid, bool sync)
{
	int i, err = 0, cnt = 0;

	spin_lock_bh(&net->xfrm.xfrm_state_lock);
	err = xfrm_state_flush_secctx_check(net, proto, task_valid);
	if (err)
		goto out;

	err = -ESRCH;
	for (i = 0; i <= net->xfrm.state_hmask; i++) {
		struct xfrm_state *x;
restart:
		hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
			if (!xfrm_state_kern(x) &&
			    xfrm_id_proto_match(x->id.proto, proto)) {
				xfrm_state_hold(x);
				spin_unlock_bh(&net->xfrm.xfrm_state_lock);

				err = xfrm_state_delete(x);
				xfrm_audit_state_delete(x, err ? 0 : 1,
							task_valid);
				if (sync)
					xfrm_state_put_sync(x);
				else
					xfrm_state_put(x);
				if (!err)
					cnt++;

				spin_lock_bh(&net->xfrm.xfrm_state_lock);
				goto restart;
			}
		}
	}
out:
	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
	if (cnt)
		err = 0;

	return err;
}
EXPORT_SYMBOL(xfrm_state_flush);

int xfrm_dev_state_flush(struct net *net, struct net_device *dev, bool task_valid)
{
	int i, err = 0, cnt = 0;

	spin_lock_bh(&net->xfrm.xfrm_state_lock);
	err = xfrm_dev_state_flush_secctx_check(net, dev, task_valid);
	if (err)
		goto out;

	err = -ESRCH;
	for (i = 0; i <= net->xfrm.state_hmask; i++) {
		struct xfrm_state *x;
		struct xfrm_state_offload *xso;
restart:
		hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
			xso = &x->xso;

			if (!xfrm_state_kern(x) && xso->dev == dev) {
				xfrm_state_hold(x);
				spin_unlock_bh(&net->xfrm.xfrm_state_lock);

				err = xfrm_state_delete(x);
				xfrm_audit_state_delete(x, err ? 0 : 1,
							task_valid);
				xfrm_state_put(x);
				if (!err)
					cnt++;

				spin_lock_bh(&net->xfrm.xfrm_state_lock);
				goto restart;
			}
		}
	}
	if (cnt)
		err = 0;

out:
	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_dev_state_flush);

void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si)
{
	spin_lock_bh(&net->xfrm.xfrm_state_lock);
	si->sadcnt = net->xfrm.state_num;
	si->sadhcnt = net->xfrm.state_hmask + 1;
	si->sadhmcnt = xfrm_state_hashmax;
	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
}
EXPORT_SYMBOL(xfrm_sad_getinfo);

static void
__xfrm4_init_tempsel(struct xfrm_selector *sel, const struct flowi *fl)
{
	const struct flowi4 *fl4 = &fl->u.ip4;

	sel->daddr.a4 = fl4->daddr;
	sel->saddr.a4 = fl4->saddr;
	sel->dport = xfrm_flowi_dport(fl, &fl4->uli);
	sel->dport_mask = htons(0xffff);
	sel->sport = xfrm_flowi_sport(fl, &fl4->uli);
	sel->sport_mask = htons(0xffff);
	sel->family = AF_INET;
	sel->prefixlen_d = 32;
	sel->prefixlen_s = 32;
	sel->proto = fl4->flowi4_proto;
	sel->ifindex = fl4->flowi4_oif;
}

static void
__xfrm6_init_tempsel(struct xfrm_selector *sel, const struct flowi *fl)
{
	const struct flowi6 *fl6 = &fl->u.ip6;

	/* Initialize temporary selector matching only to current session. */
	*(struct in6_addr *)&sel->daddr = fl6->daddr;
	*(struct in6_addr *)&sel->saddr = fl6->saddr;
	sel->dport = xfrm_flowi_dport(fl, &fl6->uli);
	sel->dport_mask = htons(0xffff);
	sel->sport = xfrm_flowi_sport(fl, &fl6->uli);
	sel->sport_mask = htons(0xffff);
	sel->family = AF_INET6;
	sel->prefixlen_d = 128;
	sel->prefixlen_s = 128;
	sel->proto = fl6->flowi6_proto;
	sel->ifindex = fl6->flowi6_oif;
}

static void
xfrm_init_tempstate(struct xfrm_state *x, const struct flowi *fl,
		    const struct xfrm_tmpl *tmpl,
		    const xfrm_address_t *daddr, const xfrm_address_t *saddr,
		    unsigned short family)
{
	switch (family) {
	case AF_INET:
		__xfrm4_init_tempsel(&x->sel, fl);
		break;
	case AF_INET6:
		__xfrm6_init_tempsel(&x->sel, fl);
		break;
	}

	x->id = tmpl->id;

	switch (tmpl->encap_family) {
	case AF_INET:
		if (x->id.daddr.a4 == 0)
			x->id.daddr.a4 = daddr->a4;
		x->props.saddr = tmpl->saddr;
		if (x->props.saddr.a4 == 0)
			x->props.saddr.a4 = saddr->a4;
		break;
	case AF_INET6:
		if (ipv6_addr_any((struct in6_addr *)&x->id.daddr))
			memcpy(&x->id.daddr, daddr, sizeof(x->sel.daddr));
		memcpy(&x->props.saddr, &tmpl->saddr, sizeof(x->props.saddr));
		if (ipv6_addr_any((struct in6_addr *)&x->props.saddr))
			memcpy(&x->props.saddr, saddr, sizeof(x->props.saddr));
		break;
	}

	x->props.mode = tmpl->mode;
	x->props.reqid = tmpl->reqid;
	x->props.family = tmpl->encap_family;
}

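/* RCU lookup in the byspi table by (daddr, spi, proto, family); the mark
 * must match as well.  Returns a held state or NULL.
 */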
static struct xfrm_state *__xfrm_state_lookup(struct net *net, u32 mark,
					      const xfrm_address_t *daddr,
					      __be32 spi, u8 proto,
					      unsigned short family)
{
	unsigned int h = xfrm_spi_hash(net, daddr, spi, proto, family);
	struct xfrm_state *x;

	hlist_for_each_entry_rcu(x, net->xfrm.state_byspi + h, byspi) {
		if (x->props.family != family ||
		    x->id.spi != spi ||
		    x->id.proto != proto ||
		    !xfrm_addr_equal(&x->id.daddr, daddr, family))
			continue;

		if ((mark & x->mark.m) != x->mark.v)
			continue;
		if (!xfrm_state_hold_rcu(x))
			continue;
		return x;
	}

	return NULL;
}

static struct xfrm_state *__xfrm_state_lookup_byaddr(struct net *net, u32 mark,
						     const xfrm_address_t *daddr,
						     const xfrm_address_t *saddr,
						     u8 proto, unsigned short family)
{
	unsigned int h = xfrm_src_hash(net, daddr, saddr, family);
	struct xfrm_state *x;

	hlist_for_each_entry_rcu(x, net->xfrm.state_bysrc + h, bysrc) {
		if (x->props.family != family ||
		    x->id.proto != proto ||
		    !xfrm_addr_equal(&x->id.daddr, daddr, family) ||
		    !xfrm_addr_equal(&x->props.saddr, saddr, family))
			continue;

		if ((mark & x->mark.m) != x->mark.v)
			continue;
		if (!xfrm_state_hold_rcu(x))
			continue;
		return x;
	}

	return NULL;
}

static inline struct xfrm_state *
__xfrm_state_locate(struct xfrm_state *x, int use_spi, int family)
{
	struct net *net = xs_net(x);
	u32 mark = x->mark.v & x->mark.m;

	if (use_spi)
		return __xfrm_state_lookup(net, mark, &x->id.daddr,
					   x->id.spi, x->id.proto, family);
	else
		return __xfrm_state_lookup_byaddr(net, mark,
						  &x->id.daddr,
						  &x->props.saddr,
						  x->id.proto, family);
}

static void xfrm_hash_grow_check(struct net *net, int have_hash_collision)
{
	if (have_hash_collision &&
	    (net->xfrm.state_hmask + 1) < xfrm_state_hashmax &&
	    net->xfrm.state_num > net->xfrm.state_hmask)
		schedule_work(&net->xfrm.state_hash_work);
}

static void xfrm_state_look_at(struct xfrm_policy *pol, struct xfrm_state *x,
			       const struct flowi *fl, unsigned short family,
			       struct xfrm_state **best, int *acq_in_progress,
			       int *error)
{
	/* Resolution logic:
	 * 1. There is a valid state with matching selector. Done.
	 * 2. Valid state with inappropriate selector. Skip.
	 *
	 * Entering area of "sysdeps".
	 *
	 * 3. If state is not valid, selector is temporary, it selects
	 *    only session which triggered previous resolution. Key
	 *    manager will do something to install a state with proper
	 *    selector.
	 */
	if (x->km.state == XFRM_STATE_VALID) {
		if ((x->sel.family &&
		     !xfrm_selector_match(&x->sel, fl, x->sel.family)) ||
		    !security_xfrm_state_pol_flow_match(x, pol, fl))
			return;

		if (!*best ||
		    (*best)->km.dying > x->km.dying ||
		    ((*best)->km.dying == x->km.dying &&
		     (*best)->curlft.add_time < x->curlft.add_time))
			*best = x;
	} else if (x->km.state == XFRM_STATE_ACQ) {
		*acq_in_progress = 1;
	} else if (x->km.state == XFRM_STATE_ERROR ||
		   x->km.state == XFRM_STATE_EXPIRED) {
		if (xfrm_selector_match(&x->sel, fl, x->sel.family) &&
		    security_xfrm_state_pol_flow_match(x, pol, fl))
			*error = -ESRCH;
	}
}

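/* Main SA resolution for output: search the bydst hash (and then the
 * wildcard-source hash) for the best VALID state matching the template
 * and flow.  If nothing matches and no acquire is in progress, allocate
 * a temporary XFRM_STATE_ACQ entry and ask the key managers to negotiate
 * a real SA.  The whole walk is validated against the hash-resize
 * seqcount and fails with -EAGAIN if the tables changed underneath it.
 */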
struct xfrm_state *
xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
		const struct flowi *fl, struct xfrm_tmpl *tmpl,
		struct xfrm_policy *pol, int *err,
		unsigned short family, u32 if_id)
{
	static xfrm_address_t saddr_wildcard = { };
	struct net *net = xp_net(pol);
	unsigned int h, h_wildcard;
	struct xfrm_state *x, *x0, *to_put;
	int acquire_in_progress = 0;
	int error = 0;
	struct xfrm_state *best = NULL;
	u32 mark = pol->mark.v & pol->mark.m;
	unsigned short encap_family = tmpl->encap_family;
	unsigned int sequence;
	struct km_event c;

	to_put = NULL;

	sequence = read_seqcount_begin(&xfrm_state_hash_generation);

	rcu_read_lock();
	h = xfrm_dst_hash(net, daddr, saddr, tmpl->reqid, encap_family);
	hlist_for_each_entry_rcu(x, net->xfrm.state_bydst + h, bydst) {
		if (x->props.family == encap_family &&
		    x->props.reqid == tmpl->reqid &&
		    (mark & x->mark.m) == x->mark.v &&
		    x->if_id == if_id &&
		    !(x->props.flags & XFRM_STATE_WILDRECV) &&
		    xfrm_state_addr_check(x, daddr, saddr, encap_family) &&
		    tmpl->mode == x->props.mode &&
		    tmpl->id.proto == x->id.proto &&
		    (tmpl->id.spi == x->id.spi || !tmpl->id.spi))
			xfrm_state_look_at(pol, x, fl, encap_family,
					   &best, &acquire_in_progress, &error);
	}
	if (best || acquire_in_progress)
		goto found;

	h_wildcard = xfrm_dst_hash(net, daddr, &saddr_wildcard, tmpl->reqid, encap_family);
	hlist_for_each_entry_rcu(x, net->xfrm.state_bydst + h_wildcard, bydst) {
		if (x->props.family == encap_family &&
		    x->props.reqid == tmpl->reqid &&
		    (mark & x->mark.m) == x->mark.v &&
		    x->if_id == if_id &&
		    !(x->props.flags & XFRM_STATE_WILDRECV) &&
		    xfrm_addr_equal(&x->id.daddr, daddr, encap_family) &&
		    tmpl->mode == x->props.mode &&
		    tmpl->id.proto == x->id.proto &&
		    (tmpl->id.spi == x->id.spi || !tmpl->id.spi))
			xfrm_state_look_at(pol, x, fl, encap_family,
					   &best, &acquire_in_progress, &error);
	}

found:
	x = best;
	if (!x && !error && !acquire_in_progress) {
		if (tmpl->id.spi &&
		    (x0 = __xfrm_state_lookup(net, mark, daddr, tmpl->id.spi,
					      tmpl->id.proto, encap_family)) != NULL) {
			to_put = x0;
			error = -EEXIST;
			goto out;
		}

		c.net = net;
		/* If the KMs have no listeners (yet...), avoid allocating an SA
		 * for each and every packet - garbage collection might not
		 * handle the flood.
		 */
		if (!km_is_alive(&c)) {
			error = -ESRCH;
			goto out;
		}

		x = xfrm_state_alloc(net);
		if (x == NULL) {
			error = -ENOMEM;
			goto out;
		}
		/* Initialize temporary state matching only
		 * to current session. */
		xfrm_init_tempstate(x, fl, tmpl, daddr, saddr, family);
		memcpy(&x->mark, &pol->mark, sizeof(x->mark));
		x->if_id = if_id;

		error = security_xfrm_state_alloc_acquire(x, pol->security, fl->flowi_secid);
		if (error) {
			x->km.state = XFRM_STATE_DEAD;
			to_put = x;
			x = NULL;
			goto out;
		}

		if (km_query(x, tmpl, pol) == 0) {
			spin_lock_bh(&net->xfrm.xfrm_state_lock);
			x->km.state = XFRM_STATE_ACQ;
			list_add(&x->km.all, &net->xfrm.state_all);
			hlist_add_head_rcu(&x->bydst, net->xfrm.state_bydst + h);
			h = xfrm_src_hash(net, daddr, saddr, encap_family);
			hlist_add_head_rcu(&x->bysrc, net->xfrm.state_bysrc + h);
			if (x->id.spi) {
				h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, encap_family);
				hlist_add_head_rcu(&x->byspi, net->xfrm.state_byspi + h);
			}
			x->lft.hard_add_expires_seconds = net->xfrm.sysctl_acq_expires;
			hrtimer_start(&x->mtimer,
				      ktime_set(net->xfrm.sysctl_acq_expires, 0),
				      HRTIMER_MODE_REL_SOFT);
			net->xfrm.state_num++;
			xfrm_hash_grow_check(net, x->bydst.next != NULL);
			spin_unlock_bh(&net->xfrm.xfrm_state_lock);
		} else {
			x->km.state = XFRM_STATE_DEAD;
			to_put = x;
			x = NULL;
			error = -ESRCH;
		}
	}
out:
	if (x) {
		if (!xfrm_state_hold_rcu(x)) {
			*err = -EAGAIN;
			x = NULL;
		}
	} else {
		*err = acquire_in_progress ? -EAGAIN : error;
	}
	rcu_read_unlock();
	if (to_put)
		xfrm_state_put(to_put);

	if (read_seqcount_retry(&xfrm_state_hash_generation, sequence)) {
		*err = -EAGAIN;
		if (x) {
			xfrm_state_put(x);
			x = NULL;
		}
	}

	return x;
}

struct xfrm_state *
xfrm_stateonly_find(struct net *net, u32 mark, u32 if_id,
		    xfrm_address_t *daddr, xfrm_address_t *saddr,
		    unsigned short family, u8 mode, u8 proto, u32 reqid)
{
	unsigned int h;
	struct xfrm_state *rx = NULL, *x = NULL;

	spin_lock_bh(&net->xfrm.xfrm_state_lock);
	h = xfrm_dst_hash(net, daddr, saddr, reqid, family);
	hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) {
		if (x->props.family == family &&
		    x->props.reqid == reqid &&
		    (mark & x->mark.m) == x->mark.v &&
		    x->if_id == if_id &&
		    !(x->props.flags & XFRM_STATE_WILDRECV) &&
		    xfrm_state_addr_check(x, daddr, saddr, family) &&
		    mode == x->props.mode &&
		    proto == x->id.proto &&
		    x->km.state == XFRM_STATE_VALID) {
			rx = x;
			break;
		}
	}

	if (rx)
		xfrm_state_hold(rx);
	spin_unlock_bh(&net->xfrm.xfrm_state_lock);

	return rx;
}
EXPORT_SYMBOL(xfrm_stateonly_find);

struct xfrm_state *xfrm_state_lookup_byspi(struct net *net, __be32 spi,
					   unsigned short family)
{
	struct xfrm_state *x;
	struct xfrm_state_walk *w;

	spin_lock_bh(&net->xfrm.xfrm_state_lock);
	list_for_each_entry(w, &net->xfrm.state_all, all) {
		x = container_of(w, struct xfrm_state, km);
		if (x->props.family != family ||
		    x->id.spi != spi)
			continue;

		xfrm_state_hold(x);
		spin_unlock_bh(&net->xfrm.xfrm_state_lock);
		return x;
	}
	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
	return NULL;
}
EXPORT_SYMBOL(xfrm_state_lookup_byspi);

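/* Link a state into the per-netns list and all three hash tables and
 * arm its timers; net->xfrm.xfrm_state_lock is held by the caller.
 */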
Linus Torvalds1da177e2005-04-16 15:20:36 -07001159static void __xfrm_state_insert(struct xfrm_state *x)
1160{
Alexey Dobriyan98806f72008-11-25 17:29:47 -08001161 struct net *net = xs_net(x);
David S. Millera624c102006-08-24 03:24:33 -07001162 unsigned int h;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001163
Alexey Dobriyan98806f72008-11-25 17:29:47 -08001164 list_add(&x->km.all, &net->xfrm.state_all);
Timo Teras4c563f72008-02-28 21:31:08 -08001165
Alexey Dobriyan98806f72008-11-25 17:29:47 -08001166 h = xfrm_dst_hash(net, &x->id.daddr, &x->props.saddr,
David S. Millerc1969f22006-08-24 04:00:03 -07001167 x->props.reqid, x->props.family);
Florian Westphalae3fb6d2016-08-09 12:16:04 +02001168 hlist_add_head_rcu(&x->bydst, net->xfrm.state_bydst + h);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001169
Alexey Dobriyan98806f72008-11-25 17:29:47 -08001170 h = xfrm_src_hash(net, &x->id.daddr, &x->props.saddr, x->props.family);
Florian Westphalae3fb6d2016-08-09 12:16:04 +02001171 hlist_add_head_rcu(&x->bysrc, net->xfrm.state_bysrc + h);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001172
Masahide NAKAMURA7b4dc3602006-09-27 22:21:52 -07001173 if (x->id.spi) {
Alexey Dobriyan98806f72008-11-25 17:29:47 -08001174 h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto,
Masahide NAKAMURA6c44e6b2006-08-23 17:53:57 -07001175 x->props.family);
1176
Florian Westphalae3fb6d2016-08-09 12:16:04 +02001177 hlist_add_head_rcu(&x->byspi, net->xfrm.state_byspi + h);
Masahide NAKAMURA6c44e6b2006-08-23 17:53:57 -07001178 }
1179
Thomas Gleixner671422b2019-03-01 23:48:20 +01001180 hrtimer_start(&x->mtimer, ktime_set(1, 0), HRTIMER_MODE_REL_SOFT);
David S. Millera47f0ce2006-08-24 03:54:22 -07001181 if (x->replay_maxage)
1182 mod_timer(&x->rtimer, jiffies + x->replay_maxage);
Jamal Hadi Salimf8cd5482006-03-20 19:15:11 -08001183
Alexey Dobriyan98806f72008-11-25 17:29:47 -08001184 net->xfrm.state_num++;
David S. Millerf034b5d2006-08-24 03:08:07 -07001185
Alexey Dobriyan98806f72008-11-25 17:29:47 -08001186 xfrm_hash_grow_check(net, x->bydst.next != NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001187}
1188
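/*
 * Summary of the insert path above: the state is linked into state_all and
 * the bydst/bysrc hash chains (plus byspi once an SPI has been assigned),
 * the lifetime hrtimer is armed one second out, the replay timer is started
 * when a replay_maxage is configured, and a hash-resize check is kicked.
 */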
Fan Du283bc9f2013-11-07 17:47:50 +08001189/* net->xfrm.xfrm_state_lock is held */
David S. Millerc7f5ea32006-08-24 03:29:04 -07001190static void __xfrm_state_bump_genids(struct xfrm_state *xnew)
1191{
Alexey Dobriyan98806f72008-11-25 17:29:47 -08001192 struct net *net = xs_net(xnew);
David S. Millerc7f5ea32006-08-24 03:29:04 -07001193 unsigned short family = xnew->props.family;
1194 u32 reqid = xnew->props.reqid;
1195 struct xfrm_state *x;
David S. Millerc7f5ea32006-08-24 03:29:04 -07001196 unsigned int h;
Jamal Hadi Salim3d6acfa2010-02-22 11:32:56 +00001197 u32 mark = xnew->mark.v & xnew->mark.m;
Steffen Klassert7e652642018-06-12 14:07:07 +02001198 u32 if_id = xnew->if_id;
David S. Millerc7f5ea32006-08-24 03:29:04 -07001199
Alexey Dobriyan98806f72008-11-25 17:29:47 -08001200 h = xfrm_dst_hash(net, &xnew->id.daddr, &xnew->props.saddr, reqid, family);
Sasha Levinb67bfe02013-02-27 17:06:00 -08001201 hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) {
David S. Millerc7f5ea32006-08-24 03:29:04 -07001202 if (x->props.family == family &&
1203 x->props.reqid == reqid &&
Steffen Klassert7e652642018-06-12 14:07:07 +02001204 x->if_id == if_id &&
Jamal Hadi Salim3d6acfa2010-02-22 11:32:56 +00001205 (mark & x->mark.m) == x->mark.v &&
YOSHIFUJI Hideaki / 吉藤英明70e94e62013-01-29 12:48:50 +00001206 xfrm_addr_equal(&x->id.daddr, &xnew->id.daddr, family) &&
1207 xfrm_addr_equal(&x->props.saddr, &xnew->props.saddr, family))
Herbert Xu34996cb2010-03-31 01:19:49 +00001208 x->genid++;
David S. Millerc7f5ea32006-08-24 03:29:04 -07001209 }
1210}
1211
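/*
 * Hedged note on __xfrm_state_bump_genids(): bumping x->genid on every state
 * that shares reqid/family/daddr/saddr with the newly inserted one is meant
 * to invalidate bundles cached against the old states, so later lookups
 * re-resolve onto the new SA; the genid consumers live in the policy/bundle
 * code, not in this file.
 */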
Linus Torvalds1da177e2005-04-16 15:20:36 -07001212void xfrm_state_insert(struct xfrm_state *x)
1213{
Fan Du283bc9f2013-11-07 17:47:50 +08001214 struct net *net = xs_net(x);
1215
1216 spin_lock_bh(&net->xfrm.xfrm_state_lock);
David S. Millerc7f5ea32006-08-24 03:29:04 -07001217 __xfrm_state_bump_genids(x);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001218 __xfrm_state_insert(x);
Fan Du283bc9f2013-11-07 17:47:50 +08001219 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001220}
1221EXPORT_SYMBOL(xfrm_state_insert);
1222
Fan Du283bc9f2013-11-07 17:47:50 +08001223/* net->xfrm.xfrm_state_lock is held */
Mathias Krausee473fcb2013-06-26 23:56:58 +02001224static struct xfrm_state *__find_acq_core(struct net *net,
1225 const struct xfrm_mark *m,
David S. Millera70486f2011-02-27 23:17:24 -08001226 unsigned short family, u8 mode,
Steffen Klassert7e652642018-06-12 14:07:07 +02001227 u32 reqid, u32 if_id, u8 proto,
David S. Millera70486f2011-02-27 23:17:24 -08001228 const xfrm_address_t *daddr,
Mathias Krausee473fcb2013-06-26 23:56:58 +02001229 const xfrm_address_t *saddr,
1230 int create)
David S. Miller27708342006-08-24 00:13:10 -07001231{
Alexey Dobriyan5447c5e2008-11-25 17:31:51 -08001232 unsigned int h = xfrm_dst_hash(net, daddr, saddr, reqid, family);
David S. Miller27708342006-08-24 00:13:10 -07001233 struct xfrm_state *x;
Jamal Hadi Salim3d6acfa2010-02-22 11:32:56 +00001234 u32 mark = m->v & m->m;
David S. Miller27708342006-08-24 00:13:10 -07001235
Sasha Levinb67bfe02013-02-27 17:06:00 -08001236 hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) {
David S. Miller27708342006-08-24 00:13:10 -07001237 if (x->props.reqid != reqid ||
1238 x->props.mode != mode ||
1239 x->props.family != family ||
1240 x->km.state != XFRM_STATE_ACQ ||
Joy Latten75e252d2007-03-12 17:14:07 -07001241 x->id.spi != 0 ||
Wei Yongjun18025712009-06-28 18:42:53 +00001242 x->id.proto != proto ||
Jamal Hadi Salim3d6acfa2010-02-22 11:32:56 +00001243 (mark & x->mark.m) != x->mark.v ||
YOSHIFUJI Hideaki / 吉藤英明70e94e62013-01-29 12:48:50 +00001244 !xfrm_addr_equal(&x->id.daddr, daddr, family) ||
1245 !xfrm_addr_equal(&x->props.saddr, saddr, family))
David S. Miller27708342006-08-24 00:13:10 -07001246 continue;
1247
David S. Miller27708342006-08-24 00:13:10 -07001248 xfrm_state_hold(x);
1249 return x;
1250 }
1251
1252 if (!create)
1253 return NULL;
1254
Alexey Dobriyan5447c5e2008-11-25 17:31:51 -08001255 x = xfrm_state_alloc(net);
David S. Miller27708342006-08-24 00:13:10 -07001256 if (likely(x)) {
1257 switch (family) {
1258 case AF_INET:
1259 x->sel.daddr.a4 = daddr->a4;
1260 x->sel.saddr.a4 = saddr->a4;
1261 x->sel.prefixlen_d = 32;
1262 x->sel.prefixlen_s = 32;
1263 x->props.saddr.a4 = saddr->a4;
1264 x->id.daddr.a4 = daddr->a4;
1265 break;
1266
1267 case AF_INET6:
Jiri Benc15e318b2015-03-29 16:59:24 +02001268 x->sel.daddr.in6 = daddr->in6;
1269 x->sel.saddr.in6 = saddr->in6;
David S. Miller27708342006-08-24 00:13:10 -07001270 x->sel.prefixlen_d = 128;
1271 x->sel.prefixlen_s = 128;
Jiri Benc15e318b2015-03-29 16:59:24 +02001272 x->props.saddr.in6 = saddr->in6;
1273 x->id.daddr.in6 = daddr->in6;
David S. Miller27708342006-08-24 00:13:10 -07001274 break;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07001275 }
David S. Miller27708342006-08-24 00:13:10 -07001276
1277 x->km.state = XFRM_STATE_ACQ;
1278 x->id.proto = proto;
1279 x->props.family = family;
1280 x->props.mode = mode;
1281 x->props.reqid = reqid;
Steffen Klassert7e652642018-06-12 14:07:07 +02001282 x->if_id = if_id;
Jamal Hadi Salimbd557752010-02-22 16:20:22 -08001283 x->mark.v = m->v;
1284 x->mark.m = m->m;
Alexey Dobriyanb27aead2008-11-25 18:00:48 -08001285 x->lft.hard_add_expires_seconds = net->xfrm.sysctl_acq_expires;
David S. Miller27708342006-08-24 00:13:10 -07001286 xfrm_state_hold(x);
Thomas Gleixner671422b2019-03-01 23:48:20 +01001287 hrtimer_start(&x->mtimer,
1288 ktime_set(net->xfrm.sysctl_acq_expires, 0),
1289 HRTIMER_MODE_REL_SOFT);
Alexey Dobriyan5447c5e2008-11-25 17:31:51 -08001290 list_add(&x->km.all, &net->xfrm.state_all);
Florian Westphalae3fb6d2016-08-09 12:16:04 +02001291 hlist_add_head_rcu(&x->bydst, net->xfrm.state_bydst + h);
Alexey Dobriyan5447c5e2008-11-25 17:31:51 -08001292 h = xfrm_src_hash(net, daddr, saddr, family);
Florian Westphalae3fb6d2016-08-09 12:16:04 +02001293 hlist_add_head_rcu(&x->bysrc, net->xfrm.state_bysrc + h);
David S. Miller918049f2006-10-12 22:03:24 -07001294
Alexey Dobriyan5447c5e2008-11-25 17:31:51 -08001295 net->xfrm.state_num++;
David S. Miller918049f2006-10-12 22:03:24 -07001296
Alexey Dobriyan5447c5e2008-11-25 17:31:51 -08001297 xfrm_hash_grow_check(net, x->bydst.next != NULL);
David S. Miller27708342006-08-24 00:13:10 -07001298 }
1299
1300 return x;
1301}
1302
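/*
 * Note on __find_acq_core() above: with create != 0 and no match, a larval
 * XFRM_STATE_ACQ entry is installed with net->xfrm.sysctl_acq_expires as its
 * hard add-time limit, so the acquire is either resolved by a key manager or
 * expires via its timer.
 */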
Jamal Hadi Salimbd557752010-02-22 16:20:22 -08001303static struct xfrm_state *__xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001304
1305int xfrm_state_add(struct xfrm_state *x)
1306{
Alexey Dobriyan5447c5e2008-11-25 17:31:51 -08001307 struct net *net = xs_net(x);
David S. Miller37b08e32008-09-02 20:14:15 -07001308 struct xfrm_state *x1, *to_put;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001309 int family;
1310 int err;
Jamal Hadi Salimbd557752010-02-22 16:20:22 -08001311 u32 mark = x->mark.v & x->mark.m;
Masahide NAKAMURAeb2971b2006-08-23 17:56:04 -07001312 int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001313
1314 family = x->props.family;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001315
David S. Miller37b08e32008-09-02 20:14:15 -07001316 to_put = NULL;
1317
Fan Du283bc9f2013-11-07 17:47:50 +08001318 spin_lock_bh(&net->xfrm.xfrm_state_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001319
David S. Milleredcd5822006-08-24 00:42:45 -07001320 x1 = __xfrm_state_locate(x, use_spi, family);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001321 if (x1) {
David S. Miller37b08e32008-09-02 20:14:15 -07001322 to_put = x1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001323 x1 = NULL;
1324 err = -EEXIST;
1325 goto out;
1326 }
1327
Masahide NAKAMURAeb2971b2006-08-23 17:56:04 -07001328 if (use_spi && x->km.seq) {
Jamal Hadi Salimbd557752010-02-22 16:20:22 -08001329 x1 = __xfrm_find_acq_byseq(net, mark, x->km.seq);
Joy Latten75e252d2007-03-12 17:14:07 -07001330 if (x1 && ((x1->id.proto != x->id.proto) ||
YOSHIFUJI Hideaki / 吉藤英明70e94e62013-01-29 12:48:50 +00001331 !xfrm_addr_equal(&x1->id.daddr, &x->id.daddr, family))) {
David S. Miller37b08e32008-09-02 20:14:15 -07001332 to_put = x1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001333 x1 = NULL;
1334 }
1335 }
1336
Masahide NAKAMURAeb2971b2006-08-23 17:56:04 -07001337 if (use_spi && !x1)
Jamal Hadi Salimbd557752010-02-22 16:20:22 -08001338 x1 = __find_acq_core(net, &x->mark, family, x->props.mode,
Steffen Klassert7e652642018-06-12 14:07:07 +02001339 x->props.reqid, x->if_id, x->id.proto,
David S. Miller27708342006-08-24 00:13:10 -07001340 &x->id.daddr, &x->props.saddr, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001341
David S. Millerc7f5ea32006-08-24 03:29:04 -07001342 __xfrm_state_bump_genids(x);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001343 __xfrm_state_insert(x);
1344 err = 0;
1345
1346out:
Fan Du283bc9f2013-11-07 17:47:50 +08001347 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001348
1349 if (x1) {
1350 xfrm_state_delete(x1);
1351 xfrm_state_put(x1);
1352 }
1353
David S. Miller37b08e32008-09-02 20:14:15 -07001354 if (to_put)
1355 xfrm_state_put(to_put);
1356
Linus Torvalds1da177e2005-04-16 15:20:36 -07001357 return err;
1358}
1359EXPORT_SYMBOL(xfrm_state_add);
1360
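/*
 * Minimal usage sketch for xfrm_state_add() (hypothetical caller; the
 * reference and error handling of the real callers in xfrm_user/af_key is
 * omitted).  A key manager builds a fully initialised state and hands it
 * over; the call fails with -EEXIST when an equivalent SA is already
 * installed.
 *
 *	struct xfrm_state *x = xfrm_state_alloc(net);
 *	int err;
 *
 *	if (!x)
 *		return -ENOMEM;
 *	// fill x->id, x->props, algorithms, lifetimes ... (omitted)
 *	err = xfrm_state_add(x);
 */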
Shinta Sugimoto80c9aba2007-02-08 13:11:42 -08001361#ifdef CONFIG_XFRM_MIGRATE
Antony Antony4ab47d42017-06-06 12:12:13 +02001362static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig,
1363 struct xfrm_encap_tmpl *encap)
Shinta Sugimoto80c9aba2007-02-08 13:11:42 -08001364{
Alexey Dobriyan98806f72008-11-25 17:29:47 -08001365 struct net *net = xs_net(orig);
Alexey Dobriyan98806f72008-11-25 17:29:47 -08001366 struct xfrm_state *x = xfrm_state_alloc(net);
Shinta Sugimoto80c9aba2007-02-08 13:11:42 -08001367 if (!x)
Herbert Xu553f9112010-02-15 20:00:51 +00001368 goto out;
Shinta Sugimoto80c9aba2007-02-08 13:11:42 -08001369
1370 memcpy(&x->id, &orig->id, sizeof(x->id));
1371 memcpy(&x->sel, &orig->sel, sizeof(x->sel));
1372 memcpy(&x->lft, &orig->lft, sizeof(x->lft));
1373 x->props.mode = orig->props.mode;
1374 x->props.replay_window = orig->props.replay_window;
1375 x->props.reqid = orig->props.reqid;
1376 x->props.family = orig->props.family;
1377 x->props.saddr = orig->props.saddr;
1378
1379 if (orig->aalg) {
Martin Willi4447bb32009-11-25 00:29:52 +00001380 x->aalg = xfrm_algo_auth_clone(orig->aalg);
Shinta Sugimoto80c9aba2007-02-08 13:11:42 -08001381 if (!x->aalg)
1382 goto error;
1383 }
1384 x->props.aalgo = orig->props.aalgo;
1385
Steffen Klassertee5c2312014-02-19 13:33:24 +01001386 if (orig->aead) {
1387 x->aead = xfrm_algo_aead_clone(orig->aead);
Antony Antony75bf50f2017-12-07 21:54:27 +01001388 x->geniv = orig->geniv;
Steffen Klassertee5c2312014-02-19 13:33:24 +01001389 if (!x->aead)
1390 goto error;
1391 }
Shinta Sugimoto80c9aba2007-02-08 13:11:42 -08001392 if (orig->ealg) {
1393 x->ealg = xfrm_algo_clone(orig->ealg);
1394 if (!x->ealg)
1395 goto error;
1396 }
1397 x->props.ealgo = orig->props.ealgo;
1398
1399 if (orig->calg) {
1400 x->calg = xfrm_algo_clone(orig->calg);
1401 if (!x->calg)
1402 goto error;
1403 }
1404 x->props.calgo = orig->props.calgo;
1405
Antony Antony4ab47d42017-06-06 12:12:13 +02001406 if (encap || orig->encap) {
1407 if (encap)
1408 x->encap = kmemdup(encap, sizeof(*x->encap),
1409 GFP_KERNEL);
1410 else
1411 x->encap = kmemdup(orig->encap, sizeof(*x->encap),
1412 GFP_KERNEL);
1413
Shinta Sugimoto80c9aba2007-02-08 13:11:42 -08001414 if (!x->encap)
1415 goto error;
1416 }
1417
1418 if (orig->coaddr) {
1419 x->coaddr = kmemdup(orig->coaddr, sizeof(*x->coaddr),
1420 GFP_KERNEL);
1421 if (!x->coaddr)
1422 goto error;
1423 }
1424
Steffen Klassertaf2f4642011-03-28 19:46:39 +00001425 if (orig->replay_esn) {
Steffen Klassertcc9ab602014-02-19 13:33:24 +01001426 if (xfrm_replay_clone(x, orig))
Steffen Klassertaf2f4642011-03-28 19:46:39 +00001427 goto error;
1428 }
1429
Jamal Hadi Salimbd557752010-02-22 16:20:22 -08001430 memcpy(&x->mark, &orig->mark, sizeof(x->mark));
1431
Steffen Klassertcc9ab602014-02-19 13:33:24 +01001432 if (xfrm_init_state(x) < 0)
Shinta Sugimoto80c9aba2007-02-08 13:11:42 -08001433 goto error;
1434
1435 x->props.flags = orig->props.flags;
Nicolas Dichtela947b0a2013-02-22 10:54:54 +01001436 x->props.extra_flags = orig->props.extra_flags;
Shinta Sugimoto80c9aba2007-02-08 13:11:42 -08001437
Steffen Klassert7e652642018-06-12 14:07:07 +02001438 x->if_id = orig->if_id;
Steffen Klassertee5c2312014-02-19 13:33:24 +01001439 x->tfcpad = orig->tfcpad;
1440 x->replay_maxdiff = orig->replay_maxdiff;
1441 x->replay_maxage = orig->replay_maxage;
Shinta Sugimoto80c9aba2007-02-08 13:11:42 -08001442 x->curlft.add_time = orig->curlft.add_time;
1443 x->km.state = orig->km.state;
1444 x->km.seq = orig->km.seq;
Antony Antonya486cd22017-05-19 12:47:00 +02001445 x->replay = orig->replay;
1446 x->preplay = orig->preplay;
Shinta Sugimoto80c9aba2007-02-08 13:11:42 -08001447
1448 return x;
1449
1450 error:
Herbert Xu553f9112010-02-15 20:00:51 +00001451 xfrm_state_put(x);
1452out:
Shinta Sugimoto80c9aba2007-02-08 13:11:42 -08001453 return NULL;
1454}
Shinta Sugimoto80c9aba2007-02-08 13:11:42 -08001455
Fan Du283bc9f2013-11-07 17:47:50 +08001456struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m, struct net *net)
Shinta Sugimoto80c9aba2007-02-08 13:11:42 -08001457{
1458 unsigned int h;
Steffen Klassert8c0cba22014-02-19 13:33:24 +01001459 struct xfrm_state *x = NULL;
1460
1461 spin_lock_bh(&net->xfrm.xfrm_state_lock);
Shinta Sugimoto80c9aba2007-02-08 13:11:42 -08001462
1463 if (m->reqid) {
Fan Du283bc9f2013-11-07 17:47:50 +08001464 h = xfrm_dst_hash(net, &m->old_daddr, &m->old_saddr,
Shinta Sugimoto80c9aba2007-02-08 13:11:42 -08001465 m->reqid, m->old_family);
Fan Du283bc9f2013-11-07 17:47:50 +08001466 hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) {
Shinta Sugimoto80c9aba2007-02-08 13:11:42 -08001467 if (x->props.mode != m->mode ||
1468 x->id.proto != m->proto)
1469 continue;
1470 if (m->reqid && x->props.reqid != m->reqid)
1471 continue;
YOSHIFUJI Hideaki / 吉藤英明70e94e62013-01-29 12:48:50 +00001472 if (!xfrm_addr_equal(&x->id.daddr, &m->old_daddr,
1473 m->old_family) ||
1474 !xfrm_addr_equal(&x->props.saddr, &m->old_saddr,
1475 m->old_family))
Shinta Sugimoto80c9aba2007-02-08 13:11:42 -08001476 continue;
1477 xfrm_state_hold(x);
Steffen Klassert8c0cba22014-02-19 13:33:24 +01001478 break;
Shinta Sugimoto80c9aba2007-02-08 13:11:42 -08001479 }
1480 } else {
Fan Du283bc9f2013-11-07 17:47:50 +08001481 h = xfrm_src_hash(net, &m->old_daddr, &m->old_saddr,
Shinta Sugimoto80c9aba2007-02-08 13:11:42 -08001482 m->old_family);
Fan Du283bc9f2013-11-07 17:47:50 +08001483 hlist_for_each_entry(x, net->xfrm.state_bysrc+h, bysrc) {
Shinta Sugimoto80c9aba2007-02-08 13:11:42 -08001484 if (x->props.mode != m->mode ||
1485 x->id.proto != m->proto)
1486 continue;
YOSHIFUJI Hideaki / 吉藤英明70e94e62013-01-29 12:48:50 +00001487 if (!xfrm_addr_equal(&x->id.daddr, &m->old_daddr,
1488 m->old_family) ||
1489 !xfrm_addr_equal(&x->props.saddr, &m->old_saddr,
1490 m->old_family))
Shinta Sugimoto80c9aba2007-02-08 13:11:42 -08001491 continue;
1492 xfrm_state_hold(x);
Steffen Klassert8c0cba22014-02-19 13:33:24 +01001493 break;
Shinta Sugimoto80c9aba2007-02-08 13:11:42 -08001494 }
1495 }
1496
Steffen Klassert8c0cba22014-02-19 13:33:24 +01001497 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1498
1499 return x;
Shinta Sugimoto80c9aba2007-02-08 13:11:42 -08001500}
1501EXPORT_SYMBOL(xfrm_migrate_state_find);
1502
Weilong Chen3e94c2d2013-12-24 09:43:47 +08001503struct xfrm_state *xfrm_state_migrate(struct xfrm_state *x,
Antony Antony4ab47d42017-06-06 12:12:13 +02001504 struct xfrm_migrate *m,
1505 struct xfrm_encap_tmpl *encap)
Shinta Sugimoto80c9aba2007-02-08 13:11:42 -08001506{
1507 struct xfrm_state *xc;
Shinta Sugimoto80c9aba2007-02-08 13:11:42 -08001508
Antony Antony4ab47d42017-06-06 12:12:13 +02001509 xc = xfrm_state_clone(x, encap);
Shinta Sugimoto80c9aba2007-02-08 13:11:42 -08001510 if (!xc)
1511 return NULL;
1512
1513 memcpy(&xc->id.daddr, &m->new_daddr, sizeof(xc->id.daddr));
1514 memcpy(&xc->props.saddr, &m->new_saddr, sizeof(xc->props.saddr));
1515
1516 /* add state */
YOSHIFUJI Hideaki / 吉藤英明70e94e62013-01-29 12:48:50 +00001517 if (xfrm_addr_equal(&x->id.daddr, &m->new_daddr, m->new_family)) {
Shinta Sugimoto80c9aba2007-02-08 13:11:42 -08001518		/* care is needed when the destination address of the
1519		   state is to be updated as it is part of the triplet */
1520 xfrm_state_insert(xc);
1521 } else {
Steffen Klassertcc9ab602014-02-19 13:33:24 +01001522 if (xfrm_state_add(xc) < 0)
Shinta Sugimoto80c9aba2007-02-08 13:11:42 -08001523 goto error;
1524 }
1525
1526 return xc;
1527error:
Thomas Egerer78347c82010-12-06 23:28:56 +00001528 xfrm_state_put(xc);
Shinta Sugimoto80c9aba2007-02-08 13:11:42 -08001529 return NULL;
1530}
1531EXPORT_SYMBOL(xfrm_state_migrate);
1532#endif
1533
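/*
 * Minimal migration sketch (hypothetical locals m, encap and error codes):
 * xfrm_migrate_state_find() locates the SA matching the old addresses in the
 * xfrm_migrate entry, xfrm_state_migrate() clones it onto the new addresses
 * and inserts the clone.
 *
 *	struct xfrm_state *x, *xc;
 *
 *	x = xfrm_migrate_state_find(m, net);
 *	if (!x)
 *		return -ENOENT;
 *	xc = xfrm_state_migrate(x, m, encap);	// encap may be NULL
 *	if (!xc) {
 *		xfrm_state_put(x);
 *		return -ENODATA;
 *	}
 */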
Linus Torvalds1da177e2005-04-16 15:20:36 -07001534int xfrm_state_update(struct xfrm_state *x)
1535{
David S. Miller37b08e32008-09-02 20:14:15 -07001536 struct xfrm_state *x1, *to_put;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001537 int err;
Masahide NAKAMURAeb2971b2006-08-23 17:56:04 -07001538 int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);
Fan Du283bc9f2013-11-07 17:47:50 +08001539 struct net *net = xs_net(x);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001540
David S. Miller37b08e32008-09-02 20:14:15 -07001541 to_put = NULL;
1542
Fan Du283bc9f2013-11-07 17:47:50 +08001543 spin_lock_bh(&net->xfrm.xfrm_state_lock);
David S. Milleredcd5822006-08-24 00:42:45 -07001544 x1 = __xfrm_state_locate(x, use_spi, x->props.family);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001545
1546 err = -ESRCH;
1547 if (!x1)
1548 goto out;
1549
1550 if (xfrm_state_kern(x1)) {
David S. Miller37b08e32008-09-02 20:14:15 -07001551 to_put = x1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001552 err = -EEXIST;
1553 goto out;
1554 }
1555
1556 if (x1->km.state == XFRM_STATE_ACQ) {
1557 __xfrm_state_insert(x);
1558 x = NULL;
1559 }
1560 err = 0;
1561
1562out:
Fan Du283bc9f2013-11-07 17:47:50 +08001563 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001564
David S. Miller37b08e32008-09-02 20:14:15 -07001565 if (to_put)
1566 xfrm_state_put(to_put);
1567
Linus Torvalds1da177e2005-04-16 15:20:36 -07001568 if (err)
1569 return err;
1570
1571 if (!x) {
1572 xfrm_state_delete(x1);
1573 xfrm_state_put(x1);
1574 return 0;
1575 }
1576
1577 err = -EINVAL;
1578 spin_lock_bh(&x1->lock);
1579 if (likely(x1->km.state == XFRM_STATE_VALID)) {
Herbert Xu257a4b02017-12-26 17:34:44 +11001580 if (x->encap && x1->encap &&
1581 x->encap->encap_type == x1->encap->encap_type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001582 memcpy(x1->encap, x->encap, sizeof(*x1->encap));
Herbert Xu257a4b02017-12-26 17:34:44 +11001583 else if (x->encap || x1->encap)
1584 goto fail;
1585
Noriaki TAKAMIYA060f02a2006-08-23 18:18:55 -07001586 if (x->coaddr && x1->coaddr) {
1587 memcpy(x1->coaddr, x->coaddr, sizeof(*x1->coaddr));
1588 }
1589 if (!use_spi && memcmp(&x1->sel, &x->sel, sizeof(x1->sel)))
1590 memcpy(&x1->sel, &x->sel, sizeof(x1->sel));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001591 memcpy(&x1->lft, &x->lft, sizeof(x1->lft));
1592 x1->km.dying = 0;
1593
Thomas Gleixner671422b2019-03-01 23:48:20 +01001594 hrtimer_start(&x1->mtimer, ktime_set(1, 0),
1595 HRTIMER_MODE_REL_SOFT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001596 if (x1->curlft.use_time)
1597 xfrm_state_check_expire(x1);
1598
Nathan Harold5baf4f92018-07-19 19:07:47 -07001599 if (x->props.smark.m || x->props.smark.v || x->if_id) {
Nathan Harold6d8e85f2018-06-29 15:07:10 -07001600 spin_lock_bh(&net->xfrm.xfrm_state_lock);
1601
Nathan Harold5baf4f92018-07-19 19:07:47 -07001602 if (x->props.smark.m || x->props.smark.v)
1603 x1->props.smark = x->props.smark;
1604
1605 if (x->if_id)
1606 x1->if_id = x->if_id;
Nathan Harold6d8e85f2018-06-29 15:07:10 -07001607
1608 __xfrm_state_bump_genids(x1);
1609 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1610 }
1611
Linus Torvalds1da177e2005-04-16 15:20:36 -07001612 err = 0;
Tushar Gohad8fcbc632011-07-07 15:38:52 +00001613 x->km.state = XFRM_STATE_DEAD;
1614 __xfrm_state_put(x);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001615 }
Herbert Xu257a4b02017-12-26 17:34:44 +11001616
1617fail:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001618 spin_unlock_bh(&x1->lock);
1619
1620 xfrm_state_put(x1);
1621
1622 return err;
1623}
1624EXPORT_SYMBOL(xfrm_state_update);
1625
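/*
 * Usage note (hedged summary of xfrm_state_update() above): a matching SA
 * must already exist, otherwise -ESRCH is returned.  If the existing entry
 * is still a larval XFRM_STATE_ACQ the new state simply replaces it; for a
 * valid SA only encap, coaddr, selector, lifetimes, smark and if_id are
 * refreshed.
 *
 *	err = xfrm_state_update(x);	// x filled as for xfrm_state_add()
 */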
1626int xfrm_state_check_expire(struct xfrm_state *x)
1627{
1628 if (!x->curlft.use_time)
Arnd Bergmann386c5682018-07-11 12:19:13 +02001629 x->curlft.use_time = ktime_get_real_seconds();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001630
Linus Torvalds1da177e2005-04-16 15:20:36 -07001631 if (x->curlft.bytes >= x->lft.hard_byte_limit ||
1632 x->curlft.packets >= x->lft.hard_packet_limit) {
Herbert Xu4666faa2005-06-18 22:43:22 -07001633 x->km.state = XFRM_STATE_EXPIRED;
Thomas Gleixner671422b2019-03-01 23:48:20 +01001634 hrtimer_start(&x->mtimer, 0, HRTIMER_MODE_REL_SOFT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001635 return -EINVAL;
1636 }
1637
1638 if (!x->km.dying &&
1639 (x->curlft.bytes >= x->lft.soft_byte_limit ||
Herbert Xu4666faa2005-06-18 22:43:22 -07001640 x->curlft.packets >= x->lft.soft_packet_limit)) {
1641 x->km.dying = 1;
Jamal Hadi Salim53bc6b4d2006-03-20 19:17:03 -08001642 km_state_expired(x, 0, 0);
Herbert Xu4666faa2005-06-18 22:43:22 -07001643 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001644 return 0;
1645}
1646EXPORT_SYMBOL(xfrm_state_check_expire);
1647
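/*
 * Typical per-packet use (sketch; the real callers sit in the input/output
 * paths): a non-zero return means a hard byte/packet limit was hit and the
 * packet should be dropped, while crossing a soft limit only fires a
 * km_state_expired() notification and keeps the SA usable.
 *
 *	if (xfrm_state_check_expire(x))
 *		goto drop;
 */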
Linus Torvalds1da177e2005-04-16 15:20:36 -07001648struct xfrm_state *
David S. Millera70486f2011-02-27 23:17:24 -08001649xfrm_state_lookup(struct net *net, u32 mark, const xfrm_address_t *daddr, __be32 spi,
Jamal Hadi Salimbd557752010-02-22 16:20:22 -08001650 u8 proto, unsigned short family)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001651{
1652 struct xfrm_state *x;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001653
Florian Westphalc2f672f2016-09-20 15:45:26 +02001654 rcu_read_lock();
Jamal Hadi Salimbd557752010-02-22 16:20:22 -08001655 x = __xfrm_state_lookup(net, mark, daddr, spi, proto, family);
Florian Westphalc2f672f2016-09-20 15:45:26 +02001656 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001657 return x;
1658}
1659EXPORT_SYMBOL(xfrm_state_lookup);
1660
1661struct xfrm_state *
Jamal Hadi Salimbd557752010-02-22 16:20:22 -08001662xfrm_state_lookup_byaddr(struct net *net, u32 mark,
David S. Millera70486f2011-02-27 23:17:24 -08001663 const xfrm_address_t *daddr, const xfrm_address_t *saddr,
Masahide NAKAMURAeb2971b2006-08-23 17:56:04 -07001664 u8 proto, unsigned short family)
1665{
1666 struct xfrm_state *x;
Masahide NAKAMURAeb2971b2006-08-23 17:56:04 -07001667
Fan Du283bc9f2013-11-07 17:47:50 +08001668 spin_lock_bh(&net->xfrm.xfrm_state_lock);
Jamal Hadi Salimbd557752010-02-22 16:20:22 -08001669 x = __xfrm_state_lookup_byaddr(net, mark, daddr, saddr, proto, family);
Fan Du283bc9f2013-11-07 17:47:50 +08001670 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
Masahide NAKAMURAeb2971b2006-08-23 17:56:04 -07001671 return x;
1672}
1673EXPORT_SYMBOL(xfrm_state_lookup_byaddr);
1674
1675struct xfrm_state *
Mathias Krausee473fcb2013-06-26 23:56:58 +02001676xfrm_find_acq(struct net *net, const struct xfrm_mark *mark, u8 mode, u32 reqid,
Steffen Klassert7e652642018-06-12 14:07:07 +02001677 u32 if_id, u8 proto, const xfrm_address_t *daddr,
Mathias Krausee473fcb2013-06-26 23:56:58 +02001678 const xfrm_address_t *saddr, int create, unsigned short family)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001679{
1680 struct xfrm_state *x;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001681
Fan Du283bc9f2013-11-07 17:47:50 +08001682 spin_lock_bh(&net->xfrm.xfrm_state_lock);
Steffen Klassert7e652642018-06-12 14:07:07 +02001683 x = __find_acq_core(net, mark, family, mode, reqid, if_id, proto, daddr, saddr, create);
Fan Du283bc9f2013-11-07 17:47:50 +08001684 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
David S. Miller27708342006-08-24 00:13:10 -07001685
Linus Torvalds1da177e2005-04-16 15:20:36 -07001686 return x;
1687}
1688EXPORT_SYMBOL(xfrm_find_acq);
1689
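/*
 * Minimal sketch (hypothetical locals mark, daddr, saddr, reqid, if_id):
 * look up an SA by (mode, reqid, proto, addresses); with create != 0 a
 * larval XFRM_STATE_ACQ entry is installed when nothing matches.  This is
 * the pattern the userspace SPI-allocation path follows before calling
 * xfrm_alloc_spi() on the result.
 *
 *	x = xfrm_find_acq(net, &mark, XFRM_MODE_TUNNEL, reqid, if_id,
 *			  IPPROTO_ESP, &daddr, &saddr, 1, AF_INET);
 */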
Masahide NAKAMURA41a49cc2006-08-23 22:48:31 -07001690#ifdef CONFIG_XFRM_SUB_POLICY
1691int
1692xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n,
Fan Du283bc9f2013-11-07 17:47:50 +08001693 unsigned short family, struct net *net)
Masahide NAKAMURA41a49cc2006-08-23 22:48:31 -07001694{
Koichiro Den3f5a95a2017-08-01 23:21:46 +09001695 int i;
Masahide NAKAMURA41a49cc2006-08-23 22:48:31 -07001696 int err = 0;
1697 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
1698 if (!afinfo)
1699 return -EAFNOSUPPORT;
1700
Fan Du283bc9f2013-11-07 17:47:50 +08001701 spin_lock_bh(&net->xfrm.xfrm_state_lock); /*FIXME*/
Masahide NAKAMURA41a49cc2006-08-23 22:48:31 -07001702 if (afinfo->tmpl_sort)
1703 err = afinfo->tmpl_sort(dst, src, n);
Koichiro Den3f5a95a2017-08-01 23:21:46 +09001704 else
1705 for (i = 0; i < n; i++)
1706 dst[i] = src[i];
Fan Du283bc9f2013-11-07 17:47:50 +08001707 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
Florian Westphalaf5d27c2017-01-09 14:20:47 +01001708 rcu_read_unlock();
Masahide NAKAMURA41a49cc2006-08-23 22:48:31 -07001709 return err;
1710}
1711EXPORT_SYMBOL(xfrm_tmpl_sort);
1712
1713int
1714xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n,
1715 unsigned short family)
1716{
Koichiro Den3f5a95a2017-08-01 23:21:46 +09001717 int i;
Masahide NAKAMURA41a49cc2006-08-23 22:48:31 -07001718 int err = 0;
1719 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
Steffen Klassert35ea790d2014-02-19 13:33:23 +01001720 struct net *net = xs_net(*src);
Fan Du283bc9f2013-11-07 17:47:50 +08001721
Masahide NAKAMURA41a49cc2006-08-23 22:48:31 -07001722 if (!afinfo)
1723 return -EAFNOSUPPORT;
1724
Fan Du283bc9f2013-11-07 17:47:50 +08001725 spin_lock_bh(&net->xfrm.xfrm_state_lock);
Masahide NAKAMURA41a49cc2006-08-23 22:48:31 -07001726 if (afinfo->state_sort)
1727 err = afinfo->state_sort(dst, src, n);
Koichiro Den3f5a95a2017-08-01 23:21:46 +09001728 else
1729 for (i = 0; i < n; i++)
1730 dst[i] = src[i];
Fan Du283bc9f2013-11-07 17:47:50 +08001731 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
Florian Westphalaf5d27c2017-01-09 14:20:47 +01001732 rcu_read_unlock();
Masahide NAKAMURA41a49cc2006-08-23 22:48:31 -07001733 return err;
1734}
1735EXPORT_SYMBOL(xfrm_state_sort);
1736#endif
1737
Linus Torvalds1da177e2005-04-16 15:20:36 -07001738/* Silly enough, but I'm too lazy to build a resolution list */
1739
Jamal Hadi Salimbd557752010-02-22 16:20:22 -08001740static struct xfrm_state *__xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001741{
1742 int i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001743
Alexey Dobriyan5447c5e2008-11-25 17:31:51 -08001744 for (i = 0; i <= net->xfrm.state_hmask; i++) {
David S. Miller8f126e32006-08-24 02:45:07 -07001745 struct xfrm_state *x;
1746
Sasha Levinb67bfe02013-02-27 17:06:00 -08001747 hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
David S. Miller8f126e32006-08-24 02:45:07 -07001748 if (x->km.seq == seq &&
Jamal Hadi Salim3d6acfa2010-02-22 11:32:56 +00001749 (mark & x->mark.m) == x->mark.v &&
David S. Miller8f126e32006-08-24 02:45:07 -07001750 x->km.state == XFRM_STATE_ACQ) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001751 xfrm_state_hold(x);
1752 return x;
1753 }
1754 }
1755 }
1756 return NULL;
1757}
1758
Jamal Hadi Salimbd557752010-02-22 16:20:22 -08001759struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001760{
1761 struct xfrm_state *x;
1762
Fan Du283bc9f2013-11-07 17:47:50 +08001763 spin_lock_bh(&net->xfrm.xfrm_state_lock);
Jamal Hadi Salimbd557752010-02-22 16:20:22 -08001764 x = __xfrm_find_acq_byseq(net, mark, seq);
Fan Du283bc9f2013-11-07 17:47:50 +08001765 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001766 return x;
1767}
1768EXPORT_SYMBOL(xfrm_find_acq_byseq);
1769
1770u32 xfrm_get_acqseq(void)
1771{
1772 u32 res;
jamal6836b9b2010-02-16 02:01:22 +00001773 static atomic_t acqseq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001774
jamal6836b9b2010-02-16 02:01:22 +00001775 do {
1776 res = atomic_inc_return(&acqseq);
1777 } while (!res);
1778
Linus Torvalds1da177e2005-04-16 15:20:36 -07001779 return res;
1780}
1781EXPORT_SYMBOL(xfrm_get_acqseq);
1782
Fan Du776e9dd2013-12-16 18:47:49 +08001783int verify_spi_info(u8 proto, u32 min, u32 max)
1784{
1785 switch (proto) {
1786 case IPPROTO_AH:
1787 case IPPROTO_ESP:
1788 break;
1789
1790 case IPPROTO_COMP:
1791		/* IPCOMP spi is 16 bits. */
1792 if (max >= 0x10000)
1793 return -EINVAL;
1794 break;
1795
1796 default:
1797 return -EINVAL;
1798 }
1799
1800 if (min > max)
1801 return -EINVAL;
1802
1803 return 0;
1804}
1805EXPORT_SYMBOL(verify_spi_info);
1806
Herbert Xu658b2192007-10-09 13:29:52 -07001807int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001808{
Alexey Dobriyan221df1e2008-11-25 17:30:50 -08001809 struct net *net = xs_net(x);
David S. Millerf034b5d2006-08-24 03:08:07 -07001810 unsigned int h;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001811 struct xfrm_state *x0;
Herbert Xu658b2192007-10-09 13:29:52 -07001812 int err = -ENOENT;
1813 __be32 minspi = htonl(low);
1814 __be32 maxspi = htonl(high);
Jamal Hadi Salimbd557752010-02-22 16:20:22 -08001815 u32 mark = x->mark.v & x->mark.m;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001816
Herbert Xu658b2192007-10-09 13:29:52 -07001817 spin_lock_bh(&x->lock);
1818 if (x->km.state == XFRM_STATE_DEAD)
1819 goto unlock;
1820
1821 err = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001822 if (x->id.spi)
Herbert Xu658b2192007-10-09 13:29:52 -07001823 goto unlock;
1824
1825 err = -ENOENT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001826
1827 if (minspi == maxspi) {
Jamal Hadi Salimbd557752010-02-22 16:20:22 -08001828 x0 = xfrm_state_lookup(net, mark, &x->id.daddr, minspi, x->id.proto, x->props.family);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001829 if (x0) {
1830 xfrm_state_put(x0);
Herbert Xu658b2192007-10-09 13:29:52 -07001831 goto unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001832 }
1833 x->id.spi = minspi;
1834 } else {
1835 u32 spi = 0;
Weilong Chen9b7a7872013-12-24 09:43:46 +08001836 for (h = 0; h < high-low+1; h++) {
Aruna-Hewapathirane63862b52014-01-11 07:15:59 -05001837 spi = low + prandom_u32()%(high-low+1);
Jamal Hadi Salimbd557752010-02-22 16:20:22 -08001838 x0 = xfrm_state_lookup(net, mark, &x->id.daddr, htonl(spi), x->id.proto, x->props.family);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001839 if (x0 == NULL) {
1840 x->id.spi = htonl(spi);
1841 break;
1842 }
1843 xfrm_state_put(x0);
1844 }
1845 }
1846 if (x->id.spi) {
Fan Du283bc9f2013-11-07 17:47:50 +08001847 spin_lock_bh(&net->xfrm.xfrm_state_lock);
Alexey Dobriyan12604d82008-11-25 17:31:18 -08001848 h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, x->props.family);
Florian Westphalae3fb6d2016-08-09 12:16:04 +02001849 hlist_add_head_rcu(&x->byspi, net->xfrm.state_byspi + h);
Fan Du283bc9f2013-11-07 17:47:50 +08001850 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
Herbert Xu658b2192007-10-09 13:29:52 -07001851
1852 err = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001853 }
Herbert Xu658b2192007-10-09 13:29:52 -07001854
1855unlock:
1856 spin_unlock_bh(&x->lock);
1857
1858 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001859}
1860EXPORT_SYMBOL(xfrm_alloc_spi);
1861
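/*
 * Sketch of SPI allocation (hypothetical low/high values, mirrors the
 * userspace getspi handling): validate the requested range first, then let
 * xfrm_alloc_spi() pick a free SPI and hash the state into byspi.
 *
 *	err = verify_spi_info(IPPROTO_ESP, low, high);
 *	if (err)
 *		return err;
 *	err = xfrm_alloc_spi(x, low, high);	// -ENOENT if the range is exhausted
 */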
Nicolas Dichteld3623092014-02-14 15:30:36 +01001862static bool __xfrm_state_filter_match(struct xfrm_state *x,
Nicolas Dichtel870a2df2014-03-06 18:24:29 +01001863 struct xfrm_address_filter *filter)
Nicolas Dichteld3623092014-02-14 15:30:36 +01001864{
1865 if (filter) {
1866 if ((filter->family == AF_INET ||
1867 filter->family == AF_INET6) &&
1868 x->props.family != filter->family)
1869 return false;
1870
1871 return addr_match(&x->props.saddr, &filter->saddr,
1872 filter->splen) &&
1873 addr_match(&x->id.daddr, &filter->daddr,
1874 filter->dplen);
1875 }
1876 return true;
1877}
1878
Alexey Dobriyan284fa7d2008-11-25 17:32:14 -08001879int xfrm_state_walk(struct net *net, struct xfrm_state_walk *walk,
Timo Teras4c563f72008-02-28 21:31:08 -08001880 int (*func)(struct xfrm_state *, int, void*),
Linus Torvalds1da177e2005-04-16 15:20:36 -07001881 void *data)
1882{
Herbert Xu12a169e2008-10-01 07:03:24 -07001883 struct xfrm_state *state;
1884 struct xfrm_state_walk *x;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001885 int err = 0;
1886
Herbert Xu12a169e2008-10-01 07:03:24 -07001887 if (walk->seq != 0 && list_empty(&walk->all))
Timo Teras4c563f72008-02-28 21:31:08 -08001888 return 0;
1889
Fan Du283bc9f2013-11-07 17:47:50 +08001890 spin_lock_bh(&net->xfrm.xfrm_state_lock);
Herbert Xu12a169e2008-10-01 07:03:24 -07001891 if (list_empty(&walk->all))
Alexey Dobriyan284fa7d2008-11-25 17:32:14 -08001892 x = list_first_entry(&net->xfrm.state_all, struct xfrm_state_walk, all);
Herbert Xu12a169e2008-10-01 07:03:24 -07001893 else
Li RongQing80077702015-04-22 17:09:54 +08001894 x = list_first_entry(&walk->all, struct xfrm_state_walk, all);
Alexey Dobriyan284fa7d2008-11-25 17:32:14 -08001895 list_for_each_entry_from(x, &net->xfrm.state_all, all) {
Herbert Xu12a169e2008-10-01 07:03:24 -07001896 if (x->state == XFRM_STATE_DEAD)
Timo Teras4c563f72008-02-28 21:31:08 -08001897 continue;
Herbert Xu12a169e2008-10-01 07:03:24 -07001898 state = container_of(x, struct xfrm_state, km);
1899 if (!xfrm_id_proto_match(state->id.proto, walk->proto))
Timo Teras4c563f72008-02-28 21:31:08 -08001900 continue;
Nicolas Dichteld3623092014-02-14 15:30:36 +01001901 if (!__xfrm_state_filter_match(state, walk->filter))
1902 continue;
Herbert Xu12a169e2008-10-01 07:03:24 -07001903 err = func(state, walk->seq, data);
1904 if (err) {
1905 list_move_tail(&walk->all, &x->all);
1906 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001907 }
Herbert Xu12a169e2008-10-01 07:03:24 -07001908 walk->seq++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001909 }
Herbert Xu12a169e2008-10-01 07:03:24 -07001910 if (walk->seq == 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001911 err = -ENOENT;
1912 goto out;
1913 }
Herbert Xu12a169e2008-10-01 07:03:24 -07001914 list_del_init(&walk->all);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001915out:
Fan Du283bc9f2013-11-07 17:47:50 +08001916 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001917 return err;
1918}
1919EXPORT_SYMBOL(xfrm_state_walk);
1920
Nicolas Dichteld3623092014-02-14 15:30:36 +01001921void xfrm_state_walk_init(struct xfrm_state_walk *walk, u8 proto,
Nicolas Dichtel870a2df2014-03-06 18:24:29 +01001922 struct xfrm_address_filter *filter)
Herbert Xu5c182452008-09-22 19:48:19 -07001923{
Herbert Xu12a169e2008-10-01 07:03:24 -07001924 INIT_LIST_HEAD(&walk->all);
Herbert Xu5c182452008-09-22 19:48:19 -07001925 walk->proto = proto;
Herbert Xu12a169e2008-10-01 07:03:24 -07001926 walk->state = XFRM_STATE_DEAD;
1927 walk->seq = 0;
Nicolas Dichteld3623092014-02-14 15:30:36 +01001928 walk->filter = filter;
Herbert Xu5c182452008-09-22 19:48:19 -07001929}
1930EXPORT_SYMBOL(xfrm_state_walk_init);
1931
Fan Du283bc9f2013-11-07 17:47:50 +08001932void xfrm_state_walk_done(struct xfrm_state_walk *walk, struct net *net)
Herbert Xuabb81c42008-09-09 19:58:29 -07001933{
Nicolas Dichteld3623092014-02-14 15:30:36 +01001934 kfree(walk->filter);
1935
Herbert Xu12a169e2008-10-01 07:03:24 -07001936 if (list_empty(&walk->all))
Herbert Xu5c182452008-09-22 19:48:19 -07001937 return;
Herbert Xu5c182452008-09-22 19:48:19 -07001938
Fan Du283bc9f2013-11-07 17:47:50 +08001939 spin_lock_bh(&net->xfrm.xfrm_state_lock);
Herbert Xu12a169e2008-10-01 07:03:24 -07001940 list_del(&walk->all);
Fan Du283bc9f2013-11-07 17:47:50 +08001941 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
Herbert Xuabb81c42008-09-09 19:58:29 -07001942}
1943EXPORT_SYMBOL(xfrm_state_walk_done);
1944
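/*
 * Walker usage sketch (hypothetical callback; this is the pattern netlink
 * dumps follow): initialise the walker once, call xfrm_state_walk() as many
 * times as needed (it resumes after the entry whose callback returned
 * non-zero), and always tear it down with xfrm_state_walk_done().
 *
 *	static int dump_one(struct xfrm_state *x, int seq, void *data)
 *	{
 *		// emit x somewhere; non-zero return pauses the walk
 *		return 0;
 *	}
 *
 *	xfrm_state_walk_init(&walk, IPPROTO_ESP, NULL);
 *	err = xfrm_state_walk(net, &walk, dump_one, data);
 *	xfrm_state_walk_done(&walk, net);
 */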
Kees Cooke99e88a2017-10-16 14:43:17 -07001945static void xfrm_replay_timer_handler(struct timer_list *t)
Jamal Hadi Salimf8cd5482006-03-20 19:15:11 -08001946{
Kees Cooke99e88a2017-10-16 14:43:17 -07001947 struct xfrm_state *x = from_timer(x, t, rtimer);
Jamal Hadi Salimf8cd5482006-03-20 19:15:11 -08001948
1949 spin_lock(&x->lock);
1950
Jamal Hadi Salim27170962006-04-14 15:03:05 -07001951 if (x->km.state == XFRM_STATE_VALID) {
Alexey Dobriyana6483b72008-11-25 17:38:20 -08001952 if (xfrm_aevent_is_on(xs_net(x)))
Steffen Klassert9fdc4882011-03-08 00:08:32 +00001953 x->repl->notify(x, XFRM_REPLAY_TIMEOUT);
Jamal Hadi Salim27170962006-04-14 15:03:05 -07001954 else
1955 x->xflags |= XFRM_TIME_DEFER;
1956 }
Jamal Hadi Salimf8cd5482006-03-20 19:15:11 -08001957
1958 spin_unlock(&x->lock);
1959}
1960
Denis Chengdf018122007-12-07 00:51:11 -08001961static LIST_HEAD(xfrm_km_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001962
David S. Miller214e0052011-02-24 00:02:38 -05001963void km_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001964{
1965 struct xfrm_mgr *km;
1966
Cong Wang85168c02013-01-16 16:05:06 +08001967 rcu_read_lock();
1968 list_for_each_entry_rcu(km, &xfrm_km_list, list)
Jamal Hadi Salim26b15da2005-06-18 22:42:13 -07001969 if (km->notify_policy)
1970 km->notify_policy(xp, dir, c);
Cong Wang85168c02013-01-16 16:05:06 +08001971 rcu_read_unlock();
Jamal Hadi Salim26b15da2005-06-18 22:42:13 -07001972}
1973
David S. Miller214e0052011-02-24 00:02:38 -05001974void km_state_notify(struct xfrm_state *x, const struct km_event *c)
Jamal Hadi Salim26b15da2005-06-18 22:42:13 -07001975{
1976 struct xfrm_mgr *km;
Cong Wang85168c02013-01-16 16:05:06 +08001977 rcu_read_lock();
1978 list_for_each_entry_rcu(km, &xfrm_km_list, list)
Jamal Hadi Salim26b15da2005-06-18 22:42:13 -07001979 if (km->notify)
1980 km->notify(x, c);
Cong Wang85168c02013-01-16 16:05:06 +08001981 rcu_read_unlock();
Jamal Hadi Salim26b15da2005-06-18 22:42:13 -07001982}
1983
1984EXPORT_SYMBOL(km_policy_notify);
1985EXPORT_SYMBOL(km_state_notify);
1986
Eric W. Biederman15e47302012-09-07 20:12:54 +00001987void km_state_expired(struct xfrm_state *x, int hard, u32 portid)
Jamal Hadi Salim26b15da2005-06-18 22:42:13 -07001988{
1989 struct km_event c;
1990
Herbert Xubf08867f92005-06-18 22:44:00 -07001991 c.data.hard = hard;
Eric W. Biederman15e47302012-09-07 20:12:54 +00001992 c.portid = portid;
Herbert Xuf60f6b82005-06-18 22:44:37 -07001993 c.event = XFRM_MSG_EXPIRE;
Jamal Hadi Salim26b15da2005-06-18 22:42:13 -07001994 km_state_notify(x, &c);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001995}
1996
Jamal Hadi Salim53bc6b4d2006-03-20 19:17:03 -08001997EXPORT_SYMBOL(km_state_expired);
Jamal Hadi Salim26b15da2005-06-18 22:42:13 -07001998/*
1999 * We send to all registered managers regardless of failure.
2000 * We are happy with one success.
2001 */
Jamal Hadi Salim980ebd22006-03-20 19:16:40 -08002002int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002003{
Jamal Hadi Salim26b15da2005-06-18 22:42:13 -07002004 int err = -EINVAL, acqret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002005 struct xfrm_mgr *km;
2006
Cong Wang85168c02013-01-16 16:05:06 +08002007 rcu_read_lock();
2008 list_for_each_entry_rcu(km, &xfrm_km_list, list) {
Fan Du65e07362012-08-15 10:13:47 +08002009 acqret = km->acquire(x, t, pol);
Jamal Hadi Salim26b15da2005-06-18 22:42:13 -07002010 if (!acqret)
2011 err = acqret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002012 }
Cong Wang85168c02013-01-16 16:05:06 +08002013 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002014 return err;
2015}
Jamal Hadi Salim980ebd22006-03-20 19:16:40 -08002016EXPORT_SYMBOL(km_query);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002017
Al Viro5d36b182006-11-08 00:24:06 -08002018int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002019{
2020 int err = -EINVAL;
2021 struct xfrm_mgr *km;
2022
Cong Wang85168c02013-01-16 16:05:06 +08002023 rcu_read_lock();
2024 list_for_each_entry_rcu(km, &xfrm_km_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002025 if (km->new_mapping)
2026 err = km->new_mapping(x, ipaddr, sport);
2027 if (!err)
2028 break;
2029 }
Cong Wang85168c02013-01-16 16:05:06 +08002030 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002031 return err;
2032}
2033EXPORT_SYMBOL(km_new_mapping);
2034
Eric W. Biederman15e47302012-09-07 20:12:54 +00002035void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 portid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002036{
Jamal Hadi Salim26b15da2005-06-18 22:42:13 -07002037 struct km_event c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002038
Herbert Xubf08867f92005-06-18 22:44:00 -07002039 c.data.hard = hard;
Eric W. Biederman15e47302012-09-07 20:12:54 +00002040 c.portid = portid;
Herbert Xuf60f6b82005-06-18 22:44:37 -07002041 c.event = XFRM_MSG_POLEXPIRE;
Jamal Hadi Salim26b15da2005-06-18 22:42:13 -07002042 km_policy_notify(pol, dir, &c);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002043}
David S. Millera70fcb02006-03-20 19:18:52 -08002044EXPORT_SYMBOL(km_policy_expired);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002045
Eric Dumazet2d60abc2008-01-03 20:43:21 -08002046#ifdef CONFIG_XFRM_MIGRATE
David S. Miller183cad12011-02-24 00:28:01 -05002047int km_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
2048 const struct xfrm_migrate *m, int num_migrate,
Antony Antony8bafd732017-06-06 12:12:14 +02002049 const struct xfrm_kmaddress *k,
2050 const struct xfrm_encap_tmpl *encap)
Shinta Sugimoto80c9aba2007-02-08 13:11:42 -08002051{
2052 int err = -EINVAL;
2053 int ret;
2054 struct xfrm_mgr *km;
2055
Cong Wang85168c02013-01-16 16:05:06 +08002056 rcu_read_lock();
2057 list_for_each_entry_rcu(km, &xfrm_km_list, list) {
Shinta Sugimoto80c9aba2007-02-08 13:11:42 -08002058 if (km->migrate) {
Antony Antony8bafd732017-06-06 12:12:14 +02002059 ret = km->migrate(sel, dir, type, m, num_migrate, k,
2060 encap);
Shinta Sugimoto80c9aba2007-02-08 13:11:42 -08002061 if (!ret)
2062 err = ret;
2063 }
2064 }
Cong Wang85168c02013-01-16 16:05:06 +08002065 rcu_read_unlock();
Shinta Sugimoto80c9aba2007-02-08 13:11:42 -08002066 return err;
2067}
2068EXPORT_SYMBOL(km_migrate);
Eric Dumazet2d60abc2008-01-03 20:43:21 -08002069#endif
Shinta Sugimoto80c9aba2007-02-08 13:11:42 -08002070
Alexey Dobriyandb983c12008-11-25 17:51:01 -08002071int km_report(struct net *net, u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr)
Masahide NAKAMURA97a64b42006-08-23 20:44:06 -07002072{
2073 int err = -EINVAL;
2074 int ret;
2075 struct xfrm_mgr *km;
2076
Cong Wang85168c02013-01-16 16:05:06 +08002077 rcu_read_lock();
2078 list_for_each_entry_rcu(km, &xfrm_km_list, list) {
Masahide NAKAMURA97a64b42006-08-23 20:44:06 -07002079 if (km->report) {
Alexey Dobriyandb983c12008-11-25 17:51:01 -08002080 ret = km->report(net, proto, sel, addr);
Masahide NAKAMURA97a64b42006-08-23 20:44:06 -07002081 if (!ret)
2082 err = ret;
2083 }
2084 }
Cong Wang85168c02013-01-16 16:05:06 +08002085 rcu_read_unlock();
Masahide NAKAMURA97a64b42006-08-23 20:44:06 -07002086 return err;
2087}
2088EXPORT_SYMBOL(km_report);
2089
Florian Westphalbb9cd072019-04-17 11:45:13 +02002090static bool km_is_alive(const struct km_event *c)
Horia Geanta0f245582014-02-12 16:20:06 +02002091{
2092 struct xfrm_mgr *km;
2093 bool is_alive = false;
2094
2095 rcu_read_lock();
2096 list_for_each_entry_rcu(km, &xfrm_km_list, list) {
2097 if (km->is_alive && km->is_alive(c)) {
2098 is_alive = true;
2099 break;
2100 }
2101 }
2102 rcu_read_unlock();
2103
2104 return is_alive;
2105}
Horia Geanta0f245582014-02-12 16:20:06 +02002106
Linus Torvalds1da177e2005-04-16 15:20:36 -07002107int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen)
2108{
2109 int err;
2110 u8 *data;
2111 struct xfrm_mgr *km;
2112 struct xfrm_policy *pol = NULL;
2113
Steffen Klassert19d7df62018-02-01 08:49:23 +01002114 if (in_compat_syscall())
2115 return -EOPNOTSUPP;
Steffen Klassert19d7df62018-02-01 08:49:23 +01002116
Lorenzo Colittibe8f8282017-11-20 19:26:02 +09002117 if (!optval && !optlen) {
2118 xfrm_sk_policy_insert(sk, XFRM_POLICY_IN, NULL);
2119 xfrm_sk_policy_insert(sk, XFRM_POLICY_OUT, NULL);
2120 __sk_dst_reset(sk);
2121 return 0;
2122 }
2123
Linus Torvalds1da177e2005-04-16 15:20:36 -07002124 if (optlen <= 0 || optlen > PAGE_SIZE)
2125 return -EMSGSIZE;
2126
Geliang Tanga133d932017-05-06 23:42:21 +08002127 data = memdup_user(optval, optlen);
2128 if (IS_ERR(data))
2129 return PTR_ERR(data);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002130
2131 err = -EINVAL;
Cong Wang85168c02013-01-16 16:05:06 +08002132 rcu_read_lock();
2133 list_for_each_entry_rcu(km, &xfrm_km_list, list) {
Venkat Yekkiralacb969f02006-07-24 23:32:20 -07002134 pol = km->compile_policy(sk, optname, data,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002135 optlen, &err);
2136 if (err >= 0)
2137 break;
2138 }
Cong Wang85168c02013-01-16 16:05:06 +08002139 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002140
2141 if (err >= 0) {
2142 xfrm_sk_policy_insert(sk, err, pol);
2143 xfrm_pol_put(pol);
Jonathan Basseri2b06cdf2017-10-25 09:52:27 -07002144 __sk_dst_reset(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002145 err = 0;
2146 }
2147
Linus Torvalds1da177e2005-04-16 15:20:36 -07002148 kfree(data);
2149 return err;
2150}
2151EXPORT_SYMBOL(xfrm_user_policy);
2152
Cong Wang85168c02013-01-16 16:05:06 +08002153static DEFINE_SPINLOCK(xfrm_km_lock);
2154
Linus Torvalds1da177e2005-04-16 15:20:36 -07002155int xfrm_register_km(struct xfrm_mgr *km)
2156{
Cong Wang85168c02013-01-16 16:05:06 +08002157 spin_lock_bh(&xfrm_km_lock);
2158 list_add_tail_rcu(&km->list, &xfrm_km_list);
2159 spin_unlock_bh(&xfrm_km_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002160 return 0;
2161}
2162EXPORT_SYMBOL(xfrm_register_km);
2163
2164int xfrm_unregister_km(struct xfrm_mgr *km)
2165{
Cong Wang85168c02013-01-16 16:05:06 +08002166 spin_lock_bh(&xfrm_km_lock);
2167 list_del_rcu(&km->list);
2168 spin_unlock_bh(&xfrm_km_lock);
2169 synchronize_rcu();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002170 return 0;
2171}
2172EXPORT_SYMBOL(xfrm_unregister_km);
2173
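/*
 * Registration sketch for a key manager (callback names are hypothetical;
 * af_key and xfrm_user are the in-tree users).  Most notifier loops above
 * skip NULL callbacks, but km_query() invokes km->acquire and
 * xfrm_user_policy() invokes km->compile_policy without a NULL check, so a
 * registered manager is expected to provide those.
 *
 *	static struct xfrm_mgr my_mgr = {
 *		.notify		= my_state_notify,
 *		.acquire	= my_acquire,
 *	};
 *
 *	err = xfrm_register_km(&my_mgr);
 *	...
 *	xfrm_unregister_km(&my_mgr);
 */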
2174int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo)
2175{
2176 int err = 0;
Florian Westphal423826a2017-01-09 14:20:46 +01002177
2178 if (WARN_ON(afinfo->family >= NPROTO))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002179 return -EAFNOSUPPORT;
Florian Westphal423826a2017-01-09 14:20:46 +01002180
Cong Wang44abdc32013-01-16 16:05:05 +08002181 spin_lock_bh(&xfrm_state_afinfo_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002182 if (unlikely(xfrm_state_afinfo[afinfo->family] != NULL))
Li RongQingf31e8d4f2015-04-23 11:06:53 +08002183 err = -EEXIST;
David S. Milleredcd5822006-08-24 00:42:45 -07002184 else
Cong Wang44abdc32013-01-16 16:05:05 +08002185 rcu_assign_pointer(xfrm_state_afinfo[afinfo->family], afinfo);
2186 spin_unlock_bh(&xfrm_state_afinfo_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002187 return err;
2188}
2189EXPORT_SYMBOL(xfrm_state_register_afinfo);
2190
2191int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo)
2192{
Florian Westphal423826a2017-01-09 14:20:46 +01002193 int err = 0, family = afinfo->family;
2194
2195 if (WARN_ON(family >= NPROTO))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002196 return -EAFNOSUPPORT;
Florian Westphal423826a2017-01-09 14:20:46 +01002197
Cong Wang44abdc32013-01-16 16:05:05 +08002198 spin_lock_bh(&xfrm_state_afinfo_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002199 if (likely(xfrm_state_afinfo[afinfo->family] != NULL)) {
Florian Westphal423826a2017-01-09 14:20:46 +01002200 if (rcu_access_pointer(xfrm_state_afinfo[family]) != afinfo)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002201 err = -EINVAL;
David S. Milleredcd5822006-08-24 00:42:45 -07002202 else
Cong Wang44abdc32013-01-16 16:05:05 +08002203 RCU_INIT_POINTER(xfrm_state_afinfo[afinfo->family], NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002204 }
Cong Wang44abdc32013-01-16 16:05:05 +08002205 spin_unlock_bh(&xfrm_state_afinfo_lock);
2206 synchronize_rcu();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002207 return err;
2208}
2209EXPORT_SYMBOL(xfrm_state_unregister_afinfo);
2210
Florian Westphal711059b2017-01-09 14:20:48 +01002211struct xfrm_state_afinfo *xfrm_state_afinfo_get_rcu(unsigned int family)
2212{
2213 if (unlikely(family >= NPROTO))
2214 return NULL;
2215
2216 return rcu_dereference(xfrm_state_afinfo[family]);
2217}
Florian Westphal733a5fa2019-03-29 21:16:30 +01002218EXPORT_SYMBOL_GPL(xfrm_state_afinfo_get_rcu);
Florian Westphal711059b2017-01-09 14:20:48 +01002219
Hannes Frederic Sowa628e3412013-08-14 13:05:23 +02002220struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002221{
2222 struct xfrm_state_afinfo *afinfo;
2223 if (unlikely(family >= NPROTO))
2224 return NULL;
Cong Wang44abdc32013-01-16 16:05:05 +08002225 rcu_read_lock();
2226 afinfo = rcu_dereference(xfrm_state_afinfo[family]);
Herbert Xu546be242006-05-27 23:03:58 -07002227 if (unlikely(!afinfo))
Cong Wang44abdc32013-01-16 16:05:05 +08002228 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002229 return afinfo;
2230}
2231
Steffen Klassertb48c05a2018-04-16 07:50:09 +02002232void xfrm_flush_gc(void)
2233{
2234 flush_work(&xfrm_state_gc_work);
2235}
2236EXPORT_SYMBOL(xfrm_flush_gc);
2237
Linus Torvalds1da177e2005-04-16 15:20:36 -07002238/* Temporarily located here until net/xfrm/xfrm_tunnel.c is created */
2239void xfrm_state_delete_tunnel(struct xfrm_state *x)
2240{
2241 if (x->tunnel) {
2242 struct xfrm_state *t = x->tunnel;
2243
2244 if (atomic_read(&t->tunnel_users) == 2)
2245 xfrm_state_delete(t);
2246 atomic_dec(&t->tunnel_users);
Cong Wangf75a2802019-01-31 13:05:49 -08002247 xfrm_state_put_sync(t);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002248 x->tunnel = NULL;
2249 }
2250}
2251EXPORT_SYMBOL(xfrm_state_delete_tunnel);
2252
2253int xfrm_state_mtu(struct xfrm_state *x, int mtu)
2254{
Florian Westphalb3b73b82017-01-05 13:23:58 +01002255 const struct xfrm_type *type = READ_ONCE(x->type);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002256
Patrick McHardyc5c25232007-04-09 11:47:18 -07002257 if (x->km.state == XFRM_STATE_VALID &&
Florian Westphalb3b73b82017-01-05 13:23:58 +01002258 type && type->get_mtu)
2259 return type->get_mtu(x, mtu);
2260
2261 return mtu - x->props.header_len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002262}
2263
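/*
 * Sketch (assumed caller pattern for xfrm_state_mtu() above): clamp a route
 * MTU to what still fits once this SA's headers are added, e.g.
 *
 *	mtu = xfrm_state_mtu(x, dst_mtu(dst));
 */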
Ilan Tayariffdb5212017-08-01 12:49:08 +03002264int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload)
Herbert Xu72cb6962005-06-20 13:18:08 -07002265{
Florian Westphalc9500d72019-03-29 21:16:32 +01002266 const struct xfrm_mode *inner_mode;
2267 const struct xfrm_mode *outer_mode;
Herbert Xud094cd82005-06-20 13:19:41 -07002268 int family = x->props.family;
Herbert Xu72cb6962005-06-20 13:18:08 -07002269 int err;
2270
Florian Westphale4681742019-05-03 17:46:16 +02002271 if (family == AF_INET &&
2272 xs_net(x)->ipv4.sysctl_ip_no_pmtu_disc)
2273 x->props.flags |= XFRM_STATE_NOPMTUDISC;
Herbert Xud094cd82005-06-20 13:19:41 -07002274
2275 err = -EPROTONOSUPPORT;
Herbert Xu13996372007-10-17 21:35:51 -07002276
Kazunori MIYAZAWAdf9dcb42008-03-24 14:51:51 -07002277 if (x->sel.family != AF_UNSPEC) {
2278 inner_mode = xfrm_get_mode(x->props.mode, x->sel.family);
2279 if (inner_mode == NULL)
2280 goto error;
2281
2282 if (!(inner_mode->flags & XFRM_MODE_FLAG_TUNNEL) &&
Florian Westphal4c145dc2019-03-29 21:16:31 +01002283 family != x->sel.family)
Kazunori MIYAZAWAdf9dcb42008-03-24 14:51:51 -07002284 goto error;
Kazunori MIYAZAWAdf9dcb42008-03-24 14:51:51 -07002285
Florian Westphalc9500d72019-03-29 21:16:32 +01002286 x->inner_mode = *inner_mode;
Kazunori MIYAZAWAdf9dcb42008-03-24 14:51:51 -07002287 } else {
Florian Westphal4c145dc2019-03-29 21:16:31 +01002288 const struct xfrm_mode *inner_mode_iaf;
Martin Willid81d2282008-12-03 15:38:07 -08002289 int iafamily = AF_INET;
Kazunori MIYAZAWAdf9dcb42008-03-24 14:51:51 -07002290
Martin Willid81d2282008-12-03 15:38:07 -08002291 inner_mode = xfrm_get_mode(x->props.mode, x->props.family);
Kazunori MIYAZAWAdf9dcb42008-03-24 14:51:51 -07002292 if (inner_mode == NULL)
2293 goto error;
2294
Florian Westphal4c145dc2019-03-29 21:16:31 +01002295 if (!(inner_mode->flags & XFRM_MODE_FLAG_TUNNEL))
Kazunori MIYAZAWAdf9dcb42008-03-24 14:51:51 -07002296 goto error;
Florian Westphal4c145dc2019-03-29 21:16:31 +01002297
Florian Westphalc9500d72019-03-29 21:16:32 +01002298 x->inner_mode = *inner_mode;
Kazunori MIYAZAWAdf9dcb42008-03-24 14:51:51 -07002299
Martin Willid81d2282008-12-03 15:38:07 -08002300 if (x->props.family == AF_INET)
2301 iafamily = AF_INET6;
Kazunori MIYAZAWAdf9dcb42008-03-24 14:51:51 -07002302
Martin Willid81d2282008-12-03 15:38:07 -08002303 inner_mode_iaf = xfrm_get_mode(x->props.mode, iafamily);
2304 if (inner_mode_iaf) {
2305 if (inner_mode_iaf->flags & XFRM_MODE_FLAG_TUNNEL)
Florian Westphalc9500d72019-03-29 21:16:32 +01002306 x->inner_mode_iaf = *inner_mode_iaf;
Kazunori MIYAZAWAdf9dcb42008-03-24 14:51:51 -07002307 }
2308 }
Herbert Xu13996372007-10-17 21:35:51 -07002309
Herbert Xud094cd82005-06-20 13:19:41 -07002310 x->type = xfrm_get_type(x->id.proto, family);
Herbert Xu72cb6962005-06-20 13:18:08 -07002311 if (x->type == NULL)
2312 goto error;
2313
Ilan Tayariffdb5212017-08-01 12:49:08 +03002314 x->type_offload = xfrm_get_type_offload(x->id.proto, family, offload);
Steffen Klassert9d389d72017-04-14 10:05:44 +02002315
Herbert Xu72cb6962005-06-20 13:18:08 -07002316 err = x->type->init_state(x);
2317 if (err)
2318 goto error;
2319
Florian Westphalc9500d72019-03-29 21:16:32 +01002320 outer_mode = xfrm_get_mode(x->props.mode, family);
2321 if (!outer_mode) {
Julia Lawall599901c2012-08-29 06:49:15 +00002322 err = -EPROTONOSUPPORT;
Herbert Xub59f45d2006-05-27 23:05:54 -07002323 goto error;
Julia Lawall599901c2012-08-29 06:49:15 +00002324 }
Herbert Xub59f45d2006-05-27 23:05:54 -07002325
Florian Westphalc9500d72019-03-29 21:16:32 +01002326 x->outer_mode = *outer_mode;
Wei Yongjuna454f0c2011-03-21 18:08:28 -07002327 if (init_replay) {
2328 err = xfrm_init_replay(x);
2329 if (err)
2330 goto error;
2331 }
2332
Herbert Xu72cb6962005-06-20 13:18:08 -07002333error:
2334 return err;
2335}
2336
Wei Yongjuna454f0c2011-03-21 18:08:28 -07002337EXPORT_SYMBOL(__xfrm_init_state);
2338
2339int xfrm_init_state(struct xfrm_state *x)
2340{
Yossi Kupermancc015722018-01-17 15:52:41 +02002341 int err;
2342
2343 err = __xfrm_init_state(x, true, false);
2344 if (!err)
2345 x->km.state = XFRM_STATE_VALID;
2346
2347 return err;
Wei Yongjuna454f0c2011-03-21 18:08:28 -07002348}
2349
Herbert Xu72cb6962005-06-20 13:18:08 -07002350EXPORT_SYMBOL(xfrm_init_state);
YOSHIFUJI Hideakia716c112007-02-09 23:25:29 +09002351
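/*
 * Note on the two entry points above: __xfrm_init_state() lets the caller
 * choose whether replay state is initialised and whether the offload variant
 * of the type is looked up; xfrm_init_state() is the convenience wrapper
 * that always initialises replay, skips offload and marks the state
 * XFRM_STATE_VALID on success.
 */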
int __net_init xfrm_state_init(struct net *net)
{
	unsigned int sz;

	if (net_eq(net, &init_net))
		xfrm_state_cache = KMEM_CACHE(xfrm_state,
					      SLAB_HWCACHE_ALIGN | SLAB_PANIC);

	INIT_LIST_HEAD(&net->xfrm.state_all);

	sz = sizeof(struct hlist_head) * 8;

	net->xfrm.state_bydst = xfrm_hash_alloc(sz);
	if (!net->xfrm.state_bydst)
		goto out_bydst;
	net->xfrm.state_bysrc = xfrm_hash_alloc(sz);
	if (!net->xfrm.state_bysrc)
		goto out_bysrc;
	net->xfrm.state_byspi = xfrm_hash_alloc(sz);
	if (!net->xfrm.state_byspi)
		goto out_byspi;
	net->xfrm.state_hmask = ((sz / sizeof(struct hlist_head)) - 1);

	net->xfrm.state_num = 0;
	INIT_WORK(&net->xfrm.state_hash_work, xfrm_hash_resize);
	spin_lock_init(&net->xfrm.xfrm_state_lock);
	return 0;

out_byspi:
	xfrm_hash_free(net->xfrm.state_bysrc, sz);
out_bysrc:
	xfrm_hash_free(net->xfrm.state_bydst, sz);
out_bydst:
	return -ENOMEM;
}

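/* Per-namespace teardown: flush pending hash-resize and GC work, flush
 * any remaining SAs, then verify the lists and hash tables are empty
 * before freeing them.
 */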
void xfrm_state_fini(struct net *net)
{
	unsigned int sz;

	flush_work(&net->xfrm.state_hash_work);
	flush_work(&xfrm_state_gc_work);
	xfrm_state_flush(net, 0, false, true);

	WARN_ON(!list_empty(&net->xfrm.state_all));

	sz = (net->xfrm.state_hmask + 1) * sizeof(struct hlist_head);
	WARN_ON(!hlist_empty(net->xfrm.state_byspi));
	xfrm_hash_free(net->xfrm.state_byspi, sz);
	WARN_ON(!hlist_empty(net->xfrm.state_bysrc));
	xfrm_hash_free(net->xfrm.state_bysrc, sz);
	WARN_ON(!hlist_empty(net->xfrm.state_bydst));
	xfrm_hash_free(net->xfrm.state_bydst, sz);
}

#ifdef CONFIG_AUDITSYSCALL
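/* Append the SA's security context, addresses and SPI to an audit record. */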
static void xfrm_audit_helper_sainfo(struct xfrm_state *x,
				     struct audit_buffer *audit_buf)
{
	struct xfrm_sec_ctx *ctx = x->security;
	u32 spi = ntohl(x->id.spi);

	if (ctx)
		audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
				 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);

	switch (x->props.family) {
	case AF_INET:
		audit_log_format(audit_buf, " src=%pI4 dst=%pI4",
				 &x->props.saddr.a4, &x->id.daddr.a4);
		break;
	case AF_INET6:
		audit_log_format(audit_buf, " src=%pI6 dst=%pI6",
				 x->props.saddr.a6, x->id.daddr.a6);
		break;
	}

	audit_log_format(audit_buf, " spi=%u(0x%x)", spi, spi);
}

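/* Append the packet's source/destination addresses (plus the flow label
 * for IPv6) to an audit record.
 */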
static void xfrm_audit_helper_pktinfo(struct sk_buff *skb, u16 family,
				      struct audit_buffer *audit_buf)
{
	const struct iphdr *iph4;
	const struct ipv6hdr *iph6;

	switch (family) {
	case AF_INET:
		iph4 = ip_hdr(skb);
		audit_log_format(audit_buf, " src=%pI4 dst=%pI4",
				 &iph4->saddr, &iph4->daddr);
		break;
	case AF_INET6:
		iph6 = ipv6_hdr(skb);
		audit_log_format(audit_buf,
				 " src=%pI6 dst=%pI6 flowlbl=0x%x%02x%02x",
				 &iph6->saddr, &iph6->daddr,
				 iph6->flow_lbl[0] & 0x0f,
				 iph6->flow_lbl[1],
				 iph6->flow_lbl[2]);
		break;
	}
}

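/* Audit the addition of an SA to the SAD. */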
void xfrm_audit_state_add(struct xfrm_state *x, int result, bool task_valid)
{
	struct audit_buffer *audit_buf;

	audit_buf = xfrm_audit_start("SAD-add");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_usrinfo(task_valid, audit_buf);
	xfrm_audit_helper_sainfo(x, audit_buf);
	audit_log_format(audit_buf, " res=%u", result);
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_state_add);

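/* Audit the deletion of an SA from the SAD. */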
void xfrm_audit_state_delete(struct xfrm_state *x, int result, bool task_valid)
{
	struct audit_buffer *audit_buf;

	audit_buf = xfrm_audit_start("SAD-delete");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_usrinfo(task_valid, audit_buf);
	xfrm_audit_helper_sainfo(x, audit_buf);
	audit_log_format(audit_buf, " res=%u", result);
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_state_delete);

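/* Audit an SA whose replay/sequence counter has overflowed. */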
void xfrm_audit_state_replay_overflow(struct xfrm_state *x,
				      struct sk_buff *skb)
{
	struct audit_buffer *audit_buf;
	u32 spi;

	audit_buf = xfrm_audit_start("SA-replay-overflow");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
	/* don't record the sequence number because it's inherent in this kind
	 * of audit message */
	spi = ntohl(x->id.spi);
	audit_log_format(audit_buf, " spi=%u(0x%x)", spi, spi);
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_state_replay_overflow);

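/* Audit receipt of a replayed packet on an SA. */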
void xfrm_audit_state_replay(struct xfrm_state *x,
			     struct sk_buff *skb, __be32 net_seq)
{
	struct audit_buffer *audit_buf;
	u32 spi;

	audit_buf = xfrm_audit_start("SA-replayed-pkt");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
	spi = ntohl(x->id.spi);
	audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
			 spi, spi, ntohl(net_seq));
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_state_replay);

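/* Audit a packet for which no matching SA was found; this variant logs
 * only the packet's address information.
 */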
void xfrm_audit_state_notfound_simple(struct sk_buff *skb, u16 family)
{
	struct audit_buffer *audit_buf;

	audit_buf = xfrm_audit_start("SA-notfound");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_pktinfo(skb, family, audit_buf);
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_state_notfound_simple);

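/* Audit a packet for which no SA matched the given SPI and sequence number. */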
void xfrm_audit_state_notfound(struct sk_buff *skb, u16 family,
			       __be32 net_spi, __be32 net_seq)
{
	struct audit_buffer *audit_buf;
	u32 spi;

	audit_buf = xfrm_audit_start("SA-notfound");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_pktinfo(skb, family, audit_buf);
	spi = ntohl(net_spi);
	audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
			 spi, spi, ntohl(net_seq));
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_state_notfound);

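/* Audit an integrity-check (ICV) failure; the SPI and sequence number are
 * logged when they can be parsed from the packet.
 */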
void xfrm_audit_state_icvfail(struct xfrm_state *x,
			      struct sk_buff *skb, u8 proto)
{
	struct audit_buffer *audit_buf;
	__be32 net_spi;
	__be32 net_seq;

	audit_buf = xfrm_audit_start("SA-icv-failure");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
	if (xfrm_parse_spi(skb, proto, &net_spi, &net_seq) == 0) {
		u32 spi = ntohl(net_spi);
		audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
				 spi, spi, ntohl(net_seq));
	}
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_state_icvfail);
#endif /* CONFIG_AUDITSYSCALL */