/*
 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef _TLS_OFFLOAD_H
#define _TLS_OFFLOAD_H

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/crypto.h>
#include <linux/socket.h>
#include <linux/tcp.h>
#include <linux/skmsg.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/rcupdate.h>

#include <net/net_namespace.h>
#include <net/tcp.h>
#include <net/strparser.h>
#include <crypto/aead.h>
#include <uapi/linux/tls.h>

/* Maximum data size carried in a TLS record */
#define TLS_MAX_PAYLOAD_SIZE		((size_t)1 << 14)

#define TLS_HEADER_SIZE			5
#define TLS_NONCE_OFFSET		TLS_HEADER_SIZE

#define TLS_CRYPTO_INFO_READY(info)	((info)->cipher_type)

#define TLS_RECORD_TYPE_DATA		0x17

#define TLS_AAD_SPACE_SIZE		13

#define MAX_IV_SIZE			16
#define TLS_MAX_REC_SEQ_SIZE		8

/* For AES-CCM, the full 16 bytes of IV are made up of 4 fields of the
 * following sizes:
 *
 * IV[16] = b0[1] || implicit nonce[4] || explicit nonce[8] || length[3]
 *
 * The field 'length' is encoded in field 'b0' as '(length width - 1)'.
 * Hence b0 contains (3 - 1) = 2.
 */
#define TLS_AES_CCM_IV_B0_BYTE		2
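
/* A minimal illustrative sketch (not part of this header's API):
 * assembling the CCM IV layout described above. 'salt' (the 4-byte
 * implicit nonce) and 'rec_seq' (the 8-byte explicit nonce) are
 * hypothetical local buffers.
 *
 *	u8 iv[MAX_IV_SIZE];
 *
 *	iv[0] = TLS_AES_CCM_IV_B0_BYTE;
 *	memcpy(iv + 1, salt, 4);		// implicit nonce
 *	memcpy(iv + 1 + 4, rec_seq, 8);		// explicit nonce
 *	// iv[13..15] carry the 3-byte length, filled in by the CCM code
 */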

#define __TLS_INC_STATS(net, field)				\
	__SNMP_INC_STATS((net)->mib.tls_statistics, field)
#define TLS_INC_STATS(net, field)				\
	SNMP_INC_STATS((net)->mib.tls_statistics, field)
#define __TLS_DEC_STATS(net, field)				\
	__SNMP_DEC_STATS((net)->mib.tls_statistics, field)
#define TLS_DEC_STATS(net, field)				\
	SNMP_DEC_STATS((net)->mib.tls_statistics, field)
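
/* Illustrative usage sketch: bumping a TLS MIB counter from a socket
 * context (assumes the LINUX_MIB_TLSDECRYPTERROR field from
 * uapi/linux/snmp.h):
 *
 *	TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSDECRYPTERROR);
 */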

enum {
	TLS_BASE,
	TLS_SW,
	TLS_HW,
	TLS_HW_RECORD,
	TLS_NUM_CONFIG,
};

/* TLS records are maintained in 'struct tls_rec'. It stores the memory pages
 * allocated or mapped for each TLS record. After encryption, the records
 * are stored in a linked list.
 */
struct tls_rec {
	struct list_head list;
	int tx_ready;
	int tx_flags;

	struct sk_msg msg_plaintext;
	struct sk_msg msg_encrypted;

	/* AAD | msg_plaintext.sg.data | sg_tag */
	struct scatterlist sg_aead_in[2];
	/* AAD | msg_encrypted.sg.data (data contains overhead for hdr & iv & tag) */
	struct scatterlist sg_aead_out[2];

	char content_type;
	struct scatterlist sg_content_type;

	char aad_space[TLS_AAD_SPACE_SIZE];
	u8 iv_data[MAX_IV_SIZE];
	struct aead_request aead_req;
	u8 aead_req_ctx[];
};

struct tls_msg {
	struct strp_msg rxm;
	u8 control;
};

struct tx_work {
	struct delayed_work work;
	struct sock *sk;
};

struct tls_sw_context_tx {
	struct crypto_aead *aead_send;
	struct crypto_wait async_wait;
	struct tx_work tx_work;
	struct tls_rec *open_rec;
	struct list_head tx_list;
	atomic_t encrypt_pending;
	/* protect crypto_wait with encrypt_pending */
	spinlock_t encrypt_compl_lock;
	int async_notify;
	u8 async_capable:1;

#define BIT_TX_SCHEDULED	0
#define BIT_TX_CLOSING		1
	unsigned long tx_bitmask;
};

struct tls_sw_context_rx {
	struct crypto_aead *aead_recv;
	struct crypto_wait async_wait;
	struct strparser strp;
	struct sk_buff_head rx_list;	/* list of decrypted 'data' records */
	void (*saved_data_ready)(struct sock *sk);

	struct sk_buff *recv_pkt;
	u8 control;
	u8 async_capable:1;
	u8 decrypted:1;
	atomic_t decrypt_pending;
	/* protect crypto_wait with decrypt_pending */
	spinlock_t decrypt_compl_lock;
	bool async_notify;
};

struct tls_record_info {
	struct list_head list;
	u32 end_seq;
	int len;
	int num_frags;
	skb_frag_t frags[MAX_SKB_FRAGS];
};

struct tls_offload_context_tx {
	struct crypto_aead *aead_send;
	spinlock_t lock;	/* protects records list */
	struct list_head records_list;
	struct tls_record_info *open_record;
	struct tls_record_info *retransmit_hint;
	u64 hint_record_sn;
	u64 unacked_record_sn;

	struct scatterlist sg_tx_data[MAX_SKB_FRAGS];
	void (*sk_destruct)(struct sock *sk);
	u8 driver_state[] __aligned(8);
	/* The TLS layer reserves room for driver specific state
	 * Currently the belief is that there is not enough
	 * driver specific state to justify another layer of indirection
	 */
#define TLS_DRIVER_STATE_SIZE_TX	16
};

#define TLS_OFFLOAD_CONTEXT_SIZE_TX					\
	(sizeof(struct tls_offload_context_tx) + TLS_DRIVER_STATE_SIZE_TX)

enum tls_context_flags {
	TLS_RX_SYNC_RUNNING = 0,
	/* Unlike RX, where resync is driven entirely by the core, in TX only
	 * the driver knows when things went out of sync, so we need the flag
	 * to be atomic.
	 */
	TLS_TX_SYNC_SCHED = 1,
};

struct cipher_context {
	char *iv;
	char *rec_seq;
};

union tls_crypto_context {
	struct tls_crypto_info info;
	union {
		struct tls12_crypto_info_aes_gcm_128 aes_gcm_128;
		struct tls12_crypto_info_aes_gcm_256 aes_gcm_256;
	};
};

struct tls_prot_info {
	u16 version;
	u16 cipher_type;
	u16 prepend_size;
	u16 tag_size;
	u16 overhead_size;
	u16 iv_size;
	u16 salt_size;
	u16 rec_seq_size;
	u16 aad_size;
	u16 tail_size;
};
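
/* Rough sketch of typical values for TLS 1.2 with AES-GCM-128, derived
 * from the uapi sizes (the setup code is authoritative; treat the exact
 * numbers as an assumption): iv_size = 8 (explicit nonce),
 * salt_size = 4, tag_size = 16,
 * prepend_size = TLS_HEADER_SIZE + iv_size = 13,
 * overhead_size = prepend_size + tag_size = 29, rec_seq_size = 8,
 * aad_size = TLS_AAD_SPACE_SIZE = 13, tail_size = 0.
 */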

struct tls_context {
	/* read-only cache line */
	struct tls_prot_info prot_info;

	u8 tx_conf:3;
	u8 rx_conf:3;

	int (*push_pending_record)(struct sock *sk, int flags);
	void (*sk_write_space)(struct sock *sk);

	void *priv_ctx_tx;
	void *priv_ctx_rx;

	struct net_device *netdev;

	/* rw cache line */
	struct cipher_context tx;
	struct cipher_context rx;

	struct scatterlist *partially_sent_record;
	u16 partially_sent_offset;

	bool in_tcp_sendpages;
	bool pending_open_record_frags;

	struct mutex tx_lock; /* protects partially_sent_* fields and
			       * per-type TX fields
			       */
	unsigned long flags;

	/* cache cold stuff */
	struct proto *sk_proto;

	void (*sk_destruct)(struct sock *sk);

	union tls_crypto_context crypto_send;
	union tls_crypto_context crypto_recv;

	struct list_head list;
	refcount_t refcount;
	struct rcu_head rcu;
};

enum tls_offload_ctx_dir {
	TLS_OFFLOAD_CTX_DIR_RX,
	TLS_OFFLOAD_CTX_DIR_TX,
};

struct tlsdev_ops {
	int (*tls_dev_add)(struct net_device *netdev, struct sock *sk,
			   enum tls_offload_ctx_dir direction,
			   struct tls_crypto_info *crypto_info,
			   u32 start_offload_tcp_sn);
	void (*tls_dev_del)(struct net_device *netdev,
			    struct tls_context *ctx,
			    enum tls_offload_ctx_dir direction);
	int (*tls_dev_resync)(struct net_device *netdev,
			      struct sock *sk, u32 seq, u8 *rcd_sn,
			      enum tls_offload_ctx_dir direction);
};
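
/* Illustrative sketch: a NIC driver advertising kTLS offload points
 * net_device->tlsdev_ops at a table like this; the mydrv_* callbacks
 * are hypothetical.
 *
 *	static const struct tlsdev_ops mydrv_tls_ops = {
 *		.tls_dev_add	= mydrv_tls_add,
 *		.tls_dev_del	= mydrv_tls_del,
 *		.tls_dev_resync	= mydrv_tls_resync,
 *	};
 */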

enum tls_offload_sync_type {
	TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ = 0,
	TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT = 1,
};

#define TLS_DEVICE_RESYNC_NH_START_IVAL			2
#define TLS_DEVICE_RESYNC_NH_MAX_IVAL			128

struct tls_offload_context_rx {
	/* sw must be the first member of tls_offload_context_rx */
	struct tls_sw_context_rx sw;
	enum tls_offload_sync_type resync_type;
	/* this member is set regardless of resync_type, to avoid branches */
	u8 resync_nh_reset:1;
	/* CORE_NEXT_HINT-only member, but use the hole here */
	u8 resync_nh_do_now:1;
	union {
		/* TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ */
		struct {
			atomic64_t resync_req;
		};
		/* TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT */
		struct {
			u32 decrypted_failed;
			u32 decrypted_tgt;
		} resync_nh;
	};
	u8 driver_state[] __aligned(8);
	/* The TLS layer reserves room for driver specific state
	 * Currently the belief is that there is not enough
	 * driver specific state to justify another layer of indirection
	 */
#define TLS_DRIVER_STATE_SIZE_RX	8
};

#define TLS_OFFLOAD_CONTEXT_SIZE_RX					\
	(sizeof(struct tls_offload_context_rx) + TLS_DRIVER_STATE_SIZE_RX)

struct tls_context *tls_ctx_create(struct sock *sk);
void tls_ctx_free(struct sock *sk, struct tls_context *ctx);
void update_sk_prot(struct sock *sk, struct tls_context *ctx);

int wait_on_pending_writer(struct sock *sk, long *timeo);
int tls_sk_query(struct sock *sk, int optname, char __user *optval,
		 int __user *optlen);
int tls_sk_attach(struct sock *sk, int optname, char __user *optval,
		  unsigned int optlen);

int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx);
void tls_sw_strparser_arm(struct sock *sk, struct tls_context *ctx);
void tls_sw_strparser_done(struct tls_context *tls_ctx);
int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
int tls_sw_sendpage_locked(struct sock *sk, struct page *page,
			   int offset, size_t size, int flags);
int tls_sw_sendpage(struct sock *sk, struct page *page,
		    int offset, size_t size, int flags);
void tls_sw_cancel_work_tx(struct tls_context *tls_ctx);
void tls_sw_release_resources_tx(struct sock *sk);
void tls_sw_free_ctx_tx(struct tls_context *tls_ctx);
void tls_sw_free_resources_rx(struct sock *sk);
void tls_sw_release_resources_rx(struct sock *sk);
void tls_sw_free_ctx_rx(struct tls_context *tls_ctx);
int tls_sw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
		   int nonblock, int flags, int *addr_len);
bool tls_sw_stream_read(const struct sock *sk);
ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
			   struct pipe_inode_info *pipe,
			   size_t len, unsigned int flags);

int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
int tls_device_sendpage(struct sock *sk, struct page *page,
			int offset, size_t size, int flags);
int tls_tx_records(struct sock *sk, int flags);

struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context,
				       u32 seq, u64 *p_record_sn);

static inline bool tls_record_is_start_marker(struct tls_record_info *rec)
{
	return rec->len == 0;
}

static inline u32 tls_record_start_seq(struct tls_record_info *rec)
{
	return rec->end_seq - rec->len;
}

int tls_push_sg(struct sock *sk, struct tls_context *ctx,
		struct scatterlist *sg, u16 first_offset,
		int flags);
int tls_push_partial_record(struct sock *sk, struct tls_context *ctx,
			    int flags);
void tls_free_partial_record(struct sock *sk, struct tls_context *ctx);

static inline struct tls_msg *tls_msg(struct sk_buff *skb)
{
	return (struct tls_msg *)strp_msg(skb);
}

static inline bool tls_is_partially_sent_record(struct tls_context *ctx)
{
	return !!ctx->partially_sent_record;
}

static inline bool tls_is_pending_open_record(struct tls_context *tls_ctx)
{
	return tls_ctx->pending_open_record_frags;
}

static inline bool is_tx_ready(struct tls_sw_context_tx *ctx)
{
	struct tls_rec *rec;

	rec = list_first_entry_or_null(&ctx->tx_list, struct tls_rec, list);
	if (!rec)
		return false;

	return READ_ONCE(rec->tx_ready);
}

static inline u16 tls_user_config(struct tls_context *ctx, bool tx)
{
	u16 config = tx ? ctx->tx_conf : ctx->rx_conf;

	switch (config) {
	case TLS_BASE:
		return TLS_CONF_BASE;
	case TLS_SW:
		return TLS_CONF_SW;
	case TLS_HW:
		return TLS_CONF_HW;
	case TLS_HW_RECORD:
		return TLS_CONF_HW_RECORD;
	}
	return 0;
}

struct sk_buff *
tls_validate_xmit_skb(struct sock *sk, struct net_device *dev,
		      struct sk_buff *skb);

static inline bool tls_is_sk_tx_device_offloaded(struct sock *sk)
{
#ifdef CONFIG_SOCK_VALIDATE_XMIT
	return sk_fullsock(sk) &&
	       (smp_load_acquire(&sk->sk_validate_xmit_skb) ==
	       &tls_validate_xmit_skb);
#else
	return false;
#endif
}
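
/* Illustrative driver-side usage sketch (mydrv_tls_xmit is a
 * hypothetical function): a NIC xmit path takes its kTLS code path only
 * for offloaded sockets.
 *
 *	if (skb->sk && tls_is_sk_tx_device_offloaded(skb->sk))
 *		return mydrv_tls_xmit(skb, dev);
 */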

static inline void tls_err_abort(struct sock *sk, int err)
{
	sk->sk_err = err;
	sk->sk_error_report(sk);
}

static inline bool tls_bigint_increment(unsigned char *seq, int len)
{
	int i;

	for (i = len - 1; i >= 0; i--) {
		++seq[i];
		if (seq[i] != 0)
			break;
	}

	return (i == -1);
}
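
/* Illustrative sketch: advancing an 8-byte big-endian record sequence
 * number; the return value signals that the whole counter wrapped back
 * to zero.
 *
 *	unsigned char rec_seq[8] = { 0 };
 *
 *	if (tls_bigint_increment(rec_seq, sizeof(rec_seq)))
 *		pr_warn("record sequence number wrapped\n");
 */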

static inline struct tls_context *tls_get_ctx(const struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	/* Use RCU on icsk_ulp_data only for sock diag code,
	 * TLS data path doesn't need rcu_dereference().
	 */
	return (__force void *)icsk->icsk_ulp_data;
}

static inline void tls_advance_record_sn(struct sock *sk,
					 struct tls_prot_info *prot,
					 struct cipher_context *ctx)
{
	if (tls_bigint_increment(ctx->rec_seq, prot->rec_seq_size))
		tls_err_abort(sk, EBADMSG);

	if (prot->version != TLS_1_3_VERSION)
		tls_bigint_increment(ctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
				     prot->iv_size);
}

static inline void tls_fill_prepend(struct tls_context *ctx,
				    char *buf,
				    size_t plaintext_len,
				    unsigned char record_type,
				    int version)
{
	struct tls_prot_info *prot = &ctx->prot_info;
	size_t pkt_len, iv_size = prot->iv_size;

	pkt_len = plaintext_len + prot->tag_size;
	if (version != TLS_1_3_VERSION) {
		pkt_len += iv_size;

		memcpy(buf + TLS_NONCE_OFFSET,
		       ctx->tx.iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, iv_size);
	}

	/* we cover the explicit nonce here as well, so buf should be of
	 * size KTLS_DTLS_HEADER_SIZE + KTLS_DTLS_NONCE_EXPLICIT_SIZE
	 */
	buf[0] = version == TLS_1_3_VERSION ?
		 TLS_RECORD_TYPE_DATA : record_type;
	/* Note that VERSION must be TLS_1_2 for both TLS 1.2 and TLS 1.3 */
	buf[1] = TLS_1_2_VERSION_MAJOR;
	buf[2] = TLS_1_2_VERSION_MINOR;
	/* we can use the IV as the explicit nonce, per the spec */
	buf[3] = pkt_len >> 8;
	buf[4] = pkt_len & 0xFF;
}
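
/* Worked example (illustrative): TLS 1.2 with AES-GCM-128 uses an
 * 8-byte explicit nonce and a 16-byte tag, so a 100-byte plaintext
 * gives pkt_len = 100 + 16 + 8 = 124 and the prepend is laid out as:
 *
 *	buf[0] = record_type;	// e.g. 0x17 for application data
 *	buf[1] = 0x03;		// TLS_1_2_VERSION_MAJOR
 *	buf[2] = 0x03;		// TLS_1_2_VERSION_MINOR
 *	buf[3] = 0x00;		// 124 >> 8
 *	buf[4] = 0x7c;		// 124 & 0xFF
 *	buf[5..12]		// explicit nonce copied from ctx->tx.iv
 */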

static inline void tls_make_aad(char *buf,
				size_t size,
				char *record_sequence,
				int record_sequence_size,
				unsigned char record_type,
				int version)
{
	if (version != TLS_1_3_VERSION) {
		memcpy(buf, record_sequence, record_sequence_size);
		buf += 8;
	} else {
		size += TLS_CIPHER_AES_GCM_128_TAG_SIZE;
	}

	buf[0] = version == TLS_1_3_VERSION ?
		 TLS_RECORD_TYPE_DATA : record_type;
	buf[1] = TLS_1_2_VERSION_MAJOR;
	buf[2] = TLS_1_2_VERSION_MINOR;
	buf[3] = size >> 8;
	buf[4] = size & 0xFF;
}

static inline void xor_iv_with_seq(int version, char *iv, char *seq)
{
	int i;

	if (version == TLS_1_3_VERSION) {
		for (i = 0; i < 8; i++)
			iv[i + 4] ^= seq[i];
	}
}
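
/* Illustrative sketch of TLS 1.3 per-record nonce derivation (RFC 8446,
 * section 5.3) with the helper above; here 'iv' holds the 4-byte salt
 * followed by the 8-byte static IV, and 'rec_seq' is the record
 * sequence number:
 *
 *	xor_iv_with_seq(TLS_1_3_VERSION, ctx->tx.iv, ctx->tx.rec_seq);
 *	// bytes 4..11 of the IV are now static_iv XOR rec_seq
 */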

static inline struct tls_sw_context_rx *tls_sw_ctx_rx(
		const struct tls_context *tls_ctx)
{
	return (struct tls_sw_context_rx *)tls_ctx->priv_ctx_rx;
}

static inline struct tls_sw_context_tx *tls_sw_ctx_tx(
		const struct tls_context *tls_ctx)
{
	return (struct tls_sw_context_tx *)tls_ctx->priv_ctx_tx;
}

static inline struct tls_offload_context_tx *
tls_offload_ctx_tx(const struct tls_context *tls_ctx)
{
	return (struct tls_offload_context_tx *)tls_ctx->priv_ctx_tx;
}

static inline bool tls_sw_has_ctx_tx(const struct sock *sk)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (!ctx)
		return false;
	return !!tls_sw_ctx_tx(ctx);
}

static inline bool tls_sw_has_ctx_rx(const struct sock *sk)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (!ctx)
		return false;
	return !!tls_sw_ctx_rx(ctx);
}

void tls_sw_write_space(struct sock *sk, struct tls_context *ctx);
void tls_device_write_space(struct sock *sk, struct tls_context *ctx);

static inline struct tls_offload_context_rx *
tls_offload_ctx_rx(const struct tls_context *tls_ctx)
{
	return (struct tls_offload_context_rx *)tls_ctx->priv_ctx_rx;
}

#if IS_ENABLED(CONFIG_TLS_DEVICE)
static inline void *__tls_driver_ctx(struct tls_context *tls_ctx,
				     enum tls_offload_ctx_dir direction)
{
	if (direction == TLS_OFFLOAD_CTX_DIR_TX)
		return tls_offload_ctx_tx(tls_ctx)->driver_state;
	else
		return tls_offload_ctx_rx(tls_ctx)->driver_state;
}

static inline void *
tls_driver_ctx(const struct sock *sk, enum tls_offload_ctx_dir direction)
{
	return __tls_driver_ctx(tls_get_ctx(sk), direction);
}
#endif

/* The TLS context is valid until sk_destruct is called */
static inline void tls_offload_rx_resync_request(struct sock *sk, __be32 seq)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);

	atomic64_set(&rx_ctx->resync_req, ((u64)ntohl(seq) << 32) | 1);
}
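
/* Sketch of the encoding set above (an assumption drawn from this
 * helper): the upper 32 bits of resync_req carry the TCP sequence
 * number and the low bit marks the request as pending. The consumer
 * side would decode it roughly as:
 *
 *	u64 req = atomic64_read(&rx_ctx->resync_req);
 *	u32 tcp_seq = req >> 32;
 *	bool pending = req & 1;
 */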

static inline void
tls_offload_rx_resync_set_type(struct sock *sk, enum tls_offload_sync_type type)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);

	tls_offload_ctx_rx(tls_ctx)->resync_type = type;
}

/* Driver's seq tracking has to be disabled until resync succeeds */
static inline bool tls_offload_tx_resync_pending(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	bool ret;

	ret = test_bit(TLS_TX_SYNC_SCHED, &tls_ctx->flags);
	smp_mb__after_atomic();
	return ret;
}

int __net_init tls_proc_init(struct net *net);
void __net_exit tls_proc_fini(struct net *net);

int tls_proccess_cmsg(struct sock *sk, struct msghdr *msg,
		      unsigned char *record_type);
int decrypt_skb(struct sock *sk, struct sk_buff *skb,
		struct scatterlist *sgout);
struct sk_buff *tls_encrypt_skb(struct sk_buff *skb);

struct sk_buff *tls_validate_xmit_skb(struct sock *sk,
				      struct net_device *dev,
				      struct sk_buff *skb);

int tls_sw_fallback_init(struct sock *sk,
			 struct tls_offload_context_tx *offload_ctx,
			 struct tls_crypto_info *crypto_info);

#ifdef CONFIG_TLS_DEVICE
void tls_device_init(void);
void tls_device_cleanup(void);
void tls_device_sk_destruct(struct sock *sk);
int tls_set_device_offload(struct sock *sk, struct tls_context *ctx);
void tls_device_free_resources_tx(struct sock *sk);
int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx);
void tls_device_offload_cleanup_rx(struct sock *sk);
void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq);
void tls_offload_tx_resync_request(struct sock *sk, u32 got_seq, u32 exp_seq);
int tls_device_decrypted(struct sock *sk, struct tls_context *tls_ctx,
			 struct sk_buff *skb, struct strp_msg *rxm);

static inline bool tls_is_sk_rx_device_offloaded(struct sock *sk)
{
	if (!sk_fullsock(sk) ||
	    smp_load_acquire(&sk->sk_destruct) != tls_device_sk_destruct)
		return false;
	return tls_get_ctx(sk)->rx_conf == TLS_HW;
}
#else
static inline void tls_device_init(void) {}
static inline void tls_device_cleanup(void) {}

static inline int
tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
{
	return -EOPNOTSUPP;
}

static inline void tls_device_free_resources_tx(struct sock *sk) {}

static inline int
tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx)
{
	return -EOPNOTSUPP;
}

static inline void tls_device_offload_cleanup_rx(struct sock *sk) {}
static inline void
tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq) {}

static inline int
tls_device_decrypted(struct sock *sk, struct tls_context *tls_ctx,
		     struct sk_buff *skb, struct strp_msg *rxm)
{
	return 0;
}
#endif
#endif /* _TLS_OFFLOAD_H */