/*
 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef _TLS_OFFLOAD_H
#define _TLS_OFFLOAD_H

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/crypto.h>
#include <linux/socket.h>
#include <linux/tcp.h>
#include <linux/skmsg.h>

#include <net/tcp.h>
#include <net/strparser.h>
#include <crypto/aead.h>
#include <uapi/linux/tls.h>


/* Maximum data size carried in a TLS record */
#define TLS_MAX_PAYLOAD_SIZE		((size_t)1 << 14)

#define TLS_HEADER_SIZE			5
#define TLS_NONCE_OFFSET		TLS_HEADER_SIZE

#define TLS_CRYPTO_INFO_READY(info)	((info)->cipher_type)

#define TLS_RECORD_TYPE_DATA		0x17

#define TLS_AAD_SPACE_SIZE		13
#define TLS_DEVICE_NAME_MAX		32

/*
 * This structure defines the routines for an Inline TLS driver.
 * The following routines are optional and filled with a
 * null pointer if not defined.
 *
 * @name: The name of the registered Inline TLS device
 * @dev_list: Inline TLS device list
 * int (*feature)(struct tls_device *device);
 *     Called to return the Inline TLS driver capabilities
 *
 * int (*hash)(struct tls_device *device, struct sock *sk);
 *     This function sets up the Inline driver for listen and programs
 *     device specific functionality as required
 *
 * void (*unhash)(struct tls_device *device, struct sock *sk);
 *     This function cleans up the listen state set by the Inline TLS driver
 *
 * void (*release)(struct kref *kref);
 *     Release the registered device and allocated resources
 * @kref: Reference count on the tls_device
 */
struct tls_device {
	char name[TLS_DEVICE_NAME_MAX];
	struct list_head dev_list;
	int  (*feature)(struct tls_device *device);
	int  (*hash)(struct tls_device *device, struct sock *sk);
	void (*unhash)(struct tls_device *device, struct sock *sk);
	void (*release)(struct kref *kref);
	struct kref kref;
};
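
/* Illustrative only, not part of the kernel API contract: an Inline TLS
 * driver might register itself roughly as below, using the
 * tls_register_device()/tls_unregister_device() helpers declared later in
 * this header. The callback names are hypothetical.
 *
 *	static struct tls_device mydrv_tls_dev = {
 *		.name	 = "mydrv-tls",
 *		.feature = mydrv_tls_feature,
 *		.hash	 = mydrv_tls_hash,
 *		.unhash	 = mydrv_tls_unhash,
 *		.release = mydrv_tls_release,
 *	};
 *
 *	tls_register_device(&mydrv_tls_dev);
 *	...
 *	tls_unregister_device(&mydrv_tls_dev);
 */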

enum {
	TLS_BASE,
	TLS_SW,
#ifdef CONFIG_TLS_DEVICE
	TLS_HW,
#endif
	TLS_HW_RECORD,
	TLS_NUM_CONFIG,
};

/* TLS records are maintained in 'struct tls_rec'. It stores the memory pages
 * allocated or mapped for each TLS record. After encryption, the records are
 * stored in a linked list (see the illustrative sketch after the struct).
 */
struct tls_rec {
	struct list_head list;
	int tx_ready;
	int tx_flags;
	int inplace_crypto;

	struct sk_msg msg_plaintext;
	struct sk_msg msg_encrypted;

	/* AAD | msg_plaintext.sg.data | sg_tag */
	struct scatterlist sg_aead_in[2];
	/* AAD | msg_encrypted.sg.data (data contains overhead for hdr & iv & tag) */
	struct scatterlist sg_aead_out[2];

	char content_type;
	struct scatterlist sg_content_type;

	char aad_space[TLS_AAD_SPACE_SIZE];
	u8 iv_data[TLS_CIPHER_AES_GCM_128_IV_SIZE +
		   TLS_CIPHER_AES_GCM_128_SALT_SIZE];
	struct aead_request aead_req;
	u8 aead_req_ctx[];
};
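
/* Illustrative sketch only: once encrypted, records sit on the tx_list of
 * struct tls_sw_context_tx (defined below) and are sent in order, roughly:
 *
 *	struct tls_rec *rec, *tmp;
 *
 *	list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
 *		if (!READ_ONCE(rec->tx_ready))
 *			break;
 *		// transmit rec->msg_encrypted, then unlink and free rec
 *	}
 *
 * See tls_tx_records(), declared later in this file, for the real
 * transmit path.
 */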

struct tls_msg {
	struct strp_msg rxm;
	u8 control;
};

struct tx_work {
	struct delayed_work work;
	struct sock *sk;
};

struct tls_sw_context_tx {
	struct crypto_aead *aead_send;
	struct crypto_wait async_wait;
	struct tx_work tx_work;
	struct tls_rec *open_rec;
	struct list_head tx_list;
	atomic_t encrypt_pending;
	int async_notify;
	int async_capable;

#define BIT_TX_SCHEDULED	0
	unsigned long tx_bitmask;
};

struct tls_sw_context_rx {
	struct crypto_aead *aead_recv;
	struct crypto_wait async_wait;
	struct strparser strp;
	struct sk_buff_head rx_list;	/* list of decrypted 'data' records */
	void (*saved_data_ready)(struct sock *sk);

	struct sk_buff *recv_pkt;
	u8 control;
	int async_capable;
	bool decrypted;
	atomic_t decrypt_pending;
	bool async_notify;
};

struct tls_record_info {
	struct list_head list;
	u32 end_seq;
	int len;
	int num_frags;
	skb_frag_t frags[MAX_SKB_FRAGS];
};

struct tls_offload_context_tx {
	struct crypto_aead *aead_send;
	spinlock_t lock;	/* protects records list */
	struct list_head records_list;
	struct tls_record_info *open_record;
	struct tls_record_info *retransmit_hint;
	u64 hint_record_sn;
	u64 unacked_record_sn;

	struct scatterlist sg_tx_data[MAX_SKB_FRAGS];
	void (*sk_destruct)(struct sock *sk);
	u8 driver_state[];
	/* The TLS layer reserves room for driver specific state
	 * Currently the belief is that there is not enough
	 * driver specific state to justify another layer of indirection
	 */
#define TLS_DRIVER_STATE_SIZE (max_t(size_t, 8, sizeof(void *)))
};

#define TLS_OFFLOAD_CONTEXT_SIZE_TX                                            \
	(ALIGN(sizeof(struct tls_offload_context_tx), sizeof(void *)) +       \
	 TLS_DRIVER_STATE_SIZE)

struct cipher_context {
	char *iv;
	char *rec_seq;
};

union tls_crypto_context {
	struct tls_crypto_info info;
	union {
		struct tls12_crypto_info_aes_gcm_128 aes_gcm_128;
		struct tls12_crypto_info_aes_gcm_256 aes_gcm_256;
	};
};

struct tls_prot_info {
	u16 version;
	u16 cipher_type;
	u16 prepend_size;
	u16 tag_size;
	u16 overhead_size;
	u16 iv_size;
	u16 rec_seq_size;
	u16 aad_size;
	u16 tail_size;
};
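
/* Illustrative values, not authoritative: for TLS 1.2 with AES-GCM-128 the
 * record header is 5 bytes, the explicit nonce 8 bytes and the tag 16 bytes,
 * so prepend_size would be 13, tag_size 16 and overhead_size 29. TLS 1.3
 * drops the explicit nonce and appends a one-byte content type, so the
 * prepend/tail/overhead sizes differ accordingly.
 */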

struct tls_context {
	struct tls_prot_info prot_info;

	union tls_crypto_context crypto_send;
	union tls_crypto_context crypto_recv;

	struct list_head list;
	struct net_device *netdev;
	refcount_t refcount;

	void *priv_ctx_tx;
	void *priv_ctx_rx;

	u8 tx_conf:3;
	u8 rx_conf:3;

	struct cipher_context tx;
	struct cipher_context rx;

	struct scatterlist *partially_sent_record;
	u16 partially_sent_offset;

	unsigned long flags;
	bool in_tcp_sendpages;
	bool pending_open_record_frags;

	int (*push_pending_record)(struct sock *sk, int flags);

	void (*sk_write_space)(struct sock *sk);
	void (*sk_destruct)(struct sock *sk);
	void (*sk_proto_close)(struct sock *sk, long timeout);

	int  (*setsockopt)(struct sock *sk, int level,
			   int optname, char __user *optval,
			   unsigned int optlen);
	int  (*getsockopt)(struct sock *sk, int level,
			   int optname, char __user *optval,
			   int __user *optlen);
	int  (*hash)(struct sock *sk);
	void (*unhash)(struct sock *sk);
};

struct tls_offload_context_rx {
	/* sw must be the first member of tls_offload_context_rx */
	struct tls_sw_context_rx sw;
	atomic64_t resync_req;
	u8 driver_state[];
	/* The TLS layer reserves room for driver specific state
	 * Currently the belief is that there is not enough
	 * driver specific state to justify another layer of indirection
	 */
};

#define TLS_OFFLOAD_CONTEXT_SIZE_RX					\
	(ALIGN(sizeof(struct tls_offload_context_rx), sizeof(void *)) + \
	 TLS_DRIVER_STATE_SIZE)

int wait_on_pending_writer(struct sock *sk, long *timeo);
int tls_sk_query(struct sock *sk, int optname, char __user *optval,
		int __user *optlen);
int tls_sk_attach(struct sock *sk, int optname, char __user *optval,
		  unsigned int optlen);

int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx);
int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
int tls_sw_sendpage(struct sock *sk, struct page *page,
		    int offset, size_t size, int flags);
void tls_sw_close(struct sock *sk, long timeout);
void tls_sw_free_resources_tx(struct sock *sk);
void tls_sw_free_resources_rx(struct sock *sk);
void tls_sw_release_resources_rx(struct sock *sk);
int tls_sw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
		   int nonblock, int flags, int *addr_len);
bool tls_sw_stream_read(const struct sock *sk);
ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
			   struct pipe_inode_info *pipe,
			   size_t len, unsigned int flags);

int tls_set_device_offload(struct sock *sk, struct tls_context *ctx);
int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
int tls_device_sendpage(struct sock *sk, struct page *page,
			int offset, size_t size, int flags);
void tls_device_sk_destruct(struct sock *sk);
void tls_device_init(void);
void tls_device_cleanup(void);
int tls_tx_records(struct sock *sk, int flags);

struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context,
				       u32 seq, u64 *p_record_sn);

static inline bool tls_record_is_start_marker(struct tls_record_info *rec)
{
	return rec->len == 0;
}

static inline u32 tls_record_start_seq(struct tls_record_info *rec)
{
	return rec->end_seq - rec->len;
}

void tls_sk_destruct(struct sock *sk, struct tls_context *ctx);
int tls_push_sg(struct sock *sk, struct tls_context *ctx,
		struct scatterlist *sg, u16 first_offset,
		int flags);
int tls_push_partial_record(struct sock *sk, struct tls_context *ctx,
			    int flags);

static inline struct tls_msg *tls_msg(struct sk_buff *skb)
{
	return (struct tls_msg *)strp_msg(skb);
}

static inline bool tls_is_partially_sent_record(struct tls_context *ctx)
{
	return !!ctx->partially_sent_record;
}

static inline int tls_complete_pending_work(struct sock *sk,
					    struct tls_context *ctx,
					    int flags, long *timeo)
{
	int rc = 0;

	if (unlikely(sk->sk_write_pending))
		rc = wait_on_pending_writer(sk, timeo);

	if (!rc && tls_is_partially_sent_record(ctx))
		rc = tls_push_partial_record(sk, ctx, flags);

	return rc;
}

static inline bool tls_is_pending_open_record(struct tls_context *tls_ctx)
{
	return tls_ctx->pending_open_record_frags;
}

static inline bool is_tx_ready(struct tls_sw_context_tx *ctx)
{
	struct tls_rec *rec;

	rec = list_first_entry(&ctx->tx_list, struct tls_rec, list);
	if (!rec)
		return false;

	return READ_ONCE(rec->tx_ready);
}

struct sk_buff *
tls_validate_xmit_skb(struct sock *sk, struct net_device *dev,
		      struct sk_buff *skb);

static inline bool tls_is_sk_tx_device_offloaded(struct sock *sk)
{
#ifdef CONFIG_SOCK_VALIDATE_XMIT
	return sk_fullsock(sk) &&
	       (smp_load_acquire(&sk->sk_validate_xmit_skb) ==
	       &tls_validate_xmit_skb);
#else
	return false;
#endif
}

static inline void tls_err_abort(struct sock *sk, int err)
{
	sk->sk_err = err;
	sk->sk_error_report(sk);
}
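
/* Increment a big-endian (network byte order) integer of @len bytes in
 * place. Returns true only if every byte wrapped around to zero, i.e. the
 * counter overflowed, which callers such as tls_advance_record_sn() below
 * treat as a fatal condition.
 */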
static inline bool tls_bigint_increment(unsigned char *seq, int len)
{
	int i;

	for (i = len - 1; i >= 0; i--) {
		++seq[i];
		if (seq[i] != 0)
			break;
	}

	return (i == -1);
}

static inline struct tls_context *tls_get_ctx(const struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	return icsk->icsk_ulp_data;
}

static inline void tls_advance_record_sn(struct sock *sk,
					 struct cipher_context *ctx,
					 int version)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;

	if (tls_bigint_increment(ctx->rec_seq, prot->rec_seq_size))
		tls_err_abort(sk, EBADMSG);

	if (version != TLS_1_3_VERSION) {
		tls_bigint_increment(ctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
				     prot->iv_size);
	}
}
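
/* tls_fill_prepend() below writes the on-the-wire record prefix. For
 * orientation (standard TLS framing, shown here for illustration):
 *
 *	buf[0]      record type (always "application data" for TLS 1.3)
 *	buf[1..2]   legacy protocol version (0x03 0x03)
 *	buf[3..4]   record length, big endian
 *
 * followed, for TLS 1.2, by the explicit per-record nonce copied from
 * ctx->tx.iv at TLS_NONCE_OFFSET.
 */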
static inline void tls_fill_prepend(struct tls_context *ctx,
				    char *buf,
				    size_t plaintext_len,
				    unsigned char record_type,
				    int version)
{
	struct tls_prot_info *prot = &ctx->prot_info;
	size_t pkt_len, iv_size = prot->iv_size;

	pkt_len = plaintext_len + prot->tag_size;
	if (version != TLS_1_3_VERSION) {
		pkt_len += iv_size;

		memcpy(buf + TLS_NONCE_OFFSET,
		       ctx->tx.iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, iv_size);
	}

	/* we cover nonce explicit here as well, so buf should be of
	 * size KTLS_DTLS_HEADER_SIZE + KTLS_DTLS_NONCE_EXPLICIT_SIZE
	 */
	buf[0] = version == TLS_1_3_VERSION ?
		   TLS_RECORD_TYPE_DATA : record_type;
	/* Note that VERSION must be TLS_1_2 for both TLS1.2 and TLS1.3 */
	buf[1] = TLS_1_2_VERSION_MINOR;
	buf[2] = TLS_1_2_VERSION_MAJOR;
	/* we can use IV for nonce explicit according to spec */
	buf[3] = pkt_len >> 8;
	buf[4] = pkt_len & 0xFF;
}
static inline void tls_make_aad(char *buf,
				size_t size,
				char *record_sequence,
				int record_sequence_size,
				unsigned char record_type,
				int version)
{
	if (version != TLS_1_3_VERSION) {
		memcpy(buf, record_sequence, record_sequence_size);
		buf += 8;
	} else {
		size += TLS_CIPHER_AES_GCM_128_TAG_SIZE;
	}

	buf[0] = version == TLS_1_3_VERSION ?
		  TLS_RECORD_TYPE_DATA : record_type;
	buf[1] = TLS_1_2_VERSION_MAJOR;
	buf[2] = TLS_1_2_VERSION_MINOR;
	buf[3] = size >> 8;
	buf[4] = size & 0xFF;
}

static inline void xor_iv_with_seq(int version, char *iv, char *seq)
{
	int i;

	if (version == TLS_1_3_VERSION) {
		for (i = 0; i < 8; i++)
			iv[i + 4] ^= seq[i];
	}
}


static inline struct tls_sw_context_rx *tls_sw_ctx_rx(
		const struct tls_context *tls_ctx)
{
	return (struct tls_sw_context_rx *)tls_ctx->priv_ctx_rx;
}

static inline struct tls_sw_context_tx *tls_sw_ctx_tx(
		const struct tls_context *tls_ctx)
{
	return (struct tls_sw_context_tx *)tls_ctx->priv_ctx_tx;
}

static inline struct tls_offload_context_tx *
tls_offload_ctx_tx(const struct tls_context *tls_ctx)
{
	return (struct tls_offload_context_tx *)tls_ctx->priv_ctx_tx;
}

static inline bool tls_sw_has_ctx_tx(const struct sock *sk)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (!ctx)
		return false;
	return !!tls_sw_ctx_tx(ctx);
}

static inline struct tls_offload_context_rx *
tls_offload_ctx_rx(const struct tls_context *tls_ctx)
{
	return (struct tls_offload_context_rx *)tls_ctx->priv_ctx_rx;
}

/* The TLS context is valid until sk_destruct is called */
static inline void tls_offload_rx_resync_request(struct sock *sk, __be32 seq)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);

	atomic64_set(&rx_ctx->resync_req, ((((uint64_t)seq) << 32) | 1));
}


int tls_proccess_cmsg(struct sock *sk, struct msghdr *msg,
		      unsigned char *record_type);
void tls_register_device(struct tls_device *device);
void tls_unregister_device(struct tls_device *device);
int tls_device_decrypted(struct sock *sk, struct sk_buff *skb);
int decrypt_skb(struct sock *sk, struct sk_buff *skb,
		struct scatterlist *sgout);

struct sk_buff *tls_validate_xmit_skb(struct sock *sk,
				      struct net_device *dev,
				      struct sk_buff *skb);

int tls_sw_fallback_init(struct sock *sk,
			 struct tls_offload_context_tx *offload_ctx,
			 struct tls_crypto_info *crypto_info);

int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx);

void tls_device_offload_cleanup_rx(struct sock *sk);
void handle_device_resync(struct sock *sk, u32 seq, u64 rcd_sn);

#endif /* _TLS_OFFLOAD_H */