/*
 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef _TLS_OFFLOAD_H
#define _TLS_OFFLOAD_H

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/crypto.h>
#include <linux/socket.h>
#include <linux/tcp.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/rcupdate.h>

#include <net/net_namespace.h>
#include <net/tcp.h>
#include <net/strparser.h>
#include <crypto/aead.h>
#include <uapi/linux/tls.h>

struct tls_rec;

/* Maximum data size carried in a TLS record */
#define TLS_MAX_PAYLOAD_SIZE		((size_t)1 << 14)
/* Minimum record size limit as per RFC 8449 */
#define TLS_MIN_RECORD_SIZE_LIM		((size_t)1 << 6)

#define TLS_HEADER_SIZE			5
#define TLS_NONCE_OFFSET		TLS_HEADER_SIZE

#define TLS_CRYPTO_INFO_READY(info)	((info)->cipher_type)

#define TLS_HANDSHAKE_KEYUPDATE		24	/* RFC 8446 B.3: KeyUpdate */

#define TLS_AAD_SPACE_SIZE		13
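
/* Illustrative sketch (not a kernel struct): the 5 bytes counted by
 * TLS_HEADER_SIZE are the on-the-wire TLS record header, and the 13 bytes
 * of TLS_AAD_SPACE_SIZE are the TLS 1.2 AAD, i.e. the 8-byte record
 * sequence number followed by that header. The struct name below is
 * hypothetical:
 *
 *	struct tls_record_hdr {
 *		u8	type;		// ContentType (23 = application data)
 *		__be16	version;	// legacy ProtocolVersion
 *		__be16	length;		// ciphertext length
 *	} __packed;			// 5 bytes == TLS_HEADER_SIZE
 *
 *	// AAD: seq(8) || type(1) || version(2) || length(2) == 13 bytes
 */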

#define TLS_MAX_IV_SIZE			16
#define TLS_MAX_SALT_SIZE		4
#define TLS_TAG_SIZE			16
#define TLS_MAX_REC_SEQ_SIZE		8
#define TLS_MAX_AAD_SIZE		TLS_AAD_SPACE_SIZE

/* For CCM mode, the full 16 bytes of the IV are made of 4 fields of the
 * given sizes:
 *
 *	IV[16] = b0[1] || implicit nonce[4] || explicit nonce[8] || length[3]
 *
 * The width of the 'length' field is encoded in 'b0' as '(length width - 1)',
 * hence b0 contains (3 - 1) = 2.
 */
#define TLS_AES_CCM_IV_B0_BYTE		2
#define TLS_SM4_CCM_IV_B0_BYTE		2
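
/* A minimal sketch of how such an IV could be assembled. The 'salt' and
 * 'rec_nonce' names are illustrative; the actual wiring lives in the TLS
 * TX/RX paths, and the CCM implementation fills in the trailing 3-byte
 * length field:
 *
 *	u8 iv[16];
 *
 *	iv[0] = TLS_AES_CCM_IV_B0_BYTE;		// b0 flags byte
 *	memcpy(iv + 1, salt, 4);		// implicit nonce
 *	memcpy(iv + 5, rec_nonce, 8);		// explicit per-record nonce
 *	// iv[13..15]: 3-byte message length, set by the CCM code
 */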

enum {
	TLS_BASE,
	TLS_SW,
	TLS_HW,
	TLS_HW_RECORD,
	TLS_NUM_CONFIG,
};

struct tx_work {
	struct delayed_work work;
	struct sock *sk;
};

struct tls_sw_context_tx {
	struct crypto_aead *aead_send;
	struct crypto_wait async_wait;
	struct tx_work tx_work;
	struct tls_rec *open_rec;
	struct list_head tx_list;
	atomic_t encrypt_pending;
	u8 async_capable:1;

#define BIT_TX_SCHEDULED	0
#define BIT_TX_CLOSING		1
	unsigned long tx_bitmask;
};

struct tls_strparser {
	struct sock *sk;

	u32 mark : 8;
	u32 stopped : 1;
	u32 copy_mode : 1;
	u32 mixed_decrypted : 1;

	bool msg_ready;

	struct strp_msg stm;

	struct sk_buff *anchor;
	struct work_struct work;
};

struct tls_sw_context_rx {
	struct crypto_aead *aead_recv;
	struct crypto_wait async_wait;
	struct sk_buff_head rx_list;	/* list of decrypted 'data' records */
	void (*saved_data_ready)(struct sock *sk);

	u8 reader_present;
	u8 async_capable:1;
	u8 zc_capable:1;
	u8 reader_contended:1;
	bool key_update_pending;

	struct tls_strparser strp;

	atomic_t decrypt_pending;
	struct sk_buff_head async_hold;
	struct wait_queue_head wq;
};

struct tls_record_info {
	struct list_head list;
	u32 end_seq;
	int len;
	int num_frags;
	skb_frag_t frags[MAX_SKB_FRAGS];
};

#define TLS_DRIVER_STATE_SIZE_TX	16
struct tls_offload_context_tx {
	struct crypto_aead *aead_send;
	spinlock_t lock;	/* protects records list */
	struct list_head records_list;
	struct tls_record_info *open_record;
	struct tls_record_info *retransmit_hint;
	u64 hint_record_sn;
	u64 unacked_record_sn;

	struct scatterlist sg_tx_data[MAX_SKB_FRAGS];
	void (*sk_destruct)(struct sock *sk);
	struct work_struct destruct_work;
	struct tls_context *ctx;
	/* The TLS layer reserves room for driver-specific state.
	 * Currently the belief is that there is not enough driver-specific
	 * state to justify another layer of indirection.
	 */
	u8 driver_state[TLS_DRIVER_STATE_SIZE_TX] __aligned(8);
};

enum tls_context_flags {
	/* tls_device_down was called after the netdev went down, device state
	 * was released, and kTLS works in software, even though rx_conf is
	 * still TLS_HW (needed for transition).
	 */
	TLS_RX_DEV_DEGRADED = 0,
	/* Unlike RX, where resync is driven entirely by the core, in TX only
	 * the driver knows when things went out of sync, so we need the flag
	 * to be atomic.
	 */
	TLS_TX_SYNC_SCHED = 1,
	/* tls_dev_del was called for the RX side, device state was released,
	 * but tls_ctx->netdev might still be kept, because TX-side driver
	 * resources might not be released yet. Used to prevent the second
	 * tls_dev_del call in tls_device_down if it happens simultaneously.
	 */
	TLS_RX_DEV_CLOSED = 2,
};

struct cipher_context {
	char iv[TLS_MAX_IV_SIZE + TLS_MAX_SALT_SIZE];
	char rec_seq[TLS_MAX_REC_SEQ_SIZE];
};

union tls_crypto_context {
	struct tls_crypto_info info;
	union {
		struct tls12_crypto_info_aes_gcm_128 aes_gcm_128;
		struct tls12_crypto_info_aes_gcm_256 aes_gcm_256;
		struct tls12_crypto_info_chacha20_poly1305 chacha20_poly1305;
		struct tls12_crypto_info_sm4_gcm sm4_gcm;
		struct tls12_crypto_info_sm4_ccm sm4_ccm;
	};
};
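
/* The leading 'info' member acts as a generic header for the union: code can
 * inspect it first, then view the same memory as the cipher-specific type.
 * A minimal sketch of that pattern, assuming 'crypto_ctx' points at a
 * filled-in union tls_crypto_context:
 *
 *	switch (crypto_ctx->info.cipher_type) {
 *	case TLS_CIPHER_AES_GCM_128:
 *		key = crypto_ctx->aes_gcm_128.key;
 *		break;
 *	case TLS_CIPHER_AES_GCM_256:
 *		key = crypto_ctx->aes_gcm_256.key;
 *		break;
 *	}
 */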

struct tls_prot_info {
	u16 version;
	u16 cipher_type;
	u16 prepend_size;
	u16 tag_size;
	u16 overhead_size;
	u16 iv_size;
	u16 salt_size;
	u16 rec_seq_size;
	u16 aad_size;
	u16 tail_size;
};

struct tls_context {
	/* read-only cache line */
	struct tls_prot_info prot_info;

	u8 tx_conf:3;
	u8 rx_conf:3;
	u8 zerocopy_sendfile:1;
	u8 rx_no_pad:1;
	u16 tx_max_payload_len;

	int  (*push_pending_record)(struct sock *sk, int flags);
	void (*sk_write_space)(struct sock *sk);

	void *priv_ctx_tx;
	void *priv_ctx_rx;

	struct net_device __rcu *netdev;

	/* rw cache line */
	struct cipher_context tx;
	struct cipher_context rx;

	struct scatterlist *partially_sent_record;
	u16 partially_sent_offset;

	bool splicing_pages;
	bool pending_open_record_frags;

	struct mutex tx_lock; /* protects partially_sent_* fields and
			       * per-type TX fields
			       */
	unsigned long flags;

	/* cache cold stuff */
	struct proto *sk_proto;
	struct sock *sk;

	void (*sk_destruct)(struct sock *sk);

	union tls_crypto_context crypto_send;
	union tls_crypto_context crypto_recv;

	struct list_head list;
	refcount_t refcount;
	struct rcu_head rcu;
};

enum tls_offload_ctx_dir {
	TLS_OFFLOAD_CTX_DIR_RX,
	TLS_OFFLOAD_CTX_DIR_TX,
};

struct tlsdev_ops {
	int (*tls_dev_add)(struct net_device *netdev, struct sock *sk,
			   enum tls_offload_ctx_dir direction,
			   struct tls_crypto_info *crypto_info,
			   u32 start_offload_tcp_sn);
	void (*tls_dev_del)(struct net_device *netdev,
			    struct tls_context *ctx,
			    enum tls_offload_ctx_dir direction);
	int (*tls_dev_resync)(struct net_device *netdev,
			      struct sock *sk, u32 seq, u8 *rcd_sn,
			      enum tls_offload_ctx_dir direction);
};
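
/* A NIC driver that implements these callbacks typically publishes them and
 * advertises the matching feature bits from its probe path. A minimal
 * sketch, with hypothetical mydrv_* callbacks:
 *
 *	static const struct tlsdev_ops mydrv_tlsdev_ops = {
 *		.tls_dev_add	= mydrv_tls_dev_add,
 *		.tls_dev_del	= mydrv_tls_dev_del,
 *		.tls_dev_resync	= mydrv_tls_dev_resync,
 *	};
 *
 *	netdev->tlsdev_ops = &mydrv_tlsdev_ops;
 *	netdev->hw_features |= NETIF_F_HW_TLS_TX | NETIF_F_HW_TLS_RX;
 *	netdev->features    |= NETIF_F_HW_TLS_TX | NETIF_F_HW_TLS_RX;
 */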

enum tls_offload_sync_type {
	TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ	= 0,
	TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT	= 1,
	TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ_ASYNC	= 2,
};

#define TLS_DEVICE_RESYNC_NH_START_IVAL		2
#define TLS_DEVICE_RESYNC_NH_MAX_IVAL		128

#define TLS_DEVICE_RESYNC_ASYNC_LOGMAX		13
struct tls_offload_resync_async {
	atomic64_t req;
	u16 loglen;
	u16 rcd_delta;
	u32 log[TLS_DEVICE_RESYNC_ASYNC_LOGMAX];
};

#define TLS_DRIVER_STATE_SIZE_RX	8
struct tls_offload_context_rx {
	/* sw must be the first member of tls_offload_context_rx */
	struct tls_sw_context_rx sw;
	enum tls_offload_sync_type resync_type;
	/* this member is set regardless of resync_type, to avoid branches */
	u8 resync_nh_reset:1;
	/* CORE_NEXT_HINT-only member, but use the hole here */
	u8 resync_nh_do_now:1;
	union {
		/* TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ */
		struct {
			atomic64_t resync_req;
		};
		/* TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT */
		struct {
			u32 decrypted_failed;
			u32 decrypted_tgt;
		} resync_nh;
		/* TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ_ASYNC */
		struct {
			struct tls_offload_resync_async *resync_async;
		};
	};
	/* The TLS layer reserves room for driver-specific state.
	 * Currently the belief is that there is not enough driver-specific
	 * state to justify another layer of indirection.
	 */
	u8 driver_state[TLS_DRIVER_STATE_SIZE_RX] __aligned(8);
};

struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context,
				       u32 seq, u64 *p_record_sn);

static inline bool tls_record_is_start_marker(struct tls_record_info *rec)
{
	return rec->len == 0;
}

static inline u32 tls_record_start_seq(struct tls_record_info *rec)
{
	return rec->end_seq - rec->len;
}
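
/* A minimal sketch of how a TX-offload driver might use these helpers when
 * handling a retransmission: look up the record covering a TCP sequence
 * number under the context lock, then recover where inside the record that
 * sequence falls. Error handling and the surrounding driver code are elided:
 *
 *	struct tls_record_info *rec;
 *	unsigned long flags;
 *	u64 rcd_sn;
 *
 *	spin_lock_irqsave(&tx_ctx->lock, flags);
 *	rec = tls_get_record(tx_ctx, tcp_seq, &rcd_sn);
 *	if (rec && !tls_record_is_start_marker(rec))
 *		offset = tcp_seq - tls_record_start_seq(rec);
 *	spin_unlock_irqrestore(&tx_ctx->lock, flags);
 */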

struct sk_buff *
tls_validate_xmit_skb(struct sock *sk, struct net_device *dev,
		      struct sk_buff *skb);
struct sk_buff *
tls_validate_xmit_skb_sw(struct sock *sk, struct net_device *dev,
			 struct sk_buff *skb);

static inline bool tls_is_skb_tx_device_offloaded(const struct sk_buff *skb)
{
#ifdef CONFIG_TLS_DEVICE
	struct sock *sk = skb->sk;

	return sk && sk_fullsock(sk) &&
	       (smp_load_acquire(&sk->sk_validate_xmit_skb) ==
	       &tls_validate_xmit_skb);
#else
	return false;
#endif
}
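
/* A sketch of the usual call site, assuming a hypothetical mydrv_ktls_xmit()
 * that steers TLS skbs onto the driver's crypto-offload path:
 *
 *	static netdev_tx_t mydrv_start_xmit(struct sk_buff *skb,
 *					    struct net_device *dev)
 *	{
 *		if (tls_is_skb_tx_device_offloaded(skb))
 *			return mydrv_ktls_xmit(dev, skb);
 *		// ... regular transmit path ...
 *	}
 */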

static inline struct tls_context *tls_get_ctx(const struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	/* Use RCU on icsk_ulp_data only for sock diag code;
	 * the TLS data path doesn't need rcu_dereference().
	 */
	return (__force void *)icsk->icsk_ulp_data;
}

static inline struct tls_sw_context_rx *tls_sw_ctx_rx(
		const struct tls_context *tls_ctx)
{
	return (struct tls_sw_context_rx *)tls_ctx->priv_ctx_rx;
}

static inline struct tls_sw_context_tx *tls_sw_ctx_tx(
		const struct tls_context *tls_ctx)
{
	return (struct tls_sw_context_tx *)tls_ctx->priv_ctx_tx;
}

static inline struct tls_offload_context_tx *
tls_offload_ctx_tx(const struct tls_context *tls_ctx)
{
	return (struct tls_offload_context_tx *)tls_ctx->priv_ctx_tx;
}

static inline bool tls_sw_has_ctx_tx(const struct sock *sk)
{
	struct tls_context *ctx;

	if (!sk_is_inet(sk) || !inet_test_bit(IS_ICSK, sk))
		return false;

	ctx = tls_get_ctx(sk);
	if (!ctx)
		return false;
	return !!tls_sw_ctx_tx(ctx);
}

static inline bool tls_sw_has_ctx_rx(const struct sock *sk)
{
	struct tls_context *ctx;

	if (!sk_is_inet(sk) || !inet_test_bit(IS_ICSK, sk))
		return false;

	ctx = tls_get_ctx(sk);
	if (!ctx)
		return false;
	return !!tls_sw_ctx_rx(ctx);
}

static inline struct tls_offload_context_rx *
tls_offload_ctx_rx(const struct tls_context *tls_ctx)
{
	return (struct tls_offload_context_rx *)tls_ctx->priv_ctx_rx;
}

static inline void *__tls_driver_ctx(struct tls_context *tls_ctx,
				     enum tls_offload_ctx_dir direction)
{
	if (direction == TLS_OFFLOAD_CTX_DIR_TX)
		return tls_offload_ctx_tx(tls_ctx)->driver_state;
	else
		return tls_offload_ctx_rx(tls_ctx)->driver_state;
}

static inline void *
tls_driver_ctx(const struct sock *sk, enum tls_offload_ctx_dir direction)
{
	return __tls_driver_ctx(tls_get_ctx(sk), direction);
}
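
/* A minimal sketch of how a driver can keep its per-connection state in the
 * reserved driver_state area. The struct and its fields are hypothetical;
 * the only hard requirement is that the state fits in
 * TLS_DRIVER_STATE_SIZE_TX (resp. TLS_DRIVER_STATE_SIZE_RX):
 *
 *	struct mydrv_tls_tx_state {
 *		u32 hw_ctx_id;		// HW crypto context handle
 *		u32 expected_tcp_seq;	// next in-order sequence
 *	};
 *
 *	static struct mydrv_tls_tx_state *mydrv_tls_tx_state(struct sock *sk)
 *	{
 *		BUILD_BUG_ON(sizeof(struct mydrv_tls_tx_state) >
 *			     TLS_DRIVER_STATE_SIZE_TX);
 *		return tls_driver_ctx(sk, TLS_OFFLOAD_CTX_DIR_TX);
 *	}
 */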

#define RESYNC_REQ BIT(0)
#define RESYNC_REQ_ASYNC BIT(1)
/* The TLS context is valid until sk_destruct is called */
static inline void tls_offload_rx_resync_request(struct sock *sk, __be32 seq)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);

	atomic64_set(&rx_ctx->resync_req, ((u64)ntohl(seq) << 32) | RESYNC_REQ);
}
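
/* A sketch of the intended call site: when an RX-offload driver detects that
 * the device lost record-boundary tracking, it hands the TCP sequence of the
 * failing position to the core, which resolves the record boundary and
 * replies through the tls_dev_resync callback. Both the completion-queue
 * helper and its fields below are hypothetical:
 *
 *	if (mydrv_cqe_resync_needed(cqe))
 *		tls_offload_rx_resync_request(sk, cqe->tcp_seq_be);
 */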

/* Log all TLS record header TCP sequences in [seq, seq+len] */
static inline void
tls_offload_rx_resync_async_request_start(struct tls_offload_resync_async *resync_async,
					  __be32 seq, u16 len)
{
	atomic64_set(&resync_async->req, ((u64)ntohl(seq) << 32) |
		     ((u64)len << 16) | RESYNC_REQ | RESYNC_REQ_ASYNC);
	resync_async->loglen = 0;
	resync_async->rcd_delta = 0;
}

static inline void
tls_offload_rx_resync_async_request_end(struct tls_offload_resync_async *resync_async,
					__be32 seq)
{
	atomic64_set(&resync_async->req, ((u64)ntohl(seq) << 32) | RESYNC_REQ);
}

static inline void
tls_offload_rx_resync_async_request_cancel(struct tls_offload_resync_async *resync_async)
{
	atomic64_set(&resync_async->req, 0);
}
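
/* A sketch of the async resync lifecycle from a driver's point of view,
 * assuming rx_ctx->resync_async was allocated by the driver at setup time
 * (the seq/len values and the triggering conditions are illustrative):
 *
 *	// device reported it lost sync somewhere in [seq, seq + len]
 *	tls_offload_rx_resync_async_request_start(rx_ctx->resync_async,
 *						  seq, len);
 *	// ... later, once the exact record start is known ...
 *	tls_offload_rx_resync_async_request_end(rx_ctx->resync_async, seq);
 *	// ... or give up, e.g. on teardown:
 *	tls_offload_rx_resync_async_request_cancel(rx_ctx->resync_async);
 */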

static inline void
tls_offload_rx_resync_set_type(struct sock *sk, enum tls_offload_sync_type type)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);

	tls_offload_ctx_rx(tls_ctx)->resync_type = type;
}

/* Driver's seq tracking has to stay disabled until resync has succeeded */
static inline bool tls_offload_tx_resync_pending(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	bool ret;

	ret = test_bit(TLS_TX_SYNC_SCHED, &tls_ctx->flags);
	smp_mb__after_atomic();
	return ret;
}
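
/* A sketch of how a TX path could pair this with
 * tls_offload_tx_resync_request() (declared below): report the divergence,
 * then avoid HW offload while a resync is pending. The priv->* field and
 * the fallback helper are hypothetical:
 *
 *	if (unlikely(tcp_seq != priv->expected_tcp_seq))
 *		tls_offload_tx_resync_request(sk, tcp_seq,
 *					      priv->expected_tcp_seq);
 *
 *	if (tls_offload_tx_resync_pending(sk))
 *		return mydrv_sw_fallback_xmit(skb);
 */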

struct sk_buff *tls_encrypt_skb(struct sk_buff *skb);

#ifdef CONFIG_TLS_DEVICE
void tls_device_sk_destruct(struct sock *sk);
void tls_offload_tx_resync_request(struct sock *sk, u32 got_seq, u32 exp_seq);

static inline bool tls_is_sk_rx_device_offloaded(struct sock *sk)
{
	if (!sk_fullsock(sk) ||
	    smp_load_acquire(&sk->sk_destruct) != tls_device_sk_destruct)
		return false;
	return tls_get_ctx(sk)->rx_conf == TLS_HW;
}
#endif
#endif /* _TLS_OFFLOAD_H */
| 513 | |