-rw-r--r--  include/net/tls.h                  |  72
-rw-r--r--  include/uapi/linux/tls.h           |  19
-rw-r--r--  net/tls/tls_device.c               |   5
-rw-r--r--  net/tls/tls_device_fallback.c      |   3
-rw-r--r--  net/tls/tls_main.c                 |  36
-rw-r--r--  net/tls/tls_sw.c                   | 244
-rw-r--r--  tools/testing/selftests/net/tls.c  | 138
7 files changed, 417 insertions, 100 deletions
diff --git a/include/net/tls.h b/include/net/tls.h index 4592606e136a..004bf01ce868 100644 --- a/include/net/tls.h +++ b/include/net/tls.h @@ -119,6 +119,9 @@ struct tls_rec { /* AAD | msg_encrypted.sg.data (data contains overhead for hdr & iv & tag) */ struct scatterlist sg_aead_out[2]; + char content_type; + struct scatterlist sg_content_type; + char aad_space[TLS_AAD_SPACE_SIZE]; u8 iv_data[TLS_CIPHER_AES_GCM_128_IV_SIZE + TLS_CIPHER_AES_GCM_128_SALT_SIZE]; @@ -202,11 +205,16 @@ struct cipher_context { char *iv; u16 rec_seq_size; char *rec_seq; + u16 aad_size; + u16 tail_size; }; union tls_crypto_context { struct tls_crypto_info info; - struct tls12_crypto_info_aes_gcm_128 aes_gcm_128; + union { + struct tls12_crypto_info_aes_gcm_128 aes_gcm_128; + struct tls12_crypto_info_aes_gcm_256 aes_gcm_256; + }; }; struct tls_context { @@ -393,49 +401,77 @@ static inline bool tls_bigint_increment(unsigned char *seq, int len) } static inline void tls_advance_record_sn(struct sock *sk, - struct cipher_context *ctx) + struct cipher_context *ctx, + int version) { if (tls_bigint_increment(ctx->rec_seq, ctx->rec_seq_size)) tls_err_abort(sk, EBADMSG); - tls_bigint_increment(ctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, - ctx->iv_size); + + if (version != TLS_1_3_VERSION) { + tls_bigint_increment(ctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, + ctx->iv_size); + } } static inline void tls_fill_prepend(struct tls_context *ctx, char *buf, size_t plaintext_len, - unsigned char record_type) + unsigned char record_type, + int version) { size_t pkt_len, iv_size = ctx->tx.iv_size; - pkt_len = plaintext_len + iv_size + ctx->tx.tag_size; + pkt_len = plaintext_len + ctx->tx.tag_size; + if (version != TLS_1_3_VERSION) { + pkt_len += iv_size; + + memcpy(buf + TLS_NONCE_OFFSET, + ctx->tx.iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, iv_size); + } /* we cover nonce explicit here as well, so buf should be of * size KTLS_DTLS_HEADER_SIZE + KTLS_DTLS_NONCE_EXPLICIT_SIZE */ - buf[0] = record_type; - buf[1] = TLS_VERSION_MINOR(ctx->crypto_send.info.version); - buf[2] = TLS_VERSION_MAJOR(ctx->crypto_send.info.version); + buf[0] = version == TLS_1_3_VERSION ? + TLS_RECORD_TYPE_DATA : record_type; + /* Note that VERSION must be TLS_1_2 for both TLS1.2 and TLS1.3 */ + buf[1] = TLS_1_2_VERSION_MINOR; + buf[2] = TLS_1_2_VERSION_MAJOR; /* we can use IV for nonce explicit according to spec */ buf[3] = pkt_len >> 8; buf[4] = pkt_len & 0xFF; - memcpy(buf + TLS_NONCE_OFFSET, - ctx->tx.iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, iv_size); } static inline void tls_make_aad(char *buf, size_t size, char *record_sequence, int record_sequence_size, - unsigned char record_type) + unsigned char record_type, + int version) +{ + if (version != TLS_1_3_VERSION) { + memcpy(buf, record_sequence, record_sequence_size); + buf += 8; + } else { + size += TLS_CIPHER_AES_GCM_128_TAG_SIZE; + } + + buf[0] = version == TLS_1_3_VERSION ? 
+ TLS_RECORD_TYPE_DATA : record_type; + buf[1] = TLS_1_2_VERSION_MAJOR; + buf[2] = TLS_1_2_VERSION_MINOR; + buf[3] = size >> 8; + buf[4] = size & 0xFF; +} + +static inline void xor_iv_with_seq(int version, char *iv, char *seq) { - memcpy(buf, record_sequence, record_sequence_size); + int i; - buf[8] = record_type; - buf[9] = TLS_1_2_VERSION_MAJOR; - buf[10] = TLS_1_2_VERSION_MINOR; - buf[11] = size >> 8; - buf[12] = size & 0xFF; + if (version == TLS_1_3_VERSION) { + for (i = 0; i < 8; i++) + iv[i + 4] ^= seq[i]; + } } static inline struct tls_context *tls_get_ctx(const struct sock *sk) diff --git a/include/uapi/linux/tls.h b/include/uapi/linux/tls.h index ff02287495ac..401d6f01de6a 100644 --- a/include/uapi/linux/tls.h +++ b/include/uapi/linux/tls.h @@ -51,6 +51,10 @@ #define TLS_1_2_VERSION_MINOR 0x3 #define TLS_1_2_VERSION TLS_VERSION_NUMBER(TLS_1_2) +#define TLS_1_3_VERSION_MAJOR 0x3 +#define TLS_1_3_VERSION_MINOR 0x4 +#define TLS_1_3_VERSION TLS_VERSION_NUMBER(TLS_1_3) + /* Supported ciphers */ #define TLS_CIPHER_AES_GCM_128 51 #define TLS_CIPHER_AES_GCM_128_IV_SIZE 8 @@ -59,6 +63,13 @@ #define TLS_CIPHER_AES_GCM_128_TAG_SIZE 16 #define TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE 8 +#define TLS_CIPHER_AES_GCM_256 52 +#define TLS_CIPHER_AES_GCM_256_IV_SIZE 8 +#define TLS_CIPHER_AES_GCM_256_KEY_SIZE 32 +#define TLS_CIPHER_AES_GCM_256_SALT_SIZE 4 +#define TLS_CIPHER_AES_GCM_256_TAG_SIZE 16 +#define TLS_CIPHER_AES_GCM_256_REC_SEQ_SIZE 8 + #define TLS_SET_RECORD_TYPE 1 #define TLS_GET_RECORD_TYPE 2 @@ -75,4 +86,12 @@ struct tls12_crypto_info_aes_gcm_128 { unsigned char rec_seq[TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE]; }; +struct tls12_crypto_info_aes_gcm_256 { + struct tls_crypto_info info; + unsigned char iv[TLS_CIPHER_AES_GCM_256_IV_SIZE]; + unsigned char key[TLS_CIPHER_AES_GCM_256_KEY_SIZE]; + unsigned char salt[TLS_CIPHER_AES_GCM_256_SALT_SIZE]; + unsigned char rec_seq[TLS_CIPHER_AES_GCM_256_REC_SEQ_SIZE]; +}; + #endif /* _UAPI_LINUX_TLS_H */ diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c index d753e362d2d9..7ee9008b2187 100644 --- a/net/tls/tls_device.c +++ b/net/tls/tls_device.c @@ -257,7 +257,8 @@ static int tls_push_record(struct sock *sk, tls_fill_prepend(ctx, skb_frag_address(frag), record->len - ctx->tx.prepend_size, - record_type); + record_type, + ctx->crypto_send.info.version); /* HW doesn't care about the data in the tag, because it fills it. 
*/ dummy_tag_frag.page = skb_frag_page(frag); @@ -270,7 +271,7 @@ static int tls_push_record(struct sock *sk, spin_unlock_irq(&offload_ctx->lock); offload_ctx->open_record = NULL; set_bit(TLS_PENDING_CLOSED_RECORD, &ctx->flags); - tls_advance_record_sn(sk, &ctx->tx); + tls_advance_record_sn(sk, &ctx->tx, ctx->crypto_send.info.version); for (i = 0; i < record->num_frags; i++) { frag = &record->frags[i]; diff --git a/net/tls/tls_device_fallback.c b/net/tls/tls_device_fallback.c index 450a6dbc5a88..54c3a758f2a7 100644 --- a/net/tls/tls_device_fallback.c +++ b/net/tls/tls_device_fallback.c @@ -73,7 +73,8 @@ static int tls_enc_record(struct aead_request *aead_req, len -= TLS_CIPHER_AES_GCM_128_IV_SIZE; tls_make_aad(aad, len - TLS_CIPHER_AES_GCM_128_TAG_SIZE, - (char *)&rcd_sn, sizeof(rcd_sn), buf[0]); + (char *)&rcd_sn, sizeof(rcd_sn), buf[0], + TLS_1_2_VERSION); memcpy(iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, buf + TLS_HEADER_SIZE, TLS_CIPHER_AES_GCM_128_IV_SIZE); diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c index d36d095cbcf0..d1c2fd9a3f63 100644 --- a/net/tls/tls_main.c +++ b/net/tls/tls_main.c @@ -372,6 +372,30 @@ static int do_tls_getsockopt_tx(struct sock *sk, char __user *optval, rc = -EFAULT; break; } + case TLS_CIPHER_AES_GCM_256: { + struct tls12_crypto_info_aes_gcm_256 * + crypto_info_aes_gcm_256 = + container_of(crypto_info, + struct tls12_crypto_info_aes_gcm_256, + info); + + if (len != sizeof(*crypto_info_aes_gcm_256)) { + rc = -EINVAL; + goto out; + } + lock_sock(sk); + memcpy(crypto_info_aes_gcm_256->iv, + ctx->tx.iv + TLS_CIPHER_AES_GCM_256_SALT_SIZE, + TLS_CIPHER_AES_GCM_256_IV_SIZE); + memcpy(crypto_info_aes_gcm_256->rec_seq, ctx->tx.rec_seq, + TLS_CIPHER_AES_GCM_256_REC_SEQ_SIZE); + release_sock(sk); + if (copy_to_user(optval, + crypto_info_aes_gcm_256, + sizeof(*crypto_info_aes_gcm_256))) + rc = -EFAULT; + break; + } default: rc = -EINVAL; } @@ -412,6 +436,7 @@ static int do_tls_setsockopt_conf(struct sock *sk, char __user *optval, { struct tls_crypto_info *crypto_info; struct tls_context *ctx = tls_get_ctx(sk); + size_t optsize; int rc = 0; int conf; @@ -438,14 +463,19 @@ static int do_tls_setsockopt_conf(struct sock *sk, char __user *optval, } /* check version */ - if (crypto_info->version != TLS_1_2_VERSION) { + if (crypto_info->version != TLS_1_2_VERSION && + crypto_info->version != TLS_1_3_VERSION) { rc = -ENOTSUPP; goto err_crypto_info; } switch (crypto_info->cipher_type) { - case TLS_CIPHER_AES_GCM_128: { - if (optlen != sizeof(struct tls12_crypto_info_aes_gcm_128)) { + case TLS_CIPHER_AES_GCM_128: + case TLS_CIPHER_AES_GCM_256: { + optsize = crypto_info->cipher_type == TLS_CIPHER_AES_GCM_128 ? 
+ sizeof(struct tls12_crypto_info_aes_gcm_128) : + sizeof(struct tls12_crypto_info_aes_gcm_256); + if (optlen != optsize) { rc = -EINVAL; goto err_crypto_info; } diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index 3f2a6af27e62..06d7ae97b929 100644 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c @@ -120,6 +120,34 @@ static int skb_nsg(struct sk_buff *skb, int offset, int len) return __skb_nsg(skb, offset, len, 0); } +static int padding_length(struct tls_sw_context_rx *ctx, + struct tls_context *tls_ctx, struct sk_buff *skb) +{ + struct strp_msg *rxm = strp_msg(skb); + int sub = 0; + + /* Determine zero-padding length */ + if (tls_ctx->crypto_recv.info.version == TLS_1_3_VERSION) { + char content_type = 0; + int err; + int back = 17; + + while (content_type == 0) { + if (back > rxm->full_len) + return -EBADMSG; + err = skb_copy_bits(skb, + rxm->offset + rxm->full_len - back, + &content_type, 1); + if (content_type) + break; + sub++; + back++; + } + ctx->control = content_type; + } + return sub; +} + static void tls_decrypt_done(struct crypto_async_request *req, int err) { struct aead_request *aead_req = (struct aead_request *)req; @@ -142,7 +170,7 @@ static void tls_decrypt_done(struct crypto_async_request *req, int err) tls_err_abort(skb->sk, err); } else { struct strp_msg *rxm = strp_msg(skb); - + rxm->full_len -= padding_length(ctx, tls_ctx, skb); rxm->offset += tls_ctx->rx.prepend_size; rxm->full_len -= tls_ctx->rx.overhead_size; } @@ -185,7 +213,7 @@ static int tls_do_decryption(struct sock *sk, int ret; aead_request_set_tfm(aead_req, ctx->aead_recv); - aead_request_set_ad(aead_req, TLS_AAD_SPACE_SIZE); + aead_request_set_ad(aead_req, tls_ctx->rx.aad_size); aead_request_set_crypt(aead_req, sgin, sgout, data_len + tls_ctx->rx.tag_size, (u8 *)iv_recv); @@ -289,12 +317,12 @@ static struct tls_rec *tls_get_rec(struct sock *sk) sg_init_table(rec->sg_aead_in, 2); sg_set_buf(&rec->sg_aead_in[0], rec->aad_space, - sizeof(rec->aad_space)); + tls_ctx->tx.aad_size); sg_unmark_end(&rec->sg_aead_in[1]); sg_init_table(rec->sg_aead_out, 2); sg_set_buf(&rec->sg_aead_out[0], rec->aad_space, - sizeof(rec->aad_space)); + tls_ctx->tx.aad_size); sg_unmark_end(&rec->sg_aead_out[1]); return rec; @@ -448,6 +476,8 @@ static int tls_do_encryption(struct sock *sk, int rc; memcpy(rec->iv_data, tls_ctx->tx.iv, sizeof(rec->iv_data)); + xor_iv_with_seq(tls_ctx->crypto_send.info.version, rec->iv_data, + tls_ctx->tx.rec_seq); sge->offset += tls_ctx->tx.prepend_size; sge->length -= tls_ctx->tx.prepend_size; @@ -455,7 +485,7 @@ static int tls_do_encryption(struct sock *sk, msg_en->sg.curr = start; aead_request_set_tfm(aead_req, ctx->aead_send); - aead_request_set_ad(aead_req, TLS_AAD_SPACE_SIZE); + aead_request_set_ad(aead_req, tls_ctx->tx.aad_size); aead_request_set_crypt(aead_req, rec->sg_aead_in, rec->sg_aead_out, data_len, rec->iv_data); @@ -483,7 +513,8 @@ static int tls_do_encryption(struct sock *sk, /* Unhook the record from context if encryption is not failure */ ctx->open_rec = NULL; - tls_advance_record_sn(sk, &tls_ctx->tx); + tls_advance_record_sn(sk, &tls_ctx->tx, + tls_ctx->crypto_send.info.version); return rc; } @@ -640,7 +671,17 @@ static int tls_push_record(struct sock *sk, int flags, i = msg_pl->sg.end; sk_msg_iter_var_prev(i); - sg_mark_end(sk_msg_elem(msg_pl, i)); + + rec->content_type = record_type; + if (tls_ctx->crypto_send.info.version == TLS_1_3_VERSION) { + /* Add content type to end of message. 
No padding added */ + sg_set_buf(&rec->sg_content_type, &rec->content_type, 1); + sg_mark_end(&rec->sg_content_type); + sg_chain(msg_pl->sg.data, msg_pl->sg.end + 1, + &rec->sg_content_type); + } else { + sg_mark_end(sk_msg_elem(msg_pl, i)); + } i = msg_pl->sg.start; sg_chain(rec->sg_aead_in, 2, rec->inplace_crypto ? @@ -653,18 +694,22 @@ static int tls_push_record(struct sock *sk, int flags, i = msg_en->sg.start; sg_chain(rec->sg_aead_out, 2, &msg_en->sg.data[i]); - tls_make_aad(rec->aad_space, msg_pl->sg.size, + tls_make_aad(rec->aad_space, msg_pl->sg.size + tls_ctx->tx.tail_size, tls_ctx->tx.rec_seq, tls_ctx->tx.rec_seq_size, - record_type); + record_type, + tls_ctx->crypto_send.info.version); tls_fill_prepend(tls_ctx, page_address(sg_page(&msg_en->sg.data[i])) + - msg_en->sg.data[i].offset, msg_pl->sg.size, - record_type); + msg_en->sg.data[i].offset, + msg_pl->sg.size + tls_ctx->tx.tail_size, + record_type, + tls_ctx->crypto_send.info.version); tls_ctx->pending_open_record_frags = false; - rc = tls_do_encryption(sk, tls_ctx, ctx, req, msg_pl->sg.size, i); + rc = tls_do_encryption(sk, tls_ctx, ctx, req, + msg_pl->sg.size + tls_ctx->tx.tail_size, i); if (rc < 0) { if (rc != -EINPROGRESS) { tls_err_abort(sk, EBADMSG); @@ -1292,7 +1337,8 @@ static int decrypt_internal(struct sock *sk, struct sk_buff *skb, u8 *aad, *iv, *mem = NULL; struct scatterlist *sgin = NULL; struct scatterlist *sgout = NULL; - const int data_len = rxm->full_len - tls_ctx->rx.overhead_size; + const int data_len = rxm->full_len - tls_ctx->rx.overhead_size + + tls_ctx->rx.tail_size; if (*zc && (out_iov || out_sg)) { if (out_iov) @@ -1317,7 +1363,7 @@ static int decrypt_internal(struct sock *sk, struct sk_buff *skb, aead_size = sizeof(*aead_req) + crypto_aead_reqsize(ctx->aead_recv); mem_size = aead_size + (nsg * sizeof(struct scatterlist)); - mem_size = mem_size + TLS_AAD_SPACE_SIZE; + mem_size = mem_size + tls_ctx->rx.aad_size; mem_size = mem_size + crypto_aead_ivsize(ctx->aead_recv); /* Allocate a single block of memory which contains @@ -1333,7 +1379,7 @@ static int decrypt_internal(struct sock *sk, struct sk_buff *skb, sgin = (struct scatterlist *)(mem + aead_size); sgout = sgin + n_sgin; aad = (u8 *)(sgout + n_sgout); - iv = aad + TLS_AAD_SPACE_SIZE; + iv = aad + tls_ctx->rx.aad_size; /* Prepare IV */ err = skb_copy_bits(skb, rxm->offset + TLS_HEADER_SIZE, @@ -1343,16 +1389,24 @@ static int decrypt_internal(struct sock *sk, struct sk_buff *skb, kfree(mem); return err; } - memcpy(iv, tls_ctx->rx.iv, TLS_CIPHER_AES_GCM_128_SALT_SIZE); + if (tls_ctx->crypto_recv.info.version == TLS_1_3_VERSION) + memcpy(iv, tls_ctx->rx.iv, crypto_aead_ivsize(ctx->aead_recv)); + else + memcpy(iv, tls_ctx->rx.iv, TLS_CIPHER_AES_GCM_128_SALT_SIZE); + + xor_iv_with_seq(tls_ctx->crypto_recv.info.version, iv, + tls_ctx->rx.rec_seq); /* Prepare AAD */ - tls_make_aad(aad, rxm->full_len - tls_ctx->rx.overhead_size, + tls_make_aad(aad, rxm->full_len - tls_ctx->rx.overhead_size + + tls_ctx->rx.tail_size, tls_ctx->rx.rec_seq, tls_ctx->rx.rec_seq_size, - ctx->control); + ctx->control, + tls_ctx->crypto_recv.info.version); /* Prepare sgin */ sg_init_table(sgin, n_sgin); - sg_set_buf(&sgin[0], aad, TLS_AAD_SPACE_SIZE); + sg_set_buf(&sgin[0], aad, tls_ctx->rx.aad_size); err = skb_to_sgvec(skb, &sgin[1], rxm->offset + tls_ctx->rx.prepend_size, rxm->full_len - tls_ctx->rx.prepend_size); @@ -1364,7 +1418,7 @@ static int decrypt_internal(struct sock *sk, struct sk_buff *skb, if (n_sgout) { if (out_iov) { sg_init_table(sgout, n_sgout); - 
sg_set_buf(&sgout[0], aad, TLS_AAD_SPACE_SIZE); + sg_set_buf(&sgout[0], aad, tls_ctx->rx.aad_size); *chunk = 0; err = tls_setup_from_iter(sk, out_iov, data_len, @@ -1405,6 +1459,7 @@ static int decrypt_skb_update(struct sock *sk, struct sk_buff *skb, { struct tls_context *tls_ctx = tls_get_ctx(sk); struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); + int version = tls_ctx->crypto_recv.info.version; struct strp_msg *rxm = strp_msg(skb); int err = 0; @@ -1417,20 +1472,23 @@ static int decrypt_skb_update(struct sock *sk, struct sk_buff *skb, err = decrypt_internal(sk, skb, dest, NULL, chunk, zc, async); if (err < 0) { if (err == -EINPROGRESS) - tls_advance_record_sn(sk, &tls_ctx->rx); + tls_advance_record_sn(sk, &tls_ctx->rx, + version); return err; } + + rxm->full_len -= padding_length(ctx, tls_ctx, skb); + + rxm->offset += tls_ctx->rx.prepend_size; + rxm->full_len -= tls_ctx->rx.overhead_size; + tls_advance_record_sn(sk, &tls_ctx->rx, version); + ctx->decrypted = true; + ctx->saved_data_ready(sk); } else { *zc = false; } - rxm->offset += tls_ctx->rx.prepend_size; - rxm->full_len -= tls_ctx->rx.overhead_size; - tls_advance_record_sn(sk, &tls_ctx->rx); - ctx->decrypted = true; - ctx->saved_data_ready(sk); - return err; } @@ -1609,6 +1667,26 @@ int tls_sw_recvmsg(struct sock *sk, rxm = strp_msg(skb); + to_decrypt = rxm->full_len - tls_ctx->rx.overhead_size; + + if (to_decrypt <= len && !is_kvec && !is_peek && + ctx->control == TLS_RECORD_TYPE_DATA && + tls_ctx->crypto_recv.info.version != TLS_1_3_VERSION) + zc = true; + + err = decrypt_skb_update(sk, skb, &msg->msg_iter, + &chunk, &zc, ctx->async_capable); + if (err < 0 && err != -EINPROGRESS) { + tls_err_abort(sk, EBADMSG); + goto recv_end; + } + + if (err == -EINPROGRESS) { + async = true; + num_async++; + goto pick_next_record; + } + if (!cmsg) { int cerr; @@ -1626,40 +1704,22 @@ int tls_sw_recvmsg(struct sock *sk, goto recv_end; } - to_decrypt = rxm->full_len - tls_ctx->rx.overhead_size; - - if (to_decrypt <= len && !is_kvec && !is_peek) - zc = true; - - err = decrypt_skb_update(sk, skb, &msg->msg_iter, - &chunk, &zc, ctx->async_capable); - if (err < 0 && err != -EINPROGRESS) { - tls_err_abort(sk, EBADMSG); - goto recv_end; - } - - if (err == -EINPROGRESS) { - async = true; - num_async++; - goto pick_next_record; - } else { - if (!zc) { - if (rxm->full_len > len) { - retain_skb = true; - chunk = len; - } else { - chunk = rxm->full_len; - } + if (!zc) { + if (rxm->full_len > len) { + retain_skb = true; + chunk = len; + } else { + chunk = rxm->full_len; + } - err = skb_copy_datagram_msg(skb, rxm->offset, - msg, chunk); - if (err < 0) - goto recv_end; + err = skb_copy_datagram_msg(skb, rxm->offset, + msg, chunk); + if (err < 0) + goto recv_end; - if (!is_peek) { - rxm->offset = rxm->offset + chunk; - rxm->full_len = rxm->full_len - chunk; - } + if (!is_peek) { + rxm->offset = rxm->offset + chunk; + rxm->full_len = rxm->full_len - chunk; } } @@ -1759,15 +1819,15 @@ ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos, if (!skb) goto splice_read_end; - /* splice does not support reading control messages */ - if (ctx->control != TLS_RECORD_TYPE_DATA) { - err = -ENOTSUPP; - goto splice_read_end; - } - if (!ctx->decrypted) { err = decrypt_skb_update(sk, skb, NULL, &chunk, &zc, false); + /* splice does not support reading control messages */ + if (ctx->control != TLS_RECORD_TYPE_DATA) { + err = -ENOTSUPP; + goto splice_read_end; + } + if (err < 0) { tls_err_abort(sk, EBADMSG); goto splice_read_end; @@ -1835,9 +1895,12 @@ static int 
tls_read_size(struct strparser *strp, struct sk_buff *skb) data_len = ((header[4] & 0xFF) | (header[3] << 8)); - cipher_overhead = tls_ctx->rx.tag_size + tls_ctx->rx.iv_size; + cipher_overhead = tls_ctx->rx.tag_size; + if (tls_ctx->crypto_recv.info.version != TLS_1_3_VERSION) + cipher_overhead += tls_ctx->rx.iv_size; - if (data_len > TLS_MAX_PAYLOAD_SIZE + cipher_overhead) { + if (data_len > TLS_MAX_PAYLOAD_SIZE + cipher_overhead + + tls_ctx->rx.tail_size) { ret = -EMSGSIZE; goto read_failure; } @@ -1846,12 +1909,12 @@ static int tls_read_size(struct strparser *strp, struct sk_buff *skb) goto read_failure; } - if (header[1] != TLS_VERSION_MINOR(tls_ctx->crypto_recv.info.version) || - header[2] != TLS_VERSION_MAJOR(tls_ctx->crypto_recv.info.version)) { + /* Note that both TLS1.3 and TLS1.2 use TLS_1_2 version here */ + if (header[1] != TLS_1_2_VERSION_MINOR || + header[2] != TLS_1_2_VERSION_MAJOR) { ret = -EINVAL; goto read_failure; } - #ifdef CONFIG_TLS_DEVICE handle_device_resync(strp->sk, TCP_SKB_CB(skb)->seq + rxm->offset, *(u64*)tls_ctx->rx.rec_seq); @@ -1999,6 +2062,7 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx) { struct tls_crypto_info *crypto_info; struct tls12_crypto_info_aes_gcm_128 *gcm_128_info; + struct tls12_crypto_info_aes_gcm_256 *gcm_256_info; struct tls_sw_context_tx *sw_ctx_tx = NULL; struct tls_sw_context_rx *sw_ctx_rx = NULL; struct cipher_context *cctx; @@ -2006,7 +2070,8 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx) struct strp_callbacks cb; u16 nonce_size, tag_size, iv_size, rec_seq_size; struct crypto_tfm *tfm; - char *iv, *rec_seq; + char *iv, *rec_seq, *key, *salt; + size_t keysize; int rc = 0; if (!ctx) { @@ -2067,6 +2132,24 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx) ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->rec_seq; gcm_128_info = (struct tls12_crypto_info_aes_gcm_128 *)crypto_info; + keysize = TLS_CIPHER_AES_GCM_128_KEY_SIZE; + key = gcm_128_info->key; + salt = gcm_128_info->salt; + break; + } + case TLS_CIPHER_AES_GCM_256: { + nonce_size = TLS_CIPHER_AES_GCM_256_IV_SIZE; + tag_size = TLS_CIPHER_AES_GCM_256_TAG_SIZE; + iv_size = TLS_CIPHER_AES_GCM_256_IV_SIZE; + iv = ((struct tls12_crypto_info_aes_gcm_256 *)crypto_info)->iv; + rec_seq_size = TLS_CIPHER_AES_GCM_256_REC_SEQ_SIZE; + rec_seq = + ((struct tls12_crypto_info_aes_gcm_256 *)crypto_info)->rec_seq; + gcm_256_info = + (struct tls12_crypto_info_aes_gcm_256 *)crypto_info; + keysize = TLS_CIPHER_AES_GCM_256_KEY_SIZE; + key = gcm_256_info->key; + salt = gcm_256_info->salt; break; } default: @@ -2080,9 +2163,19 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx) goto free_priv; } + if (crypto_info->version == TLS_1_3_VERSION) { + nonce_size = 0; + cctx->aad_size = TLS_HEADER_SIZE; + cctx->tail_size = 1; + } else { + cctx->aad_size = TLS_AAD_SPACE_SIZE; + cctx->tail_size = 0; + } + cctx->prepend_size = TLS_HEADER_SIZE + nonce_size; cctx->tag_size = tag_size; - cctx->overhead_size = cctx->prepend_size + cctx->tag_size; + cctx->overhead_size = cctx->prepend_size + cctx->tag_size + + cctx->tail_size; cctx->iv_size = iv_size; cctx->iv = kmalloc(iv_size + TLS_CIPHER_AES_GCM_128_SALT_SIZE, GFP_KERNEL); @@ -2090,7 +2183,8 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx) rc = -ENOMEM; goto free_priv; } - memcpy(cctx->iv, gcm_128_info->salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE); + /* Note: 128 & 256 bit salt are the same size */ + memcpy(cctx->iv, salt, 
TLS_CIPHER_AES_GCM_128_SALT_SIZE); memcpy(cctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, iv, iv_size); cctx->rec_seq_size = rec_seq_size; cctx->rec_seq = kmemdup(rec_seq, rec_seq_size, GFP_KERNEL); @@ -2110,8 +2204,8 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx) ctx->push_pending_record = tls_sw_push_pending_record; - rc = crypto_aead_setkey(*aead, gcm_128_info->key, - TLS_CIPHER_AES_GCM_128_KEY_SIZE); + rc = crypto_aead_setkey(*aead, key, keysize); + if (rc) goto free_aead; diff --git a/tools/testing/selftests/net/tls.c b/tools/testing/selftests/net/tls.c index ff68ed19c0ef..4ac50ccb3272 100644 --- a/tools/testing/selftests/net/tls.c +++ b/tools/testing/selftests/net/tls.c @@ -42,7 +42,7 @@ FIXTURE_SETUP(tls) len = sizeof(addr); memset(&tls12, 0, sizeof(tls12)); - tls12.info.version = TLS_1_2_VERSION; + tls12.info.version = TLS_1_3_VERSION; tls12.info.cipher_type = TLS_CIPHER_AES_GCM_128; addr.sin_family = AF_INET; @@ -763,4 +763,140 @@ TEST_F(tls, control_msg) EXPECT_EQ(memcmp(buf, test_str, send_len), 0); } +TEST(keysizes) { + struct tls12_crypto_info_aes_gcm_256 tls12; + struct sockaddr_in addr; + int sfd, ret, fd, cfd; + socklen_t len; + bool notls; + + notls = false; + len = sizeof(addr); + + memset(&tls12, 0, sizeof(tls12)); + tls12.info.version = TLS_1_2_VERSION; + tls12.info.cipher_type = TLS_CIPHER_AES_GCM_256; + + addr.sin_family = AF_INET; + addr.sin_addr.s_addr = htonl(INADDR_ANY); + addr.sin_port = 0; + + fd = socket(AF_INET, SOCK_STREAM, 0); + sfd = socket(AF_INET, SOCK_STREAM, 0); + + ret = bind(sfd, &addr, sizeof(addr)); + ASSERT_EQ(ret, 0); + ret = listen(sfd, 10); + ASSERT_EQ(ret, 0); + + ret = getsockname(sfd, &addr, &len); + ASSERT_EQ(ret, 0); + + ret = connect(fd, &addr, sizeof(addr)); + ASSERT_EQ(ret, 0); + + ret = setsockopt(fd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls")); + if (ret != 0) { + notls = true; + printf("Failure setting TCP_ULP, testing without tls\n"); + } + + if (!notls) { + ret = setsockopt(fd, SOL_TLS, TLS_TX, &tls12, + sizeof(tls12)); + EXPECT_EQ(ret, 0); + } + + cfd = accept(sfd, &addr, &len); + ASSERT_GE(cfd, 0); + + if (!notls) { + ret = setsockopt(cfd, IPPROTO_TCP, TCP_ULP, "tls", + sizeof("tls")); + EXPECT_EQ(ret, 0); + + ret = setsockopt(cfd, SOL_TLS, TLS_RX, &tls12, + sizeof(tls12)); + EXPECT_EQ(ret, 0); + } + + close(sfd); + close(fd); + close(cfd); +} + +TEST(tls12) { + int fd, cfd; + bool notls; + + struct tls12_crypto_info_aes_gcm_128 tls12; + struct sockaddr_in addr; + socklen_t len; + int sfd, ret; + + notls = false; + len = sizeof(addr); + + memset(&tls12, 0, sizeof(tls12)); + tls12.info.version = TLS_1_2_VERSION; + tls12.info.cipher_type = TLS_CIPHER_AES_GCM_128; + + addr.sin_family = AF_INET; + addr.sin_addr.s_addr = htonl(INADDR_ANY); + addr.sin_port = 0; + + fd = socket(AF_INET, SOCK_STREAM, 0); + sfd = socket(AF_INET, SOCK_STREAM, 0); + + ret = bind(sfd, &addr, sizeof(addr)); + ASSERT_EQ(ret, 0); + ret = listen(sfd, 10); + ASSERT_EQ(ret, 0); + + ret = getsockname(sfd, &addr, &len); + ASSERT_EQ(ret, 0); + + ret = connect(fd, &addr, sizeof(addr)); + ASSERT_EQ(ret, 0); + + ret = setsockopt(fd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls")); + if (ret != 0) { + notls = true; + printf("Failure setting TCP_ULP, testing without tls\n"); + } + + if (!notls) { + ret = setsockopt(fd, SOL_TLS, TLS_TX, &tls12, + sizeof(tls12)); + ASSERT_EQ(ret, 0); + } + + cfd = accept(sfd, &addr, &len); + ASSERT_GE(cfd, 0); + + if (!notls) { + ret = setsockopt(cfd, IPPROTO_TCP, TCP_ULP, "tls", + sizeof("tls")); + ASSERT_EQ(ret, 0); + 
+ ret = setsockopt(cfd, SOL_TLS, TLS_RX, &tls12, + sizeof(tls12)); + ASSERT_EQ(ret, 0); + } + + close(sfd); + + char const *test_str = "test_read"; + int send_len = 10; + char buf[10]; + + send_len = strlen(test_str) + 1; + EXPECT_EQ(send(fd, test_str, send_len, 0), send_len); + EXPECT_NE(recv(cfd, buf, send_len, 0), -1); + EXPECT_EQ(memcmp(buf, test_str, send_len), 0); + + close(fd); + close(cfd); +} + TEST_HARNESS_MAIN
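
For context on how an application drives the new code paths, here is a minimal userspace sketch in the spirit of the selftests above: it installs the TLS ULP and programs TLS 1.3 with AES-256-GCM on the transmit side. The helper name enable_ktls_tx and its parameters are illustrative only, not part of the patch; it assumes kernel headers that already carry the tls12_crypto_info_aes_gcm_256 and TLS_1_3_VERSION additions from include/uapi/linux/tls.h above, and that a userspace handshake has already produced the key material.

/*
 * Illustrative sketch, not kernel code: enable kTLS TX for TLS 1.3 with
 * AES-256-GCM on an already-connected, already-handshaken TCP socket.
 */
#include <string.h>
#include <sys/socket.h>
#include <netinet/tcp.h>
#include <linux/tls.h>

#ifndef TCP_ULP
#define TCP_ULP 31
#endif
#ifndef SOL_TLS
#define SOL_TLS 282
#endif

static int enable_ktls_tx(int fd, const unsigned char *key,
                          const unsigned char *iv,
                          const unsigned char *salt,
                          const unsigned char *rec_seq)
{
        struct tls12_crypto_info_aes_gcm_256 crypto = { 0 };

        crypto.info.version = TLS_1_3_VERSION;
        crypto.info.cipher_type = TLS_CIPHER_AES_GCM_256;
        /* In a real application these come from the handshake secrets. */
        memcpy(crypto.key, key, TLS_CIPHER_AES_GCM_256_KEY_SIZE);
        memcpy(crypto.iv, iv, TLS_CIPHER_AES_GCM_256_IV_SIZE);
        memcpy(crypto.salt, salt, TLS_CIPHER_AES_GCM_256_SALT_SIZE);
        memcpy(crypto.rec_seq, rec_seq, TLS_CIPHER_AES_GCM_256_REC_SEQ_SIZE);

        /* Attach the TLS ULP, then hand the TX keys to the kernel. */
        if (setsockopt(fd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls")))
                return -1;      /* e.g. kernel built without TLS support */
        if (setsockopt(fd, SOL_TLS, TLS_TX, &crypto, sizeof(crypto)))
                return -1;      /* e.g. ENOTSUPP before this patch */
        return 0;
}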
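The changes to tls_fill_prepend() and tls_make_aad() encode the main wire-format difference between the two versions: TLS 1.2 authenticates a 13-byte AAD (record sequence, real record type, version, plaintext length) and carries an explicit nonce after the record header, while TLS 1.3 authenticates only the 5-byte record header, always labels the record as application_data with the legacy 0x0303 version, and counts the appended inner content-type byte plus the GCM tag in the length field. A standalone sketch of the two AAD layouts, with made-up helper names:

/*
 * Illustrative userspace sketch of the AAD layouts produced by
 * tls_make_aad() after this patch.  Assumes AES-GCM with a 16-byte tag.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define TLS_RECORD_TYPE_DATA 23         /* application_data */
#define GCM_TAG_LEN          16

/* TLS 1.2: seq(8) || type || 0x03 0x03 || plaintext length, 13 bytes */
static size_t make_aad_tls12(uint8_t *aad, const uint8_t seq[8],
                             uint8_t type, size_t plain_len)
{
        memcpy(aad, seq, 8);
        aad[8]  = type;
        aad[9]  = 0x03;
        aad[10] = 0x03;
        aad[11] = plain_len >> 8;
        aad[12] = plain_len & 0xff;
        return 13;
}

/* TLS 1.3: the AAD is the 5-byte record header itself; its length field
 * covers the inner plaintext, the appended content-type byte and the tag,
 * and the outer type is always application_data. */
static size_t make_aad_tls13(uint8_t *aad, size_t plain_len)
{
        size_t outer_len = plain_len + 1 + GCM_TAG_LEN;

        aad[0] = TLS_RECORD_TYPE_DATA;
        aad[1] = 0x03;                  /* legacy_record_version = TLS 1.2 */
        aad[2] = 0x03;
        aad[3] = outer_len >> 8;
        aad[4] = outer_len & 0xff;
        return 5;
}

int main(void)
{
        uint8_t seq[8] = { 0 };
        uint8_t aad[13];
        size_t n;

        n = make_aad_tls12(aad, seq, TLS_RECORD_TYPE_DATA, 100);
        printf("TLS 1.2 AAD: %zu bytes, length field %d\n", n,
               (aad[11] << 8) | aad[12]);

        n = make_aad_tls13(aad, 100);
        printf("TLS 1.3 AAD: %zu bytes, length field %d\n", n,
               (aad[3] << 8) | aad[4]);
        return 0;
}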
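xor_iv_with_seq() implements the TLS 1.3 per-record nonce: the 4-byte salt and 8-byte IV from the setsockopt structure are concatenated into a 12-byte static IV, and the 8-byte big-endian record sequence number is XORed into its last eight bytes, so no explicit nonce is sent on the wire and tls_advance_record_sn() no longer increments the IV for 1.3. A small sketch, using an integer sequence number instead of the kernel's byte array:

/*
 * Illustrative sketch of the TLS 1.3 per-record nonce construction that
 * xor_iv_with_seq() performs on cctx->iv (4-byte salt + 8-byte IV).
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TLS13_IV_LEN 12

static void tls13_build_nonce(uint8_t nonce[TLS13_IV_LEN],
                              const uint8_t static_iv[TLS13_IV_LEN],
                              uint64_t seq)
{
        int i;

        memcpy(nonce, static_iv, TLS13_IV_LEN);
        /* Big-endian sequence number XORed into bytes 4..11 */
        for (i = 0; i < 8; i++)
                nonce[4 + i] ^= (uint8_t)(seq >> (56 - 8 * i));
}

int main(void)
{
        uint8_t iv[TLS13_IV_LEN] = { 0 };
        uint8_t nonce[TLS13_IV_LEN];

        tls13_build_nonce(nonce, iv, 2);        /* third record */
        printf("last nonce byte: %d\n", nonce[11]);     /* prints 2 */
        return 0;
}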
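On receive, padding_length() recovers the real TLS 1.3 record type: the decrypted payload is the application data followed by one content-type byte and optional zero padding, so the code scans backwards, starting just before the 16-byte tag (hence back = 17), until it finds the first non-zero byte. The sketch below does the same on a flat buffer already stripped of the tag; the kernel version walks the skb with skb_copy_bits() and returns -EBADMSG if the whole record turns out to be padding:

/*
 * Illustrative sketch of the backward scan padding_length() performs to
 * find the TLS 1.3 inner content type behind the zero padding.
 */
#include <stdio.h>
#include <stddef.h>

/* Returns the inner content type and stores the number of trailing bytes
 * to strip (content-type byte + padding) in *strip; returns -1 if the
 * record is all padding, which the kernel treats as -EBADMSG. */
static int tls13_inner_type(const unsigned char *plain, size_t len,
                            size_t *strip)
{
        size_t i = len;

        while (i > 0) {
                if (plain[i - 1] != 0) {
                        *strip = len - i + 1;
                        return plain[i - 1];
                }
                i--;
        }
        return -1;
}

int main(void)
{
        /* "hi" + content type 23 (application_data) + 3 bytes of padding */
        unsigned char rec[] = { 'h', 'i', 23, 0, 0, 0 };
        size_t strip;
        int type = tls13_inner_type(rec, sizeof(rec), &strip);

        printf("type=%d, payload=%zu bytes\n", type, sizeof(rec) - strip);
        return 0;
}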