1 /*
2 * Copyright 2022 The OpenSSL Project Authors. All Rights Reserved.
3 *
4 * Licensed under the Apache License 2.0 (the "License"). You may not use
5 * this file except in compliance with the License. You can obtain a copy
6 * in the file LICENSE in the source distribution or at
7 * https://www.openssl.org/source/license.html
8 */
9
10 #include <openssl/evp.h>
11 #include <openssl/core_names.h>
12 #include <openssl/rand.h>
13 #include "../../ssl_local.h"
14 #include "../record_local.h"
15 #include "recmethod_local.h"
16
/*
 * Initialise the cryptographic state for a TLSv1.0/1.1/1.2 record layer:
 * the cipher context, the MAC digest context and (optionally) record
 * compression.
 *
 * Returns OSSL_RECORD_RETURN_SUCCESS on success, OSSL_RECORD_RETURN_FATAL
 * on any failure.  On failure the partially initialised contexts hang off
 * |rl| and are presumably freed by the record layer teardown - confirm.
 *
 * NOTE(review): only EVP_DecryptInit_ex() is used below, so this looks like
 * the read (decrypt) side of the record layer - confirm against the write
 * side before reuse.
 */
static int tls1_set_crypto_state(OSSL_RECORD_LAYER *rl, int level,
                                 unsigned char *key, size_t keylen,
                                 unsigned char *iv, size_t ivlen,
                                 unsigned char *mackey, size_t mackeylen,
                                 const EVP_CIPHER *ciph,
                                 size_t taglen,
                                 int mactype,
                                 const EVP_MD *md,
                                 const SSL_COMP *comp)
{
    EVP_CIPHER_CTX *ciph_ctx;
    EVP_PKEY *mac_key;

    /* <= TLSv1.2 only has one protection level: application data */
    if (level != OSSL_RECORD_PROTECTION_LEVEL_APPLICATION)
        return OSSL_RECORD_RETURN_FATAL;

    if ((rl->enc_ctx = EVP_CIPHER_CTX_new()) == NULL) {
        RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_MALLOC_FAILURE);
        return OSSL_RECORD_RETURN_FATAL;
    }

    ciph_ctx = rl->enc_ctx;

    rl->md_ctx = EVP_MD_CTX_new();
    if (rl->md_ctx == NULL) {
        RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
        return OSSL_RECORD_RETURN_FATAL;
    }
#ifndef OPENSSL_NO_COMP
    /* Optional record-level compression (disabled in most builds) */
    if (comp != NULL) {
        rl->expand = COMP_CTX_new(comp->method);
        if (rl->expand == NULL) {
            ERR_raise(ERR_LIB_SSL, SSL_R_COMPRESSION_LIBRARY_ERROR);
            return OSSL_RECORD_RETURN_FATAL;
        }
    }
#endif

    /*
     * If we have an AEAD Cipher, then there is no separate MAC, so we can skip
     * setting up the MAC key.
     */
    if ((EVP_CIPHER_get_flags(ciph) & EVP_CIPH_FLAG_AEAD_CIPHER) == 0) {
        if (mactype == EVP_PKEY_HMAC) {
            mac_key = EVP_PKEY_new_raw_private_key_ex(rl->libctx, "HMAC",
                                                      rl->propq, mackey,
                                                      mackeylen);
        } else {
            /*
             * If its not HMAC then the only other types of MAC we support are
             * the GOST MACs, so we need to use the old style way of creating
             * a MAC key.
             */
            mac_key = EVP_PKEY_new_mac_key(mactype, NULL, mackey,
                                           (int)mackeylen);
        }
        if (mac_key == NULL
            || EVP_DigestSignInit_ex(rl->md_ctx, NULL, EVP_MD_get0_name(md),
                                     rl->libctx, rl->propq, mac_key,
                                     NULL) <= 0) {
            EVP_PKEY_free(mac_key);
            ERR_raise(ERR_LIB_SSL, ERR_R_INTERNAL_ERROR);
            return OSSL_RECORD_RETURN_FATAL;
        }
        /* md_ctx holds its own reference after DigestSignInit */
        EVP_PKEY_free(mac_key);
    }

    if (EVP_CIPHER_get_mode(ciph) == EVP_CIPH_GCM_MODE) {
        /* GCM: key first, then install the fixed (implicit) part of the IV */
        if (!EVP_DecryptInit_ex(ciph_ctx, ciph, NULL, key, NULL)
                || EVP_CIPHER_CTX_ctrl(ciph_ctx, EVP_CTRL_GCM_SET_IV_FIXED,
                                       (int)ivlen, iv) <= 0) {
            ERR_raise(ERR_LIB_SSL, ERR_R_INTERNAL_ERROR);
            return OSSL_RECORD_RETURN_FATAL;
        }
    } else if (EVP_CIPHER_get_mode(ciph) == EVP_CIPH_CCM_MODE) {
        /*
         * CCM: the IV length and tag length must be configured before the
         * key is set, hence the cipher is initialised first with no key and
         * the key is supplied in a second init call.
         */
        if (!EVP_DecryptInit_ex(ciph_ctx, ciph, NULL, NULL, NULL)
                || EVP_CIPHER_CTX_ctrl(ciph_ctx, EVP_CTRL_AEAD_SET_IVLEN, 12,
                                       NULL) <= 0
                || EVP_CIPHER_CTX_ctrl(ciph_ctx, EVP_CTRL_AEAD_SET_TAG,
                                       (int)taglen, NULL) <= 0
                || EVP_CIPHER_CTX_ctrl(ciph_ctx, EVP_CTRL_CCM_SET_IV_FIXED,
                                       (int)ivlen, iv) <= 0
                || !EVP_DecryptInit_ex(ciph_ctx, NULL, NULL, key, NULL)) {
            ERR_raise(ERR_LIB_SSL, ERR_R_INTERNAL_ERROR);
            return OSSL_RECORD_RETURN_FATAL;
        }
    } else {
        /* Non-AEAD (e.g. CBC/stream): straightforward key + IV init */
        if (!EVP_DecryptInit_ex(ciph_ctx, ciph, NULL, key, iv)) {
            ERR_raise(ERR_LIB_SSL, ERR_R_INTERNAL_ERROR);
            return OSSL_RECORD_RETURN_FATAL;
        }
    }
    /* Needed for "composite" AEADs, such as RC4-HMAC-MD5 */
    if ((EVP_CIPHER_get_flags(ciph) & EVP_CIPH_FLAG_AEAD_CIPHER) != 0
        && mackeylen != 0
        && EVP_CIPHER_CTX_ctrl(ciph_ctx, EVP_CTRL_AEAD_SET_MAC_KEY,
                               (int)mackeylen, mackey) <= 0) {
        ERR_raise(ERR_LIB_SSL, ERR_R_INTERNAL_ERROR);
        return OSSL_RECORD_RETURN_FATAL;
    }
    /* Provider-based ciphers need the extra TLS parameters installed */
    if (EVP_CIPHER_get0_provider(ciph) != NULL
            && !ossl_set_tls_provider_parameters(rl, ciph_ctx, ciph, md))
        return OSSL_RECORD_RETURN_FATAL;

    return OSSL_RECORD_RETURN_SUCCESS;
}
123
124 #define MAX_PADDING 256
125 /*-
126 * tls1_cipher encrypts/decrypts |n_recs| in |recs|. Calls RLAYERfatal on
127 * internal error, but not otherwise. It is the responsibility of the caller to
128 * report a bad_record_mac - if appropriate (DTLS just drops the record).
129 *
130 * Returns:
131 * 0: if the record is publicly invalid, or an internal error, or AEAD
132 * decryption failed, or Encrypt-then-mac decryption failed.
133 * 1: Success or Mac-then-encrypt decryption failed (MAC will be randomised)
134 */
static int tls1_cipher(OSSL_RECORD_LAYER *rl, SSL3_RECORD *recs, size_t n_recs,
                       int sending, SSL_MAC_BUF *macs, size_t macsize)
{
    EVP_CIPHER_CTX *ds;
    size_t reclen[SSL_MAX_PIPELINES];
    /* One 13-byte AAD pseudo-header per pipelined record (AEAD only) */
    unsigned char buf[SSL_MAX_PIPELINES][EVP_AEAD_TLS1_AAD_LEN];
    int i, pad = 0, tmpr, provided;
    size_t bs, ctr, padnum, loop;
    unsigned char padval;
    const EVP_CIPHER *enc;

    if (n_recs == 0) {
        RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
        return 0;
    }

    /* If a MAC digest is configured, sanity check its size is non-negative */
    if (EVP_MD_CTX_get0_md(rl->md_ctx)) {
        int n = EVP_MD_CTX_get_size(rl->md_ctx);

        if (!ossl_assert(n >= 0)) {
            RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
            return 0;
        }
    }
    ds = rl->enc_ctx;
    if (!ossl_assert(rl->enc_ctx != NULL)) {
        RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
        return 0;
    }

    enc = EVP_CIPHER_CTX_get0_cipher(rl->enc_ctx);

    if (sending) {
        int ivlen;

        /* For TLSv1.1 and later explicit IV */
        if (RLAYER_USE_EXPLICIT_IV(rl)
            && EVP_CIPHER_get_mode(enc) == EVP_CIPH_CBC_MODE)
            ivlen = EVP_CIPHER_get_iv_length(enc);
        else
            ivlen = 0;
        if (ivlen > 1) {
            /*
             * Fill the front of each record with a random explicit IV.
             * This is written in place, so data and input must alias.
             */
            for (ctr = 0; ctr < n_recs; ctr++) {
                if (recs[ctr].data != recs[ctr].input) {
                    RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
                    return 0;
                } else if (RAND_bytes_ex(rl->libctx, recs[ctr].input,
                                         ivlen, 0) <= 0) {
                    RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
                    return 0;
                }
            }
        }
    }
    if (!ossl_assert(enc != NULL)) {
        RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
        return 0;
    }

    /* Provider-backed ciphers take a different path than legacy ones below */
    provided = (EVP_CIPHER_get0_provider(enc) != NULL);

    bs = EVP_CIPHER_get_block_size(EVP_CIPHER_CTX_get0_cipher(ds));

    if (n_recs > 1) {
        if ((EVP_CIPHER_get_flags(EVP_CIPHER_CTX_get0_cipher(ds))
             & EVP_CIPH_FLAG_PIPELINE) == 0) {
            /*
             * We shouldn't have been called with pipeline data if the
             * cipher doesn't support pipelining
             */
            RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, SSL_R_PIPELINE_FAILURE);
            return 0;
        }
    }
    for (ctr = 0; ctr < n_recs; ctr++) {
        reclen[ctr] = recs[ctr].length;

        if ((EVP_CIPHER_get_flags(EVP_CIPHER_CTX_get0_cipher(ds))
             & EVP_CIPH_FLAG_AEAD_CIPHER) != 0) {
            unsigned char *seq;

            seq = rl->sequence;

            /*
             * Build the 13-byte AAD pseudo-header:
             * sequence number (8) | type (1) | version (2) | length (2).
             * DTLS uses epoch || low 48 bits of seq for the first 8 bytes.
             */
            if (rl->isdtls) {
                unsigned char dtlsseq[8], *p = dtlsseq;

                s2n(rl->epoch, p);
                memcpy(p, &seq[2], 6);
                memcpy(buf[ctr], dtlsseq, 8);
            } else {
                memcpy(buf[ctr], seq, 8);
                /* TLS increments the implicit sequence number here */
                for (i = 7; i >= 0; i--) { /* increment */
                    ++seq[i];
                    if (seq[i] != 0)
                        break;
                }
            }

            buf[ctr][8] = recs[ctr].type;
            buf[ctr][9] = (unsigned char)(rl->version >> 8);
            buf[ctr][10] = (unsigned char)(rl->version);
            buf[ctr][11] = (unsigned char)(recs[ctr].length >> 8);
            buf[ctr][12] = (unsigned char)(recs[ctr].length & 0xff);
            /*
             * Hand the AAD to the cipher; the return value is the
             * tag/nonce overhead to add to the record when sending
             */
            pad = EVP_CIPHER_CTX_ctrl(ds, EVP_CTRL_AEAD_TLS1_AAD,
                                      EVP_AEAD_TLS1_AAD_LEN, buf[ctr]);
            if (pad <= 0) {
                RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
                return 0;
            }

            if (sending) {
                reclen[ctr] += pad;
                recs[ctr].length += pad;
            }
        } else if ((bs != 1) && sending && !provided) {
            /*
             * We only do this for legacy ciphers. Provided ciphers add the
             * padding on the provider side.
             */
            padnum = bs - (reclen[ctr] % bs);

            /* Add weird padding of up to 256 bytes */

            if (padnum > MAX_PADDING) {
                RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
                return 0;
            }
            /* we need to add 'padnum' padding bytes of value padval */
            padval = (unsigned char)(padnum - 1);
            for (loop = reclen[ctr]; loop < reclen[ctr] + padnum; loop++)
                recs[ctr].input[loop] = padval;
            reclen[ctr] += padnum;
            recs[ctr].length += padnum;
        }

        if (!sending) {
            if (reclen[ctr] == 0 || reclen[ctr] % bs != 0) {
                /* Publicly invalid */
                return 0;
            }
        }
    }
    if (n_recs > 1) {
        unsigned char *data[SSL_MAX_PIPELINES];

        /* Set the output buffers */
        for (ctr = 0; ctr < n_recs; ctr++)
            data[ctr] = recs[ctr].data;

        if (EVP_CIPHER_CTX_ctrl(ds, EVP_CTRL_SET_PIPELINE_OUTPUT_BUFS,
                                (int)n_recs, data) <= 0) {
            RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, SSL_R_PIPELINE_FAILURE);
            return 0;
        }
        /* Set the input buffers */
        for (ctr = 0; ctr < n_recs; ctr++)
            data[ctr] = recs[ctr].input;

        if (EVP_CIPHER_CTX_ctrl(ds, EVP_CTRL_SET_PIPELINE_INPUT_BUFS,
                                (int)n_recs, data) <= 0
            || EVP_CIPHER_CTX_ctrl(ds, EVP_CTRL_SET_PIPELINE_INPUT_LENS,
                                   (int)n_recs, reclen) <= 0) {
            RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, SSL_R_PIPELINE_FAILURE);
            return 0;
        }
    }

    /* Sequence-dependent key derivation (EVP_CTRL_TLSTREE, GOST suites) */
    if (!rl->isdtls && rl->tlstree) {
        unsigned char *seq;
        int decrement_seq = 0;

        /*
         * When sending, seq is incremented after MAC calculation.
         * So if we are in ETM mode, we use seq 'as is' in the ctrl-function.
         * Otherwise we have to decrease it in the implementation
         */
        if (sending && !rl->use_etm)
            decrement_seq = 1;

        seq = rl->sequence;
        if (EVP_CIPHER_CTX_ctrl(ds, EVP_CTRL_TLSTREE, decrement_seq, seq) <= 0) {
            RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
            return 0;
        }
    }

    if (provided) {
        int outlen;

        /* Provided cipher - we do not support pipelining on this path */
        if (n_recs > 1) {
            RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
            return 0;
        }

        /* The provider performs the full TLS record protection in one call */
        if (!EVP_CipherUpdate(ds, recs[0].data, &outlen, recs[0].input,
                              (unsigned int)reclen[0]))
            return 0;
        recs[0].length = outlen;

        /*
         * The length returned from EVP_CipherUpdate above is the actual
         * payload length. We need to adjust the data/input ptr to skip over
         * any explicit IV
         */
        if (!sending) {
            if (EVP_CIPHER_get_mode(enc) == EVP_CIPH_GCM_MODE) {
                recs[0].data += EVP_GCM_TLS_EXPLICIT_IV_LEN;
                recs[0].input += EVP_GCM_TLS_EXPLICIT_IV_LEN;
            } else if (EVP_CIPHER_get_mode(enc) == EVP_CIPH_CCM_MODE) {
                recs[0].data += EVP_CCM_TLS_EXPLICIT_IV_LEN;
                recs[0].input += EVP_CCM_TLS_EXPLICIT_IV_LEN;
            } else if (bs != 1 && RLAYER_USE_EXPLICIT_IV(rl)) {
                /* CBC explicit IV occupies one cipher block */
                recs[0].data += bs;
                recs[0].input += bs;
                recs[0].orig_len -= bs;
            }

            /* Now get a pointer to the MAC (if applicable) */
            if (macs != NULL) {
                OSSL_PARAM params[2], *p = params;

                /* Get the MAC */
                macs[0].alloced = 0;

                *p++ = OSSL_PARAM_construct_octet_ptr(OSSL_CIPHER_PARAM_TLS_MAC,
                                                      (void **)&macs[0].mac,
                                                      macsize);
                *p = OSSL_PARAM_construct_end();

                if (!EVP_CIPHER_CTX_get_params(ds, params)) {
                    /* Shouldn't normally happen */
                    RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR,
                                ERR_R_INTERNAL_ERROR);
                    return 0;
                }
            }
        }
    } else {
        /* Legacy cipher */

        tmpr = EVP_Cipher(ds, recs[0].data, recs[0].input,
                          (unsigned int)reclen[0]);
        /*
         * Custom ciphers signal failure with a negative return; all others
         * return 0 on failure
         */
        if ((EVP_CIPHER_get_flags(EVP_CIPHER_CTX_get0_cipher(ds))
             & EVP_CIPH_FLAG_CUSTOM_CIPHER) != 0
            ? (tmpr < 0)
            : (tmpr == 0)) {
            /* AEAD can fail to verify MAC */
            return 0;
        }

        if (!sending) {
            for (ctr = 0; ctr < n_recs; ctr++) {
                /* Adjust the record to remove the explicit IV/MAC/Tag */
                if (EVP_CIPHER_get_mode(enc) == EVP_CIPH_GCM_MODE) {
                    recs[ctr].data += EVP_GCM_TLS_EXPLICIT_IV_LEN;
                    recs[ctr].input += EVP_GCM_TLS_EXPLICIT_IV_LEN;
                    recs[ctr].length -= EVP_GCM_TLS_EXPLICIT_IV_LEN;
                } else if (EVP_CIPHER_get_mode(enc) == EVP_CIPH_CCM_MODE) {
                    recs[ctr].data += EVP_CCM_TLS_EXPLICIT_IV_LEN;
                    recs[ctr].input += EVP_CCM_TLS_EXPLICIT_IV_LEN;
                    recs[ctr].length -= EVP_CCM_TLS_EXPLICIT_IV_LEN;
                } else if (bs != 1 && RLAYER_USE_EXPLICIT_IV(rl)) {
                    if (recs[ctr].length < bs)
                        return 0;
                    recs[ctr].data += bs;
                    recs[ctr].input += bs;
                    recs[ctr].length -= bs;
                    recs[ctr].orig_len -= bs;
                }

                /*
                 * If using Mac-then-encrypt, then this will succeed but
                 * with a random MAC if padding is invalid
                 */
                if (!tls1_cbc_remove_padding_and_mac(&recs[ctr].length,
                                                     recs[ctr].orig_len,
                                                     recs[ctr].data,
                                                     (macs != NULL) ? &macs[ctr].mac : NULL,
                                                     (macs != NULL) ? &macs[ctr].alloced
                                                                    : NULL,
                                                     bs,
                                                     pad ? (size_t)pad : macsize,
                                                     (EVP_CIPHER_get_flags(enc)
                                                      & EVP_CIPH_FLAG_AEAD_CIPHER) != 0,
                                                     rl->libctx))
                    return 0;
            }
        }
    }
    return 1;
}
427
/*
 * Compute the record MAC for |rec| into |md|, covering the 13-byte TLS/DTLS
 * pseudo-header (sequence, type, version, length) followed by the record
 * payload.  Used both when sending (generate) and receiving (verify against).
 * For TLS (not DTLS) the implicit sequence number is incremented on success.
 *
 * Returns 1 on success, 0 on failure.
 */
static int tls1_mac(OSSL_RECORD_LAYER *rl, SSL3_RECORD *rec, unsigned char *md,
                    int sending)
{
    unsigned char *seq = rl->sequence;
    EVP_MD_CTX *hash;
    size_t md_size;
    int i;
    EVP_MD_CTX *hmac = NULL, *mac_ctx;
    unsigned char header[13];
    int t;
    int ret = 0;

    hash = rl->md_ctx;

    t = EVP_MD_CTX_get_size(hash);
    if (!ossl_assert(t >= 0))
        return 0;
    md_size = t;

    if (rl->stream_mac) {
        /* Stream MAC: keep updating the single running context */
        mac_ctx = hash;
    } else {
        /* Normal case: work on a per-record copy of the keyed MAC context */
        hmac = EVP_MD_CTX_new();
        if (hmac == NULL || !EVP_MD_CTX_copy(hmac, hash)) {
            goto end;
        }
        mac_ctx = hmac;
    }

    /* Sequence-dependent MAC key derivation (EVP_MD_CTRL_TLSTREE) */
    if (!rl->isdtls
            && rl->tlstree
            && EVP_MD_CTX_ctrl(mac_ctx, EVP_MD_CTRL_TLSTREE, 0, seq) <= 0)
        goto end;

    /* DTLS pseudo-header starts with epoch || low 48 bits of seq */
    if (rl->isdtls) {
        unsigned char dtlsseq[8], *p = dtlsseq;

        s2n(rl->epoch, p);
        memcpy(p, &seq[2], 6);

        memcpy(header, dtlsseq, 8);
    } else {
        memcpy(header, seq, 8);
    }

    /* seq(8) | type(1) | version(2) | length(2) */
    header[8] = rec->type;
    header[9] = (unsigned char)(rl->version >> 8);
    header[10] = (unsigned char)(rl->version);
    header[11] = (unsigned char)(rec->length >> 8);
    header[12] = (unsigned char)(rec->length & 0xff);

    /*
     * MAC-then-encrypt CBC receive path: tell the MAC the full record size
     * so it can process the data without leaking the padding length through
     * timing (see ssl3_cbc_record_digest_supported()).
     */
    if (!sending && !rl->use_etm
        && EVP_CIPHER_CTX_get_mode(rl->enc_ctx) == EVP_CIPH_CBC_MODE
        && ssl3_cbc_record_digest_supported(mac_ctx)) {
        OSSL_PARAM tls_hmac_params[2], *p = tls_hmac_params;

        *p++ = OSSL_PARAM_construct_size_t(OSSL_MAC_PARAM_TLS_DATA_SIZE,
                                           &rec->orig_len);
        *p++ = OSSL_PARAM_construct_end();

        if (!EVP_PKEY_CTX_set_params(EVP_MD_CTX_get_pkey_ctx(mac_ctx),
                                     tls_hmac_params))
            goto end;
    }

    if (EVP_DigestSignUpdate(mac_ctx, header, sizeof(header)) <= 0
        || EVP_DigestSignUpdate(mac_ctx, rec->input, rec->length) <= 0
        || EVP_DigestSignFinal(mac_ctx, md, &md_size) <= 0)
        goto end;

    OSSL_TRACE_BEGIN(TLS) {
        BIO_printf(trc_out, "seq:\n");
        BIO_dump_indent(trc_out, seq, 8, 4);
        BIO_printf(trc_out, "rec:\n");
        BIO_dump_indent(trc_out, rec->data, rec->length, 4);
    } OSSL_TRACE_END(TLS);

    /* TLS increments the implicit sequence here; DTLS seq is managed elsewhere */
    if (!rl->isdtls) {
        for (i = 7; i >= 0; i--) {
            ++seq[i];
            if (seq[i] != 0)
                break;
        }
    }
    OSSL_TRACE_BEGIN(TLS) {
        BIO_printf(trc_out, "md:\n");
        BIO_dump_indent(trc_out, md, md_size, 4);
    } OSSL_TRACE_END(TLS);
    ret = 1;
 end:
    EVP_MD_CTX_free(hmac);
    return ret;
}
521
/* TLSv1.0, TLSv1.1 and TLSv1.2 all use the same funcs */
/*
 * Method table consumed by the generic record layer.  Entry order must
 * match the fields of struct record_functions_st (recmethod_local.h).
 */
struct record_functions_st tls_1_funcs = {
    tls1_set_crypto_state,              /* initialise cipher/MAC/comp state */
    tls_default_read_n,                 /* low-level read from the transport */
    tls_get_more_records,               /* assemble records from read data */
    tls1_cipher,                        /* encrypt/decrypt records */
    tls1_mac,                           /* compute/verify record MAC */
    tls_default_set_protocol_version,
    tls_default_validate_record_header,
    tls_default_post_process_record
};
533
/*
 * DTLS variant: shares the TLSv1.x crypto functions but uses the DTLS
 * record assembly.  The last two hooks are NULL - DTLS presumably performs
 * header validation/post-processing within dtls_get_more_records; confirm
 * against the generic record layer's NULL handling.
 */
struct record_functions_st dtls_1_funcs = {
    tls1_set_crypto_state,              /* initialise cipher/MAC/comp state */
    tls_default_read_n,                 /* low-level read from the transport */
    dtls_get_more_records,              /* DTLS-specific record assembly */
    tls1_cipher,                        /* encrypt/decrypt records */
    tls1_mac,                           /* compute/verify record MAC */
    tls_default_set_protocol_version,
    NULL,
    NULL
};
544