/*
 * Copyright 2022-2024 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the Apache License 2.0 (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

#include "internal/quic_record_tx.h"
#include "internal/qlog_event_helpers.h"
#include "internal/bio_addr.h"
#include "internal/common.h"
#include "quic_record_shared.h"
#include "internal/list.h"
#include "../ssl_local.h"

/*
 * TXE
 * ===
 * Encrypted packets awaiting transmission are kept in TX Entries (TXEs), which
 * are queued in linked lists just like RXEs on the receive side.
 */
typedef struct txe_st TXE;

struct txe_st {
    OSSL_LIST_MEMBER(txe, TXE);
    size_t data_len, alloc_len;

    /*
     * Destination and local addresses, as applicable. Both of these are only
     * used if the family is not AF_UNSPEC.
     */
    BIO_ADDR peer, local;

    /*
     * alloc_len allocated bytes (of which data_len bytes are valid) follow
     * this structure.
     */
};

DEFINE_LIST_OF(txe, TXE);
typedef OSSL_LIST(txe) TXE_LIST;

static ossl_inline unsigned char *txe_data(const TXE *e)
{
    return (unsigned char *)(e + 1);
}
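
/*
 * Illustrative memory layout of a single TXE allocation (a sketch, not a
 * structure defined here): the TXE header is immediately followed by
 * alloc_len bytes of packet data, of which the first data_len bytes are
 * valid; txe_data(txe) points at the first of those bytes.
 *
 * A hypothetical append into the buffer therefore looks like:
 *
 *   unsigned char *p = txe_data(txe) + txe->data_len;
 *   memcpy(p, src, src_len);     // src/src_len supplied by the caller
 *   txe->data_len += src_len;    // must never exceed txe->alloc_len
 */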

/*
 * QTX
 * ===
 */
struct ossl_qtx_st {
    OSSL_LIB_CTX *libctx;
    const char *propq;

    /* Per encryption-level state. */
    OSSL_QRL_ENC_LEVEL_SET el_set;

    /* TX BIO. */
    BIO *bio;

    /* QLOG instance retrieval callback if in use, or NULL. */
    QLOG *(*get_qlog_cb)(void *arg);
    void *get_qlog_cb_arg;

    /* TX maximum datagram payload length. */
    size_t mdpl;

    /*
     * List of TXEs which are not currently in use. These are moved to the
     * pending list (possibly via the cons TXE first) as they are filled.
     */
    TXE_LIST free;

    /*
     * List of TXEs which are filled with completed datagrams ready to be
     * transmitted.
     */
    TXE_LIST pending;
    size_t pending_count; /* items in list */
    size_t pending_bytes; /* sum(txe->data_len) in pending */

    /*
     * TXE which is under construction for coalescing purposes, if any.
     * This TXE is neither on the free nor the pending list. Once the datagram
     * is completed, it is moved to the pending list.
     */
    TXE *cons;
    size_t cons_count; /* num packets */

    /*
     * Number of packets transmitted in this key epoch. Used to enforce the
     * AEAD confidentiality limit.
     */
    uint64_t epoch_pkt_count;

    /* Datagram counter. Increases monotonically per datagram (not per packet). */
    uint64_t datagram_count;

    ossl_mutate_packet_cb mutatecb;
    ossl_finish_mutate_cb finishmutatecb;
    void *mutatearg;

    /* Message callback related arguments. */
    ossl_msg_cb msg_callback;
    void *msg_callback_arg;
    SSL *msg_callback_ssl;
};

/* Instantiates a new QTX. */
OSSL_QTX *ossl_qtx_new(const OSSL_QTX_ARGS *args)
{
    OSSL_QTX *qtx;

    if (args->mdpl < QUIC_MIN_INITIAL_DGRAM_LEN)
        return 0;

    qtx = OPENSSL_zalloc(sizeof(OSSL_QTX));
    if (qtx == NULL)
        return 0;

    qtx->libctx = args->libctx;
    qtx->propq = args->propq;
    qtx->bio = args->bio;
    qtx->mdpl = args->mdpl;
    qtx->get_qlog_cb = args->get_qlog_cb;
    qtx->get_qlog_cb_arg = args->get_qlog_cb_arg;

    return qtx;
}
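
/*
 * Minimal usage sketch (illustrative only; error handling elided). Only the
 * OSSL_QTX_ARGS fields consumed above are shown; the rest of the structure
 * is left zero-initialised.
 *
 *   OSSL_QTX_ARGS args = {0};
 *   OSSL_QTX *qtx;
 *
 *   args.libctx = libctx;                     // NULL selects the default libctx
 *   args.bio    = dgram_bio;                  // datagram BIO to transmit on
 *   args.mdpl   = QUIC_MIN_INITIAL_DGRAM_LEN;
 *
 *   qtx = ossl_qtx_new(&args);
 *   ...
 *   ossl_qtx_free(qtx);
 */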

static void qtx_cleanup_txl(TXE_LIST *l)
{
    TXE *e, *enext;

    for (e = ossl_list_txe_head(l); e != NULL; e = enext) {
        enext = ossl_list_txe_next(e);
        OPENSSL_free(e);
    }
}

/* Frees the QTX. */
void ossl_qtx_free(OSSL_QTX *qtx)
{
    uint32_t i;

    if (qtx == NULL)
        return;

    /* Free TXE queue data. */
    qtx_cleanup_txl(&qtx->pending);
    qtx_cleanup_txl(&qtx->free);
    OPENSSL_free(qtx->cons);

    /* Drop keying material and crypto resources. */
    for (i = 0; i < QUIC_ENC_LEVEL_NUM; ++i)
        ossl_qrl_enc_level_set_discard(&qtx->el_set, i);

    OPENSSL_free(qtx);
}

/* Set mutator callbacks for test framework support. */
void ossl_qtx_set_mutator(OSSL_QTX *qtx, ossl_mutate_packet_cb mutatecb,
                          ossl_finish_mutate_cb finishmutatecb, void *mutatearg)
{
    qtx->mutatecb = mutatecb;
    qtx->finishmutatecb = finishmutatecb;
    qtx->mutatearg = mutatearg;
}

void ossl_qtx_set_qlog_cb(OSSL_QTX *qtx, QLOG *(*get_qlog_cb)(void *arg),
                          void *get_qlog_cb_arg)
{
    qtx->get_qlog_cb = get_qlog_cb;
    qtx->get_qlog_cb_arg = get_qlog_cb_arg;
}

int ossl_qtx_provide_secret(OSSL_QTX *qtx,
                            uint32_t enc_level,
                            uint32_t suite_id,
                            EVP_MD *md,
                            const unsigned char *secret,
                            size_t secret_len)
{
    if (enc_level >= QUIC_ENC_LEVEL_NUM)
        return 0;

    return ossl_qrl_enc_level_set_provide_secret(&qtx->el_set,
                                                 qtx->libctx,
                                                 qtx->propq,
                                                 enc_level,
                                                 suite_id,
                                                 md,
                                                 secret,
                                                 secret_len,
                                                 0,
                                                 /*is_tx=*/1);
}

int ossl_qtx_discard_enc_level(OSSL_QTX *qtx, uint32_t enc_level)
{
    if (enc_level >= QUIC_ENC_LEVEL_NUM)
        return 0;

    ossl_qrl_enc_level_set_discard(&qtx->el_set, enc_level);
    return 1;
}

int ossl_qtx_is_enc_level_provisioned(OSSL_QTX *qtx, uint32_t enc_level)
{
    return ossl_qrl_enc_level_set_get(&qtx->el_set, enc_level, 1) != NULL;
}

/* Allocate a new TXE. */
static TXE *qtx_alloc_txe(size_t alloc_len)
{
    TXE *txe;

    if (alloc_len >= SIZE_MAX - sizeof(TXE))
        return NULL;

    txe = OPENSSL_malloc(sizeof(TXE) + alloc_len);
    if (txe == NULL)
        return NULL;

    ossl_list_txe_init_elem(txe);
    txe->alloc_len = alloc_len;
    txe->data_len = 0;
    return txe;
}

/*
 * Ensures there is at least one TXE in the free list, allocating a new entry
 * if necessary. The returned TXE is in the free list; it is not popped.
 *
 * alloc_len is a hint which may be used to determine the TXE size if allocation
 * is necessary. Returns NULL on allocation failure.
 */
static TXE *qtx_ensure_free_txe(OSSL_QTX *qtx, size_t alloc_len)
{
    TXE *txe;

    txe = ossl_list_txe_head(&qtx->free);
    if (txe != NULL)
        return txe;

    txe = qtx_alloc_txe(alloc_len);
    if (txe == NULL)
        return NULL;

    ossl_list_txe_insert_tail(&qtx->free, txe);
    return txe;
}

/*
 * Resize the data buffer attached to a TXE to be n bytes in size. The address
 * of the TXE might change; the new address is returned, or NULL on failure, in
 * which case the original TXE remains valid.
 */
static TXE *qtx_resize_txe(OSSL_QTX *qtx, TXE_LIST *txl, TXE *txe, size_t n)
{
    TXE *txe2, *p;

    /* Should never happen. */
    if (txe == NULL)
        return NULL;

    if (n >= SIZE_MAX - sizeof(TXE))
        return NULL;

    /* Remove the item from the list to avoid accessing freed memory. */
    p = ossl_list_txe_prev(txe);
    ossl_list_txe_remove(txl, txe);

    /*
     * NOTE: We do not clear old memory, although it does contain decrypted
     * data.
     */
    txe2 = OPENSSL_realloc(txe, sizeof(TXE) + n);
    if (txe2 == NULL || txe == txe2) {
        if (p == NULL)
            ossl_list_txe_insert_head(txl, txe);
        else
            ossl_list_txe_insert_after(txl, p, txe);
        return txe2;
    }

    if (p == NULL)
        ossl_list_txe_insert_head(txl, txe2);
    else
        ossl_list_txe_insert_after(txl, p, txe2);

    if (qtx->cons == txe)
        qtx->cons = txe2;

    txe2->alloc_len = n;
    return txe2;
}

/*
 * Ensure the data buffer attached to a TXE is at least n bytes in size.
 * Returns NULL on failure.
 */
static TXE *qtx_reserve_txe(OSSL_QTX *qtx, TXE_LIST *txl,
                            TXE *txe, size_t n)
{
    if (txe->alloc_len >= n)
        return txe;

    return qtx_resize_txe(qtx, txl, txe, n);
}

/* Move a TXE from pending to free. */
static void qtx_pending_to_free(OSSL_QTX *qtx)
{
    TXE *txe = ossl_list_txe_head(&qtx->pending);

    assert(txe != NULL);
    ossl_list_txe_remove(&qtx->pending, txe);
    --qtx->pending_count;
    qtx->pending_bytes -= txe->data_len;
    ossl_list_txe_insert_tail(&qtx->free, txe);
}

/* Add a TXE not currently in any list to the pending list. */
static void qtx_add_to_pending(OSSL_QTX *qtx, TXE *txe)
{
    ossl_list_txe_insert_tail(&qtx->pending, txe);
    ++qtx->pending_count;
    qtx->pending_bytes += txe->data_len;
}

struct iovec_cur {
    const OSSL_QTX_IOVEC *iovec;
    size_t num_iovec, idx, byte_off, bytes_remaining;
};

static size_t iovec_total_bytes(const OSSL_QTX_IOVEC *iovec,
                                size_t num_iovec)
{
    size_t i, l = 0;

    for (i = 0; i < num_iovec; ++i)
        l += iovec[i].buf_len;

    return l;
}

static void iovec_cur_init(struct iovec_cur *cur,
                           const OSSL_QTX_IOVEC *iovec,
                           size_t num_iovec)
{
    cur->iovec = iovec;
    cur->num_iovec = num_iovec;
    cur->idx = 0;
    cur->byte_off = 0;
    cur->bytes_remaining = iovec_total_bytes(iovec, num_iovec);
}

/*
 * Get an extent of bytes from the iovec cursor. *buf is set to point to the
 * buffer and the length of that buffer in bytes is returned. This value may
 * be less than the max_buf_len argument. If no more data is available,
 * returns 0.
 */
static size_t iovec_cur_get_buffer(struct iovec_cur *cur,
                                   const unsigned char **buf,
                                   size_t max_buf_len)
{
    size_t l;

    if (max_buf_len == 0) {
        *buf = NULL;
        return 0;
    }

    for (;;) {
        if (cur->idx >= cur->num_iovec)
            return 0;

        l = cur->iovec[cur->idx].buf_len - cur->byte_off;
        if (l > max_buf_len)
            l = max_buf_len;

        if (l > 0) {
            *buf = cur->iovec[cur->idx].buf + cur->byte_off;
            cur->byte_off += l;
            cur->bytes_remaining -= l;
            return l;
        }

        /*
         * Zero-length iovec entry, or we have already consumed all of it;
         * try the next iovec.
         */
        ++cur->idx;
        cur->byte_off = 0;
    }
}
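
/*
 * Typical consumption pattern for the cursor (a sketch of how it is used by
 * the copy and encrypt paths later in this file):
 *
 *   struct iovec_cur cur;
 *   const unsigned char *src;
 *   size_t src_len;
 *
 *   iovec_cur_init(&cur, iovec, num_iovec);
 *   while ((src_len = iovec_cur_get_buffer(&cur, &src, SIZE_MAX)) > 0) {
 *       ... consume src_len bytes starting at src ...
 *   }
 */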

/* Determines the size of the AEAD output given the input size. */
int ossl_qtx_calculate_ciphertext_payload_len(OSSL_QTX *qtx, uint32_t enc_level,
                                              size_t plaintext_len,
                                              size_t *ciphertext_len)
{
    OSSL_QRL_ENC_LEVEL *el
        = ossl_qrl_enc_level_set_get(&qtx->el_set, enc_level, 1);
    size_t tag_len;

    if (el == NULL) {
        *ciphertext_len = 0;
        return 0;
    }

    /*
     * We currently only support ciphers with a 1:1 mapping between plaintext
     * and ciphertext size, save for the authentication tag.
     */
    tag_len = ossl_qrl_get_suite_cipher_tag_len(el->suite_id);

    *ciphertext_len = plaintext_len + tag_len;
    return 1;
}
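
/*
 * Worked example (illustrative): the QUIC v1 AEAD suites all use a 16-byte
 * tag, so for such a suite a 1184-byte plaintext payload yields a 1200-byte
 * ciphertext payload. The function below performs the inverse calculation.
 */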

/* Determines the size of the AEAD input given the output size. */
int ossl_qtx_calculate_plaintext_payload_len(OSSL_QTX *qtx, uint32_t enc_level,
                                             size_t ciphertext_len,
                                             size_t *plaintext_len)
{
    OSSL_QRL_ENC_LEVEL *el
        = ossl_qrl_enc_level_set_get(&qtx->el_set, enc_level, 1);
    size_t tag_len;

    if (el == NULL) {
        *plaintext_len = 0;
        return 0;
    }

    tag_len = ossl_qrl_get_suite_cipher_tag_len(el->suite_id);

    if (ciphertext_len <= tag_len) {
        *plaintext_len = 0;
        return 0;
    }

    *plaintext_len = ciphertext_len - tag_len;
    return 1;
}

/* Any other error (including the packet being too big for the MDPL). */
#define QTX_FAIL_GENERIC            (-1)

/*
 * Returned where there is insufficient room in the datagram to write the
 * packet.
 */
#define QTX_FAIL_INSUFFICIENT_LEN   (-2)

static int qtx_write_hdr(OSSL_QTX *qtx, const QUIC_PKT_HDR *hdr, TXE *txe,
                         QUIC_PKT_HDR_PTRS *ptrs)
{
    WPACKET wpkt;
    size_t l = 0;
    unsigned char *data = txe_data(txe) + txe->data_len;

    if (!WPACKET_init_static_len(&wpkt, data, txe->alloc_len - txe->data_len, 0))
        return 0;

    if (!ossl_quic_wire_encode_pkt_hdr(&wpkt, hdr->dst_conn_id.id_len,
                                       hdr, ptrs)
        || !WPACKET_get_total_written(&wpkt, &l)) {
        WPACKET_finish(&wpkt);
        return 0;
    }
    WPACKET_finish(&wpkt);

    if (qtx->msg_callback != NULL)
        qtx->msg_callback(1, OSSL_QUIC1_VERSION, SSL3_RT_QUIC_PACKET, data, l,
                          qtx->msg_callback_ssl, qtx->msg_callback_arg);

    txe->data_len += l;

    return 1;
}

static int qtx_encrypt_into_txe(OSSL_QTX *qtx, struct iovec_cur *cur, TXE *txe,
                                uint32_t enc_level, QUIC_PN pn,
                                const unsigned char *hdr, size_t hdr_len,
                                QUIC_PKT_HDR_PTRS *ptrs)
{
    int l = 0, l2 = 0, nonce_len;
    OSSL_QRL_ENC_LEVEL *el
        = ossl_qrl_enc_level_set_get(&qtx->el_set, enc_level, 1);
    unsigned char nonce[EVP_MAX_IV_LENGTH];
    size_t i;
    EVP_CIPHER_CTX *cctx = NULL;

    /* We should not have been called if we do not have key material. */
    if (!ossl_assert(el != NULL)) {
        ERR_raise(ERR_LIB_SSL, ERR_R_INTERNAL_ERROR);
        return 0;
    }

    /*
     * Have we already encrypted the maximum number of packets using the
     * current key?
     */
    if (el->op_count >= ossl_qrl_get_suite_max_pkt(el->suite_id)) {
        ERR_raise(ERR_LIB_SSL, SSL_R_MAXIMUM_ENCRYPTED_PKTS_REACHED);
        return 0;
    }

    /*
     * TX key update is simpler than for RX; once we initiate a key update, we
     * never need the old keys, as we never deliberately send a packet with old
     * keys. Thus the EL always uses keyslot 0 for the TX side.
     */
    cctx = el->cctx[0];
    if (!ossl_assert(cctx != NULL)) {
        ERR_raise(ERR_LIB_SSL, ERR_R_INTERNAL_ERROR);
        return 0;
    }

    /* Construct the nonce (nonce = IV ^ PN). */
    nonce_len = EVP_CIPHER_CTX_get_iv_length(cctx);
    if (!ossl_assert(nonce_len >= (int)sizeof(QUIC_PN))) {
        ERR_raise(ERR_LIB_SSL, ERR_R_INTERNAL_ERROR);
        return 0;
    }

    memcpy(nonce, el->iv[0], (size_t)nonce_len);
    for (i = 0; i < sizeof(QUIC_PN); ++i)
        nonce[nonce_len - i - 1] ^= (unsigned char)(pn >> (i * 8));
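
    /*
     * Illustrative example: with a 12-byte IV and pn == 0x1234, only the
     * last sizeof(QUIC_PN) == 8 bytes of the IV are XORed in, so the final
     * two nonce bytes become iv[10] ^ 0x12 and iv[11] ^ 0x34 while the rest
     * of the nonce equals the IV. This matches the RFC 9001 AEAD nonce
     * construction (the packet number is left-padded to the IV length
     * before the XOR).
     */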

    /* The cipher type and key will already have been set up; feed the IV. */
    if (EVP_CipherInit_ex(cctx, NULL, NULL, NULL, nonce, /*enc=*/1) != 1) {
        ERR_raise(ERR_LIB_SSL, ERR_R_EVP_LIB);
        return 0;
    }

    /* Feed AAD data. */
    if (EVP_CipherUpdate(cctx, NULL, &l, hdr, hdr_len) != 1) {
        ERR_raise(ERR_LIB_SSL, ERR_R_EVP_LIB);
        return 0;
    }

    /* Encrypt plaintext directly into the TXE. */
    for (;;) {
        const unsigned char *src;
        size_t src_len;

        src_len = iovec_cur_get_buffer(cur, &src, SIZE_MAX);
        if (src_len == 0)
            break;

        if (EVP_CipherUpdate(cctx, txe_data(txe) + txe->data_len,
                             &l, src, src_len) != 1) {
            ERR_raise(ERR_LIB_SSL, ERR_R_EVP_LIB);
            return 0;
        }

#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
        /* Ignore what we just encrypted and overwrite it with the plaintext. */
        memcpy(txe_data(txe) + txe->data_len, src, l);
#endif

        assert(l > 0 && src_len == (size_t)l);
        txe->data_len += src_len;
    }

    /* Finalise and get the tag. */
    if (EVP_CipherFinal_ex(cctx, NULL, &l2) != 1) {
        ERR_raise(ERR_LIB_SSL, ERR_R_EVP_LIB);
        return 0;
    }

    if (EVP_CIPHER_CTX_ctrl(cctx, EVP_CTRL_AEAD_GET_TAG,
                            el->tag_len, txe_data(txe) + txe->data_len) != 1) {
        ERR_raise(ERR_LIB_SSL, ERR_R_EVP_LIB);
        return 0;
    }

    txe->data_len += el->tag_len;

    /* Apply header protection. */
    if (!ossl_quic_hdr_protector_encrypt(&el->hpr, ptrs))
        return 0;

    ++el->op_count;
    return 1;
}

/*
 * Append a packet to the TXE buffer, serializing and encrypting it in the
 * process.
 */
static int qtx_write(OSSL_QTX *qtx, const OSSL_QTX_PKT *pkt, TXE *txe,
                     uint32_t enc_level, QUIC_PKT_HDR *hdr,
                     const OSSL_QTX_IOVEC *iovec, size_t num_iovec)
{
    int ret, needs_encrypt;
    size_t hdr_len, pred_hdr_len, payload_len, pkt_len, space_left;
    size_t min_len, orig_data_len;
    struct iovec_cur cur;
    QUIC_PKT_HDR_PTRS ptrs;
    unsigned char *hdr_start;
    OSSL_QRL_ENC_LEVEL *el = NULL;

    /*
     * Determine if the packet needs encryption and the minimum conceivable
     * serialization length.
     */
    if (!ossl_quic_pkt_type_is_encrypted(hdr->type)) {
        needs_encrypt = 0;
        min_len = QUIC_MIN_VALID_PKT_LEN;
    } else {
        needs_encrypt = 1;
        min_len = QUIC_MIN_VALID_PKT_LEN_CRYPTO;
        el = ossl_qrl_enc_level_set_get(&qtx->el_set, enc_level, 1);
        if (!ossl_assert(el != NULL)) /* should already have been checked */
            return 0;
    }

    orig_data_len = txe->data_len;
    space_left = txe->alloc_len - txe->data_len;
    if (space_left < min_len) {
        /* Not even a possibility of it fitting. */
        ret = QTX_FAIL_INSUFFICIENT_LEN;
        goto err;
    }

    /* Set some fields in the header we are responsible for. */
    if (hdr->type == QUIC_PKT_TYPE_1RTT)
        hdr->key_phase = (unsigned char)(el->key_epoch & 1);

    /* Walk the iovecs to determine the actual input payload length. */
    iovec_cur_init(&cur, iovec, num_iovec);

    if (cur.bytes_remaining == 0) {
        /* No zero-length payloads allowed. */
        ret = QTX_FAIL_GENERIC;
        goto err;
    }

    /* Determine the encrypted payload length. */
    if (needs_encrypt)
        ossl_qtx_calculate_ciphertext_payload_len(qtx, enc_level,
                                                  cur.bytes_remaining,
                                                  &payload_len);
    else
        payload_len = cur.bytes_remaining;

    /* Determine the header length. */
    hdr->data = NULL;
    hdr->len = payload_len;
    pred_hdr_len = ossl_quic_wire_get_encoded_pkt_hdr_len(hdr->dst_conn_id.id_len,
                                                          hdr);
    if (pred_hdr_len == 0) {
        ret = QTX_FAIL_GENERIC;
        goto err;
    }

    /* We now definitively know our packet length. */
    pkt_len = pred_hdr_len + payload_len;

    if (pkt_len > space_left) {
        ret = QTX_FAIL_INSUFFICIENT_LEN;
        goto err;
    }

    if (ossl_quic_pkt_type_has_pn(hdr->type)) {
        if (!ossl_quic_wire_encode_pkt_hdr_pn(pkt->pn,
                                              hdr->pn,
                                              hdr->pn_len)) {
            ret = QTX_FAIL_GENERIC;
            goto err;
        }
    }

    /* Append the header to the TXE. */
    hdr_start = txe_data(txe) + txe->data_len;
    if (!qtx_write_hdr(qtx, hdr, txe, &ptrs)) {
        ret = QTX_FAIL_GENERIC;
        goto err;
    }

    hdr_len = (txe_data(txe) + txe->data_len) - hdr_start;
    assert(hdr_len == pred_hdr_len);

    if (!needs_encrypt) {
        /* Just copy the payload across. */
        const unsigned char *src;
        size_t src_len;

        for (;;) {
            /* Buffer length has already been checked above. */
            src_len = iovec_cur_get_buffer(&cur, &src, SIZE_MAX);
            if (src_len == 0)
                break;

            memcpy(txe_data(txe) + txe->data_len, src, src_len);
            txe->data_len += src_len;
        }
    } else {
        /* Encrypt into the TXE. */
        if (!qtx_encrypt_into_txe(qtx, &cur, txe, enc_level, pkt->pn,
                                  hdr_start, hdr_len, &ptrs)) {
            ret = QTX_FAIL_GENERIC;
            goto err;
        }

        assert(txe->data_len - orig_data_len == pkt_len);
    }

    return 1;

err:
    /*
     * Restore the original length so we don't leave a half-written packet in
     * the TXE.
     */
    txe->data_len = orig_data_len;
    return ret;
}

static TXE *qtx_ensure_cons(OSSL_QTX *qtx)
{
    TXE *txe = qtx->cons;

    if (txe != NULL)
        return txe;

    txe = qtx_ensure_free_txe(qtx, qtx->mdpl);
    if (txe == NULL)
        return NULL;

    ossl_list_txe_remove(&qtx->free, txe);
    qtx->cons = txe;
    qtx->cons_count = 0;
    txe->data_len = 0;
    return txe;
}

static QLOG *qtx_get_qlog(OSSL_QTX *qtx)
{
    if (qtx->get_qlog_cb == NULL)
        return NULL;

    return qtx->get_qlog_cb(qtx->get_qlog_cb_arg);
}

static int qtx_mutate_write(OSSL_QTX *qtx, const OSSL_QTX_PKT *pkt, TXE *txe,
                            uint32_t enc_level)
{
    int ret;
    QUIC_PKT_HDR *hdr;
    const OSSL_QTX_IOVEC *iovec;
    size_t num_iovec;

    /* If we are running tests then the mutator callback may be non-NULL. */
    if (qtx->mutatecb != NULL) {
        if (!qtx->mutatecb(pkt->hdr, pkt->iovec, pkt->num_iovec, &hdr,
                           &iovec, &num_iovec, qtx->mutatearg))
            return QTX_FAIL_GENERIC;
    } else {
        hdr = pkt->hdr;
        iovec = pkt->iovec;
        num_iovec = pkt->num_iovec;
    }

    ret = qtx_write(qtx, pkt, txe, enc_level,
                    hdr, iovec, num_iovec);
    if (ret == 1)
        ossl_qlog_event_transport_packet_sent(qtx_get_qlog(qtx), hdr, pkt->pn,
                                              iovec, num_iovec,
                                              qtx->datagram_count);

    if (qtx->finishmutatecb != NULL)
        qtx->finishmutatecb(qtx->mutatearg);

    return ret;
}

static int addr_eq(const BIO_ADDR *a, const BIO_ADDR *b)
{
    return ((a == NULL || BIO_ADDR_family(a) == AF_UNSPEC)
            && (b == NULL || BIO_ADDR_family(b) == AF_UNSPEC))
        || (a != NULL && b != NULL && memcmp(a, b, sizeof(*a)) == 0);
}
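
/*
 * Note: two addresses compare equal if both are absent (NULL or of family
 * AF_UNSPEC), or if both are present and byte-for-byte identical. This is
 * the property relied upon below to decide whether a packet may be coalesced
 * into the datagram currently under construction.
 */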

int ossl_qtx_write_pkt(OSSL_QTX *qtx, const OSSL_QTX_PKT *pkt)
{
    int ret;
    int coalescing = (pkt->flags & OSSL_QTX_PKT_FLAG_COALESCE) != 0;
    int was_coalescing;
    TXE *txe;
    uint32_t enc_level;

    /* Must have an EL configured, must have a header. */
    if (pkt->hdr == NULL)
        return 0;

    enc_level = ossl_quic_pkt_type_to_enc_level(pkt->hdr->type);

    /* Some packet types must be in a datagram all by themselves. */
    if (!ossl_quic_pkt_type_can_share_dgram(pkt->hdr->type))
        ossl_qtx_finish_dgram(qtx);
    else if (enc_level >= QUIC_ENC_LEVEL_NUM
             || ossl_qrl_enc_level_set_have_el(&qtx->el_set, enc_level) != 1) {
        /* All other packet types are encrypted. */
        return 0;
    }

    was_coalescing = (qtx->cons != NULL && qtx->cons->data_len > 0);
    if (was_coalescing)
        if (!addr_eq(&qtx->cons->peer, pkt->peer)
            || !addr_eq(&qtx->cons->local, pkt->local)) {
            /* Must stop coalescing if the addresses have changed. */
            ossl_qtx_finish_dgram(qtx);
            was_coalescing = 0;
        }

    for (;;) {
        /*
         * Start a new coalescing session or continue using the existing one
         * and serialize/encrypt the packet. We always encrypt packets as soon
         * as our caller gives them to us, which relieves the caller of any
         * need to keep the plaintext around.
         */
        txe = qtx_ensure_cons(qtx);
        if (txe == NULL)
            return 0; /* allocation failure */

        /*
         * Ensure the TXE has at least MDPL bytes allocated. This should only
         * be necessary if the MDPL has increased.
         */
        if (!qtx_reserve_txe(qtx, NULL, txe, qtx->mdpl))
            return 0;

        if (!was_coalescing) {
            /* Set addresses in the TXE. */
            if (pkt->peer != NULL)
                txe->peer = *pkt->peer;
            else
                BIO_ADDR_clear(&txe->peer);

            if (pkt->local != NULL)
                txe->local = *pkt->local;
            else
                BIO_ADDR_clear(&txe->local);
        }

        ret = qtx_mutate_write(qtx, pkt, txe, enc_level);
        if (ret == 1) {
            break;
        } else if (ret == QTX_FAIL_INSUFFICIENT_LEN) {
            if (was_coalescing) {
                /*
                 * We failed due to insufficient length, so end the current
                 * datagram and try again.
                 */
                ossl_qtx_finish_dgram(qtx);
                was_coalescing = 0;
            } else {
                /*
                 * We failed due to insufficient length, but we were not
                 * coalescing (we started with an empty datagram), so any
                 * future attempt to write this packet must also fail.
                 */
                return 0;
            }
        } else {
            return 0; /* other error */
        }
    }

    ++qtx->cons_count;

    /*
     * Some packet types cannot have another packet come after them.
     */
    if (ossl_quic_pkt_type_must_be_last(pkt->hdr->type))
        coalescing = 0;

    if (!coalescing)
        ossl_qtx_finish_dgram(qtx);

    return 1;
}

/*
 * Finish any incomplete datagram for transmission which was flagged for
 * coalescing. If there is no current coalescing datagram, this is a no-op.
 */
void ossl_qtx_finish_dgram(OSSL_QTX *qtx)
{
    TXE *txe = qtx->cons;

    if (txe == NULL)
        return;

    if (txe->data_len == 0)
        /*
         * If we did not put anything in the datagram, just move it back to
         * the free list.
         */
        ossl_list_txe_insert_tail(&qtx->free, txe);
    else
        qtx_add_to_pending(qtx, txe);

    qtx->cons = NULL;
    qtx->cons_count = 0;
    ++qtx->datagram_count;
}

static void txe_to_msg(TXE *txe, BIO_MSG *msg)
{
    msg->data = txe_data(txe);
    msg->data_len = txe->data_len;
    msg->flags = 0;
    msg->peer
        = BIO_ADDR_family(&txe->peer) != AF_UNSPEC ? &txe->peer : NULL;
    msg->local
        = BIO_ADDR_family(&txe->local) != AF_UNSPEC ? &txe->local : NULL;
}

#define MAX_MSGS_PER_SEND   32

int ossl_qtx_flush_net(OSSL_QTX *qtx)
{
    BIO_MSG msg[MAX_MSGS_PER_SEND];
    size_t wr, i, total_written = 0;
    TXE *txe;
    int res;

    if (ossl_list_txe_head(&qtx->pending) == NULL)
        return QTX_FLUSH_NET_RES_OK; /* Nothing to send. */

    if (qtx->bio == NULL)
        return QTX_FLUSH_NET_RES_PERMANENT_FAIL;

    for (;;) {
        for (txe = ossl_list_txe_head(&qtx->pending), i = 0;
             txe != NULL && i < OSSL_NELEM(msg);
             txe = ossl_list_txe_next(txe), ++i)
            txe_to_msg(txe, &msg[i]);

        if (!i)
            /* Nothing to send. */
            break;

        ERR_set_mark();
        res = BIO_sendmmsg(qtx->bio, msg, sizeof(BIO_MSG), i, 0, &wr);
        if (res && wr == 0) {
            /*
             * Treat zero messages sent as a transient error and just stop for
             * now.
             */
            ERR_clear_last_mark();
            break;
        } else if (!res) {
            /*
             * We did not get anything, so further calls will probably not
             * succeed either.
             */
            if (BIO_err_is_non_fatal(ERR_peek_last_error())) {
                /* Transient error, just stop for now, clearing the error. */
                ERR_pop_to_mark();
                break;
            } else {
                /* Non-transient error, fail and do not clear the error. */
                ERR_clear_last_mark();
                return QTX_FLUSH_NET_RES_PERMANENT_FAIL;
            }
        }

        ERR_clear_last_mark();

        /*
         * Remove everything which was successfully sent from the pending
         * queue.
         */
        for (i = 0; i < wr; ++i) {
            if (qtx->msg_callback != NULL)
                qtx->msg_callback(1, OSSL_QUIC1_VERSION, SSL3_RT_QUIC_DATAGRAM,
                                  msg[i].data, msg[i].data_len,
                                  qtx->msg_callback_ssl,
                                  qtx->msg_callback_arg);
            qtx_pending_to_free(qtx);
        }

        total_written += wr;
    }

    return total_written > 0
        ? QTX_FLUSH_NET_RES_OK
        : QTX_FLUSH_NET_RES_TRANSIENT_FAIL;
}
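
/*
 * A sketch of the intended calling sequence (illustrative; names other than
 * the functions and constants defined in this file are hypothetical):
 *
 *   OSSL_QTX_PKT pkt = ...;                   // header, iovecs, pn, peer, ...
 *
 *   pkt.flags |= OSSL_QTX_PKT_FLAG_COALESCE;  // opt in to coalescing
 *   if (!ossl_qtx_write_pkt(qtx, &pkt))
 *       handle_error();                       // hypothetical
 *
 *   ossl_qtx_finish_dgram(qtx);               // close out the final datagram
 *
 *   switch (ossl_qtx_flush_net(qtx)) {
 *   case QTX_FLUSH_NET_RES_OK:
 *   case QTX_FLUSH_NET_RES_TRANSIENT_FAIL:
 *       break;                                // retry transient failures later
 *   case QTX_FLUSH_NET_RES_PERMANENT_FAIL:
 *       handle_error();
 *       break;
 *   }
 */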

int ossl_qtx_pop_net(OSSL_QTX *qtx, BIO_MSG *msg)
{
    TXE *txe = ossl_list_txe_head(&qtx->pending);

    if (txe == NULL)
        return 0;

    txe_to_msg(txe, msg);
    qtx_pending_to_free(qtx);
    return 1;
}

void ossl_qtx_set_bio(OSSL_QTX *qtx, BIO *bio)
{
    qtx->bio = bio;
}

int ossl_qtx_set_mdpl(OSSL_QTX *qtx, size_t mdpl)
{
    if (mdpl < QUIC_MIN_INITIAL_DGRAM_LEN)
        return 0;

    qtx->mdpl = mdpl;
    return 1;
}

size_t ossl_qtx_get_mdpl(OSSL_QTX *qtx)
{
    return qtx->mdpl;
}

size_t ossl_qtx_get_queue_len_datagrams(OSSL_QTX *qtx)
{
    return qtx->pending_count;
}

size_t ossl_qtx_get_queue_len_bytes(OSSL_QTX *qtx)
{
    return qtx->pending_bytes;
}

size_t ossl_qtx_get_cur_dgram_len_bytes(OSSL_QTX *qtx)
{
    return qtx->cons != NULL ? qtx->cons->data_len : 0;
}

size_t ossl_qtx_get_unflushed_pkt_count(OSSL_QTX *qtx)
{
    return qtx->cons_count;
}

int ossl_qtx_trigger_key_update(OSSL_QTX *qtx)
{
    return ossl_qrl_enc_level_set_key_update(&qtx->el_set,
                                             QUIC_ENC_LEVEL_1RTT);
}

uint64_t ossl_qtx_get_cur_epoch_pkt_count(OSSL_QTX *qtx, uint32_t enc_level)
{
    OSSL_QRL_ENC_LEVEL *el;

    el = ossl_qrl_enc_level_set_get(&qtx->el_set, enc_level, 1);
    if (el == NULL)
        return UINT64_MAX;

    return el->op_count;
}

uint64_t ossl_qtx_get_max_epoch_pkt_count(OSSL_QTX *qtx, uint32_t enc_level)
{
    OSSL_QRL_ENC_LEVEL *el;

    el = ossl_qrl_enc_level_set_get(&qtx->el_set, enc_level, 1);
    if (el == NULL)
        return UINT64_MAX;

    return ossl_qrl_get_suite_max_pkt(el->suite_id);
}

void ossl_qtx_set_msg_callback(OSSL_QTX *qtx, ossl_msg_cb msg_callback,
                               SSL *msg_callback_ssl)
{
    qtx->msg_callback = msg_callback;
    qtx->msg_callback_ssl = msg_callback_ssl;
}

void ossl_qtx_set_msg_callback_arg(OSSL_QTX *qtx, void *msg_callback_arg)
{
    qtx->msg_callback_arg = msg_callback_arg;
}

uint64_t ossl_qtx_get_key_epoch(OSSL_QTX *qtx)
{
    OSSL_QRL_ENC_LEVEL *el;

    el = ossl_qrl_enc_level_set_get(&qtx->el_set, QUIC_ENC_LEVEL_1RTT, 1);
    if (el == NULL)
        return 0;

    return el->key_epoch;
}