/*
 * Copyright 2022-2024 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the Apache License 2.0 (the "License").  You may not use
 * this file except in compliance with the License.  You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

#include <openssl/ssl.h>
#include "internal/quic_record_rx.h"
#include "quic_record_shared.h"
#include "internal/common.h"
#include "internal/list.h"
#include "../ssl_local.h"

/*
 * Mark a packet in a bitfield.
 *
 * pkt_idx: index of packet within datagram.
 */
static ossl_inline void pkt_mark(uint64_t *bitf, size_t pkt_idx)
{
    assert(pkt_idx < QUIC_MAX_PKT_PER_URXE);
    *bitf |= ((uint64_t)1) << pkt_idx;
}

/* Returns 1 if a packet is in the bitfield. */
static ossl_inline int pkt_is_marked(const uint64_t *bitf, size_t pkt_idx)
{
    assert(pkt_idx < QUIC_MAX_PKT_PER_URXE);
    return (*bitf & (((uint64_t)1) << pkt_idx)) != 0;
}
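/*
 * Usage sketch (illustrative only): the QRX keeps one such bitfield per
 * datagram for "processed" and "header protection removed" state, indexed by
 * packet position within the datagram. For example, marking the third packet
 * (pkt_idx 2) sets bit 0x4, so pkt_is_marked(&bitf, 2) then returns nonzero
 * while other indices remain unmarked.
 */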

/*
 * RXE
 * ===
 *
 * RX Entries (RXEs) store processed (i.e., decrypted) data received from the
 * network. One RXE is used per received QUIC packet.
 */
typedef struct rxe_st RXE;

struct rxe_st {
    OSSL_QRX_PKT        pkt;
    OSSL_LIST_MEMBER(rxe, RXE);
    size_t              data_len, alloc_len, refcount;

    /* Extra fields for per-packet information. */
    QUIC_PKT_HDR        hdr; /* data/len are decrypted payload */

    /* Decoded packet number. */
    QUIC_PN             pn;

    /* Addresses copied from URXE. */
    BIO_ADDR            peer, local;

    /* Time we received the packet (not when we processed it). */
    OSSL_TIME           time;

    /* Total length of the datagram which contained this packet. */
    size_t              datagram_len;

    /*
     * The key epoch the packet was received with. Always 0 for non-1-RTT
     * packets.
     */
    uint64_t            key_epoch;

    /*
     * Monotonically increases with each datagram received.
     * For diagnostic use only.
     */
    uint64_t            datagram_id;

    /*
     * alloc_len allocated bytes (of which data_len bytes are valid) follow this
     * structure.
     */
};

DEFINE_LIST_OF(rxe, RXE);
typedef OSSL_LIST(rxe) RXE_LIST;

static ossl_inline unsigned char *rxe_data(const RXE *e)
{
    return (unsigned char *)(e + 1);
}
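/*
 * Layout note (a sketch of the scheme above): an RXE and its payload buffer
 * are carved out of a single allocation, so for
 *
 *     RXE *rxe = OPENSSL_malloc(sizeof(RXE) + alloc_len);
 *
 * the decrypted payload lives at rxe_data(rxe) == (unsigned char *)(rxe + 1),
 * i.e. immediately after the structure, with rxe->data_len of the
 * rxe->alloc_len bytes currently valid. This is also why resizing an RXE (see
 * qrx_resize_rxe below) can change the address of the RXE itself.
 */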

/*
 * QRL
 * ===
 */
struct ossl_qrx_st {
    OSSL_LIB_CTX               *libctx;
    const char                 *propq;

    /* Demux to receive datagrams from. */
    QUIC_DEMUX                 *demux;

    /* Length of connection IDs used in short-header packets in bytes. */
    size_t                      short_conn_id_len;

    /* Maximum number of deferred datagrams buffered at any one time. */
    size_t                      max_deferred;

    /* Current count of deferred datagrams. */
    size_t                      num_deferred;

    /*
     * List of URXEs which are filled with received encrypted data.
     * These are returned to the DEMUX's free list as they are processed.
     */
    QUIC_URXE_LIST              urx_pending;

    /*
     * List of URXEs which we could not decrypt immediately and which are being
     * kept in case they can be decrypted later.
     */
    QUIC_URXE_LIST              urx_deferred;

    /*
     * List of RXEs which are not currently in use. These are moved
     * to the pending list as they are filled.
     */
    RXE_LIST                    rx_free;

    /*
     * List of RXEs which are filled with decrypted packets ready to be passed
     * to the user. A RXE is removed from all lists inside the QRL when passed
     * to the user, then returned to the free list when the user returns it.
     */
    RXE_LIST                    rx_pending;

    /* Largest PN we have received and processed in a given PN space. */
    QUIC_PN                     largest_pn[QUIC_PN_SPACE_NUM];

    /* Per encryption-level state. */
    OSSL_QRL_ENC_LEVEL_SET      el_set;

    /* Bytes we have received since this counter was last cleared. */
    uint64_t                    bytes_received;

    /*
     * Number of forged packets we have received since the QRX was instantiated.
     * Note that as per RFC 9001, this is connection-level state; it is not per
     * EL and is not reset by a key update.
     */
    uint64_t                    forged_pkt_count;

    /*
     * The PN the current key epoch started at, inclusive.
     */
    uint64_t                    cur_epoch_start_pn;

    /* Validation callback. */
    ossl_qrx_late_validation_cb    *validation_cb;
    void                           *validation_cb_arg;

    /* Key update callback. */
    ossl_qrx_key_update_cb         *key_update_cb;
    void                           *key_update_cb_arg;

    /* Initial key phase. For debugging use only; always 0 in real use. */
    unsigned char                   init_key_phase_bit;

    /* Are we allowed to process 1-RTT packets yet? */
    unsigned char                   allow_1rtt;

    /* Message callback related arguments */
    ossl_msg_cb msg_callback;
    void *msg_callback_arg;
    SSL *msg_callback_ssl;
};
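/*
 * Data flow summary (derived from the list comments above, for orientation
 * only): encrypted datagrams arrive from the demux as URXEs on urx_pending;
 * qrx_process_pending_urxl() decrypts their packets into RXEs taken from
 * rx_free and places them on rx_pending, deferring whole datagrams to
 * urx_deferred when keys are not yet available. ossl_qrx_read_pkt() hands
 * rx_pending RXEs to the user and ossl_qrx_pkt_release() recycles them back
 * onto rx_free.
 */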

OSSL_QRX *ossl_qrx_new(const OSSL_QRX_ARGS *args)
{
    OSSL_QRX *qrx;
    size_t i;

    if (args->demux == NULL || args->max_deferred == 0)
        return NULL;

    qrx = OPENSSL_zalloc(sizeof(OSSL_QRX));
    if (qrx == NULL)
        return NULL;

    for (i = 0; i < OSSL_NELEM(qrx->largest_pn); ++i)
        qrx->largest_pn[i] = args->init_largest_pn[i];

    qrx->libctx                 = args->libctx;
    qrx->propq                  = args->propq;
    qrx->demux                  = args->demux;
    qrx->short_conn_id_len      = args->short_conn_id_len;
    qrx->init_key_phase_bit     = args->init_key_phase_bit;
    qrx->max_deferred           = args->max_deferred;
    return qrx;
}
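/*
 * Construction sketch (illustrative, not part of the public API contract):
 * a caller typically zero-initialises an OSSL_QRX_ARGS, fills in at least the
 * fields consumed above and then creates the QRX, e.g.
 *
 *     OSSL_QRX_ARGS args = {0};
 *
 *     args.libctx            = libctx;   // assumed caller-provided
 *     args.demux             = demux;    // required (checked above)
 *     args.short_conn_id_len = 8;        // example value
 *     args.max_deferred      = 32;       // required to be nonzero
 *     qrx = ossl_qrx_new(&args);
 *
 * Field names are those read by ossl_qrx_new() above; any other members of
 * OSSL_QRX_ARGS are left at their zero defaults in this sketch.
 */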

static void qrx_cleanup_rxl(RXE_LIST *l)
{
    RXE *e, *enext;

    for (e = ossl_list_rxe_head(l); e != NULL; e = enext) {
        enext = ossl_list_rxe_next(e);
        ossl_list_rxe_remove(l, e);
        OPENSSL_free(e);
    }
}

static void qrx_cleanup_urxl(OSSL_QRX *qrx, QUIC_URXE_LIST *l)
{
    QUIC_URXE *e, *enext;

    for (e = ossl_list_urxe_head(l); e != NULL; e = enext) {
        enext = ossl_list_urxe_next(e);
        ossl_list_urxe_remove(l, e);
        ossl_quic_demux_release_urxe(qrx->demux, e);
    }
}

void ossl_qrx_free(OSSL_QRX *qrx)
{
    uint32_t i;

    if (qrx == NULL)
        return;

    /* Free RXE queue data. */
    qrx_cleanup_rxl(&qrx->rx_free);
    qrx_cleanup_rxl(&qrx->rx_pending);
    qrx_cleanup_urxl(qrx, &qrx->urx_pending);
    qrx_cleanup_urxl(qrx, &qrx->urx_deferred);

    /* Drop keying material and crypto resources. */
    for (i = 0; i < QUIC_ENC_LEVEL_NUM; ++i)
        ossl_qrl_enc_level_set_discard(&qrx->el_set, i);

    OPENSSL_free(qrx);
}

void ossl_qrx_inject_urxe(OSSL_QRX *qrx, QUIC_URXE *urxe)
{
    /* Initialize our own fields inside the URXE and add to the pending list. */
    urxe->processed     = 0;
    urxe->hpr_removed   = 0;
    urxe->deferred      = 0;
    ossl_list_urxe_insert_tail(&qrx->urx_pending, urxe);

    if (qrx->msg_callback != NULL)
        qrx->msg_callback(0, OSSL_QUIC1_VERSION, SSL3_RT_QUIC_DATAGRAM, urxe + 1,
                          urxe->data_len, qrx->msg_callback_ssl,
                          qrx->msg_callback_arg);
}

static void qrx_requeue_deferred(OSSL_QRX *qrx)
{
    QUIC_URXE *e;

    while ((e = ossl_list_urxe_head(&qrx->urx_deferred)) != NULL) {
        ossl_list_urxe_remove(&qrx->urx_deferred, e);
        ossl_list_urxe_insert_tail(&qrx->urx_pending, e);
    }
}

int ossl_qrx_provide_secret(OSSL_QRX *qrx, uint32_t enc_level,
                            uint32_t suite_id, EVP_MD *md,
                            const unsigned char *secret, size_t secret_len)
{
    if (enc_level >= QUIC_ENC_LEVEL_NUM)
        return 0;

    if (!ossl_qrl_enc_level_set_provide_secret(&qrx->el_set,
                                               qrx->libctx,
                                               qrx->propq,
                                               enc_level,
                                               suite_id,
                                               md,
                                               secret,
                                               secret_len,
                                               qrx->init_key_phase_bit,
                                               /*is_tx=*/0))
        return 0;

    /*
     * Any packets we previously could not decrypt, we may now be able to
     * decrypt, so move any datagrams containing deferred packets from the
     * deferred to the pending queue.
     */
    qrx_requeue_deferred(qrx);
    return 1;
}

int ossl_qrx_discard_enc_level(OSSL_QRX *qrx, uint32_t enc_level)
{
    if (enc_level >= QUIC_ENC_LEVEL_NUM)
        return 0;

    ossl_qrl_enc_level_set_discard(&qrx->el_set, enc_level);
    return 1;
}

/* Returns 1 if there are one or more pending RXEs. */
int ossl_qrx_processed_read_pending(OSSL_QRX *qrx)
{
    return !ossl_list_rxe_is_empty(&qrx->rx_pending);
}

/* Returns 1 if there are yet-unprocessed packets. */
int ossl_qrx_unprocessed_read_pending(OSSL_QRX *qrx)
{
    return !ossl_list_urxe_is_empty(&qrx->urx_pending)
           || !ossl_list_urxe_is_empty(&qrx->urx_deferred);
}

/* Pop the next pending RXE. Returns NULL if no RXE is pending. */
static RXE *qrx_pop_pending_rxe(OSSL_QRX *qrx)
{
    RXE *rxe = ossl_list_rxe_head(&qrx->rx_pending);

    if (rxe == NULL)
        return NULL;

    ossl_list_rxe_remove(&qrx->rx_pending, rxe);
    return rxe;
}

/* Allocate a new RXE. */
static RXE *qrx_alloc_rxe(size_t alloc_len)
{
    RXE *rxe;

    if (alloc_len >= SIZE_MAX - sizeof(RXE))
        return NULL;

    rxe = OPENSSL_malloc(sizeof(RXE) + alloc_len);
    if (rxe == NULL)
        return NULL;

    ossl_list_rxe_init_elem(rxe);
    rxe->alloc_len = alloc_len;
    rxe->data_len  = 0;
    rxe->refcount  = 0;
    return rxe;
}

/*
 * Ensures there is at least one RXE in the RX free list, allocating a new entry
 * if necessary. The returned RXE is in the RX free list; it is not popped.
 *
 * alloc_len is a hint which may be used to determine the RXE size if allocation
 * is necessary. Returns NULL on allocation failure.
 */
static RXE *qrx_ensure_free_rxe(OSSL_QRX *qrx, size_t alloc_len)
{
    RXE *rxe;

    if (ossl_list_rxe_head(&qrx->rx_free) != NULL)
        return ossl_list_rxe_head(&qrx->rx_free);

    rxe = qrx_alloc_rxe(alloc_len);
    if (rxe == NULL)
        return NULL;

    ossl_list_rxe_insert_tail(&qrx->rx_free, rxe);
    return rxe;
}

/*
 * Resize the data buffer attached to an RXE to be n bytes in size. The address
 * of the RXE might change; the new address is returned, or NULL on failure, in
 * which case the original RXE remains valid.
 */
static RXE *qrx_resize_rxe(RXE_LIST *rxl, RXE *rxe, size_t n)
{
    RXE *rxe2, *p;

    /* Should never happen. */
    if (rxe == NULL)
        return NULL;

    if (n >= SIZE_MAX - sizeof(RXE))
        return NULL;

    /* Remove the item from the list to avoid accessing freed memory */
    p = ossl_list_rxe_prev(rxe);
    ossl_list_rxe_remove(rxl, rxe);

    /* Should never resize an RXE which has been handed out. */
    if (!ossl_assert(rxe->refcount == 0))
        return NULL;

    /*
     * NOTE: We do not clear old memory, although it does contain decrypted
     * data.
     */
    rxe2 = OPENSSL_realloc(rxe, sizeof(RXE) + n);
    if (rxe2 == NULL) {
        /* Resize failed, restore old allocation. */
        if (p == NULL)
            ossl_list_rxe_insert_head(rxl, rxe);
        else
            ossl_list_rxe_insert_after(rxl, p, rxe);
        return NULL;
    }

    if (p == NULL)
        ossl_list_rxe_insert_head(rxl, rxe2);
    else
        ossl_list_rxe_insert_after(rxl, p, rxe2);

    rxe2->alloc_len = n;
    return rxe2;
}

/*
 * Ensure the data buffer attached to an RXE is at least n bytes in size.
 * Returns NULL on failure.
 */
static RXE *qrx_reserve_rxe(RXE_LIST *rxl,
                            RXE *rxe, size_t n)
{
    if (rxe->alloc_len >= n)
        return rxe;

    return qrx_resize_rxe(rxl, rxe, n);
}

/* Return a RXE handed out to the user back to our freelist. */
static void qrx_recycle_rxe(OSSL_QRX *qrx, RXE *rxe)
{
    /* RXE should not be in any list */
    assert(ossl_list_rxe_prev(rxe) == NULL && ossl_list_rxe_next(rxe) == NULL);
    rxe->pkt.hdr    = NULL;
    rxe->pkt.peer   = NULL;
    rxe->pkt.local  = NULL;
    ossl_list_rxe_insert_tail(&qrx->rx_free, rxe);
}

/*
 * Given a pointer to a pointer pointing to a buffer and the size of that
 * buffer, copy the buffer into *prxe, expanding the RXE if necessary (its
 * pointer may change due to realloc). *pi is the offset in bytes to copy the
 * buffer to, and on success is updated to be the offset pointing after the
 * copied buffer. *pptr is updated to point to the new location of the buffer.
 */
static int qrx_relocate_buffer(OSSL_QRX *qrx, RXE **prxe, size_t *pi,
                               const unsigned char **pptr, size_t buf_len)
{
    RXE *rxe;
    unsigned char *dst;

    if (!buf_len)
        return 1;

    if ((rxe = qrx_reserve_rxe(&qrx->rx_free, *prxe, *pi + buf_len)) == NULL)
        return 0;

    *prxe = rxe;
    dst = (unsigned char *)rxe_data(rxe) + *pi;

    memcpy(dst, *pptr, buf_len);
    *pi += buf_len;
    *pptr = dst;
    return 1;
}
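/*
 * Worked example (mirrors the Initial-token handling in qrx_process_pkt
 * below, illustrative sizes): with *pi == 0 and a 16-byte token still
 * pointing into the URXE, a successful call copies those 16 bytes to
 * rxe_data(rxe) + 0, sets *pi to 16 and repoints *pptr at the copy inside the
 * RXE, so the packet payload can then be decrypted to offset 16 without
 * referencing URXE memory any more.
 */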

static uint32_t qrx_determine_enc_level(const QUIC_PKT_HDR *hdr)
{
    switch (hdr->type) {
        case QUIC_PKT_TYPE_INITIAL:
            return QUIC_ENC_LEVEL_INITIAL;
        case QUIC_PKT_TYPE_HANDSHAKE:
            return QUIC_ENC_LEVEL_HANDSHAKE;
        case QUIC_PKT_TYPE_0RTT:
            return QUIC_ENC_LEVEL_0RTT;
        case QUIC_PKT_TYPE_1RTT:
            return QUIC_ENC_LEVEL_1RTT;

        default:
            assert(0);
        case QUIC_PKT_TYPE_RETRY:
        case QUIC_PKT_TYPE_VERSION_NEG:
            return QUIC_ENC_LEVEL_INITIAL; /* not used */
    }
}

static uint32_t rxe_determine_pn_space(RXE *rxe)
{
    uint32_t enc_level;

    enc_level = qrx_determine_enc_level(&rxe->hdr);
    return ossl_quic_enc_level_to_pn_space(enc_level);
}
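/*
 * For reference (RFC 9000 s. 12.3): the Initial and Handshake encryption
 * levels each have their own packet number space, while 0-RTT and 1-RTT
 * packets share the Application Data packet number space, which is why
 * largest_pn[] above is indexed by PN space rather than by encryption level.
 */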

static int qrx_validate_hdr_early(OSSL_QRX *qrx, RXE *rxe,
                                  const QUIC_CONN_ID *first_dcid)
{
    /* Ensure version is what we want. */
    if (rxe->hdr.version != QUIC_VERSION_1
        && rxe->hdr.version != QUIC_VERSION_NONE)
        return 0;

    /* Clients should never receive 0-RTT packets. */
    if (rxe->hdr.type == QUIC_PKT_TYPE_0RTT)
        return 0;

    /* Version negotiation and retry packets must be the first packet. */
    if (first_dcid != NULL && !ossl_quic_pkt_type_can_share_dgram(rxe->hdr.type))
        return 0;

    /*
     * If this is not the first packet in a datagram, the destination connection
     * ID must match the one in that packet.
     */
    if (first_dcid != NULL) {
        if (!ossl_assert(first_dcid->id_len < QUIC_MAX_CONN_ID_LEN)
            || !ossl_quic_conn_id_eq(first_dcid,
                                     &rxe->hdr.dst_conn_id))
        return 0;
    }

    return 1;
}

/* Validate header and decode PN. */
static int qrx_validate_hdr(OSSL_QRX *qrx, RXE *rxe)
{
    int pn_space = rxe_determine_pn_space(rxe);

    if (!ossl_quic_wire_decode_pkt_hdr_pn(rxe->hdr.pn, rxe->hdr.pn_len,
                                          qrx->largest_pn[pn_space],
                                          &rxe->pn))
        return 0;

    return 1;
}

/* Late packet header validation. */
static int qrx_validate_hdr_late(OSSL_QRX *qrx, RXE *rxe)
{
    int pn_space = rxe_determine_pn_space(rxe);

    /*
     * Allow our user to decide whether to discard the packet before we try and
     * decrypt it.
     */
    if (qrx->validation_cb != NULL
        && !qrx->validation_cb(rxe->pn, pn_space, qrx->validation_cb_arg))
        return 0;

    return 1;
}

/*
 * Retrieves the correct cipher context for an EL and key phase. Writes the key
 * epoch number actually used for packet decryption to *rx_key_epoch.
 */
static size_t qrx_get_cipher_ctx_idx(OSSL_QRX *qrx, OSSL_QRL_ENC_LEVEL *el,
                                     uint32_t enc_level,
                                     unsigned char key_phase_bit,
                                     uint64_t *rx_key_epoch,
                                     int *is_old_key)
{
    size_t idx;

    *is_old_key = 0;

    if (enc_level != QUIC_ENC_LEVEL_1RTT) {
        *rx_key_epoch = 0;
        return 0;
    }

    if (!ossl_assert(key_phase_bit <= 1))
        return SIZE_MAX;

    /*
     * RFC 9001 requires that we not create timing channels which could reveal
     * the decrypted value of the Key Phase bit. We usually handle this by
     * keeping the cipher contexts for both the current and next key epochs
     * around, so that we just select a cipher context blindly using the key
     * phase bit, which is time-invariant.
     *
     * In the COOLDOWN state, we only have one keyslot/cipher context. RFC 9001
     * suggests an implementation strategy to avoid creating a timing channel in
     * this case:
     *
     *   Endpoints can use randomized packet protection keys in place of
     *   discarded keys when key updates are not yet permitted.
     *
     * Rather than use a randomised key, we simply use our existing key as it
     * will fail AEAD verification anyway. This avoids the need to keep around a
     * dedicated garbage key.
     *
     * Note: Accessing different cipher contexts is technically not
     * timing-channel safe due to microarchitectural side channels, but this is
     * the best we can reasonably do and appears to be directly suggested by the
     * RFC.
     */
    idx = (el->state == QRL_EL_STATE_PROV_COOLDOWN ? el->key_epoch & 1
                                                   : key_phase_bit);

    /*
     * We also need to determine the key epoch number which this index
     * corresponds to. This is so we can report the key epoch number in the
     * OSSL_QRX_PKT structure, which callers need to validate whether it was OK
     * for a packet to be sent using a given key epoch's keys.
     */
    switch (el->state) {
    case QRL_EL_STATE_PROV_NORMAL:
        /*
         * If we are in the NORMAL state, usually the KP bit will match the LSB
         * of our key epoch, meaning no new key update is being signalled. If it
         * does not match, this means the packet (purports to) belong to
         * the next key epoch.
         *
         * IMPORTANT: The AEAD tag has not been verified yet when this function
         * is called, so this code must be timing-channel safe, hence use of
         * XOR. Moreover, the value output below is not yet authenticated.
         */
        *rx_key_epoch
            = el->key_epoch + ((el->key_epoch & 1) ^ (uint64_t)key_phase_bit);
        break;

    case QRL_EL_STATE_PROV_UPDATING:
        /*
         * If we are in the UPDATING state, usually the KP bit will match the
         * LSB of our key epoch. If it does not match, this means that the
         * packet (purports to) belong to the previous key epoch.
         *
         * As above, must be timing-channel safe.
         */
        *is_old_key = (el->key_epoch & 1) ^ (uint64_t)key_phase_bit;
        *rx_key_epoch = el->key_epoch - (uint64_t)*is_old_key;
        break;

    case QRL_EL_STATE_PROV_COOLDOWN:
        /*
         * If we are in COOLDOWN, there is only one key epoch we can possibly
         * decrypt with, so just try that. If AEAD decryption fails, the
         * value we output here isn't used anyway.
         */
        *rx_key_epoch = el->key_epoch;
        break;
    }

    return idx;
}
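/*
 * Worked example of the epoch arithmetic above (values illustrative): in the
 * NORMAL state with el->key_epoch == 4 (LSB 0), a KP bit of 0 gives
 * rx_key_epoch = 4 + (0 ^ 0) = 4 (current epoch), while a KP bit of 1 gives
 * 4 + (0 ^ 1) = 5, i.e. the packet claims to use the next epoch's keys. In
 * the UPDATING state with el->key_epoch == 5 (LSB 1), a KP bit of 0 yields
 * is_old_key = 1 and rx_key_epoch = 5 - 1 = 4, the previous epoch.
 */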

/*
 * Tries to decrypt a packet payload.
 *
 * Returns 1 on success or 0 on failure (which is permanent). The payload is
 * decrypted from src and written to dst. The buffer dst must be of at least
 * src_len bytes in length. The actual length of the output in bytes is written
 * to *dec_len on success, which will always be equal to or less than (usually
 * less than) src_len.
 */
static int qrx_decrypt_pkt_body(OSSL_QRX *qrx, unsigned char *dst,
                                const unsigned char *src,
                                size_t src_len, size_t *dec_len,
                                const unsigned char *aad, size_t aad_len,
                                QUIC_PN pn, uint32_t enc_level,
                                unsigned char key_phase_bit,
                                uint64_t *rx_key_epoch)
{
    int l = 0, l2 = 0, is_old_key, nonce_len;
    unsigned char nonce[EVP_MAX_IV_LENGTH];
    size_t i, cctx_idx;
    OSSL_QRL_ENC_LEVEL *el = ossl_qrl_enc_level_set_get(&qrx->el_set,
                                                        enc_level, 1);
    EVP_CIPHER_CTX *cctx;

    if (src_len > INT_MAX || aad_len > INT_MAX)
        return 0;

    /* We should not have been called if we do not have key material. */
    if (!ossl_assert(el != NULL))
        return 0;

    if (el->tag_len >= src_len)
        return 0;

    /*
     * If we have failed to authenticate a certain number of ciphertexts, refuse
     * to decrypt any more ciphertexts.
     */
    if (qrx->forged_pkt_count >= ossl_qrl_get_suite_max_forged_pkt(el->suite_id))
        return 0;

    cctx_idx = qrx_get_cipher_ctx_idx(qrx, el, enc_level, key_phase_bit,
                                      rx_key_epoch, &is_old_key);
    if (!ossl_assert(cctx_idx < OSSL_NELEM(el->cctx)))
        return 0;

    if (is_old_key && pn >= qrx->cur_epoch_start_pn)
        /*
         * RFC 9001 s. 5.5: Once an endpoint successfully receives a packet with
         * a given PN, it MUST discard all packets in the same PN space with
         * higher PNs if they cannot be successfully unprotected with the same
         * key, or -- if there is a key update -- a subsequent packet protection
         * key.
         *
         * In other words, once a PN x triggers a KU, it is invalid for us to
         * receive a packet with a newer PN y (y > x) using the old keys.
         */
        return 0;

    cctx = el->cctx[cctx_idx];

    /* Construct nonce (nonce=IV ^ PN). */
    nonce_len = EVP_CIPHER_CTX_get_iv_length(cctx);
    if (!ossl_assert(nonce_len >= (int)sizeof(QUIC_PN)))
        return 0;

    memcpy(nonce, el->iv[cctx_idx], nonce_len);
    for (i = 0; i < sizeof(QUIC_PN); ++i)
        nonce[nonce_len - i - 1] ^= (unsigned char)(pn >> (i * 8));
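    /*
     * Example of the XOR above (illustrative values): with a 12-byte IV and
     * pn == 0x1234, only the last two nonce bytes change, nonce[11] ^= 0x34
     * and nonce[10] ^= 0x12; the remaining bytes keep the IV's value. This
     * matches the RFC 9001 s. 5.3 nonce construction, where the packet number
     * is left-padded with zeros and XORed into the right end of the IV.
     */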

    /* type and key will already have been setup; feed the IV. */
    if (EVP_CipherInit_ex(cctx, NULL,
                          NULL, NULL, nonce, /*enc=*/0) != 1)
        return 0;

    /* Feed the AEAD tag we got so the cipher can validate it. */
    if (EVP_CIPHER_CTX_ctrl(cctx, EVP_CTRL_AEAD_SET_TAG,
                            el->tag_len,
                            (unsigned char *)src + src_len - el->tag_len) != 1)
        return 0;

    /* Feed AAD data. */
    if (EVP_CipherUpdate(cctx, NULL, &l, aad, aad_len) != 1)
        return 0;

    /* Feed encrypted packet body. */
    if (EVP_CipherUpdate(cctx, dst, &l, src, src_len - el->tag_len) != 1)
        return 0;

#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
    /*
     * Throw away what we just decrypted and just use the ciphertext instead
     * (which should be unencrypted)
     */
    memcpy(dst, src, l);

    /* Pretend to authenticate the tag but ignore it */
    if (EVP_CipherFinal_ex(cctx, NULL, &l2) != 1) {
        /* We don't care */
    }
#else
    /* Ensure authentication succeeded. */
    if (EVP_CipherFinal_ex(cctx, NULL, &l2) != 1) {
        /* Authentication failed, increment failed auth counter. */
        ++qrx->forged_pkt_count;
        return 0;
    }
#endif

    *dec_len = l;
    return 1;
}

static ossl_inline void ignore_res(int x)
{
    /* No-op. */
}

static void qrx_key_update_initiated(OSSL_QRX *qrx, QUIC_PN pn)
{
    if (!ossl_qrl_enc_level_set_key_update(&qrx->el_set, QUIC_ENC_LEVEL_1RTT))
        /* We are already in RXKU, so we don't call the callback again. */
        return;

    qrx->cur_epoch_start_pn = pn;

    if (qrx->key_update_cb != NULL)
        qrx->key_update_cb(pn, qrx->key_update_cb_arg);
}

/* Process a single packet in a datagram. */
static int qrx_process_pkt(OSSL_QRX *qrx, QUIC_URXE *urxe,
                           PACKET *pkt, size_t pkt_idx,
                           QUIC_CONN_ID *first_dcid,
                           size_t datagram_len)
{
    RXE *rxe;
    const unsigned char *eop = NULL;
    size_t i, aad_len = 0, dec_len = 0;
    PACKET orig_pkt = *pkt;
    const unsigned char *sop = PACKET_data(pkt);
    unsigned char *dst;
    char need_second_decode = 0, already_processed = 0;
    QUIC_PKT_HDR_PTRS ptrs;
    uint32_t pn_space, enc_level;
    OSSL_QRL_ENC_LEVEL *el = NULL;
    uint64_t rx_key_epoch = UINT64_MAX;

    /*
     * Get a free RXE. If we need to allocate a new one, use the packet length
     * as a good ballpark figure.
     */
    rxe = qrx_ensure_free_rxe(qrx, PACKET_remaining(pkt));
    if (rxe == NULL)
        return 0;

    /* Have we already processed this packet? */
    if (pkt_is_marked(&urxe->processed, pkt_idx))
        already_processed = 1;

    /*
     * Decode the header into the RXE structure. We first decrypt and read the
     * unprotected part of the packet header (unless we already removed header
     * protection, in which case we decode all of it).
     */
    need_second_decode = !pkt_is_marked(&urxe->hpr_removed, pkt_idx);
    if (!ossl_quic_wire_decode_pkt_hdr(pkt,
                                       qrx->short_conn_id_len,
                                       need_second_decode, 0, &rxe->hdr, &ptrs))
        goto malformed;

    /*
     * Our successful decode above included an intelligible length and the
     * PACKET is now pointing to the end of the QUIC packet.
     */
    eop = PACKET_data(pkt);

    /*
     * Make a note of the first packet's DCID so we can later ensure the
     * destination connection IDs of all packets in a datagram match.
     */
    if (pkt_idx == 0)
        *first_dcid = rxe->hdr.dst_conn_id;

    /*
     * Early header validation. Since we now know the packet length, we can also
     * now skip over it if we already processed it.
     */
    if (already_processed
        || !qrx_validate_hdr_early(qrx, rxe, pkt_idx == 0 ? NULL : first_dcid))
        /*
         * Already processed packets are handled identically to malformed
         * packets; i.e., they are ignored.
         */
        goto malformed;

    if (!ossl_quic_pkt_type_is_encrypted(rxe->hdr.type)) {
        /*
         * Version negotiation and retry packets are a special case. They do not
         * contain a payload which needs decrypting and have no header
         * protection.
         */

        /* Just copy the payload from the URXE to the RXE. */
        if ((rxe = qrx_reserve_rxe(&qrx->rx_free, rxe, rxe->hdr.len)) == NULL)
            /*
             * Allocation failure. EOP will be pointing to the end of the
             * datagram so processing of this datagram will end here.
             */
            goto malformed;

        /* We are now committed to returning the packet. */
        memcpy(rxe_data(rxe), rxe->hdr.data, rxe->hdr.len);
        pkt_mark(&urxe->processed, pkt_idx);

        rxe->hdr.data   = rxe_data(rxe);
        rxe->pn         = QUIC_PN_INVALID;

        rxe->data_len       = rxe->hdr.len;
        rxe->datagram_len   = datagram_len;
        rxe->key_epoch      = 0;
        rxe->peer           = urxe->peer;
        rxe->local          = urxe->local;
        rxe->time           = urxe->time;
        rxe->datagram_id    = urxe->datagram_id;

        /* Move RXE to pending. */
        ossl_list_rxe_remove(&qrx->rx_free, rxe);
        ossl_list_rxe_insert_tail(&qrx->rx_pending, rxe);
        return 0; /* success, did not defer */
    }

    /* Determine encryption level of packet. */
    enc_level = qrx_determine_enc_level(&rxe->hdr);

    /* If we do not have keying material for this encryption level yet, defer. */
    switch (ossl_qrl_enc_level_set_have_el(&qrx->el_set, enc_level)) {
        case 1:
            /* We have keys. */
            if (enc_level == QUIC_ENC_LEVEL_1RTT && !qrx->allow_1rtt)
                /*
                 * But we cannot process 1-RTT packets until the handshake is
                 * completed (RFC 9000 s. 5.7).
                 */
                goto cannot_decrypt;

            break;
        case 0:
            /* No keys yet. */
            goto cannot_decrypt;
        default:
            /* We already discarded keys for this EL, we will never process this.*/
            goto malformed;
    }

    /*
     * We will copy any token included in the packet to the start of our RXE
     * data buffer (so that we don't reference the URXE buffer any more and can
     * recycle it). Track our position in the RXE buffer by index instead of
     * pointer as the pointer may change as reallocs occur.
     */
    i = 0;

    /*
     * rxe->hdr.data is now pointing at the (encrypted) packet payload. rxe->hdr
     * also has fields pointing into the PACKET buffer which will be going away
     * soon (the URXE will be reused for another incoming packet).
     *
     * Firstly, relocate some of these fields into the RXE as needed.
     *
     * Relocate token buffer and fix pointer.
     */
    if (rxe->hdr.type == QUIC_PKT_TYPE_INITIAL) {
        const unsigned char *token = rxe->hdr.token;

        /*
         * This may change the value of rxe and change the value of the token
         * pointer as well. So we must make a temporary copy of the pointer to
         * the token, and then copy it back into the new location of the rxe
         */
        if (!qrx_relocate_buffer(qrx, &rxe, &i, &token, rxe->hdr.token_len))
            goto malformed;

        rxe->hdr.token = token;
    }

    /* Now remove header protection. */
    *pkt = orig_pkt;

    el = ossl_qrl_enc_level_set_get(&qrx->el_set, enc_level, 1);
    assert(el != NULL); /* Already checked above */

    if (need_second_decode) {
        if (!ossl_quic_hdr_protector_decrypt(&el->hpr, &ptrs))
            goto malformed;

        /*
         * We have removed header protection, so don't attempt to do it again if
         * the packet gets deferred and processed again.
         */
        pkt_mark(&urxe->hpr_removed, pkt_idx);

        /* Decode the now unprotected header. */
        if (ossl_quic_wire_decode_pkt_hdr(pkt, qrx->short_conn_id_len,
                                          0, 0, &rxe->hdr, NULL) != 1)
            goto malformed;
    }

    /* Validate header and decode PN. */
    if (!qrx_validate_hdr(qrx, rxe))
        goto malformed;

    if (qrx->msg_callback != NULL)
        qrx->msg_callback(0, OSSL_QUIC1_VERSION, SSL3_RT_QUIC_PACKET, sop,
                          eop - sop - rxe->hdr.len, qrx->msg_callback_ssl,
                          qrx->msg_callback_arg);

    /*
     * The AAD data is the entire (unprotected) packet header including the PN.
     * The packet header has been unprotected in place, so we can just reuse the
     * PACKET buffer. The header ends where the payload begins.
     */
    aad_len = rxe->hdr.data - sop;

    /* Ensure the RXE buffer size is adequate for our payload. */
    if ((rxe = qrx_reserve_rxe(&qrx->rx_free, rxe, rxe->hdr.len + i)) == NULL) {
        /*
         * Allocation failure, treat as malformed and do not bother processing
         * any further packets in the datagram as they are likely to also
         * encounter allocation failures.
         */
        eop = NULL;
        goto malformed;
    }

    /*
     * We decrypt the packet body to immediately after the token at the start of
     * the RXE buffer (where present).
     *
     * Do the decryption from the PACKET (which points into URXE memory) to our
     * RXE payload (single-copy decryption), then fixup the pointers in the
     * header to point to our new buffer.
     *
     * If decryption fails this is considered a permanent error; we defer
     * packets we don't yet have decryption keys for above, so if this fails,
     * something has gone wrong with the handshake process or a packet has been
     * corrupted.
     */
    dst = (unsigned char *)rxe_data(rxe) + i;
    if (!qrx_decrypt_pkt_body(qrx, dst, rxe->hdr.data, rxe->hdr.len,
                              &dec_len, sop, aad_len, rxe->pn, enc_level,
                              rxe->hdr.key_phase, &rx_key_epoch))
        goto malformed;

    /*
     * -----------------------------------------------------
     *   IMPORTANT: ANYTHING ABOVE THIS LINE IS UNVERIFIED
     *              AND MUST BE TIMING-CHANNEL SAFE.
     * -----------------------------------------------------
     *
     * At this point, we have successfully authenticated the AEAD tag and no
     * longer need to worry about exposing the PN, PN length or Key Phase bit in
     * timing channels. Invoke any configured validation callback to allow for
     * rejection of duplicate PNs.
     */
    if (!qrx_validate_hdr_late(qrx, rxe))
        goto malformed;

    /* Check for a Key Phase bit differing from our expectation. */
    if (rxe->hdr.type == QUIC_PKT_TYPE_1RTT
        && rxe->hdr.key_phase != (el->key_epoch & 1))
        qrx_key_update_initiated(qrx, rxe->pn);

    /*
     * We have now successfully decrypted the packet payload. If there are
     * additional packets in the datagram, it is possible we will fail to
     * decrypt them and need to defer them until we have some key material we
     * don't currently possess. If this happens, the URXE will be moved to the
     * deferred queue. Since a URXE corresponds to one datagram, which may
     * contain multiple packets, we must ensure any packets we have already
     * processed in the URXE are not processed again (this is an RFC
     * requirement). We do this by marking the nth packet in the datagram as
     * processed.
     *
     * We are now committed to returning this decrypted packet to the user,
     * meaning we now consider the packet processed and must mark it
     * accordingly.
     */
    pkt_mark(&urxe->processed, pkt_idx);

    /*
     * Update header to point to the decrypted buffer, which may be shorter
     * due to AEAD tags, block padding, etc.
     */
    rxe->hdr.data       = dst;
    rxe->hdr.len        = dec_len;
    rxe->data_len       = dec_len;
    rxe->datagram_len   = datagram_len;
    rxe->key_epoch      = rx_key_epoch;

    /* We processed the PN successfully, so update largest processed PN. */
    pn_space = rxe_determine_pn_space(rxe);
    if (rxe->pn > qrx->largest_pn[pn_space])
        qrx->largest_pn[pn_space] = rxe->pn;

    /* Copy across network addresses and RX time from URXE to RXE. */
    rxe->peer           = urxe->peer;
    rxe->local          = urxe->local;
    rxe->time           = urxe->time;
    rxe->datagram_id    = urxe->datagram_id;

    /* Move RXE to pending. */
    ossl_list_rxe_remove(&qrx->rx_free, rxe);
    ossl_list_rxe_insert_tail(&qrx->rx_pending, rxe);
    return 0; /* success, did not defer; not distinguished from failure */

cannot_decrypt:
    /*
     * We cannot process this packet right now (but might be able to later). We
     * MUST attempt to process any other packets in the datagram, so defer it
     * and skip over it.
     */
    assert(eop != NULL && eop >= PACKET_data(pkt));
    /*
     * We don't care if this fails as it will just result in the packet being at
     * the end of the datagram buffer.
     */
    ignore_res(PACKET_forward(pkt, eop - PACKET_data(pkt)));
    return 1; /* deferred */

malformed:
    if (eop != NULL) {
        /*
         * This packet cannot be processed and will never be processable. We
         * were at least able to decode its header and determine its length, so
         * we can skip over it and try to process any subsequent packets in the
         * datagram.
         *
         * Mark as processed as an optimization.
         */
        assert(eop >= PACKET_data(pkt));
        pkt_mark(&urxe->processed, pkt_idx);
        /* We don't care if this fails (see above) */
        ignore_res(PACKET_forward(pkt, eop - PACKET_data(pkt)));
    } else {
        /*
         * This packet cannot be processed and will never be processable.
         * Because even its header is not intelligible, we cannot examine any
         * further packets in the datagram because its length cannot be
         * discerned.
         *
         * Advance over the entire remainder of the datagram, and mark it as
         * processed as an optimization.
         */
        pkt_mark(&urxe->processed, pkt_idx);
        /* We don't care if this fails (see above) */
        ignore_res(PACKET_forward(pkt, PACKET_remaining(pkt)));
    }
    return 0; /* failure, did not defer; not distinguished from success */
}

/* Process a datagram which was received. */
static int qrx_process_datagram(OSSL_QRX *qrx, QUIC_URXE *e,
                                const unsigned char *data,
                                size_t data_len)
{
    int have_deferred = 0;
    PACKET pkt;
    size_t pkt_idx = 0;
    QUIC_CONN_ID first_dcid = { 255 };

    qrx->bytes_received += data_len;

    if (!PACKET_buf_init(&pkt, data, data_len))
        return 0;

    for (; PACKET_remaining(&pkt) > 0; ++pkt_idx) {
        /*
         * A packet smaller than the minimum possible QUIC packet size is not
         * considered valid. We also ignore more than a certain number of
         * packets within the same datagram.
         */
        if (PACKET_remaining(&pkt) < QUIC_MIN_VALID_PKT_LEN
            || pkt_idx >= QUIC_MAX_PKT_PER_URXE)
            break;

        /*
         * We note whether packet processing resulted in a deferral since
         * this means we need to move the URXE to the deferred list rather
         * than the free list after we're finished dealing with it for now.
         *
         * However, we don't otherwise care here whether processing succeeded or
         * failed, as the RFC says even if a packet in a datagram is malformed,
         * we should still try to process any packets following it.
         *
         * In the case where the packet is so malformed we can't determine its
         * length, qrx_process_pkt will take care of advancing to the end of
         * the packet, so we will exit the loop automatically in this case.
         */
        if (qrx_process_pkt(qrx, e, &pkt, pkt_idx, &first_dcid, data_len))
            have_deferred = 1;
    }

    /* Only report whether there were any deferrals. */
    return have_deferred;
}

/* Process a single pending URXE. */
static int qrx_process_one_urxe(OSSL_QRX *qrx, QUIC_URXE *e)
{
    int was_deferred;

    /* The next URXE we process should be at the head of the pending list. */
    if (!ossl_assert(e == ossl_list_urxe_head(&qrx->urx_pending)))
        return 0;

    /*
     * Attempt to process the datagram. The return value indicates only if
     * processing of the datagram was deferred. If we failed to process the
     * datagram, we do not attempt to process it again and silently eat the
     * error.
     */
    was_deferred = qrx_process_datagram(qrx, e, ossl_quic_urxe_data(e),
                                        e->data_len);

    /*
     * Remove the URXE from the pending list and return it to
     * either the free or deferred list.
     */
    ossl_list_urxe_remove(&qrx->urx_pending, e);
    if (was_deferred > 0 &&
            (e->deferred || qrx->num_deferred < qrx->max_deferred)) {
        ossl_list_urxe_insert_tail(&qrx->urx_deferred, e);
        if (!e->deferred) {
            e->deferred = 1;
            ++qrx->num_deferred;
        }
    } else {
        if (e->deferred) {
            e->deferred = 0;
            --qrx->num_deferred;
        }
        ossl_quic_demux_release_urxe(qrx->demux, e);
    }

    return 1;
}

/* Process any pending URXEs to generate pending RXEs. */
static int qrx_process_pending_urxl(OSSL_QRX *qrx)
{
    QUIC_URXE *e;

    while ((e = ossl_list_urxe_head(&qrx->urx_pending)) != NULL)
        if (!qrx_process_one_urxe(qrx, e))
            return 0;

    return 1;
}

int ossl_qrx_read_pkt(OSSL_QRX *qrx, OSSL_QRX_PKT **ppkt)
{
    RXE *rxe;

    if (!ossl_qrx_processed_read_pending(qrx)) {
        if (!qrx_process_pending_urxl(qrx))
            return 0;

        if (!ossl_qrx_processed_read_pending(qrx))
            return 0;
    }

    rxe = qrx_pop_pending_rxe(qrx);
    if (!ossl_assert(rxe != NULL))
        return 0;

    assert(rxe->refcount == 0);
    rxe->refcount = 1;

    rxe->pkt.hdr            = &rxe->hdr;
    rxe->pkt.pn             = rxe->pn;
    rxe->pkt.time           = rxe->time;
    rxe->pkt.datagram_len   = rxe->datagram_len;
    rxe->pkt.peer
        = BIO_ADDR_family(&rxe->peer) != AF_UNSPEC ? &rxe->peer : NULL;
    rxe->pkt.local
        = BIO_ADDR_family(&rxe->local) != AF_UNSPEC ? &rxe->local : NULL;
    rxe->pkt.key_epoch      = rxe->key_epoch;
    rxe->pkt.datagram_id    = rxe->datagram_id;
    rxe->pkt.qrx            = qrx;
    *ppkt = &rxe->pkt;

    return 1;
}
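/*
 * Consumption sketch (illustrative): a caller typically drains all currently
 * decryptable packets in a loop and releases each one when done with it, e.g.
 *
 *     OSSL_QRX_PKT *qpkt;
 *
 *     while (ossl_qrx_read_pkt(qrx, &qpkt)) {
 *         handle_payload(qpkt->hdr->data, qpkt->hdr->len);  // hypothetical
 *         ossl_qrx_pkt_release(qpkt);
 *     }
 *
 * ossl_qrx_pkt_up_ref() may be used to keep a packet alive beyond the loop;
 * the RXE is only recycled once its refcount drops back to zero.
 */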

void ossl_qrx_pkt_release(OSSL_QRX_PKT *pkt)
{
    RXE *rxe;

    if (pkt == NULL)
        return;

    rxe = (RXE *)pkt;
    assert(rxe->refcount > 0);
    if (--rxe->refcount == 0)
        qrx_recycle_rxe(pkt->qrx, rxe);
}

void ossl_qrx_pkt_up_ref(OSSL_QRX_PKT *pkt)
{
    RXE *rxe = (RXE *)pkt;

    assert(rxe->refcount > 0);
    ++rxe->refcount;
}

uint64_t ossl_qrx_get_bytes_received(OSSL_QRX *qrx, int clear)
{
    uint64_t v = qrx->bytes_received;

    if (clear)
        qrx->bytes_received = 0;

    return v;
}

int ossl_qrx_set_late_validation_cb(OSSL_QRX *qrx,
                                    ossl_qrx_late_validation_cb *cb,
                                    void *cb_arg)
{
    qrx->validation_cb       = cb;
    qrx->validation_cb_arg   = cb_arg;
    return 1;
}

int ossl_qrx_set_key_update_cb(OSSL_QRX *qrx,
                               ossl_qrx_key_update_cb *cb,
                               void *cb_arg)
{
    qrx->key_update_cb      = cb;
    qrx->key_update_cb_arg  = cb_arg;
    return 1;
}

uint64_t ossl_qrx_get_key_epoch(OSSL_QRX *qrx)
{
    OSSL_QRL_ENC_LEVEL *el = ossl_qrl_enc_level_set_get(&qrx->el_set,
                                                        QUIC_ENC_LEVEL_1RTT, 1);

    return el == NULL ? UINT64_MAX : el->key_epoch;
}

int ossl_qrx_key_update_timeout(OSSL_QRX *qrx, int normal)
{
    OSSL_QRL_ENC_LEVEL *el = ossl_qrl_enc_level_set_get(&qrx->el_set,
                                                        QUIC_ENC_LEVEL_1RTT, 1);

    if (el == NULL)
        return 0;

    if (el->state == QRL_EL_STATE_PROV_UPDATING
        && !ossl_qrl_enc_level_set_key_update_done(&qrx->el_set,
                                                   QUIC_ENC_LEVEL_1RTT))
        return 0;

    if (normal && el->state == QRL_EL_STATE_PROV_COOLDOWN
        && !ossl_qrl_enc_level_set_key_cooldown_done(&qrx->el_set,
                                                     QUIC_ENC_LEVEL_1RTT))
        return 0;

    return 1;
}

uint64_t ossl_qrx_get_cur_forged_pkt_count(OSSL_QRX *qrx)
{
    return qrx->forged_pkt_count;
}

uint64_t ossl_qrx_get_max_forged_pkt_count(OSSL_QRX *qrx,
                                           uint32_t enc_level)
{
    OSSL_QRL_ENC_LEVEL *el = ossl_qrl_enc_level_set_get(&qrx->el_set,
                                                        enc_level, 1);

    return el == NULL ? UINT64_MAX
        : ossl_qrl_get_suite_max_forged_pkt(el->suite_id);
}

void ossl_qrx_allow_1rtt_processing(OSSL_QRX *qrx)
{
    if (qrx->allow_1rtt)
        return;

    qrx->allow_1rtt = 1;
    qrx_requeue_deferred(qrx);
}

void ossl_qrx_set_msg_callback(OSSL_QRX *qrx, ossl_msg_cb msg_callback,
                               SSL *msg_callback_ssl)
{
    qrx->msg_callback = msg_callback;
    qrx->msg_callback_ssl = msg_callback_ssl;
}

void ossl_qrx_set_msg_callback_arg(OSSL_QRX *qrx, void *msg_callback_arg)
{
    qrx->msg_callback_arg = msg_callback_arg;
}