xref: /openssl/crypto/hashtable/hashtable.c (revision 8951ee06)
/*
 * Copyright 2024 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the Apache License 2.0 (the "License").  You may not use
 * this file except in compliance with the License.  You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 *
 *
 *
 * Notes on hash table design and layout
 * This hashtable uses a hopscotch algorithm to do indexing.  The data structure
 * looks as follows:
 *
 *   hash          +--------------+
 *   value+------->+ HT_VALUE     |
 *      +          +--------------+
 *  +-------+
 *  |       |
 *  +---------------------------------------------------------+
 *  |       |       |       |       |                         |
 *  | entry | entry | entry | entry |                         |
 *  |       |       |       |       |                         |
 *  +---------------------------------------------------------+
 *  |                               |                         |
 *  |                               |                         |
 *  +---------------------------------------------------------+
 *  |              +                             +            +
 *  |        neighborhood[0]               neighborhood[1]    |
 *  |                                                         |
 *  |                                                         |
 *  +---------------------------------------------------------+
 *                              |
 *                              +
 *                         neighborhoods
 *
 * On lookup/insert/delete, the item's key is hashed to a 64-bit value
 * and the result is masked to provide an index into the neighborhoods
 * table.  Once a neighborhood is determined, an in-order search of that
 * neighborhood's entries is done for a matching hash value; if one is found,
 * the corresponding HT_VALUE is used for the respective operation.  The
 * number of entries in a neighborhood is determined at build time based on
 * the cacheline size of the target CPU.  The intent is for all entries in a
 * neighborhood to fit into a single cache line to speed up lookups.  If all
 * entries in a neighborhood are in use at the time of an insert, the table
 * is expanded and rehashed.
 *
 * A hash table configured for lockless reads is based on the same design,
 * but does not allow growing or deletion.  Thus subsequent neighborhoods
 * are always searched for a match until an empty entry is found.
 */
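
/*
 * A rough usage sketch (illustrative only; HT_CONFIG, HT_KEY and HT_VALUE
 * are defined in internal/hashtable.h).  Callers create a table, take the
 * write lock around inserts/deletes and, when lockless reads are not
 * configured, take the read lock around lookups:
 *
 *     HT_CONFIG conf = { ... };       hash/free callbacks, initial size, etc
 *     HT *h = ossl_ht_new(&conf);
 *
 *     ossl_ht_write_lock(h);
 *     ossl_ht_insert(h, &key, &value, NULL);
 *     ossl_ht_write_unlock(h);
 *
 *     ossl_ht_read_lock(h);
 *     HT_VALUE *v = ossl_ht_get(h, &key);
 *     ossl_ht_read_unlock(h);
 *
 *     ossl_ht_free(h);
 */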

#include <string.h>
#include <internal/rcu.h>
#include <internal/hashtable.h>
#include <openssl/rand.h>

/*
 * gcc defines __SANITIZE_THREAD__,
 * but clang uses the feature attributes API;
 * map the latter to the former
 */
#if defined(__clang__) && defined(__has_feature)
# if __has_feature(thread_sanitizer)
#  define __SANITIZE_THREADS__
# endif
#endif

#ifdef __SANITIZE_THREADS__
# include <sanitizer/tsan_interface.h>
#endif

#include "internal/numbers.h"
/*
 * When we do a lookup/insert/delete, there is a high likelihood
 * that we will iterate over at least part of the neighborhood list.
 * As such, because we design a neighborhood to fit into a single
 * cache line, it is advantageous, when supported, to fetch the entire
 * structure for faster lookups
 */
#if defined(__GNUC__) || defined(__clang__)
# define PREFETCH_NEIGHBORHOOD(x) __builtin_prefetch(x.entries)
# define PREFETCH(x) __builtin_prefetch(x)
#else
# define PREFETCH_NEIGHBORHOOD(x)
# define PREFETCH(x)
#endif

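/*
 * Default hash function: 64-bit FNV-1a, using the standard offset basis and
 * prime.  Used by ossl_ht_new() when the caller does not supply an
 * ht_hash_fn of their own.
 */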
static ossl_unused uint64_t fnv1a_hash(uint8_t *key, size_t len)
{
    uint64_t hash = 0xcbf29ce484222325ULL;
    size_t i;

    for (i = 0; i < len; i++) {
        hash ^= key[i];
        hash *= 0x00000100000001B3ULL;
    }
    return hash;
}

/*
 * Define our neighborhood list length
 * Note: It should always be a power of 2
 */
#define DEFAULT_NEIGH_LEN_LOG 4
#define DEFAULT_NEIGH_LEN (1 << DEFAULT_NEIGH_LEN_LOG)

/*
 * For now assume cache line size is 64 bytes
 */
#define CACHE_LINE_BYTES 64
#define CACHE_LINE_ALIGNMENT CACHE_LINE_BYTES

#define NEIGHBORHOOD_LEN (CACHE_LINE_BYTES / sizeof(struct ht_neighborhood_entry_st))
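/*
 * With 64 byte cache lines and 16 byte entries (a 64-bit hash plus a value
 * pointer on typical 64-bit targets), NEIGHBORHOOD_LEN works out to 4
 */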
/*
 * Defines our chains of values
 */
struct ht_internal_value_st {
    HT_VALUE value;
    HT *ht;
};

struct ht_neighborhood_entry_st {
    uint64_t hash;
    struct ht_internal_value_st *value;
};

struct ht_neighborhood_st {
    struct ht_neighborhood_entry_st entries[NEIGHBORHOOD_LEN];
};

/*
 * Updates to data in this struct
 * require an rcu sync after modification
 * prior to free
 */
struct ht_mutable_data_st {
    struct ht_neighborhood_st *neighborhoods;
    void *neighborhood_ptr_to_free;
    uint64_t neighborhood_mask;
};

/*
 * Private data may be updated on the write
 * side only, and so do not require rcu sync
 */
struct ht_write_private_data_st {
    size_t neighborhood_len;
    size_t value_count;
    int need_sync;
};

struct ht_internal_st {
    HT_CONFIG config;
    CRYPTO_RCU_LOCK *lock;
    CRYPTO_RWLOCK *atomic_lock;
    struct ht_mutable_data_st *md;
    struct ht_write_private_data_st wpd;
};

static void free_value(struct ht_internal_value_st *v);

static struct ht_neighborhood_st *alloc_new_neighborhood_list(size_t len,
                                                              void **freeptr)
{
    struct ht_neighborhood_st *ret;

    ret = OPENSSL_aligned_alloc(sizeof(struct ht_neighborhood_st) * len,
                                CACHE_LINE_BYTES, freeptr);

    /* fall back to regular malloc */
    if (ret == NULL) {
        ret = *freeptr = OPENSSL_malloc(sizeof(struct ht_neighborhood_st) * len);
        if (ret == NULL)
            return NULL;
    }
    memset(ret, 0, sizeof(struct ht_neighborhood_st) * len);
    return ret;
}

static void internal_free_nop(HT_VALUE *v)
{
    return;
}

HT *ossl_ht_new(const HT_CONFIG *conf)
{
    HT *new = OPENSSL_zalloc(sizeof(*new));

    if (new == NULL)
        return NULL;

    new->atomic_lock = CRYPTO_THREAD_lock_new();
    if (new->atomic_lock == NULL)
        goto err;

    memcpy(&new->config, conf, sizeof(*conf));

    if (new->config.init_neighborhoods != 0) {
        new->wpd.neighborhood_len = new->config.init_neighborhoods;
        /* round up to the next power of 2 */
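        /* (e.g. 12 becomes 16, 1000 becomes 1024; an exact power of 2 is unchanged) */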
        new->wpd.neighborhood_len--;
        new->wpd.neighborhood_len |= new->wpd.neighborhood_len >> 1;
        new->wpd.neighborhood_len |= new->wpd.neighborhood_len >> 2;
        new->wpd.neighborhood_len |= new->wpd.neighborhood_len >> 4;
        new->wpd.neighborhood_len |= new->wpd.neighborhood_len >> 8;
        new->wpd.neighborhood_len |= new->wpd.neighborhood_len >> 16;
        new->wpd.neighborhood_len++;
    } else {
        new->wpd.neighborhood_len = DEFAULT_NEIGH_LEN;
    }

    if (new->config.ht_free_fn == NULL)
        new->config.ht_free_fn = internal_free_nop;

    new->md = OPENSSL_zalloc(sizeof(*new->md));
    if (new->md == NULL)
        goto err;

    new->md->neighborhoods =
        alloc_new_neighborhood_list(new->wpd.neighborhood_len,
                                    &new->md->neighborhood_ptr_to_free);
    if (new->md->neighborhoods == NULL)
        goto err;
    new->md->neighborhood_mask = new->wpd.neighborhood_len - 1;

    new->lock = ossl_rcu_lock_new(1, conf->ctx);
    if (new->lock == NULL)
        goto err;

    if (new->config.ht_hash_fn == NULL)
        new->config.ht_hash_fn = fnv1a_hash;

    return new;

err:
    CRYPTO_THREAD_lock_free(new->atomic_lock);
    ossl_rcu_lock_free(new->lock);
    if (new->md != NULL)
        OPENSSL_free(new->md->neighborhood_ptr_to_free);
    OPENSSL_free(new->md);
    OPENSSL_free(new);
    return NULL;
}

void ossl_ht_read_lock(HT *htable)
{
    ossl_rcu_read_lock(htable->lock);
}

void ossl_ht_read_unlock(HT *htable)
{
    ossl_rcu_read_unlock(htable->lock);
}

void ossl_ht_write_lock(HT *htable)
{
    ossl_rcu_write_lock(htable->lock);
    htable->wpd.need_sync = 0;
}

void ossl_ht_write_unlock(HT *htable)
{
    int need_sync = htable->wpd.need_sync;

    htable->wpd.need_sync = 0;
    ossl_rcu_write_unlock(htable->lock);
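    /* a deferred free was scheduled under this lock; wait out current readers */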
    if (need_sync)
        ossl_synchronize_rcu(htable->lock);
}

static void free_oldmd(void *arg)
{
    struct ht_mutable_data_st *oldmd = arg;
    size_t i, j;
    size_t neighborhood_len = (size_t)oldmd->neighborhood_mask + 1;
    struct ht_internal_value_st *v;

    for (i = 0; i < neighborhood_len; i++) {
        PREFETCH_NEIGHBORHOOD(oldmd->neighborhoods[i + 1]);
        for (j = 0; j < NEIGHBORHOOD_LEN; j++) {
            if (oldmd->neighborhoods[i].entries[j].value != NULL) {
                v = oldmd->neighborhoods[i].entries[j].value;
                v->ht->config.ht_free_fn((HT_VALUE *)v);
                free_value(v);
            }
        }
    }

    OPENSSL_free(oldmd->neighborhood_ptr_to_free);
    OPENSSL_free(oldmd);
}

static int ossl_ht_flush_internal(HT *h)
{
    struct ht_mutable_data_st *newmd = NULL;
    struct ht_mutable_data_st *oldmd = NULL;

    newmd = OPENSSL_zalloc(sizeof(*newmd));
    if (newmd == NULL)
        return 0;

    newmd->neighborhoods = alloc_new_neighborhood_list(DEFAULT_NEIGH_LEN,
                                                       &newmd->neighborhood_ptr_to_free);
    if (newmd->neighborhoods == NULL) {
        OPENSSL_free(newmd);
        return 0;
    }

    newmd->neighborhood_mask = DEFAULT_NEIGH_LEN - 1;

    /* Swap the old and new mutable data sets */
    oldmd = ossl_rcu_deref(&h->md);
    ossl_rcu_assign_ptr(&h->md, &newmd);

    /* Set the number of entries to 0 */
    h->wpd.value_count = 0;
    h->wpd.neighborhood_len = DEFAULT_NEIGH_LEN;

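    /*
     * Defer freeing the old table (and calling the free callbacks on its
     * values) until no readers can still be traversing it
     */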
    ossl_rcu_call(h->lock, free_oldmd, oldmd);
    h->wpd.need_sync = 1;
    return 1;
}

int ossl_ht_flush(HT *h)
{
    return ossl_ht_flush_internal(h);
}

void ossl_ht_free(HT *h)
{
    if (h == NULL)
        return;

    ossl_ht_write_lock(h);
    ossl_ht_flush_internal(h);
    ossl_ht_write_unlock(h);
    /* Freeing the lock does a final sync for us */
    CRYPTO_THREAD_lock_free(h->atomic_lock);
    ossl_rcu_lock_free(h->lock);
    OPENSSL_free(h->md->neighborhood_ptr_to_free);
    OPENSSL_free(h->md);
    OPENSSL_free(h);
    return;
}

size_t ossl_ht_count(HT *h)
{
    size_t count;

    count = h->wpd.value_count;
    return count;
}

void ossl_ht_foreach_until(HT *h, int (*cb)(HT_VALUE *obj, void *arg),
                           void *arg)
{
    size_t i, j;
    struct ht_mutable_data_st *md;

    md = ossl_rcu_deref(&h->md);
    for (i = 0; i < md->neighborhood_mask + 1; i++) {
        PREFETCH_NEIGHBORHOOD(md->neighborhoods[i + 1]);
        for (j = 0; j < NEIGHBORHOOD_LEN; j++) {
            if (md->neighborhoods[i].entries[j].value != NULL) {
                if (!cb((HT_VALUE *)md->neighborhoods[i].entries[j].value, arg))
                    goto out;
            }
        }
    }
out:
    return;
}

HT_VALUE_LIST *ossl_ht_filter(HT *h, size_t max_len,
                              int (*filter)(HT_VALUE *obj, void *arg),
                              void *arg)
{
    struct ht_mutable_data_st *md;
    HT_VALUE_LIST *list = OPENSSL_zalloc(sizeof(HT_VALUE_LIST)
                                         + (sizeof(HT_VALUE *) * max_len));
    size_t i, j;
    struct ht_internal_value_st *v;

    if (list == NULL)
        return NULL;

    /*
     * The list array lives just beyond the end of
     * the struct
     */
    list->list = (HT_VALUE **)(list + 1);

    md = ossl_rcu_deref(&h->md);
    for (i = 0; i < md->neighborhood_mask + 1; i++) {
        PREFETCH_NEIGHBORHOOD(md->neighborhoods[i + 1]);
        for (j = 0; j < NEIGHBORHOOD_LEN; j++) {
            v = md->neighborhoods[i].entries[j].value;
            if (v != NULL && filter((HT_VALUE *)v, arg)) {
                list->list[list->list_len++] = (HT_VALUE *)v;
                if (list->list_len == max_len)
                    goto out;
            }
        }
    }
out:
    return list;
}

void ossl_ht_value_list_free(HT_VALUE_LIST *list)
{
    OPENSSL_free(list);
}

static int compare_hash(uint64_t hash1, uint64_t hash2)
{
    return (hash1 == hash2);
}

static void free_old_neigh_table(void *arg)
{
    struct ht_mutable_data_st *oldmd = arg;

    OPENSSL_free(oldmd->neighborhood_ptr_to_free);
    OPENSSL_free(oldmd);
}

/*
 * Increase the size of the hash table bucket list
 * Must be called with the write lock held
 */
static int grow_hashtable(HT *h, size_t oldsize)
{
    struct ht_mutable_data_st *newmd;
    struct ht_mutable_data_st *oldmd = ossl_rcu_deref(&h->md);
    int rc = 0;
    uint64_t oldi, oldj, newi, newj;
    uint64_t oldhash;
    struct ht_internal_value_st *oldv;
    int rehashed;
    size_t newsize = oldsize * 2;

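    /* tables configured for lockless reads are never grown */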
    if (h->config.lockless_reads)
        goto out;

    if ((newmd = OPENSSL_zalloc(sizeof(*newmd))) == NULL)
        goto out;

    /* bucket list is always a power of 2 */
    newmd->neighborhoods = alloc_new_neighborhood_list(oldsize * 2,
                                                       &newmd->neighborhood_ptr_to_free);
    if (newmd->neighborhoods == NULL)
        goto out_free;

    /* being a power of 2 makes for easy mask computation */
    newmd->neighborhood_mask = (newsize - 1);

    /*
     * Now we need to start rehashing entries
     * Note we don't need to use atomics here as the new
     * mutable data hasn't been published
     */
    for (oldi = 0; oldi < h->wpd.neighborhood_len; oldi++) {
        PREFETCH_NEIGHBORHOOD(oldmd->neighborhoods[oldi + 1]);
        for (oldj = 0; oldj < NEIGHBORHOOD_LEN; oldj++) {
            oldv = oldmd->neighborhoods[oldi].entries[oldj].value;
            if (oldv == NULL)
                continue;
            oldhash = oldmd->neighborhoods[oldi].entries[oldj].hash;
            newi = oldhash & newmd->neighborhood_mask;
            rehashed = 0;
            for (newj = 0; newj < NEIGHBORHOOD_LEN; newj++) {
                if (newmd->neighborhoods[newi].entries[newj].value == NULL) {
                    newmd->neighborhoods[newi].entries[newj].value = oldv;
                    newmd->neighborhoods[newi].entries[newj].hash = oldhash;
                    rehashed = 1;
                    break;
                }
            }
            if (rehashed == 0) {
                /* we ran out of space in a neighborhood, grow again */
                OPENSSL_free(newmd->neighborhood_ptr_to_free);
                OPENSSL_free(newmd);
                return grow_hashtable(h, newsize);
            }
        }
    }
    /*
     * Now that our entries are all hashed into the new bucket list,
     * update our neighborhood_len
     */
    h->wpd.neighborhood_len = newsize;

    /*
     * Now we replace the old mutable data with the new
     */
    ossl_rcu_assign_ptr(&h->md, &newmd);
    ossl_rcu_call(h->lock, free_old_neigh_table, oldmd);
    h->wpd.need_sync = 1;
    /*
     * And we're done
     */
    rc = 1;

out:
    return rc;
out_free:
    OPENSSL_free(newmd->neighborhoods);
    OPENSSL_free(newmd);
    goto out;
}

static void free_old_ht_value(void *arg)
{
    HT_VALUE *h = (HT_VALUE *)arg;

    /*
     * Note, this is only called on replacement,
     * the caller is responsible for freeing the
     * held data, we just need to free the wrapping
     * struct here
     */
    OPENSSL_free(h);
}

static ossl_inline int match_key(HT_KEY *a, HT_KEY *b)
{
    /*
     * keys match if they are both present, the same size
     * and compare equal in memory
     */
    PREFETCH(a->keybuf);
    PREFETCH(b->keybuf);
    if (a->keybuf != NULL && b->keybuf != NULL && a->keysize == b->keysize)
        return !memcmp(a->keybuf, b->keybuf, a->keysize);

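    /*
     * If we cannot compare the key bytes (no key was stored, as when
     * collision checking is disabled), rely on the hash comparison the
     * caller has already done
     */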
    return 1;
}

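/*
 * Insert newval into the bucket list.  Returns 1 on a successful insert or
 * replacement, 0 on error (e.g. an attempted duplicate insert when no
 * replacement was requested), and -1 if the target neighborhood is full,
 * in which case the caller grows the table and tries again.
 */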
static int ossl_ht_insert_locked(HT *h, uint64_t hash,
                                 struct ht_internal_value_st *newval,
                                 HT_VALUE **olddata)
{
    struct ht_mutable_data_st *md = h->md;
    uint64_t neigh_idx_start = hash & md->neighborhood_mask;
    uint64_t neigh_idx = neigh_idx_start;
    size_t j;
    uint64_t ihash;
    HT_VALUE *ival;
    size_t empty_idx = SIZE_MAX;
    int lockless_reads = h->config.lockless_reads;

    do {
        PREFETCH_NEIGHBORHOOD(md->neighborhoods[neigh_idx]);

        for (j = 0; j < NEIGHBORHOOD_LEN; j++) {
            ival = ossl_rcu_deref(&md->neighborhoods[neigh_idx].entries[j].value);
            if (ival == NULL) {
                empty_idx = j;
                /* lockless_reads implies no deletion, we can break out */
                if (lockless_reads)
                    goto not_found;
                continue;
            }
            if (!CRYPTO_atomic_load(&md->neighborhoods[neigh_idx].entries[j].hash,
                                    &ihash, h->atomic_lock))
                return 0;
            if (compare_hash(hash, ihash) && match_key(&newval->value.key,
                                                       &ival->key)) {
                if (olddata == NULL) {
                    /* This would insert a duplicate -> fail */
                    return 0;
                }
                /* Do a replacement */
                if (!CRYPTO_atomic_store(&md->neighborhoods[neigh_idx].entries[j].hash,
                                         hash, h->atomic_lock))
                    return 0;
                *olddata = (HT_VALUE *)md->neighborhoods[neigh_idx].entries[j].value;
                ossl_rcu_assign_ptr(&md->neighborhoods[neigh_idx].entries[j].value,
                                    &newval);
                ossl_rcu_call(h->lock, free_old_ht_value, *olddata);
                h->wpd.need_sync = 1;
                return 1;
            }
        }
        if (!lockless_reads)
            break;
        /* Continue search in subsequent neighborhoods */
        neigh_idx = (neigh_idx + 1) & md->neighborhood_mask;
    } while (neigh_idx != neigh_idx_start);

 not_found:
    /* If we get to here, it's just an insert */
    if (empty_idx == SIZE_MAX)
        return -1; /* out of space */
    if (!CRYPTO_atomic_store(&md->neighborhoods[neigh_idx].entries[empty_idx].hash,
                             hash, h->atomic_lock))
        return 0;
    h->wpd.value_count++;
    ossl_rcu_assign_ptr(&md->neighborhoods[neigh_idx].entries[empty_idx].value,
                        &newval);
    return 1;
}

static struct ht_internal_value_st *alloc_new_value(HT *h, HT_KEY *key,
                                                    void *data,
                                                    uintptr_t *type)
{
    struct ht_internal_value_st *tmp;
    size_t nvsize = sizeof(*tmp);

    if (h->config.collision_check == 1)
        nvsize += key->keysize;

    tmp = OPENSSL_malloc(nvsize);

    if (tmp == NULL)
        return NULL;

    tmp->ht = h;
    tmp->value.value = data;
    tmp->value.type_id = type;
    tmp->value.key.keybuf = NULL;
    if (h->config.collision_check) {
        tmp->value.key.keybuf = (uint8_t *)(tmp + 1);
        tmp->value.key.keysize = key->keysize;
        memcpy(tmp->value.key.keybuf, key->keybuf, key->keysize);
    }

    return tmp;
}

static void free_value(struct ht_internal_value_st *v)
{
    OPENSSL_free(v);
}

int ossl_ht_insert(HT *h, HT_KEY *key, HT_VALUE *data, HT_VALUE **olddata)
{
    struct ht_internal_value_st *newval = NULL;
    uint64_t hash;
    int rc = 0;
    int i;

    if (data->value == NULL)
        goto out;

    newval = alloc_new_value(h, key, data->value, data->type_id);
    if (newval == NULL)
        goto out;

    /*
     * The write lock must be held here (by our caller) to prevent other
     * changes to the bucket list
     */
    hash = h->config.ht_hash_fn(key->keybuf, key->keysize);

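    /*
     * Attempt the insert; if the target neighborhood is full (rc == -1),
     * grow the table and retry, giving up after a few attempts
     */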
    for (i = 0;
         (rc = ossl_ht_insert_locked(h, hash, newval, olddata)) == -1
         && i < 4;
         ++i)
        if (!grow_hashtable(h, h->wpd.neighborhood_len)) {
            rc = -1;
            break;
        }

    if (rc <= 0)
        free_value(newval);

out:
    return rc;
}

HT_VALUE *ossl_ht_get(HT *h, HT_KEY *key)
{
    struct ht_mutable_data_st *md;
    uint64_t hash;
    uint64_t neigh_idx_start;
    uint64_t neigh_idx;
    struct ht_internal_value_st *ival = NULL;
    size_t j;
    uint64_t ehash;
    int lockless_reads = h->config.lockless_reads;

    hash = h->config.ht_hash_fn(key->keybuf, key->keysize);

    md = ossl_rcu_deref(&h->md);
    neigh_idx = neigh_idx_start = hash & md->neighborhood_mask;
    do {
        PREFETCH_NEIGHBORHOOD(md->neighborhoods[neigh_idx]);
        for (j = 0; j < NEIGHBORHOOD_LEN; j++) {
            ival = ossl_rcu_deref(&md->neighborhoods[neigh_idx].entries[j].value);
            if (ival == NULL) {
                if (lockless_reads)
                    /* lockless_reads implies no deletion, we can break out */
                    return NULL;
                continue;
            }
            if (!CRYPTO_atomic_load(&md->neighborhoods[neigh_idx].entries[j].hash,
                                    &ehash, h->atomic_lock))
                return NULL;
            if (compare_hash(hash, ehash) && match_key(&ival->value.key, key))
                return (HT_VALUE *)ival;
        }
        if (!lockless_reads)
            break;
        /* Continue search in subsequent neighborhoods */
        neigh_idx = (neigh_idx + 1) & md->neighborhood_mask;
    } while (neigh_idx != neigh_idx_start);

    return NULL;
}

static void free_old_entry(void *arg)
{
    struct ht_internal_value_st *v = arg;

    v->ht->config.ht_free_fn((HT_VALUE *)v);
    free_value(v);
}

int ossl_ht_delete(HT *h, HT_KEY *key)
{
    uint64_t hash;
    uint64_t neigh_idx;
    size_t j;
    struct ht_internal_value_st *v = NULL;
    HT_VALUE *nv = NULL;
    int rc = 0;

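    /* deletion is not permitted on tables configured for lockless reads */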
    if (h->config.lockless_reads)
        return 0;

    hash = h->config.ht_hash_fn(key->keybuf, key->keysize);

    neigh_idx = hash & h->md->neighborhood_mask;
    PREFETCH_NEIGHBORHOOD(h->md->neighborhoods[neigh_idx]);
    for (j = 0; j < NEIGHBORHOOD_LEN; j++) {
        v = (struct ht_internal_value_st *)h->md->neighborhoods[neigh_idx].entries[j].value;
        if (v == NULL)
            continue;
        if (compare_hash(hash, h->md->neighborhoods[neigh_idx].entries[j].hash)
            && match_key(key, &v->value.key)) {
            if (!CRYPTO_atomic_store(&h->md->neighborhoods[neigh_idx].entries[j].hash,
                                     0, h->atomic_lock))
                break;
            h->wpd.value_count--;
            ossl_rcu_assign_ptr(&h->md->neighborhoods[neigh_idx].entries[j].value,
                                &nv);
            rc = 1;
            break;
        }
    }
    if (rc == 1) {
        ossl_rcu_call(h->lock, free_old_entry, v);
        h->wpd.need_sync = 1;
    }
    return rc;
}
761