xref: /openssl/crypto/hashtable/hashtable.c (revision 8f250985)
/*
 * Copyright 2024 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the Apache License 2.0 (the "License").  You may not use
 * this file except in compliance with the License.  You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 *
 *
 *
 * Notes On hash table design and layout
 * This hashtable uses a hopscotch algorithm to do indexing.  The data structure
 * looks as follows:
 *
 *   hash          +--------------+
 *   value+------->+ HT_VALUE     |
 *      +          +--------------+
 *  +-------+
 *  |       |
 *  +---------------------------------------------------------+
 *  |       |       |       |       |                         |
 *  | entry | entry | entry | entry |                         |
 *  |       |       |       |       |                         |
 *  +---------------------------------------------------------+
 *  |                               |                         |
 *  |                               |                         |
 *  +---------------------------------------------------------+
 *  |              +                             +            +
 *  |        neighborhood[0]               neighborhood[1]    |
 *  |                                                         |
 *  |                                                         |
 *  +---------------------------------------------------------+
 *                                |
 *                                +
 *                           neighborhoods
 *
 * On lookup/insert/delete, the item's key is hashed to a 64 bit value
 * and the result is masked to provide an index into the neighborhoods
 * table.  Once a neighborhood is determined, an in-order search of that
 * neighborhood's entries is done for a matching hash value; if one is
 * found, the corresponding HT_VALUE is used for the respective
 * operation.  The number of entries in a neighborhood is determined at
 * build time based on the cache line size of the target CPU.  The intent
 * is for all entries in a neighborhood to fit into a single cache line
 * to speed up lookups.  If all entries in a neighborhood are in use at
 * the time of an insert, the table is expanded and rehashed.
 */
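
/*
 * For example (illustrative numbers only, not from the original file):
 * with 16 neighborhoods the mask is 0xf, so a key whose hash ends in
 * ...0x45 selects neighborhood 0x45 & 0xf = 0x5, and only the few
 * entries of that one neighborhood need to be scanned.
 */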

#include <string.h>
#include <internal/rcu.h>
#include <internal/hashtable.h>
#include <openssl/rand.h>

/*
 * gcc defines __SANITIZE_THREAD__,
 * but clang uses the feature attributes api;
 * map the latter to the former
 */
#if defined(__clang__) && defined(__has_feature)
# if __has_feature(thread_sanitizer)
#  define __SANITIZE_THREAD__
# endif
#endif

#ifdef __SANITIZE_THREAD__
# include <sanitizer/tsan_interface.h>
#endif

#include "internal/numbers.h"
/*
 * When we do a lookup/insert/delete, there is a high likelihood
 * that we will iterate over at least part of the neighborhood list.
 * As such, because we design a neighborhood to fit into a single
 * cache line, it is advantageous, when supported, to prefetch the
 * entire structure for faster lookups
 */
#if defined(__GNUC__) || defined(__clang__)
#define PREFETCH_NEIGHBORHOOD(x) __builtin_prefetch(x.entries)
#else
#define PREFETCH_NEIGHBORHOOD(x)
#endif

static ossl_unused uint64_t fnv1a_hash(uint8_t *key, size_t len)
{
    uint64_t hash = 0xcbf29ce484222325ULL;
    size_t i;

    for (i = 0; i < len; i++) {
        hash ^= key[i];
        hash *= 0x00000100000001B3ULL;
    }
    return hash;
}
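
/*
 * Illustrative sketch (not part of the original file): how a key is
 * hashed and then masked down to a neighborhood index, mirroring what
 * the lookup/insert/delete paths below do.  The mask value used here is
 * an arbitrary example; the real mask lives in ht_mutable_data_st and
 * always covers a power of 2 number of neighborhoods.
 */
static ossl_unused uint64_t fnv1a_hash_index_example(void)
{
    uint8_t key[] = { 'k', 'e', 'y', '1' };
    /* hash the raw key bytes to a 64 bit value */
    uint64_t hash = fnv1a_hash(key, sizeof(key));
    /* with 16 neighborhoods the mask keeps only the low 4 bits */
    uint64_t example_mask = (1 << 4) - 1;

    /* the masked hash selects which neighborhood to scan in-order */
    return hash & example_mask;
}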

/*
 * Define our neighborhood list length
 * Note: It should always be a power of 2
 */
#define DEFAULT_NEIGH_LEN_LOG 4
#define DEFAULT_NEIGH_LEN (1 << DEFAULT_NEIGH_LEN_LOG)

/*
 * For now assume cache line size is 64 bytes
 */
#define CACHE_LINE_BYTES 64
#define CACHE_LINE_ALIGNMENT CACHE_LINE_BYTES

#define NEIGHBORHOOD_LEN (CACHE_LINE_BYTES / sizeof(struct ht_neighborhood_entry_st))
/*
 * Defines our chains of values
 */
struct ht_internal_value_st {
    HT_VALUE value;
    HT *ht;
};

struct ht_neighborhood_entry_st {
    uint64_t hash;
    struct ht_internal_value_st *value;
};

struct ht_neighborhood_st {
    struct ht_neighborhood_entry_st entries[NEIGHBORHOOD_LEN];
};
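
/*
 * Sizing note (illustrative, not from the original file): on a typical
 * LP64 target an ht_neighborhood_entry_st is an 8 byte hash plus an
 * 8 byte pointer, i.e. 16 bytes, so NEIGHBORHOOD_LEN works out to
 * 64 / 16 = 4 entries and a whole neighborhood occupies exactly one
 * assumed 64 byte cache line.
 */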

/*
 * Updates to data in this struct
 * require an rcu sync after modification
 * prior to free
 */
struct ht_mutable_data_st {
    struct ht_neighborhood_st *neighborhoods;
    void *neighborhood_ptr_to_free;
    uint64_t neighborhood_mask;
};

/*
 * Private data may be updated on the write
 * side only, and so does not require rcu sync
 */
struct ht_write_private_data_st {
    size_t neighborhood_len;
    size_t value_count;
    int need_sync;
};

struct ht_internal_st {
    HT_CONFIG config;
    CRYPTO_RCU_LOCK *lock;
    CRYPTO_RWLOCK *atomic_lock;
    struct ht_mutable_data_st *md;
    struct ht_write_private_data_st wpd;
};

static void free_value(struct ht_internal_value_st *v);

static struct ht_neighborhood_st *alloc_new_neighborhood_list(size_t len,
                                                              void **freeptr)
{
    struct ht_neighborhood_st *ret;

    ret = OPENSSL_aligned_alloc(sizeof(struct ht_neighborhood_st) * len,
                                CACHE_LINE_BYTES, freeptr);

    /* fall back to regular malloc */
    if (ret == NULL) {
        ret = *freeptr = OPENSSL_malloc(sizeof(struct ht_neighborhood_st) * len);
        if (ret == NULL)
            return NULL;
    }
    memset(ret, 0, sizeof(struct ht_neighborhood_st) * len);
    return ret;
}

static void internal_free_nop(HT_VALUE *v)
{
    return;
}

HT *ossl_ht_new(HT_CONFIG *conf)
{
    HT *new = OPENSSL_zalloc(sizeof(*new));

    if (new == NULL)
        return NULL;

    new->atomic_lock = CRYPTO_THREAD_lock_new();
    if (new->atomic_lock == NULL)
        goto err;

    memcpy(&new->config, conf, sizeof(*conf));

    if (new->config.init_neighborhoods != 0) {
        new->wpd.neighborhood_len = new->config.init_neighborhoods;
        /* round up to the next power of 2 */
        new->wpd.neighborhood_len--;
        new->wpd.neighborhood_len |= new->wpd.neighborhood_len >> 1;
        new->wpd.neighborhood_len |= new->wpd.neighborhood_len >> 2;
        new->wpd.neighborhood_len |= new->wpd.neighborhood_len >> 4;
        new->wpd.neighborhood_len |= new->wpd.neighborhood_len >> 8;
        new->wpd.neighborhood_len |= new->wpd.neighborhood_len >> 16;
        new->wpd.neighborhood_len++;
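        /*
         * Worked example (illustrative): init_neighborhoods = 20 gives
         * 20 - 1 = 19 (0b10011); OR-ing in the right-shifted copies
         * smears the top set bit downward, giving 0b11111 (31); the
         * final increment yields 32, the next power of 2.
         */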
    } else {
        new->wpd.neighborhood_len = DEFAULT_NEIGH_LEN;
    }

    if (new->config.ht_free_fn == NULL)
        new->config.ht_free_fn = internal_free_nop;

    new->md = OPENSSL_zalloc(sizeof(*new->md));
    if (new->md == NULL)
        goto err;

    new->md->neighborhoods =
        alloc_new_neighborhood_list(new->wpd.neighborhood_len,
                                    &new->md->neighborhood_ptr_to_free);
    if (new->md->neighborhoods == NULL)
        goto err;
    new->md->neighborhood_mask = new->wpd.neighborhood_len - 1;

    new->lock = ossl_rcu_lock_new(1, conf->ctx);
    if (new->lock == NULL)
        goto err;

    if (new->config.ht_hash_fn == NULL)
        new->config.ht_hash_fn = fnv1a_hash;

    return new;

err:
    CRYPTO_THREAD_lock_free(new->atomic_lock);
    ossl_rcu_lock_free(new->lock);
    if (new->md != NULL)
        OPENSSL_free(new->md->neighborhood_ptr_to_free);
    OPENSSL_free(new->md);
    OPENSSL_free(new);
    return NULL;
}

void ossl_ht_read_lock(HT *htable)
{
    ossl_rcu_read_lock(htable->lock);
}

void ossl_ht_read_unlock(HT *htable)
{
    ossl_rcu_read_unlock(htable->lock);
}

void ossl_ht_write_lock(HT *htable)
{
    ossl_rcu_write_lock(htable->lock);
    htable->wpd.need_sync = 0;
}

void ossl_ht_write_unlock(HT *htable)
{
    int need_sync = htable->wpd.need_sync;

    htable->wpd.need_sync = 0;
    ossl_rcu_write_unlock(htable->lock);
    if (need_sync)
        ossl_synchronize_rcu(htable->lock);
}
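
/*
 * Write-side usage sketch (illustrative, not part of the original
 * file): mutating operations below defer frees via ossl_rcu_call() and
 * set wpd.need_sync, so a single RCU synchronization happens when the
 * write lock is released, e.g.:
 *
 *     ossl_ht_write_lock(h);
 *     ossl_ht_delete(h, &key);     deferred free queued, need_sync set
 *     ossl_ht_write_unlock(h);     runs ossl_synchronize_rcu()
 */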

static void free_oldmd(void *arg)
{
    struct ht_mutable_data_st *oldmd = arg;
    size_t i, j;
    size_t neighborhood_len = (size_t)oldmd->neighborhood_mask + 1;
    struct ht_internal_value_st *v;

    for (i = 0; i < neighborhood_len; i++) {
        PREFETCH_NEIGHBORHOOD(oldmd->neighborhoods[i + 1]);
        for (j = 0; j < NEIGHBORHOOD_LEN; j++) {
            if (oldmd->neighborhoods[i].entries[j].value != NULL) {
                v = oldmd->neighborhoods[i].entries[j].value;
                v->ht->config.ht_free_fn((HT_VALUE *)v);
                free_value(v);
            }
        }
    }

    OPENSSL_free(oldmd->neighborhood_ptr_to_free);
    OPENSSL_free(oldmd);
}

static int ossl_ht_flush_internal(HT *h)
{
    struct ht_mutable_data_st *newmd = NULL;
    struct ht_mutable_data_st *oldmd = NULL;

    newmd = OPENSSL_zalloc(sizeof(*newmd));
    if (newmd == NULL)
        return 0;

    newmd->neighborhoods = alloc_new_neighborhood_list(DEFAULT_NEIGH_LEN,
                                                       &newmd->neighborhood_ptr_to_free);
    if (newmd->neighborhoods == NULL) {
        OPENSSL_free(newmd);
        return 0;
    }

    newmd->neighborhood_mask = DEFAULT_NEIGH_LEN - 1;

    /* Swap the old and new mutable data sets */
    oldmd = ossl_rcu_deref(&h->md);
    ossl_rcu_assign_ptr(&h->md, &newmd);

    /* Set the number of entries to 0 */
    h->wpd.value_count = 0;
    h->wpd.neighborhood_len = DEFAULT_NEIGH_LEN;

    ossl_rcu_call(h->lock, free_oldmd, oldmd);
    h->wpd.need_sync = 1;
    return 1;
}

int ossl_ht_flush(HT *h)
{
    return ossl_ht_flush_internal(h);
}

void ossl_ht_free(HT *h)
{
    if (h == NULL)
        return;

    ossl_ht_write_lock(h);
    ossl_ht_flush_internal(h);
    ossl_ht_write_unlock(h);
    /* Freeing the lock does a final sync for us */
    CRYPTO_THREAD_lock_free(h->atomic_lock);
    ossl_rcu_lock_free(h->lock);
    OPENSSL_free(h->md->neighborhood_ptr_to_free);
    OPENSSL_free(h->md);
    OPENSSL_free(h);
    return;
}

size_t ossl_ht_count(HT *h)
{
    size_t count;

    count = h->wpd.value_count;
    return count;
}

void ossl_ht_foreach_until(HT *h, int (*cb)(HT_VALUE *obj, void *arg),
                           void *arg)
{
    size_t i, j;
    struct ht_mutable_data_st *md;

    md = ossl_rcu_deref(&h->md);
    for (i = 0; i < md->neighborhood_mask + 1; i++) {
        PREFETCH_NEIGHBORHOOD(md->neighborhoods[i + 1]);
        for (j = 0; j < NEIGHBORHOOD_LEN; j++) {
            if (md->neighborhoods[i].entries[j].value != NULL) {
                if (!cb((HT_VALUE *)md->neighborhoods[i].entries[j].value, arg))
                    goto out;
            }
        }
    }
out:
    return;
}

HT_VALUE_LIST *ossl_ht_filter(HT *h, size_t max_len,
                              int (*filter)(HT_VALUE *obj, void *arg),
                              void *arg)
{
    struct ht_mutable_data_st *md;
    HT_VALUE_LIST *list = OPENSSL_zalloc(sizeof(HT_VALUE_LIST)
                                         + (sizeof(HT_VALUE *) * max_len));
    size_t i, j;
    struct ht_internal_value_st *v;

    if (list == NULL)
        return NULL;

    /*
     * The list array lives just beyond the end of
     * the struct
     */
    list->list = (HT_VALUE **)(list + 1);

    md = ossl_rcu_deref(&h->md);
    for (i = 0; i < md->neighborhood_mask + 1; i++) {
        PREFETCH_NEIGHBORHOOD(md->neighborhoods[i + 1]);
        for (j = 0; j < NEIGHBORHOOD_LEN; j++) {
            v = md->neighborhoods[i].entries[j].value;
            if (v != NULL && filter((HT_VALUE *)v, arg)) {
                list->list[list->list_len++] = (HT_VALUE *)v;
                if (list->list_len == max_len)
                    goto out;
            }
        }
    }
out:
    return list;
}
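
/*
 * Usage sketch (illustrative, not part of the original file): collect
 * up to 32 values that a caller-supplied predicate accepts, then
 * release the result list.  select_all() and filter_usage_sketch() are
 * hypothetical helpers added purely for the example.
 */
static ossl_unused int select_all(HT_VALUE *obj, void *arg)
{
    /* accept every value the table passes to us */
    return 1;
}

static ossl_unused void filter_usage_sketch(HT *h)
{
    HT_VALUE_LIST *list;
    size_t i;

    ossl_ht_read_lock(h);
    list = ossl_ht_filter(h, 32, select_all, NULL);
    if (list != NULL) {
        for (i = 0; i < list->list_len; i++) {
            /* list->list[i] points at an HT_VALUE owned by the table */
        }
        ossl_ht_value_list_free(list);
    }
    ossl_ht_read_unlock(h);
}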

void ossl_ht_value_list_free(HT_VALUE_LIST *list)
{
    OPENSSL_free(list);
}

static int compare_hash(uint64_t hash1, uint64_t hash2)
{
    return (hash1 == hash2);
}

static void free_old_neigh_table(void *arg)
{
    struct ht_mutable_data_st *oldmd = arg;

    OPENSSL_free(oldmd->neighborhood_ptr_to_free);
    OPENSSL_free(oldmd);
}

/*
 * Increase the hash table bucket list.
 * Must be called with the write_lock held
 */
static int grow_hashtable(HT *h, size_t oldsize)
{
    struct ht_mutable_data_st *newmd = OPENSSL_zalloc(sizeof(*newmd));
    struct ht_mutable_data_st *oldmd = ossl_rcu_deref(&h->md);
    int rc = 0;
    uint64_t oldi, oldj, newi, newj;
    uint64_t oldhash;
    struct ht_internal_value_st *oldv;
    int rehashed;
    size_t newsize = oldsize * 2;

    if (newmd == NULL)
        goto out;

    /* bucket list is always a power of 2 */
    newmd->neighborhoods = alloc_new_neighborhood_list(oldsize * 2,
                                                       &newmd->neighborhood_ptr_to_free);
    if (newmd->neighborhoods == NULL)
        goto out_free;

    /* being a power of 2 makes for easy mask computation */
    newmd->neighborhood_mask = (newsize - 1);

    /*
     * Now we need to start rehashing entries
     * Note we don't need to use atomics here as the new
     * mutable data hasn't been published
     */
    for (oldi = 0; oldi < h->wpd.neighborhood_len; oldi++) {
        PREFETCH_NEIGHBORHOOD(oldmd->neighborhoods[oldi + 1]);
        for (oldj = 0; oldj < NEIGHBORHOOD_LEN; oldj++) {
            oldv = oldmd->neighborhoods[oldi].entries[oldj].value;
            if (oldv == NULL)
                continue;
            oldhash = oldmd->neighborhoods[oldi].entries[oldj].hash;
            newi = oldhash & newmd->neighborhood_mask;
            rehashed = 0;
            for (newj = 0; newj < NEIGHBORHOOD_LEN; newj++) {
                if (newmd->neighborhoods[newi].entries[newj].value == NULL) {
                    newmd->neighborhoods[newi].entries[newj].value = oldv;
                    newmd->neighborhoods[newi].entries[newj].hash = oldhash;
                    rehashed = 1;
                    break;
                }
            }
            if (rehashed == 0) {
                /* we ran out of space in a neighborhood, grow again */
                OPENSSL_free(newmd->neighborhood_ptr_to_free);
                OPENSSL_free(newmd);
                return grow_hashtable(h, newsize);
            }
        }
    }
    /*
     * Now that our entries are all hashed into the new bucket list
     * update our neighborhood_len
     */
    h->wpd.neighborhood_len = newsize;

    /*
     * Now we replace the old mutable data with the new
     */
    oldmd = ossl_rcu_deref(&h->md);
    ossl_rcu_assign_ptr(&h->md, &newmd);
    ossl_rcu_call(h->lock, free_old_neigh_table, oldmd);
    h->wpd.need_sync = 1;
    /*
     * And we're done
     */
    rc = 1;

out:
    return rc;
out_free:
    OPENSSL_free(newmd->neighborhood_ptr_to_free);
    OPENSSL_free(newmd);
    goto out;
}

static void free_old_ht_value(void *arg)
{
    HT_VALUE *h = (HT_VALUE *)arg;

    /*
     * Note: this is only called on replacement;
     * the caller is responsible for freeing the
     * held data, we just need to free the wrapping
     * struct here
     */
    OPENSSL_free(h);
}

static int ossl_ht_insert_locked(HT *h, uint64_t hash,
                                 struct ht_internal_value_st *newval,
                                 HT_VALUE **olddata)
{
    struct ht_mutable_data_st *md = h->md;
    uint64_t neigh_idx = hash & md->neighborhood_mask;
    size_t j;
    uint64_t ihash;
    HT_VALUE *ival;
    size_t empty_idx = SIZE_MAX;

    PREFETCH_NEIGHBORHOOD(md->neighborhoods[neigh_idx]);

    for (j = 0; j < NEIGHBORHOOD_LEN; j++) {
        ival = ossl_rcu_deref(&md->neighborhoods[neigh_idx].entries[j].value);
        CRYPTO_atomic_load(&md->neighborhoods[neigh_idx].entries[j].hash,
                           &ihash, h->atomic_lock);
        if (ival == NULL)
            empty_idx = j;
        if (compare_hash(hash, ihash)) {
            if (olddata == NULL) {
                /* invalid */
                return 0;
            }
            /* Do a replacement */
            CRYPTO_atomic_store(&md->neighborhoods[neigh_idx].entries[j].hash,
                                hash, h->atomic_lock);
            *olddata = (HT_VALUE *)md->neighborhoods[neigh_idx].entries[j].value;
            ossl_rcu_assign_ptr(&md->neighborhoods[neigh_idx].entries[j].value,
                                &newval);
            ossl_rcu_call(h->lock, free_old_ht_value, *olddata);
            h->wpd.need_sync = 1;
            return 1;
        }
    }
    /* If we get to here, it's just an insert */
    if (empty_idx == SIZE_MAX)
        return -1; /* out of space */
    h->wpd.value_count++;
    CRYPTO_atomic_store(&md->neighborhoods[neigh_idx].entries[empty_idx].hash,
                        hash, h->atomic_lock);
    ossl_rcu_assign_ptr(&md->neighborhoods[neigh_idx].entries[empty_idx].value,
                        &newval);
    return 1;
}

static struct ht_internal_value_st *alloc_new_value(HT *h, HT_KEY *key,
                                                    void *data,
                                                    uintptr_t *type)
{
    struct ht_internal_value_st *new;
    struct ht_internal_value_st *tmp;

    new = OPENSSL_malloc(sizeof(*new));

    if (new == NULL)
        return NULL;

    tmp = (struct ht_internal_value_st *)ossl_rcu_deref(&new);
    tmp->ht = h;
    tmp->value.value = data;
    tmp->value.type_id = type;

    return tmp;
}

static void free_value(struct ht_internal_value_st *v)
{
    OPENSSL_free(v);
}

int ossl_ht_insert(HT *h, HT_KEY *key, HT_VALUE *data, HT_VALUE **olddata)
{
    struct ht_internal_value_st *newval = NULL;
    uint64_t hash;
    int rc = 0;

    if (data->value == NULL)
        goto out;

    newval = alloc_new_value(h, key, data->value, data->type_id);
    if (newval == NULL)
        goto out;

    /*
     * The write lock must be held here to prevent other
     * changes to the bucket list
     */
    hash = h->config.ht_hash_fn(key->keybuf, key->keysize);

try_again:
    rc = ossl_ht_insert_locked(h, hash, newval, olddata);

    if (rc == -1) {
        grow_hashtable(h, h->wpd.neighborhood_len);
        goto try_again;
    }

    if (rc == 0)
        free_value(newval);

out:
    return rc;
}

HT_VALUE *ossl_ht_get(HT *h, HT_KEY *key)
{
    struct ht_mutable_data_st *md;
    uint64_t hash;
    uint64_t neigh_idx;
    struct ht_internal_value_st *vidx = NULL;
    size_t j;
    uint64_t ehash;
    HT_VALUE *ret = NULL;

    hash = h->config.ht_hash_fn(key->keybuf, key->keysize);

    md = ossl_rcu_deref(&h->md);
    neigh_idx = hash & md->neighborhood_mask;
    PREFETCH_NEIGHBORHOOD(md->neighborhoods[neigh_idx]);
    for (j = 0; j < NEIGHBORHOOD_LEN; j++) {
        CRYPTO_atomic_load(&md->neighborhoods[neigh_idx].entries[j].hash,
                           &ehash, h->atomic_lock);
        if (compare_hash(hash, ehash)) {
            CRYPTO_atomic_load((uint64_t *)&md->neighborhoods[neigh_idx].entries[j].value,
                               (uint64_t *)&vidx, h->atomic_lock);
            ret = (HT_VALUE *)vidx;
            break;
        }
    }

    return ret;
}
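
/*
 * Lookup usage sketch (illustrative, not part of the original file):
 * the caller is assumed to have prepared an HT_KEY (normally via the
 * key handling macros in internal/hashtable.h).  Reads are wrapped in
 * the RCU read lock, and the returned HT_VALUE may only be
 * dereferenced while that lock is held.
 */
static ossl_unused int lookup_usage_sketch(HT *h, HT_KEY *key)
{
    HT_VALUE *v;
    int found = 0;

    ossl_ht_read_lock(h);
    v = ossl_ht_get(h, key);
    if (v != NULL) {
        /* v->value and v->type_id are only stable under the read lock */
        found = 1;
    }
    ossl_ht_read_unlock(h);
    return found;
}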

static void free_old_entry(void *arg)
{
    struct ht_internal_value_st *v = arg;

    v->ht->config.ht_free_fn((HT_VALUE *)v);
    free_value(v);
}

int ossl_ht_delete(HT *h, HT_KEY *key)
{
    uint64_t hash;
    uint64_t neigh_idx;
    size_t j;
    struct ht_internal_value_st *v = NULL;
    HT_VALUE *nv = NULL;
    int rc = 0;

    hash = h->config.ht_hash_fn(key->keybuf, key->keysize);

    neigh_idx = hash & h->md->neighborhood_mask;
    PREFETCH_NEIGHBORHOOD(h->md->neighborhoods[neigh_idx]);
    for (j = 0; j < NEIGHBORHOOD_LEN; j++) {
        if (compare_hash(hash, h->md->neighborhoods[neigh_idx].entries[j].hash)) {
            h->wpd.value_count--;
            CRYPTO_atomic_store(&h->md->neighborhoods[neigh_idx].entries[j].hash,
                                0, h->atomic_lock);
            v = (struct ht_internal_value_st *)h->md->neighborhoods[neigh_idx].entries[j].value;
            ossl_rcu_assign_ptr(&h->md->neighborhoods[neigh_idx].entries[j].value,
                                &nv);
            rc = 1;
            break;
        }
    }
    if (rc == 1) {
        ossl_rcu_call(h->lock, free_old_entry, v);
        h->wpd.need_sync = 1;
    }
    return rc;
}